summaryrefslogtreecommitdiffstats
path: root/src/lib/Bcfg2/Server
diff options
context:
space:
mode:
Diffstat (limited to 'src/lib/Bcfg2/Server')
-rw-r--r--src/lib/Bcfg2/Server/Admin.py1206
-rw-r--r--src/lib/Bcfg2/Server/Admin/Backup.py22
-rw-r--r--src/lib/Bcfg2/Server/Admin/Client.py50
-rw-r--r--src/lib/Bcfg2/Server/Admin/Compare.py147
-rw-r--r--src/lib/Bcfg2/Server/Admin/Init.py349
-rw-r--r--src/lib/Bcfg2/Server/Admin/Minestruct.py56
-rw-r--r--src/lib/Bcfg2/Server/Admin/Perf.py37
-rw-r--r--src/lib/Bcfg2/Server/Admin/Pull.py147
-rw-r--r--src/lib/Bcfg2/Server/Admin/Reports.py262
-rw-r--r--src/lib/Bcfg2/Server/Admin/Snapshots.py162
-rw-r--r--src/lib/Bcfg2/Server/Admin/Syncdb.py29
-rw-r--r--src/lib/Bcfg2/Server/Admin/Viz.py113
-rw-r--r--src/lib/Bcfg2/Server/Admin/Xcmd.py44
-rw-r--r--src/lib/Bcfg2/Server/Admin/__init__.py142
-rw-r--r--src/lib/Bcfg2/Server/BuiltinCore.py50
-rw-r--r--src/lib/Bcfg2/Server/Cache.py180
-rw-r--r--src/lib/Bcfg2/Server/CherrypyCore.py (renamed from src/lib/Bcfg2/Server/CherryPyCore.py)53
-rw-r--r--src/lib/Bcfg2/Server/Core.py623
-rwxr-xr-xsrc/lib/Bcfg2/Server/Encryption.py681
-rw-r--r--src/lib/Bcfg2/Server/FileMonitor/Fam.py105
-rw-r--r--src/lib/Bcfg2/Server/FileMonitor/Gamin.py15
-rw-r--r--src/lib/Bcfg2/Server/FileMonitor/Inotify.py6
-rw-r--r--src/lib/Bcfg2/Server/FileMonitor/Pseudo.py3
-rw-r--r--src/lib/Bcfg2/Server/FileMonitor/__init__.py61
-rw-r--r--src/lib/Bcfg2/Server/Hostbase/.gitignore3
-rw-r--r--src/lib/Bcfg2/Server/Hostbase/__init__.py0
-rw-r--r--src/lib/Bcfg2/Server/Hostbase/backends.py63
-rw-r--r--src/lib/Bcfg2/Server/Hostbase/hostbase/__init__.py0
-rw-r--r--src/lib/Bcfg2/Server/Hostbase/hostbase/admin.py15
-rw-r--r--src/lib/Bcfg2/Server/Hostbase/hostbase/models.py210
-rw-r--r--src/lib/Bcfg2/Server/Hostbase/hostbase/sql/zone.sql2
-rw-r--r--src/lib/Bcfg2/Server/Hostbase/hostbase/urls.py68
-rw-r--r--src/lib/Bcfg2/Server/Hostbase/hostbase/views.py970
-rw-r--r--src/lib/Bcfg2/Server/Hostbase/hostbase/webtemplates/base.html34
-rw-r--r--src/lib/Bcfg2/Server/Hostbase/hostbase/webtemplates/confirm.html117
-rw-r--r--src/lib/Bcfg2/Server/Hostbase/hostbase/webtemplates/copy.html122
-rw-r--r--src/lib/Bcfg2/Server/Hostbase/hostbase/webtemplates/dns.html40
-rw-r--r--src/lib/Bcfg2/Server/Hostbase/hostbase/webtemplates/dnsedit.html98
-rw-r--r--src/lib/Bcfg2/Server/Hostbase/hostbase/webtemplates/edit.html191
-rw-r--r--src/lib/Bcfg2/Server/Hostbase/hostbase/webtemplates/errors.html31
-rw-r--r--src/lib/Bcfg2/Server/Hostbase/hostbase/webtemplates/host.html80
-rw-r--r--src/lib/Bcfg2/Server/Hostbase/hostbase/webtemplates/hostbase/host_confirm_delete.html89
-rw-r--r--src/lib/Bcfg2/Server/Hostbase/hostbase/webtemplates/hostbase/log_detail.html23
-rw-r--r--src/lib/Bcfg2/Server/Hostbase/hostbase/webtemplates/index.html16
-rw-r--r--src/lib/Bcfg2/Server/Hostbase/hostbase/webtemplates/login.html37
-rw-r--r--src/lib/Bcfg2/Server/Hostbase/hostbase/webtemplates/logout.html13
-rw-r--r--src/lib/Bcfg2/Server/Hostbase/hostbase/webtemplates/logout.tmpl6
-rw-r--r--src/lib/Bcfg2/Server/Hostbase/hostbase/webtemplates/logviewer.html27
-rw-r--r--src/lib/Bcfg2/Server/Hostbase/hostbase/webtemplates/navbar.tmpl5
-rw-r--r--src/lib/Bcfg2/Server/Hostbase/hostbase/webtemplates/new.html102
-rw-r--r--src/lib/Bcfg2/Server/Hostbase/hostbase/webtemplates/remove.html89
-rw-r--r--src/lib/Bcfg2/Server/Hostbase/hostbase/webtemplates/results.html45
-rw-r--r--src/lib/Bcfg2/Server/Hostbase/hostbase/webtemplates/search.html57
-rw-r--r--src/lib/Bcfg2/Server/Hostbase/hostbase/webtemplates/zoneedit.html81
-rw-r--r--src/lib/Bcfg2/Server/Hostbase/hostbase/webtemplates/zonenew.html43
-rw-r--r--src/lib/Bcfg2/Server/Hostbase/hostbase/webtemplates/zones.html37
-rw-r--r--src/lib/Bcfg2/Server/Hostbase/hostbase/webtemplates/zoneview.html71
-rw-r--r--src/lib/Bcfg2/Server/Hostbase/ldapauth.py179
-rwxr-xr-xsrc/lib/Bcfg2/Server/Hostbase/manage.py11
-rw-r--r--src/lib/Bcfg2/Server/Hostbase/media/base.css5
-rw-r--r--src/lib/Bcfg2/Server/Hostbase/media/boxypastel.css179
-rw-r--r--src/lib/Bcfg2/Server/Hostbase/media/global.css8
-rw-r--r--src/lib/Bcfg2/Server/Hostbase/media/layout.css62
-rw-r--r--src/lib/Bcfg2/Server/Hostbase/nisauth.py40
-rw-r--r--src/lib/Bcfg2/Server/Hostbase/regex.py6
-rw-r--r--src/lib/Bcfg2/Server/Hostbase/settings.py143
-rw-r--r--src/lib/Bcfg2/Server/Hostbase/templates/batchadd.tmpl29
-rw-r--r--src/lib/Bcfg2/Server/Hostbase/templates/dhcpd.conf.head5
-rw-r--r--src/lib/Bcfg2/Server/Hostbase/templates/dhcpd.tmpl17
-rw-r--r--src/lib/Bcfg2/Server/Hostbase/templates/hosts.tmpl26
-rw-r--r--src/lib/Bcfg2/Server/Hostbase/templates/hostsappend.tmpl5
-rw-r--r--src/lib/Bcfg2/Server/Hostbase/templates/named.tmpl69
-rw-r--r--src/lib/Bcfg2/Server/Hostbase/templates/namedviews.tmpl92
-rw-r--r--src/lib/Bcfg2/Server/Hostbase/templates/reverseappend.tmpl4
-rw-r--r--src/lib/Bcfg2/Server/Hostbase/templates/reversesoa.tmpl13
-rw-r--r--src/lib/Bcfg2/Server/Hostbase/templates/zone.tmpl18
-rw-r--r--src/lib/Bcfg2/Server/Hostbase/urls.py27
-rw-r--r--src/lib/Bcfg2/Server/Info.py879
-rw-r--r--src/lib/Bcfg2/Server/Lint/AWSTags.py33
-rw-r--r--src/lib/Bcfg2/Server/Lint/Bundler.py59
-rw-r--r--src/lib/Bcfg2/Server/Lint/Cfg.py118
-rw-r--r--src/lib/Bcfg2/Server/Lint/Comments.py116
-rw-r--r--src/lib/Bcfg2/Server/Lint/Crypto.py61
-rw-r--r--[-rwxr-xr-x]src/lib/Bcfg2/Server/Lint/Genshi.py62
-rw-r--r--src/lib/Bcfg2/Server/Lint/GroupNames.py14
-rw-r--r--src/lib/Bcfg2/Server/Lint/GroupPatterns.py44
-rw-r--r--src/lib/Bcfg2/Server/Lint/InfoXML.py30
-rw-r--r--src/lib/Bcfg2/Server/Lint/Jinja2.py41
-rw-r--r--src/lib/Bcfg2/Server/Lint/MergeFiles.py103
-rw-r--r--src/lib/Bcfg2/Server/Lint/Metadata.py172
-rw-r--r--src/lib/Bcfg2/Server/Lint/Pkgmgr.py50
-rw-r--r--src/lib/Bcfg2/Server/Lint/RequiredAttrs.py44
-rw-r--r--src/lib/Bcfg2/Server/Lint/TemplateAbuse.py17
-rw-r--r--src/lib/Bcfg2/Server/Lint/TemplateHelper.py97
-rw-r--r--src/lib/Bcfg2/Server/Lint/Validate.py44
-rw-r--r--src/lib/Bcfg2/Server/Lint/ValidateJSON.py6
-rw-r--r--src/lib/Bcfg2/Server/Lint/__init__.py225
-rw-r--r--src/lib/Bcfg2/Server/MultiprocessingCore.py114
-rw-r--r--src/lib/Bcfg2/Server/Plugin/__init__.py32
-rw-r--r--src/lib/Bcfg2/Server/Plugin/base.py66
-rw-r--r--src/lib/Bcfg2/Server/Plugin/helpers.py866
-rw-r--r--src/lib/Bcfg2/Server/Plugin/interfaces.py106
-rw-r--r--src/lib/Bcfg2/Server/Plugins/ACL.py146
-rw-r--r--src/lib/Bcfg2/Server/Plugins/AWSTags.py61
-rw-r--r--src/lib/Bcfg2/Server/Plugins/Account.py102
-rw-r--r--src/lib/Bcfg2/Server/Plugins/Base.py33
-rw-r--r--src/lib/Bcfg2/Server/Plugins/Bundler.py299
-rw-r--r--src/lib/Bcfg2/Server/Plugins/Bzr.py10
-rw-r--r--src/lib/Bcfg2/Server/Plugins/Cfg/CfgAuthorizedKeysGenerator.py29
-rw-r--r--src/lib/Bcfg2/Server/Plugins/Cfg/CfgCatFilter.py28
-rw-r--r--src/lib/Bcfg2/Server/Plugins/Cfg/CfgCheetahGenerator.py33
-rw-r--r--src/lib/Bcfg2/Server/Plugins/Cfg/CfgDiffFilter.py35
-rw-r--r--src/lib/Bcfg2/Server/Plugins/Cfg/CfgEncryptedGenerator.py22
-rw-r--r--src/lib/Bcfg2/Server/Plugins/Cfg/CfgEncryptedGenshiGenerator.py20
-rw-r--r--src/lib/Bcfg2/Server/Plugins/Cfg/CfgEncryptedJinja2Generator.py25
-rw-r--r--src/lib/Bcfg2/Server/Plugins/Cfg/CfgExternalCommandVerifier.py26
-rw-r--r--src/lib/Bcfg2/Server/Plugins/Cfg/CfgGenshiGenerator.py136
-rw-r--r--src/lib/Bcfg2/Server/Plugins/Cfg/CfgInfoXML.py16
-rw-r--r--src/lib/Bcfg2/Server/Plugins/Cfg/CfgJinja2Generator.py91
-rw-r--r--src/lib/Bcfg2/Server/Plugins/Cfg/CfgLegacyInfo.py46
-rw-r--r--src/lib/Bcfg2/Server/Plugins/Cfg/CfgPrivateKeyCreator.py196
-rw-r--r--src/lib/Bcfg2/Server/Plugins/Cfg/CfgPublicKeyCreator.py11
-rw-r--r--src/lib/Bcfg2/Server/Plugins/Cfg/CfgSSLCACertCreator.py255
-rw-r--r--src/lib/Bcfg2/Server/Plugins/Cfg/CfgSSLCAKeyCreator.py36
-rw-r--r--src/lib/Bcfg2/Server/Plugins/Cfg/__init__.py449
-rw-r--r--src/lib/Bcfg2/Server/Plugins/Cvs.py21
-rw-r--r--src/lib/Bcfg2/Server/Plugins/DBStats.py4
-rw-r--r--src/lib/Bcfg2/Server/Plugins/Darcs.py24
-rw-r--r--src/lib/Bcfg2/Server/Plugins/Decisions.py56
-rw-r--r--src/lib/Bcfg2/Server/Plugins/Defaults.py2
-rw-r--r--src/lib/Bcfg2/Server/Plugins/Deps.py93
-rw-r--r--src/lib/Bcfg2/Server/Plugins/Editor.py80
-rw-r--r--src/lib/Bcfg2/Server/Plugins/FileProbes.py27
-rw-r--r--src/lib/Bcfg2/Server/Plugins/Fossil.py28
-rw-r--r--src/lib/Bcfg2/Server/Plugins/Git.py45
-rw-r--r--src/lib/Bcfg2/Server/Plugins/GroupLogic.py32
-rw-r--r--src/lib/Bcfg2/Server/Plugins/GroupPatterns.py119
-rw-r--r--src/lib/Bcfg2/Server/Plugins/Guppy.py48
-rw-r--r--src/lib/Bcfg2/Server/Plugins/Hg.py17
-rw-r--r--src/lib/Bcfg2/Server/Plugins/Hostbase.py599
-rw-r--r--src/lib/Bcfg2/Server/Plugins/Ldap.py45
-rw-r--r--src/lib/Bcfg2/Server/Plugins/Metadata.py321
-rw-r--r--src/lib/Bcfg2/Server/Plugins/NagiosGen.py42
-rw-r--r--src/lib/Bcfg2/Server/Plugins/Ohai.py10
-rw-r--r--src/lib/Bcfg2/Server/Plugins/POSIXCompat.py4
-rw-r--r--src/lib/Bcfg2/Server/Plugins/Packages/Apt.py34
-rw-r--r--src/lib/Bcfg2/Server/Plugins/Packages/Collection.py99
-rw-r--r--src/lib/Bcfg2/Server/Plugins/Packages/Pac.py11
-rw-r--r--src/lib/Bcfg2/Server/Plugins/Packages/PackagesSources.py41
-rw-r--r--src/lib/Bcfg2/Server/Plugins/Packages/Pkgng.py86
-rw-r--r--src/lib/Bcfg2/Server/Plugins/Packages/Source.py120
-rw-r--r--src/lib/Bcfg2/Server/Plugins/Packages/Yum.py195
-rw-r--r--src/lib/Bcfg2/Server/Plugins/Packages/YumHelper.py399
-rw-r--r--src/lib/Bcfg2/Server/Plugins/Packages/__init__.py246
-rw-r--r--src/lib/Bcfg2/Server/Plugins/Pkgmgr.py263
-rw-r--r--src/lib/Bcfg2/Server/Plugins/Probes.py528
-rw-r--r--src/lib/Bcfg2/Server/Plugins/Properties.py101
-rw-r--r--src/lib/Bcfg2/Server/Plugins/PuppetENC.py36
-rw-r--r--src/lib/Bcfg2/Server/Plugins/Reporting.py41
-rw-r--r--src/lib/Bcfg2/Server/Plugins/Rules.py48
-rw-r--r--src/lib/Bcfg2/Server/Plugins/SEModules.py3
-rw-r--r--src/lib/Bcfg2/Server/Plugins/SSHbase.py173
-rw-r--r--src/lib/Bcfg2/Server/Plugins/SSLCA.py372
-rw-r--r--src/lib/Bcfg2/Server/Plugins/Snapshots.py129
-rw-r--r--src/lib/Bcfg2/Server/Plugins/Statistics.py160
-rw-r--r--src/lib/Bcfg2/Server/Plugins/Svn.py141
-rw-r--r--src/lib/Bcfg2/Server/Plugins/TCheetah.py79
-rw-r--r--src/lib/Bcfg2/Server/Plugins/TGenshi.py139
-rw-r--r--src/lib/Bcfg2/Server/Plugins/TemplateHelper.py157
-rw-r--r--src/lib/Bcfg2/Server/Plugins/Trigger.py28
-rw-r--r--src/lib/Bcfg2/Server/Plugins/__init__.py4
-rw-r--r--src/lib/Bcfg2/Server/Reports/reports/models.py30
-rw-r--r--src/lib/Bcfg2/Server/Reports/updatefix.py43
-rw-r--r--src/lib/Bcfg2/Server/SSLServer.py464
-rw-r--r--src/lib/Bcfg2/Server/Snapshots/__init__.py31
-rw-r--r--src/lib/Bcfg2/Server/Snapshots/model.py323
-rw-r--r--src/lib/Bcfg2/Server/Statistics.py124
-rw-r--r--src/lib/Bcfg2/Server/Test.py287
-rw-r--r--src/lib/Bcfg2/Server/__init__.py5
-rw-r--r--src/lib/Bcfg2/Server/models.py89
180 files changed, 9099 insertions, 11317 deletions
diff --git a/src/lib/Bcfg2/Server/Admin.py b/src/lib/Bcfg2/Server/Admin.py
new file mode 100644
index 000000000..c294e6be5
--- /dev/null
+++ b/src/lib/Bcfg2/Server/Admin.py
@@ -0,0 +1,1206 @@
+""" Subcommands and helpers for bcfg2-admin """
+
+import os
+import sys
+import time
+import glob
+import stat
+import random
+import socket
+import string
+import getpass
+import difflib
+import tarfile
+import argparse
+import lxml.etree
+import Bcfg2.Logger
+import Bcfg2.Options
+import Bcfg2.DBSettings
+import Bcfg2.Server.Core
+import Bcfg2.Client.Proxy
+from Bcfg2.Server.Plugin import PullSource, Generator, MetadataConsistencyError
+from Bcfg2.Utils import hostnames2ranges, Executor, safe_input
+import Bcfg2.Server.Plugins.Metadata
+
+try:
+ from django.core.exceptions import ImproperlyConfigured
+ from django.core import management
+ import django.conf
+ import Bcfg2.Server.models
+
+ HAS_DJANGO = True
+ try:
+ import south # pylint: disable=W0611
+ HAS_REPORTS = True
+ except ImportError:
+ HAS_REPORTS = False
+except ImportError:
+ HAS_DJANGO = False
+ HAS_REPORTS = False
+
+
class ccolors:  # pylint: disable=C0103,W0232
    """ ANSI terminal escape sequences used to colorize diff output. """
    # pylint: disable=W1401
    ADDED = '\033[92m'      # bright green
    CHANGED = '\033[93m'    # bright yellow
    REMOVED = '\033[91m'    # bright red
    ENDC = '\033[0m'        # reset to terminal default
    # pylint: enable=W1401

    @classmethod
    def disable(cls):
        """ Turn every escape sequence into the empty string, so all
        subsequent output is uncolored. """
        for attr in ("ADDED", "CHANGED", "REMOVED", "ENDC"):
            setattr(cls, attr, '')
+
+
def gen_password(length):
    """Return a random alphanumeric password of ``length`` characters."""
    pool = string.ascii_letters + string.digits
    return "".join([random.choice(pool) for _ in range(length)])
+
+
def print_table(rows, justify='left', hdr=True, vdelim=" ", padding=1):
    """Pretty-print ``rows`` as an aligned text table.

    rows    - list of rows ([[row 1], [row 2], ..., [row n]])
    hdr     - if True the first row is treated as a table header
    vdelim  - vertical delimiter between columns
    padding - # of spaces around the longest element in the column
    justify - may be left, center, right

    """
    hdelim = "="
    # Map the justification name onto the matching str method; an
    # unknown name raises KeyError, as before.
    justifier = {'left': str.ljust,
                 'center': str.center,
                 'right': str.rjust}[justify.lower()]

    # Each column is as wide as its longest cell plus padding on both
    # sides.
    widths = []
    for col in zip(*rows):
        widths.append(max(len(str(cell)) for cell in col) + 2 * padding)
    borderline = vdelim.join(hdelim * width for width in widths)

    # Emit the table: top border, then each row; the first row gets an
    # extra border under it when it is a header.
    print(borderline)
    for row in rows:
        cells = [justifier(str(cell), width)
                 for (cell, width) in zip(row, widths)]
        print(vdelim.join(cells))
        if hdr:
            print(borderline)
            hdr = False
+
+
class AdminCmd(Bcfg2.Options.Subcommand):  # pylint: disable=W0223
    """ Common base class for every bcfg2-admin subcommand mode. """

    def setup(self):
        """ Hook for post-init (post-options-parsing), pre-run setup
        work.  The base implementation does nothing; subclasses
        override it as needed. """
        pass

    def errExit(self, emsg):
        """ Print ``emsg`` and abort the process with exit status 1. """
        print(emsg)
        raise SystemExit(1)
+
+
class _ServerAdminCmd(AdminCmd):  # pylint: disable=W0223
    """ Base class for admin modes that load a local Bcfg2 server core. """

    # When set, only plugins named in the whitelist are loaded; when
    # the blacklist is set instead, every plugin except those is loaded.
    __plugin_whitelist__ = None
    __plugin_blacklist__ = None

    options = AdminCmd.options + Bcfg2.Server.Core.Core.options

    def __init__(self):
        AdminCmd.__init__(self)
        self.metadata = None

    def setup(self):
        """ Apply the plugin white/blacklist, start a server core, and
        process the initial FAM events so metadata is available. """
        whitelist = self.__plugin_whitelist__
        blacklist = self.__plugin_blacklist__
        if whitelist is not None:
            Bcfg2.Options.setup.plugins = [
                plugin for plugin in Bcfg2.Options.setup.plugins
                if plugin.name in whitelist]
        elif blacklist is not None:
            Bcfg2.Options.setup.plugins = [
                plugin for plugin in Bcfg2.Options.setup.plugins
                if plugin.name not in blacklist]

        try:
            self.core = Bcfg2.Server.Core.Core()
        except Bcfg2.Server.Core.CoreInitError:
            # errExit() raises SystemExit, so execution stops here
            self.errExit("Core load failed: %s" % sys.exc_info()[1])
        self.core.load_plugins()
        self.core.fam.handle_event_set()
        self.metadata = self.core.metadata

    def shutdown(self):
        """ Shut the server core down cleanly. """
        self.core.shutdown()
+
+
class _ProxyAdminCmd(AdminCmd):  # pylint: disable=W0223
    """ Base class for admin modes that talk to an already-running
    Bcfg2 server via the component proxy. """

    options = AdminCmd.options + Bcfg2.Client.Proxy.ComponentProxy.options

    def __init__(self):
        AdminCmd.__init__(self)
        # RPC proxy to the running server; created in setup()
        self.proxy = None

    def setup(self):
        """ Instantiate the proxy used to reach the server. """
        self.proxy = Bcfg2.Client.Proxy.ComponentProxy()
+
+
class Backup(AdminCmd):
    """ Make a backup of the Bcfg2 repository """

    options = AdminCmd.options + [Bcfg2.Options.Common.repository]

    def run(self, setup):
        """ Write a timestamped, gzipped tarball of the whole
        repository into the repository directory itself.

        Note: tarfile itself skips the archive file being written, so
        storing the backup inside the tree being archived is safe. """
        timestamp = time.strftime('%Y%m%d%H%M%S')
        datastore = setup.repository
        fmt = 'gz'
        mode = 'w:' + fmt
        filename = timestamp + '.tar' + '.' + fmt
        # bugfix: use a context manager so the archive handle is closed
        # even if add() raises (e.g. on a permission error mid-walk)
        with tarfile.open(os.path.join(datastore, filename),
                          mode=mode) as out:
            out.add(datastore, os.path.basename(datastore))
        print("Archive %s was stored under %s" % (filename, datastore))
+
+
class Client(_ServerAdminCmd):
    """ Create, modify, delete, or list client entries """

    __plugin_whitelist__ = ["Metadata"]
    options = _ServerAdminCmd.options + [
        Bcfg2.Options.PositionalArgument(
            "mode",
            choices=["add", "del", "delete", "remove", "rm", "up", "update",
                     "list"]),
        Bcfg2.Options.PositionalArgument("hostname", nargs='?'),
        Bcfg2.Options.PositionalArgument("attributes", metavar="KEY=VALUE",
                                         nargs='*')]

    # attribute names accepted in KEY=VALUE arguments
    valid_attribs = ['profile', 'uuid', 'password', 'floating', 'secure',
                     'address', 'auth']

    def get_attribs(self, setup):
        """ Parse the KEY=VALUE arguments used when adding or updating
        a client into a dict.  Exits with an error on unknown keys. """
        attr_d = {}
        for pair in setup.attributes:
            attr, val = pair.split('=', 1)
            if attr not in self.valid_attribs:
                print("Attribute %s unknown. Valid attributes: %s" %
                      (attr, self.valid_attribs))
                raise SystemExit(1)
            attr_d[attr] = val
        return attr_d

    def run(self, setup):
        """ Dispatch to the Metadata plugin's add/update/remove/list
        operations based on the requested mode. """
        if setup.mode != 'list' and not setup.hostname:
            self.parser.error("<hostname> is required in %s mode" % setup.mode)
        elif setup.mode == 'list' and setup.hostname:
            self.logger.warning("<hostname> is not honored in list mode")

        if setup.mode == 'list':
            for client in self.metadata.list_clients():
                print(client)
        else:
            include_attribs = True
            if setup.mode == 'add':
                func = self.metadata.add_client
                action = "adding"
            elif setup.mode in ['up', 'update']:
                func = self.metadata.update_client
                action = "updating"
            elif setup.mode in ['del', 'delete', 'rm', 'remove']:
                func = self.metadata.remove_client
                include_attribs = False
                action = "deleting"

            if include_attribs:
                args = (setup.hostname, self.get_attribs(setup))
            else:
                args = (setup.hostname,)
            try:
                func(*args)
            except MetadataConsistencyError:
                err = sys.exc_info()[1]
                # bugfix: action and hostname were swapped in the format
                # arguments, producing e.g. "Error foo.example.com client
                # adding: ..." instead of "Error adding client foo..."
                self.errExit("Error %s client %s: %s" % (action,
                                                         setup.hostname, err))
+
+
class Compare(AdminCmd):
    """ Compare two hosts or two versions of a host specification """

    help = "Given two XML files (as produced by bcfg2-info build or bcfg2 " + \
        "-qnc) or two directories containing XML files (as produced by " + \
        "bcfg2-info buildall or bcfg2-info builddir), output a detailed, " + \
        "Bcfg2-centric diff."

    options = AdminCmd.options + [
        Bcfg2.Options.Option(
            "-d", "--diff-lines", type=int,
            help="Show only N lines of a diff"),
        Bcfg2.Options.BooleanOption(
            "-c", "--color", help="Use colors even if not run from a TTY"),
        Bcfg2.Options.BooleanOption(
            "-q", "--quiet",
            help="Only show that entries differ, not how they differ"),
        Bcfg2.Options.PathOption("path1", metavar="<file-or-dir>"),
        Bcfg2.Options.PathOption("path2", metavar="<file-or-dir>")]

    def __init__(self):
        AdminCmd.__init__(self)
        # bugfix: ``changes`` was a mutable class attribute shared by
        # every instance; make it per-instance state.  Maps change
        # message -> list of hosts the change applies to.
        self.changes = dict()

    def removed(self, msg, host):
        """ Record a removed element """
        self.record("%sRemoved: %s%s" % (ccolors.REMOVED, msg, ccolors.ENDC),
                    host)

    def added(self, msg, host):
        """ Record an added element """
        self.record("%sAdded: %s%s" % (ccolors.ADDED, msg, ccolors.ENDC), host)

    def changed(self, msg, host):
        """ Record a changed element """
        self.record("%sChanged: %s%s" % (ccolors.CHANGED, msg, ccolors.ENDC),
                    host)

    def record(self, msg, host):
        """ Record a new removed/added/changed message for the given
        host """
        if msg not in self.changes:
            self.changes[msg] = [host]
        else:
            self.changes[msg].append(host)

    def udiff(self, lines1, lines2, **kwargs):
        """ Get a unified diff of the two line lists with the diff
        control lines (---/+++/@@) stripped.  The optional ``lines``
        kwarg truncates the diff to that many lines. """
        lines = None
        if "lines" in kwargs:
            if kwargs['lines'] is not None:
                lines = int(kwargs['lines'])
            del kwargs['lines']
        if lines == 0:
            return []
        kwargs['n'] = 0  # no context lines
        diff = []
        for line in difflib.unified_diff(lines1, lines2, **kwargs):
            if (line.startswith("--- ") or line.startswith("+++ ") or
                line.startswith("@@ ")):
                continue
            if lines is not None and len(diff) > lines:
                diff.append(" ...")
                break
            if line.startswith("+"):
                diff.extend(" %s%s%s" % (ccolors.ADDED, txt, ccolors.ENDC)
                            for txt in line.splitlines())
            elif line.startswith("-"):
                diff.extend(" %s%s%s" % (ccolors.REMOVED, txt, ccolors.ENDC)
                            for txt in line.splitlines())
        return diff

    def _bundletype(self, el):
        """ Get a human-friendly representation of the type of the
        given bundle -- independent or not """
        # NOTE(review): this reads an attribute named "tag", not
        # el.tag -- presumably the built-config XML carries the bundle
        # type as an attribute; confirm against bcfg2-info output
        if el.get("tag") == "Independent":
            return "Independent bundle"
        else:
            return "Bundle"

    def _get_filelists(self, setup):
        """ Get a list of 2-tuples of files to compare """
        files = []
        # bugfix: the second isdir() call tested path1 twice, so a
        # directory/file pair slipped into the directory branch instead
        # of being rejected below
        if os.path.isdir(setup.path1) and os.path.isdir(setup.path2):
            for fpath in glob.glob(os.path.join(setup.path1, '*')):
                fname = os.path.basename(fpath)
                if os.path.exists(os.path.join(setup.path2, fname)):
                    files.append((os.path.join(setup.path1, fname),
                                  os.path.join(setup.path2, fname)))
                else:
                    if fname.endswith(".xml"):
                        host = fname[0:-4]
                    else:
                        host = fname
                    self.removed(host, '')
            for fpath in glob.glob(os.path.join(setup.path2, '*')):
                fname = os.path.basename(fpath)
                if not os.path.exists(os.path.join(setup.path1, fname)):
                    if fname.endswith(".xml"):
                        host = fname[0:-4]
                    else:
                        host = fname
                    self.added(host, '')
        elif os.path.isfile(setup.path1) and os.path.isfile(setup.path2):
            files.append((setup.path1, setup.path2))
        else:
            self.errExit("Cannot diff a file and a directory")
        return files

    def run(self, setup):  # pylint: disable=R0912,R0914,R0915
        """ Diff each pair of files, recording bundle- and
        element-level differences, then print the collected changes
        grouped by hostname ranges. """
        if not sys.stdout.isatty() and not setup.color:
            ccolors.disable()

        files = self._get_filelists(setup)
        for file1, file2 in files:
            host = None
            if os.path.basename(file1) == os.path.basename(file2):
                fname = os.path.basename(file1)
                if fname.endswith(".xml"):
                    host = fname[0:-4]
                else:
                    host = fname

            xdata1 = lxml.etree.parse(file1).getroot()
            xdata2 = lxml.etree.parse(file2).getroot()

            elements1 = dict()
            elements2 = dict()
            bundles1 = [el.get("name") for el in xdata1.iterchildren()]
            bundles2 = [el.get("name") for el in xdata2.iterchildren()]
            for el in xdata1.iterchildren():
                if el.get("name") not in bundles2:
                    self.removed("%s %s" % (self._bundletype(el),
                                            el.get("name")),
                                 host)
            for el in xdata2.iterchildren():
                if el.get("name") not in bundles1:
                    self.added("%s %s" % (self._bundletype(el),
                                          el.get("name")),
                               host)

            for bname in bundles1:
                bundle = xdata1.find("*[@name='%s']" % bname)
                for el in bundle.getchildren():
                    elements1["%s:%s" % (el.tag, el.get("name"))] = el
            for bname in bundles2:
                bundle = xdata2.find("*[@name='%s']" % bname)
                for el in bundle.getchildren():
                    elements2["%s:%s" % (el.tag, el.get("name"))] = el

            for el in elements1.values():
                elid = "%s:%s" % (el.tag, el.get("name"))
                if elid not in elements2:
                    self.removed("Element %s" % elid, host)
                else:
                    el2 = elements2[elid]
                    if (el.getparent().get("name") !=
                            el2.getparent().get("name")):
                        self.changed(
                            "Element %s was in bundle %s, "
                            "now in bundle %s" % (elid,
                                                  el.getparent().get("name"),
                                                  el2.getparent().get("name")),
                            host)
                    attr1 = sorted(["%s=\"%s\"" % (attr, el.get(attr))
                                    for attr in el.attrib])
                    # bugfix: attr2 read values from el instead of el2,
                    # so changed attribute *values* were never detected
                    attr2 = sorted(["%s=\"%s\"" % (attr, el2.get(attr))
                                    for attr in el2.attrib])
                    if attr1 != attr2:
                        err = ["Element %s has different attributes" % elid]
                        if not setup.quiet:
                            err.extend(self.udiff(attr1, attr2))
                        self.changed("\n".join(err), host)

                    if el.text != el2.text:
                        if el.text is None:
                            self.changed("Element %s content was added" % elid,
                                         host)
                        elif el2.text is None:
                            self.changed("Element %s content was removed" %
                                         elid, host)
                        else:
                            err = ["Element %s has different content" %
                                   elid]
                            if not setup.quiet:
                                err.extend(
                                    self.udiff(el.text.splitlines(),
                                               el2.text.splitlines(),
                                               lines=setup.diff_lines))
                            self.changed("\n".join(err), host)

            # bugfix: this loop iterated elements2 but tested membership
            # in elements2 (always true, so it was dead code) and called
            # removed(); elements only present in the second config are
            # additions
            for el in elements2.values():
                elid = "%s:%s" % (el.tag, el.get("name"))
                if elid not in elements1:
                    self.added("Element %s" % elid, host)

        for change, hosts in self.changes.items():
            hlist = [h for h in hosts if h is not None]
            if len(files) > 1 and len(hlist):
                print("===== %s =====" %
                      "\n ".join(hostnames2ranges(hlist)))
            print(change)
            if len(files) > 1 and len(hlist):
                print("")
+
+
class Init(AdminCmd):
    """Interactively initialize a new repository."""

    options = AdminCmd.options + [
        Bcfg2.Options.Common.repository, Bcfg2.Options.Common.plugins]

    # default config file
    config = '''[server]
repository = %s
plugins = %s
# Uncomment the following to listen on all interfaces
#listen_all = true

[database]
#engine = sqlite3
# 'postgresql', 'mysql', 'mysql_old', 'sqlite3' or 'ado_mssql'.
#name =
# Or path to database file if using sqlite3.
#<repository>/etc/bcfg2.sqlite is default path if left empty
#user =
# Not used with sqlite3.
#password =
# Not used with sqlite3.
#host =
# Not used with sqlite3.
#port =

[reporting]
transport = LocalFilesystem

[communication]
password = %s
certificate = %s
key = %s
ca = %s

[components]
bcfg2 = %s
'''

    # Default groups
    groups = '''<Groups>
   <Group profile='true' public='true' default='true' name='basic'/>
</Groups>
'''

    # Default contents of clients.xml
    clients = '''<Clients>
   <Client profile="basic" name="%s"/>
</Clients>
'''

    def __init__(self):
        AdminCmd.__init__(self)
        # answers collected from the user (with defaults), keyed by name
        self.data = dict()

    def _set_defaults(self, setup):
        """Set default parameters."""
        self.data['plugins'] = setup.plugins
        self.data['configfile'] = setup.config
        self.data['repopath'] = setup.repository
        self.data['password'] = gen_password(8)
        self.data['shostname'] = socket.getfqdn()
        self.data['server_uri'] = "https://%s:6789" % self.data['shostname']
        self.data['country'] = 'US'
        self.data['state'] = 'Illinois'
        self.data['location'] = 'Argonne'
        # pick sensible key/cert locations depending on the OS layout
        if os.path.exists("/etc/pki/tls"):
            self.data['keypath'] = "/etc/pki/tls/private/bcfg2.key"
            self.data['certpath'] = "/etc/pki/tls/certs/bcfg2.crt"
        elif os.path.exists("/etc/ssl"):
            self.data['keypath'] = "/etc/ssl/bcfg2.key"
            self.data['certpath'] = "/etc/ssl/bcfg2.crt"
        else:
            basepath = os.path.dirname(self.data['configfile'])
            self.data['keypath'] = os.path.join(basepath, "bcfg2.key")
            self.data['certpath'] = os.path.join(basepath, 'bcfg2.crt')

    def input_with_default(self, msg, default_name):
        """ Prompt for input with the given message, taking the
        default from ``self.data`` """
        val = safe_input("%s [%s]: " % (msg, self.data[default_name]))
        if val:
            self.data[default_name] = val

    def run(self, setup):
        self._set_defaults(setup)

        # Prompt the user for input
        self._prompt_server()
        self._prompt_config()
        self._prompt_repopath()
        self._prompt_password()
        self._prompt_keypath()
        self._prompt_certificate()

        # Initialize the repository
        self.init_repo()

    def _prompt_server(self):
        """Ask for the server name and URI."""
        self.input_with_default("What is the server's hostname", 'shostname')
        # reset default server URI
        self.data['server_uri'] = "https://%s:6789" % self.data['shostname']
        self.input_with_default("Server location", 'server_uri')

    def _prompt_config(self):
        """Ask for the configuration file path."""
        self.input_with_default("Path to Bcfg2 configuration", 'configfile')

    def _prompt_repopath(self):
        """Ask for the repository path."""
        while True:
            self.input_with_default("Location of Bcfg2 repository", 'repopath')
            if os.path.isdir(self.data['repopath']):
                response = safe_input("Directory %s exists. Overwrite? [y/N]:"
                                      % self.data['repopath'])
                if response.lower().strip() == 'y':
                    break
            else:
                break

    def _prompt_password(self):
        """Ask for a password or generate one if none is provided."""
        newpassword = getpass.getpass(
            "Input password used for communication verification "
            "(without echoing; leave blank for random): ").strip()
        if len(newpassword) != 0:
            self.data['password'] = newpassword

    def _prompt_certificate(self):
        """Ask for the key details (country, state, and location)."""
        print("The following questions affect SSL certificate generation.")
        print("If no data is provided, the default values are used.")
        self.input_with_default("Country code for certificate", 'country')
        self.input_with_default("State or Province Name (full name) for "
                                "certificate", 'state')
        self.input_with_default("Locality Name (e.g., city) for certificate",
                                'location')

    def _prompt_keypath(self):
        """ Ask for the key pair location. Try to use sensible
        defaults depending on the OS """
        self.input_with_default("Path where Bcfg2 server private key will be "
                                "created", 'keypath')
        self.input_with_default("Path where Bcfg2 server cert will be created",
                                'certpath')

    def _init_plugins(self):
        """Initialize each plugin-specific portion of the repository."""
        for plugin in self.data['plugins']:
            kwargs = dict()
            if issubclass(plugin, Bcfg2.Server.Plugins.Metadata.Metadata):
                kwargs.update(
                    dict(groups_xml=self.groups,
                         clients_xml=self.clients % self.data['shostname']))
            plugin.init_repo(self.data['repopath'], **kwargs)

    def create_conf(self):
        """ Write the generated bcfg2.conf, prompting before
        overwriting an existing file. """
        confdata = self.config % (
            self.data['repopath'],
            ','.join(p.__name__ for p in self.data['plugins']),
            self.data['password'],
            self.data['certpath'],
            self.data['keypath'],
            self.data['certpath'],
            self.data['server_uri'])

        # Don't overwrite existing bcfg2.conf file
        if os.path.exists(self.data['configfile']):
            result = safe_input("\nWarning: %s already exists. "
                                "Overwrite? [y/N]: " % self.data['configfile'])
            # normalized like _prompt_repopath, so " Y " and "y" both work
            if result.lower().strip() != 'y':
                print("Leaving %s unchanged" % self.data['configfile'])
                return
        try:
            # bugfix: use a context manager instead of leaking the open
            # file handle
            with open(self.data['configfile'], "w") as conf:
                conf.write(confdata)
            os.chmod(self.data['configfile'],
                     stat.S_IRUSR | stat.S_IWUSR)  # 0600
        except:  # pylint: disable=W0702
            self.errExit("Error trying to write configuration file '%s': %s" %
                         (self.data['configfile'], sys.exc_info()[1]))

    def init_repo(self):
        """Setup a new repo and create the content of the
        configuration file."""
        # Create the repository
        path = os.path.join(self.data['repopath'], 'etc')
        try:
            os.makedirs(path)
            self._init_plugins()
            print("Repository created successfully in %s" %
                  self.data['repopath'])
        except OSError:
            print("Failed to create %s." % path)

        # Create the configuration file and SSL key
        self.create_conf()
        self.create_key()

    def create_key(self):
        """Creates a bcfg2.key at the directory specifed by keypath."""
        cmd = Executor(timeout=120)
        # bugfix: the subject template had a stray trailing quote after
        # the CN value, which ended up embedded in the certificate subject
        subject = "/C=%s/ST=%s/L=%s/CN=%s" % (
            self.data['country'], self.data['state'], self.data['location'],
            self.data['shostname'])
        key = cmd.run(["openssl", "req", "-batch", "-x509", "-nodes",
                       "-subj", subject, "-days", "1000",
                       "-newkey", "rsa:2048",
                       "-keyout", self.data['keypath'], "-noout"])
        if not key.success:
            print("Error generating key: %s" % key.error)
            return
        os.chmod(self.data['keypath'], stat.S_IRUSR | stat.S_IWUSR)  # 0600
        csr = cmd.run(["openssl", "req", "-batch", "-new", "-subj", subject,
                       "-key", self.data['keypath']])
        if not csr.success:
            print("Error generating certificate signing request: %s" %
                  csr.error)
            return
        cert = cmd.run(["openssl", "x509", "-req", "-days", "1000",
                        "-signkey", self.data['keypath'],
                        "-out", self.data['certpath']],
                       inputdata=csr.stdout)
        if not cert.success:
            print("Error signing certificate: %s" % cert.error)
            return
+
+
class Minestruct(_ServerAdminCmd):
    """ Extract extra entry lists from statistics """

    options = _ServerAdminCmd.options + [
        Bcfg2.Options.PathOption(
            "-f", "--outfile", type=argparse.FileType('w'), default=sys.stdout,
            help="Write to the given file"),
        Bcfg2.Options.Option(
            "-g", "--groups", help="Only build config for groups",
            type=Bcfg2.Options.Types.colon_list, default=[]),
        Bcfg2.Options.PositionalArgument("hostname")]

    def run(self, setup):
        """ Collect the extra entries reported by every PullSource
        plugin for the client and write them out as an XML skeleton,
        nested under the requested groups. """
        try:
            extra = set()
            for source in self.core.plugins_by_type(PullSource):
                extra.update(source.GetExtra(setup.hostname))
        except:  # pylint: disable=W0702
            self.errExit("Failed to find extra entry info for client %s: %s" %
                         (setup.hostname, sys.exc_info()[1]))
        root = lxml.etree.Element("Base")
        self.logger.info("Found %d extra entries" % len(extra))
        # nest each requested group inside the previous one; entries go
        # into the innermost group (or directly under the root)
        add_point = root
        for group in setup.groups:
            add_point = lxml.etree.SubElement(add_point, "Group", name=group)
        for tag, name in extra:
            self.logger.info("%s: %s" % (tag, name))
            lxml.etree.SubElement(add_point, tag, name=name)

        lxml.etree.ElementTree(root).write(setup.outfile, pretty_print=True)
+
+
class Perf(_ProxyAdminCmd):
    """ Get performance data from server """

    def run(self, setup):
        """ Fetch the server's timing statistics over the proxy and
        print them as a name/min/max/mean/count table. """
        rows = [('Name', 'Min', 'Max', 'Mean', 'Count')]
        data = self.proxy.get_statistics()
        for key in sorted(data.keys()):
            stats = data[key]
            # every column except the trailing count is a float
            formatted = ["%.06f" % value for value in stats[:-1]]
            formatted.append(stats[-1])
            rows.append((key,) + tuple(formatted))
        print_table(rows)
+
+
+class Pull(_ServerAdminCmd):
+ """ Retrieves entries from clients and integrates the information
+ into the repository """
+
+ options = _ServerAdminCmd.options + [
+ Bcfg2.Options.Common.interactive,
+ Bcfg2.Options.BooleanOption(
+ "-s", "--stdin",
+ help="Read lists of <hostname> <entrytype> <entryname> from stdin "
+ "instead of the command line"),
+ Bcfg2.Options.PositionalArgument("hostname", nargs='?'),
+ Bcfg2.Options.PositionalArgument("entrytype", nargs='?'),
+ Bcfg2.Options.PositionalArgument("entryname", nargs='?')]
+
+ def __init__(self):
+ _ServerAdminCmd.__init__(self)
+ self.interactive = False
+
+ def setup(self):
+ if (not Bcfg2.Options.setup.stdin and
+ not (Bcfg2.Options.setup.hostname and
+ Bcfg2.Options.setup.entrytype and
+ Bcfg2.Options.setup.entryname)):
+ print("You must specify either --stdin or a hostname, entry type, "
+ "and entry name on the command line.")
+ self.errExit(self.usage())
+ _ServerAdminCmd.setup(self)
+
+ def run(self, setup):
+ self.interactive = setup.interactive
+ if setup.stdin:
+ for line in sys.stdin:
+ try:
+ self.PullEntry(*line.split(None, 3))
+ except SystemExit:
+ print(" for %s" % line)
+ except:
+ print("Bad entry: %s" % line.strip())
+ else:
+ self.PullEntry(setup.hostname, setup.entrytype, setup.entryname)
+
+ def BuildNewEntry(self, client, etype, ename):
+ """Construct a new full entry for
+ given client/entry from statistics.
+ """
+ new_entry = {'type': etype, 'name': ename}
+ pull_sources = self.core.plugins_by_type(PullSource)
+ for plugin in pull_sources:
+ try:
+ (owner, group, mode, contents) = \
+ plugin.GetCurrentEntry(client, etype, ename)
+ break
+ except Bcfg2.Server.Plugin.PluginExecutionError:
+ if plugin == pull_sources[-1]:
+ self.errExit("Pull Source failure; could not fetch "
+ "current state")
+
+ try:
+ data = {'owner': owner,
+ 'group': group,
+ 'mode': mode,
+ 'text': contents}
+ except UnboundLocalError:
+ self.errExit("Unable to build entry")
+ for key, val in list(data.items()):
+ if val:
+ new_entry[key] = val
+ return new_entry
+
+ def Choose(self, choices):
+ """Determine where to put pull data."""
+ if self.interactive:
+ for choice in choices:
+ print("Plugin returned choice:")
+ if id(choice) == id(choices[0]):
+ print("(current entry) ")
+ if choice.all:
+ print(" => global entry")
+ elif choice.group:
+ print(" => group entry: %s (prio %d)" %
+ (choice.group, choice.prio))
+ else:
+ print(" => host entry: %s" % (choice.hostname))
+
+ # flush input buffer
+ ans = safe_input("Use this entry? [yN]: ") in ['y', 'Y']
+ if ans:
+ return choice
+ return False
+ else:
+ if not choices:
+ return False
+ return choices[0]
+
+ def PullEntry(self, client, etype, ename):
+ """Make currently recorded client state correct for entry."""
+ new_entry = self.BuildNewEntry(client, etype, ename)
+
+ meta = self.core.build_metadata(client)
+ # Find appropriate plugin in core
+ glist = [gen for gen in self.core.plugins_by_type(Generator)
+ if ename in gen.Entries.get(etype, {})]
+ if len(glist) != 1:
+            self.errExit("Got wrong number of matching generators for entry: "
+                         "%s" % ([g.name for g in glist]))
+ plugin = glist[0]
+ if not isinstance(plugin, Bcfg2.Server.Plugin.PullTarget):
+ self.errExit("Configuration upload not supported by plugin %s" %
+ plugin.name)
+ try:
+ choices = plugin.AcceptChoices(new_entry, meta)
+ specific = self.Choose(choices)
+ if specific:
+ plugin.AcceptPullData(specific, new_entry, self.logger)
+ except Bcfg2.Server.Plugin.PluginExecutionError:
+ self.errExit("Configuration upload not supported by plugin %s" %
+ plugin.name)
+
+ # Commit if running under a VCS
+ for vcsplugin in list(self.core.plugins.values()):
+ if isinstance(vcsplugin, Bcfg2.Server.Plugin.Version):
+ files = "%s/%s" % (plugin.data, ename)
+ comment = 'file "%s" pulled from host %s' % (files, client)
+ vcsplugin.commit_data([files], comment)
+
+
+class _ReportsCmd(AdminCmd): # pylint: disable=W0223
+ """ Base command for all admin modes dealing with the reporting
+ subsystem """
+ def __init__(self):
+ AdminCmd.__init__(self)
+ self.reports_entries = ()
+ self.reports_classes = ()
+
+ def setup(self):
+ # this has to be imported after options are parsed, because
+ # Django finalizes its settings as soon as it's loaded, which
+ # means that if we import this before Bcfg2.DBSettings has
+ # been populated, Django gets a null configuration, and
+ # subsequent updates to Bcfg2.DBSettings won't help.
+ import Bcfg2.Reporting.models # pylint: disable=W0621
+ self.reports_entries = (Bcfg2.Reporting.models.Group,
+ Bcfg2.Reporting.models.Bundle,
+ Bcfg2.Reporting.models.FailureEntry,
+ Bcfg2.Reporting.models.ActionEntry,
+ Bcfg2.Reporting.models.PathEntry,
+ Bcfg2.Reporting.models.PackageEntry,
+                                Bcfg2.Reporting.models.PathEntry,  # NOTE(review): duplicate of PathEntry above — confirm intended
+ Bcfg2.Reporting.models.ServiceEntry)
+ self.reports_classes = self.reports_entries + (
+ Bcfg2.Reporting.models.Client,
+ Bcfg2.Reporting.models.Interaction,
+ Bcfg2.Reporting.models.Performance)
+
+
+if HAS_DJANGO:
+ class _DjangoProxyCmd(AdminCmd):
+ """ Base for admin modes that proxy a command through the
+ Django management system """
+ command = None
+ args = []
+
+ def run(self, _):
+ '''Call a django command'''
+ if self.command is not None:
+ command = self.command
+ else:
+ command = self.__class__.__name__.lower()
+ args = [command] + self.args
+ management.call_command(*args)
+
+ class DBShell(_DjangoProxyCmd):
+ """ Call the Django 'dbshell' command on the database """
+
+ class Shell(_DjangoProxyCmd):
+ """ Call the Django 'shell' command on the database """
+
+ class ValidateDB(_DjangoProxyCmd):
+ """ Call the Django 'validate' command on the database """
+ command = "validate"
+
+ class Syncdb(AdminCmd):
+ """ Sync the Django ORM with the configured database """
+
+ def run(self, setup):
+ Bcfg2.Server.models.load_models()
+ try:
+ Bcfg2.DBSettings.sync_databases(
+ interactive=False,
+ verbosity=setup.verbose + setup.debug)
+ except ImproperlyConfigured:
+ err = sys.exc_info()[1]
+ self.logger.error("Django configuration problem: %s" % err)
+ raise SystemExit(1)
+ except:
+ err = sys.exc_info()[1]
+ self.logger.error("Database update failed: %s" % err)
+ raise SystemExit(1)
+
+
+if HAS_REPORTS:
+ import datetime
+
+ class ScrubReports(_ReportsCmd):
+ """ Perform a thorough scrub and cleanup of the Reporting
+ database """
+
+ def setup(self):
+ _ReportsCmd.setup(self)
+ # this has to be imported after options are parsed,
+ # because Django finalizes its settings as soon as it's
+ # loaded, which means that if we import this before
+ # Bcfg2.DBSettings has been populated, Django gets a null
+ # configuration, and subsequent updates to
+ # Bcfg2.DBSettings won't help.
+ from Bcfg2.Reporting.Compat import transaction
+ self.run = transaction.atomic(self.run)
+
+ def run(self, _): # pylint: disable=E0202
+ # Cleanup unused entries
+ for cls in self.reports_entries:
+ try:
+ start_count = cls.objects.count()
+ cls.prune_orphans()
+ self.logger.info("Pruned %d %s records" %
+ (start_count - cls.objects.count(),
+ cls.__name__))
+ except: # pylint: disable=W0702
+ print("Failed to prune %s: %s" %
+ (cls.__name__, sys.exc_info()[1]))
+
+ class InitReports(AdminCmd):
+ """ Initialize the Reporting database """
+ def run(self, setup):
+ verbose = setup.verbose + setup.debug
+ try:
+ Bcfg2.DBSettings.sync_databases(interactive=False,
+ verbosity=verbose)
+ Bcfg2.DBSettings.migrate_databases(interactive=False,
+ verbosity=verbose)
+ except: # pylint: disable=W0702
+ self.errExit("%s failed: %s" %
+ (self.__class__.__name__.title(),
+ sys.exc_info()[1]))
+
+ class UpdateReports(InitReports):
+ """ Apply updates to the reporting database """
+
+ class ReportsStats(_ReportsCmd):
+ """ Print Reporting database statistics """
+ def run(self, _):
+ for cls in self.reports_classes:
+ print("%s has %s records" % (cls.__name__,
+ cls.objects.count()))
+
+ class PurgeReports(_ReportsCmd):
+ """ Purge records from the Reporting database """
+
+ options = AdminCmd.options + [
+ Bcfg2.Options.Option("--client", help="Client to operate on"),
+ Bcfg2.Options.Option("--days", type=int, metavar='N',
+ help="Records older than N days"),
+ Bcfg2.Options.ExclusiveOptionGroup(
+ Bcfg2.Options.BooleanOption("--expired",
+ help="Expired clients only"),
+ Bcfg2.Options.Option("--state", help="Purge entries in state",
+ choices=['dirty', 'clean', 'modified']),
+ required=False)]
+
+ def run(self, setup):
+ if setup.days:
+ maxdate = datetime.datetime.now() - \
+ datetime.timedelta(days=setup.days)
+ else:
+ maxdate = None
+
+ starts = {}
+ for cls in self.reports_classes:
+ starts[cls] = cls.objects.count()
+ if setup.expired:
+ self.purge_expired(maxdate)
+ else:
+ self.purge(setup.client, maxdate, setup.state)
+ for cls in self.reports_classes:
+ self.logger.info("Purged %s %s records" %
+ (starts[cls] - cls.objects.count(),
+ cls.__name__))
+
+ def purge(self, client=None, maxdate=None, state=None):
+ '''Purge historical data from the database'''
+ # indicates whether or not a client should be deleted
+ filtered = False
+
+ if not client and not maxdate and not state:
+ self.errExit("Refusing to prune all data. Specify an option "
+ "to %s" % self.__class__.__name__.lower())
+
+ ipurge = Bcfg2.Reporting.models.Interaction.objects
+ if client:
+ try:
+ cobj = Bcfg2.Reporting.models.Client.objects.get(
+ name=client)
+ ipurge = ipurge.filter(client=cobj)
+ except Bcfg2.Reporting.models.Client.DoesNotExist:
+ self.errExit("Client %s not in database" % client)
+ self.logger.debug("Filtering by client: %s" % client)
+
+ if maxdate:
+ filtered = True
+ self.logger.debug("Filtering by maxdate: %s" % maxdate)
+ ipurge = ipurge.filter(timestamp__lt=maxdate)
+
+ if django.conf.settings.DATABASES['default']['ENGINE'] == \
+ 'django.db.backends.sqlite3':
+ grp_limit = 100
+ else:
+ grp_limit = 1000
+ if state:
+ filtered = True
+ self.logger.debug("Filtering by state: %s" % state)
+ ipurge = ipurge.filter(state=state)
+
+ count = ipurge.count()
+ rnum = 0
+ try:
+ while rnum < count:
+ grp = list(ipurge[:grp_limit].values("id"))
+ # just in case...
+ if not grp:
+ break
+ Bcfg2.Reporting.models.Interaction.objects.filter(
+ id__in=[x['id'] for x in grp]).delete()
+ rnum += len(grp)
+ self.logger.debug("Deleted %s of %s" % (rnum, count))
+ except: # pylint: disable=W0702
+ self.logger.error("Failed to remove interactions: %s" %
+ sys.exc_info()[1])
+
+ # Prune any orphaned ManyToMany relations
+ for m2m in self.reports_entries:
+ self.logger.debug("Pruning any orphaned %s objects" %
+ m2m.__name__)
+ m2m.prune_orphans()
+
+ if client and not filtered:
+ # Delete the client, ping data is automatic
+ try:
+ self.logger.debug("Purging client %s" % client)
+ cobj.delete()
+ except: # pylint: disable=W0702
+ self.logger.error("Failed to delete client %s: %s" %
+ (client, sys.exc_info()[1]))
+
+ def purge_expired(self, maxdate=None):
+ """ Purge expired clients from the Reporting database """
+
+ if maxdate:
+ if not isinstance(maxdate, datetime.datetime):
+ raise TypeError("maxdate is not a DateTime object")
+ self.logger.debug("Filtering by maxdate: %s" % maxdate)
+ clients = Bcfg2.Reporting.models.Client.objects.filter(
+ expiration__lt=maxdate)
+ else:
+ clients = Bcfg2.Reporting.models.Client.objects.filter(
+ expiration__isnull=False)
+
+ for client in clients:
+ self.logger.debug("Purging client %s" % client)
+ Bcfg2.Reporting.models.Interaction.objects.filter(
+ client=client).delete()
+ client.delete()
+
+ class ReportsSQLAll(_DjangoProxyCmd):
+ """ Call the Django 'sqlall' command on the Reporting database """
+ args = ["Reporting"]
+
+
+class Viz(_ServerAdminCmd):
+ """ Produce graphviz diagrams of metadata structures """
+
+ options = _ServerAdminCmd.options + [
+ Bcfg2.Options.BooleanOption(
+ "-H", "--includehosts",
+ help="Include hosts in the viz output"),
+ Bcfg2.Options.BooleanOption(
+ "-b", "--includebundles",
+ help="Include bundles in the viz output"),
+ Bcfg2.Options.BooleanOption(
+ "-k", "--includekey",
+ help="Show a key for different digraph shapes"),
+ Bcfg2.Options.Option(
+ "-c", "--only-client", metavar="<hostname>",
+ help="Only show groups and bundles for the named client"),
+ Bcfg2.Options.PathOption(
+ "-o", "--outfile",
+ help="Write viz output to an output file")]
+
+ colors = ['steelblue1', 'chartreuse', 'gold', 'magenta',
+ 'indianred1', 'limegreen', 'orange1', 'lightblue2',
+ 'green1', 'blue1', 'yellow1', 'darkturquoise', 'gray66']
+
+ __plugin_blacklist__ = ['DBStats', 'Cfg', 'Pkgmgr', 'Packages', 'Rules',
+ 'Decisions', 'Deps', 'Git', 'Svn', 'Fossil', 'Bzr',
+ 'Bundler']
+
+ def run(self, setup):
+ if setup.outfile:
+ fmt = setup.outfile.split('.')[-1]
+ else:
+ fmt = 'png'
+
+ exc = Executor()
+ cmd = ["dot", "-T", fmt]
+ if setup.outfile:
+ cmd.extend(["-o", setup.outfile])
+ inputlist = ["digraph groups {",
+ '\trankdir="LR";',
+ self.metadata.viz(setup.includehosts,
+ setup.includebundles,
+ setup.includekey,
+ setup.only_client,
+ self.colors)]
+ if setup.includekey:
+ inputlist.extend(
+ ["\tsubgraph cluster_key {",
+ '\tstyle="filled";',
+ '\tcolor="lightblue";',
+ '\tBundle [ shape="septagon" ];',
+ '\tGroup [shape="ellipse"];',
+ '\tGroup Category [shape="trapezium"];\n',
+ '\tProfile [style="bold", shape="ellipse"];',
+ '\tHblock [label="Host1|Host2|Host3",shape="record"];',
+ '\tlabel="Key";',
+ "\t}"])
+ inputlist.append("}")
+ idata = "\n".join(inputlist)
+ try:
+ result = exc.run(cmd, inputdata=idata)
+ except OSError:
+ # on some systems (RHEL 6), you cannot run dot with
+ # shell=True. on others (Gentoo with Python 2.7), you
+ # must. In yet others (RHEL 5), either way works. I have
+ # no idea what the difference is, but it's kind of a PITA.
+ result = exc.run(cmd, shell=True, inputdata=idata)
+ if not result.success:
+ self.errExit("Error running %s: %s" % (cmd, result.error))
+ if not setup.outfile:
+ print(result.stdout)
+
+
+class Xcmd(_ProxyAdminCmd):
+ """ XML-RPC Command Interface """
+
+ options = _ProxyAdminCmd.options + [
+ Bcfg2.Options.PositionalArgument("command"),
+ Bcfg2.Options.PositionalArgument("arguments", nargs='*')]
+
+ def run(self, setup):
+ try:
+ data = getattr(self.proxy, setup.command)(*setup.arguments)
+ except Bcfg2.Client.Proxy.ProxyError:
+ self.errExit("Proxy Error: %s" % sys.exc_info()[1])
+
+ if data is not None:
+ print(data)
+
+
+class CLI(Bcfg2.Options.CommandRegistry):
+ """ CLI class for bcfg2-admin """
+
+ def __init__(self):
+ Bcfg2.Options.CommandRegistry.__init__(self)
+ self.register_commands(globals().values(), parent=AdminCmd)
+ parser = Bcfg2.Options.get_parser(
+ description="Manage a running Bcfg2 server",
+ components=[self])
+ parser.add_options(self.subcommand_options)
+ parser.parse()
+
+ def run(self):
+ """ Run bcfg2-admin """
+ try:
+ cmd = self.commands[Bcfg2.Options.setup.subcommand]
+ if hasattr(cmd, 'setup'):
+ cmd.setup()
+ return self.runcommand()
+ finally:
+ self.shutdown()
diff --git a/src/lib/Bcfg2/Server/Admin/Backup.py b/src/lib/Bcfg2/Server/Admin/Backup.py
deleted file mode 100644
index 0a04df98b..000000000
--- a/src/lib/Bcfg2/Server/Admin/Backup.py
+++ /dev/null
@@ -1,22 +0,0 @@
-""" Make a backup of the Bcfg2 repository """
-
-import os
-import time
-import tarfile
-import Bcfg2.Server.Admin
-import Bcfg2.Options
-
-
-class Backup(Bcfg2.Server.Admin.MetadataCore):
- """ Make a backup of the Bcfg2 repository """
-
- def __call__(self, args):
- datastore = self.setup['repo']
- timestamp = time.strftime('%Y%m%d%H%M%S')
- fmt = 'gz'
- mode = 'w:' + fmt
- filename = timestamp + '.tar' + '.' + fmt
- out = tarfile.open(os.path.join(datastore, filename), mode=mode)
- out.add(datastore, os.path.basename(datastore))
- out.close()
- print("Archive %s was stored under %s" % (filename, datastore))
diff --git a/src/lib/Bcfg2/Server/Admin/Client.py b/src/lib/Bcfg2/Server/Admin/Client.py
deleted file mode 100644
index 325b7ae6e..000000000
--- a/src/lib/Bcfg2/Server/Admin/Client.py
+++ /dev/null
@@ -1,50 +0,0 @@
-""" Create, delete, or list client entries """
-
-import sys
-import Bcfg2.Server.Admin
-from Bcfg2.Server.Plugin import MetadataConsistencyError
-
-
-def get_attribs(args):
- """ Get a list of attributes to set on a client when adding/updating it """
- attr_d = {}
- for i in args[2:]:
- attr, val = i.split('=', 1)
- if attr not in ['profile', 'uuid', 'password', 'floating', 'secure',
- 'address', 'auth']:
- print("Attribute %s unknown" % attr)
- raise SystemExit(1)
- attr_d[attr] = val
- return attr_d
-
-
-class Client(Bcfg2.Server.Admin.MetadataCore):
- """ Create, delete, or list client entries """
- __usage__ = "[options] [add|del|update|list] [attr=val]"
- __plugin_whitelist__ = ["Metadata"]
-
- def __call__(self, args):
- if len(args) == 0:
- self.errExit("No argument specified.\n"
- "Usage: %s" % self.__usage__)
- if args[0] == 'add':
- try:
- self.metadata.add_client(args[1], get_attribs(args))
- except MetadataConsistencyError:
- self.errExit("Error adding client: %s" % sys.exc_info()[1])
- elif args[0] in ['update', 'up']:
- try:
- self.metadata.update_client(args[1], get_attribs(args))
- except MetadataConsistencyError:
- self.errExit("Error updating client: %s" % sys.exc_info()[1])
- elif args[0] in ['delete', 'remove', 'del', 'rm']:
- try:
- self.metadata.remove_client(args[1])
- except MetadataConsistencyError:
- self.errExit("Error deleting client: %s" %
- sys.exc_info()[1])
- elif args[0] in ['list', 'ls']:
- for client in self.metadata.list_clients():
- print(client)
- else:
- self.errExit("No command specified")
diff --git a/src/lib/Bcfg2/Server/Admin/Compare.py b/src/lib/Bcfg2/Server/Admin/Compare.py
deleted file mode 100644
index d7285284a..000000000
--- a/src/lib/Bcfg2/Server/Admin/Compare.py
+++ /dev/null
@@ -1,147 +0,0 @@
-import lxml.etree
-import os
-import Bcfg2.Server.Admin
-
-
-class Compare(Bcfg2.Server.Admin.Mode):
- """ Determine differences between files or directories of client
- specification instances """
- __usage__ = ("<old> <new>\n\n"
- " -r\trecursive")
-
- def __init__(self, setup):
- Bcfg2.Server.Admin.Mode.__init__(self, setup)
- self.important = {'Path': ['name', 'type', 'owner', 'group', 'mode',
- 'important', 'paranoid', 'sensitive',
- 'dev_type', 'major', 'minor', 'prune',
- 'encoding', 'empty', 'to', 'recursive',
- 'vcstype', 'sourceurl', 'revision',
- 'secontext'],
- 'Package': ['name', 'type', 'version', 'simplefile',
- 'verify'],
- 'Service': ['name', 'type', 'status', 'mode',
- 'target', 'sequence', 'parameters'],
- 'Action': ['name', 'timing', 'when', 'status',
- 'command'],
- 'PostInstall': ['name']
- }
-
- def compareStructures(self, new, old):
- if new.tag == 'Independent':
- bundle = 'Base'
- else:
- bundle = new.get('name')
-
- identical = True
-
- for child in new.getchildren():
- if child.tag not in self.important:
- print(" %s in (new) bundle %s:\n tag type not handled!" %
- (child.tag, bundle))
- continue
- equiv = old.xpath('%s[@name="%s"]' %
- (child.tag, child.get('name')))
- if len(equiv) == 0:
- print(" %s %s in bundle %s:\n only in new configuration" %
- (child.tag, child.get('name'), bundle))
- identical = False
- continue
- diff = []
- if child.tag == 'Path' and child.get('type') == 'file' and \
- child.text != equiv[0].text:
- diff.append('contents')
- attrdiff = [field for field in self.important[child.tag] if \
- child.get(field) != equiv[0].get(field)]
- if attrdiff:
- diff.append('attributes (%s)' % ', '.join(attrdiff))
- if diff:
- print(" %s %s in bundle %s:\n %s differ" % (child.tag, \
- child.get('name'), bundle, ' and '.join(diff)))
- identical = False
-
- for child in old.getchildren():
- if child.tag not in self.important:
- print(" %s in (old) bundle %s:\n tag type not handled!" %
- (child.tag, bundle))
- elif len(new.xpath('%s[@name="%s"]' %
- (child.tag, child.get('name')))) == 0:
- print(" %s %s in bundle %s:\n only in old configuration" %
- (child.tag, child.get('name'), bundle))
- identical = False
-
- return identical
-
- def compareSpecifications(self, path1, path2):
- try:
- new = lxml.etree.parse(path1).getroot()
- except IOError:
- print("Failed to read %s" % (path1))
- raise SystemExit(1)
-
- try:
- old = lxml.etree.parse(path2).getroot()
- except IOError:
- print("Failed to read %s" % (path2))
- raise SystemExit(1)
-
- for src in [new, old]:
- for bundle in src.findall('./Bundle'):
- if bundle.get('name')[-4:] == '.xml':
- bundle.set('name', bundle.get('name')[:-4])
-
- identical = True
-
- for bundle in old.findall('./Bundle'):
- if len(new.xpath('Bundle[@name="%s"]' % (bundle.get('name')))) == 0:
- print(" Bundle %s only in old configuration" %
- bundle.get('name'))
- identical = False
- for bundle in new.findall('./Bundle'):
- equiv = old.xpath('Bundle[@name="%s"]' % (bundle.get('name')))
- if len(equiv) == 0:
- print(" Bundle %s only in new configuration" %
- bundle.get('name'))
- identical = False
- elif not self.compareStructures(bundle, equiv[0]):
- identical = False
-
- i1 = lxml.etree.Element('Independent')
- i2 = lxml.etree.Element('Independent')
- i1.extend(new.findall('./Independent/*'))
- i2.extend(old.findall('./Independent/*'))
- if not self.compareStructures(i1, i2):
- identical = False
-
- return identical
-
- def __call__(self, args):
- if len(args) == 0:
- self.errExit("No argument specified.\n"
- "Please see bcfg2-admin compare help for usage.")
- if '-r' in args:
- args = list(args)
- args.remove('-r')
- (oldd, newd) = args
- (old, new) = [os.listdir(spot) for spot in args]
- old_extra = []
- for item in old:
- if item not in new:
- old_extra.append(item)
- continue
- print("File: %s" % item)
- state = self.__call__([oldd + '/' + item, newd + '/' + item])
- new.remove(item)
- if state:
- print("File %s is good" % item)
- else:
- print("File %s is bad" % item)
- if new:
- print("%s has extra files: %s" % (newd, ', '.join(new)))
- if old_extra:
- print("%s has extra files: %s" % (oldd, ', '.join(old_extra)))
- return
- try:
- (old, new) = args
- return self.compareSpecifications(new, old)
- except IndexError:
- self.errExit(self.__call__.__doc__)
diff --git a/src/lib/Bcfg2/Server/Admin/Init.py b/src/lib/Bcfg2/Server/Admin/Init.py
deleted file mode 100644
index fdab5abca..000000000
--- a/src/lib/Bcfg2/Server/Admin/Init.py
+++ /dev/null
@@ -1,349 +0,0 @@
-""" Interactively initialize a new repository. """
-
-import os
-import sys
-import stat
-import select
-import random
-import socket
-import string
-import getpass
-import subprocess
-
-import Bcfg2.Server.Admin
-import Bcfg2.Server.Plugin
-import Bcfg2.Options
-import Bcfg2.Server.Plugins.Metadata
-from Bcfg2.Compat import input # pylint: disable=W0622
-
-# default config file
-CONFIG = '''[server]
-repository = %s
-plugins = %s
-# Uncomment the following to listen on all interfaces
-#listen_all = true
-
-[statistics]
-sendmailpath = %s
-#web_debug = False
-#time_zone =
-
-[database]
-#engine = sqlite3
-# 'postgresql', 'mysql', 'mysql_old', 'sqlite3' or 'ado_mssql'.
-#name =
-# Or path to database file if using sqlite3.
-#<repository>/etc/bcfg2.sqlite is default path if left empty
-#user =
-# Not used with sqlite3.
-#password =
-# Not used with sqlite3.
-#host =
-# Not used with sqlite3.
-#port =
-
-[reporting]
-transport = LocalFilesystem
-
-[communication]
-protocol = %s
-password = %s
-certificate = %s
-key = %s
-ca = %s
-
-[components]
-bcfg2 = %s
-'''
-
-# Default groups
-GROUPS = '''<Groups version='3.0'>
- <Group profile='true' public='true' default='true' name='basic'>
- <Group name='%s'/>
- </Group>
- <Group name='ubuntu'/>
- <Group name='debian'/>
- <Group name='freebsd'/>
- <Group name='gentoo'/>
- <Group name='redhat'/>
- <Group name='suse'/>
- <Group name='mandrake'/>
- <Group name='solaris'/>
- <Group name='arch'/>
-</Groups>
-'''
-
-# Default contents of clients.xml
-CLIENTS = '''<Clients version="3.0">
- <Client profile="basic" name="%s"/>
-</Clients>
-'''
-
-# Mapping of operating system names to groups
-OS_LIST = [('Red Hat/Fedora/RHEL/RHAS/CentOS', 'redhat'),
- ('SUSE/SLES', 'suse'),
- ('Mandrake', 'mandrake'),
- ('Debian', 'debian'),
- ('Ubuntu', 'ubuntu'),
- ('Gentoo', 'gentoo'),
- ('FreeBSD', 'freebsd'),
- ('Arch', 'arch')]
-
-
-def safe_input(prompt):
- """ input() that flushes the input buffer before accepting input """
- # flush input buffer
- while len(select.select([sys.stdin.fileno()], [], [], 0.0)[0]) > 0:
- os.read(sys.stdin.fileno(), 4096)
- return input(prompt)
-
-
-def gen_password(length):
- """Generates a random alphanumeric password with length characters."""
- chars = string.letters + string.digits
- return "".join(random.choice(chars) for i in range(length))
-
-
-def create_key(hostname, keypath, certpath, country, state, location):
- """Creates a bcfg2.key at the directory specifed by keypath."""
- kcstr = ("openssl req -batch -x509 -nodes -subj '/C=%s/ST=%s/L=%s/CN=%s' "
- "-days 1000 -newkey rsa:2048 -keyout %s -noout" % (country,
- state,
- location,
- hostname,
- keypath))
- subprocess.call((kcstr), shell=True)
- ccstr = ("openssl req -batch -new -subj '/C=%s/ST=%s/L=%s/CN=%s' -key %s "
- "| openssl x509 -req -days 1000 -signkey %s -out %s" % (country,
- state,
- location,
- hostname,
- keypath,
- keypath,
- certpath))
- subprocess.call((ccstr), shell=True)
- os.chmod(keypath, stat.S_IRUSR | stat.S_IWUSR) # 0600
-
-
-def create_conf(confpath, confdata):
- """ create the config file """
- # Don't overwrite existing bcfg2.conf file
- if os.path.exists(confpath):
- result = safe_input("\nWarning: %s already exists. "
- "Overwrite? [y/N]: " % confpath)
- if result not in ['Y', 'y']:
- print("Leaving %s unchanged" % confpath)
- return
- try:
- open(confpath, "w").write(confdata)
- os.chmod(confpath, stat.S_IRUSR | stat.S_IWUSR) # 0600
- except Exception:
- err = sys.exc_info()[1]
- print("Error trying to write configuration file '%s': %s" %
- (confpath, err))
- raise SystemExit(1)
-
-
-class Init(Bcfg2.Server.Admin.Mode):
- """Interactively initialize a new repository."""
- options = {'configfile': Bcfg2.Options.CFILE,
- 'plugins': Bcfg2.Options.SERVER_PLUGINS,
- 'proto': Bcfg2.Options.SERVER_PROTOCOL,
- 'repo': Bcfg2.Options.SERVER_REPOSITORY,
- 'sendmail': Bcfg2.Options.SENDMAIL_PATH}
-
- def __init__(self, setup):
- Bcfg2.Server.Admin.Mode.__init__(self, setup)
- self.data = dict()
- self.plugins = Bcfg2.Options.SERVER_PLUGINS.default
-
- def _set_defaults(self, opts):
- """Set default parameters."""
- self.data['configfile'] = opts['configfile']
- self.data['repopath'] = opts['repo']
- self.data['password'] = gen_password(8)
- self.data['server_uri'] = "https://%s:6789" % socket.getfqdn()
- self.data['sendmail'] = opts['sendmail']
- self.data['proto'] = opts['proto']
- if os.path.exists("/etc/pki/tls"):
- self.data['keypath'] = "/etc/pki/tls/private/bcfg2.key"
- self.data['certpath'] = "/etc/pki/tls/certs/bcfg2.crt"
- elif os.path.exists("/etc/ssl"):
- self.data['keypath'] = "/etc/ssl/bcfg2.key"
- self.data['certpath'] = "/etc/ssl/bcfg2.crt"
- else:
- basepath = os.path.dirname(self.configfile)
- self.data['keypath'] = os.path.join(basepath, "bcfg2.key")
- self.data['certpath'] = os.path.join(basepath, 'bcfg2.crt')
-
- def __call__(self, args):
- # Parse options
- opts = Bcfg2.Options.OptionParser(self.options)
- opts.parse(args)
- self._set_defaults(opts)
-
- # Prompt the user for input
- self._prompt_config()
- self._prompt_repopath()
- self._prompt_password()
- self._prompt_hostname()
- self._prompt_server()
- self._prompt_groups()
- self._prompt_keypath()
- self._prompt_certificate()
-
- # Initialize the repository
- self.init_repo()
-
- def _prompt_hostname(self):
- """Ask for the server hostname."""
- data = safe_input("What is the server's hostname [%s]: " %
- socket.getfqdn())
- if data != '':
- self.data['shostname'] = data
- else:
- self.data['shostname'] = socket.getfqdn()
-
- def _prompt_config(self):
- """Ask for the configuration file path."""
- newconfig = safe_input("Store Bcfg2 configuration in [%s]: " %
- self.configfile)
- if newconfig != '':
- self.data['configfile'] = os.path.abspath(newconfig)
-
- def _prompt_repopath(self):
- """Ask for the repository path."""
- while True:
- newrepo = safe_input("Location of Bcfg2 repository [%s]: " %
- self.data['repopath'])
- if newrepo != '':
- self.data['repopath'] = os.path.abspath(newrepo)
- if os.path.isdir(self.data['repopath']):
- response = safe_input("Directory %s exists. Overwrite? [y/N]:"
- % self.data['repopath'])
- if response.lower().strip() == 'y':
- break
- else:
- break
-
- def _prompt_password(self):
- """Ask for a password or generate one if none is provided."""
- newpassword = getpass.getpass(
- "Input password used for communication verification "
- "(without echoing; leave blank for a random): ").strip()
- if len(newpassword) != 0:
- self.data['password'] = newpassword
-
- def _prompt_server(self):
- """Ask for the server name."""
- newserver = safe_input(
- "Input the server location (the server listens on a single "
- "interface by default) [%s]: " % self.data['server_uri'])
- if newserver != '':
- self.data['server_uri'] = newserver
-
- def _prompt_groups(self):
- """Create the groups.xml file."""
- prompt = '''Input base Operating System for clients:\n'''
- for entry in OS_LIST:
- prompt += "%d: %s\n" % (OS_LIST.index(entry) + 1, entry[0])
- prompt += ': '
- while True:
- try:
- osidx = int(safe_input(prompt))
- self.data['os_sel'] = OS_LIST[osidx - 1][1]
- break
- except ValueError:
- continue
-
- def _prompt_certificate(self):
- """Ask for the key details (country, state, and location)."""
- print("The following questions affect SSL certificate generation.")
- print("If no data is provided, the default values are used.")
- newcountry = safe_input("Country name (2 letter code) for "
- "certificate: ")
- if newcountry != '':
- if len(newcountry) == 2:
- self.data['country'] = newcountry
- else:
- while len(newcountry) != 2:
- newcountry = safe_input("2 letter country code (eg. US): ")
- if len(newcountry) == 2:
- self.data['country'] = newcountry
- break
- else:
- self.data['country'] = 'US'
-
- newstate = safe_input("State or Province Name (full name) for "
- "certificate: ")
- if newstate != '':
- self.data['state'] = newstate
- else:
- self.data['state'] = 'Illinois'
-
- newlocation = safe_input("Locality Name (eg, city) for certificate: ")
- if newlocation != '':
- self.data['location'] = newlocation
- else:
- self.data['location'] = 'Argonne'
-
- def _prompt_keypath(self):
- """ Ask for the key pair location. Try to use sensible
- defaults depending on the OS """
- keypath = safe_input("Path where Bcfg2 server private key will be "
- "created [%s]: " % self.data['keypath'])
- if keypath:
- self.data['keypath'] = keypath
- certpath = safe_input("Path where Bcfg2 server cert will be created "
- "[%s]: " % self.data['certpath'])
- if certpath:
- self.data['certpath'] = certpath
-
- def _init_plugins(self):
- """Initialize each plugin-specific portion of the repository."""
- for plugin in self.plugins:
- if plugin == 'Metadata':
- Bcfg2.Server.Plugins.Metadata.Metadata.init_repo(
- self.data['repopath'],
- groups_xml=GROUPS % self.data['os_sel'],
- clients_xml=CLIENTS % socket.getfqdn())
- else:
- try:
- module = __import__("Bcfg2.Server.Plugins.%s" % plugin, '',
- '', ["Bcfg2.Server.Plugins"])
- cls = getattr(module, plugin)
- cls.init_repo(self.data['repopath'])
- except: # pylint: disable=W0702
- err = sys.exc_info()[1]
- print("Plugin setup for %s failed: %s\n"
- "Check that dependencies are installed" % (plugin,
- err))
-
- def init_repo(self):
- """Setup a new repo and create the content of the
- configuration file."""
- # Create the repository
- path = os.path.join(self.data['repopath'], 'etc')
- try:
- os.makedirs(path)
- self._init_plugins()
- print("Repository created successfuly in %s" %
- self.data['repopath'])
- except OSError:
- print("Failed to create %s." % path)
-
- confdata = CONFIG % (self.data['repopath'],
- ','.join(self.plugins),
- self.data['sendmail'],
- self.data['proto'],
- self.data['password'],
- self.data['certpath'],
- self.data['keypath'],
- self.data['certpath'],
- self.data['server_uri'])
-
- # Create the configuration file and SSL key
- create_conf(self.data['configfile'], confdata)
- create_key(self.data['shostname'], self.data['keypath'],
- self.data['certpath'], self.data['country'],
- self.data['state'], self.data['location'])
diff --git a/src/lib/Bcfg2/Server/Admin/Minestruct.py b/src/lib/Bcfg2/Server/Admin/Minestruct.py
deleted file mode 100644
index 65f99a213..000000000
--- a/src/lib/Bcfg2/Server/Admin/Minestruct.py
+++ /dev/null
@@ -1,56 +0,0 @@
-""" Extract extra entry lists from statistics """
-import getopt
-import lxml.etree
-import sys
-import Bcfg2.Server.Admin
-from Bcfg2.Server.Plugin import PullSource
-
-
-class Minestruct(Bcfg2.Server.Admin.StructureMode):
- """ Extract extra entry lists from statistics """
- __usage__ = ("[options] <client>\n\n"
- " %-25s%s\n"
- " %-25s%s\n" %
- ("-f <filename>", "build a particular file",
- "-g <groups>", "only build config for groups"))
-
- def __call__(self, args):
- if len(args) == 0:
- self.errExit("No argument specified.\n"
- "Please see bcfg2-admin minestruct help for usage.")
- try:
- (opts, args) = getopt.getopt(args, 'f:g:h')
- except getopt.GetoptError:
- self.errExit(self.__doc__)
-
- client = args[0]
- output = sys.stdout
- groups = []
-
- for (opt, optarg) in opts:
- if opt == '-f':
- try:
- output = open(optarg, 'w')
- except IOError:
- self.errExit("Failed to open file: %s" % (optarg))
- elif opt == '-g':
- groups = optarg.split(':')
-
- try:
- extra = set()
- for source in self.bcore.plugins_by_type(PullSource):
- for item in source.GetExtra(client):
- extra.add(item)
- except: # pylint: disable=W0702
- self.errExit("Failed to find extra entry info for client %s" %
- client)
- root = lxml.etree.Element("Base")
- self.log.info("Found %d extra entries" % (len(extra)))
- add_point = root
- for grp in groups:
- add_point = lxml.etree.SubElement(add_point, "Group", name=grp)
- for tag, name in extra:
- self.log.info("%s: %s" % (tag, name))
- lxml.etree.SubElement(add_point, tag, name=name)
-
- lxml.etree.ElementTree(root).write(output, pretty_print=True)
diff --git a/src/lib/Bcfg2/Server/Admin/Perf.py b/src/lib/Bcfg2/Server/Admin/Perf.py
deleted file mode 100644
index f6bc22959..000000000
--- a/src/lib/Bcfg2/Server/Admin/Perf.py
+++ /dev/null
@@ -1,37 +0,0 @@
-""" Get performance data from server """
-
-import sys
-import Bcfg2.Options
-import Bcfg2.Proxy
-import Bcfg2.Server.Admin
-
-
-class Perf(Bcfg2.Server.Admin.Mode):
- """ Get performance data from server """
-
- def __call__(self, args):
- output = [('Name', 'Min', 'Max', 'Mean', 'Count')]
- optinfo = {
- 'ca': Bcfg2.Options.CLIENT_CA,
- 'certificate': Bcfg2.Options.CLIENT_CERT,
- 'key': Bcfg2.Options.SERVER_KEY,
- 'password': Bcfg2.Options.SERVER_PASSWORD,
- 'server': Bcfg2.Options.SERVER_LOCATION,
- 'user': Bcfg2.Options.CLIENT_USER,
- 'timeout': Bcfg2.Options.CLIENT_TIMEOUT}
- setup = Bcfg2.Options.OptionParser(optinfo)
- setup.parse(sys.argv[1:])
- proxy = Bcfg2.Proxy.ComponentProxy(setup['server'],
- setup['user'],
- setup['password'],
- key=setup['key'],
- cert=setup['certificate'],
- ca=setup['ca'],
- timeout=setup['timeout'])
- data = proxy.get_statistics()
- for key in sorted(data.keys()):
- output.append(
- (key, ) +
- tuple(["%.06f" % item
- for item in data[key][:-1]] + [data[key][-1]]))
- self.print_table(output)
diff --git a/src/lib/Bcfg2/Server/Admin/Pull.py b/src/lib/Bcfg2/Server/Admin/Pull.py
deleted file mode 100644
index fccdb2d94..000000000
--- a/src/lib/Bcfg2/Server/Admin/Pull.py
+++ /dev/null
@@ -1,147 +0,0 @@
-""" Retrieves entries from clients and integrates the information into
-the repository """
-
-import os
-import sys
-import getopt
-import select
-import Bcfg2.Server.Admin
-from Bcfg2.Server.Plugin import PullSource, Generator
-from Bcfg2.Compat import input # pylint: disable=W0622
-
-
-class Pull(Bcfg2.Server.Admin.MetadataCore):
- """ Retrieves entries from clients and integrates the information
- into the repository """
- __usage__ = ("[options] <client> <entry type> <entry name>\n\n"
- " %-25s%s\n"
- " %-25s%s\n"
- " %-25s%s\n"
- " %-25s%s\n" %
- ("-v", "be verbose",
- "-f", "force",
- "-I", "interactive",
- "-s", "stdin"))
-
- def __init__(self, setup):
- Bcfg2.Server.Admin.MetadataCore.__init__(self, setup)
- self.log = False
- self.mode = 'interactive'
-
- def __call__(self, args):
- use_stdin = False
- try:
- opts, gargs = getopt.getopt(args, 'vfIs')
- except getopt.GetoptError:
- self.errExit(self.__doc__)
- for opt in opts:
- if opt[0] == '-v':
- self.log = True
- elif opt[0] == '-f':
- self.mode = 'force'
- elif opt[0] == '-I':
- self.mode = 'interactive'
- elif opt[0] == '-s':
- use_stdin = True
-
- if use_stdin:
- for line in sys.stdin:
- try:
- self.PullEntry(*line.split(None, 3))
- except SystemExit:
- print(" for %s" % line)
- except:
- print("Bad entry: %s" % line.strip())
- elif len(gargs) < 3:
- self.usage()
- else:
- self.PullEntry(gargs[0], gargs[1], gargs[2])
-
- def BuildNewEntry(self, client, etype, ename):
- """Construct a new full entry for
- given client/entry from statistics.
- """
- new_entry = {'type': etype, 'name': ename}
- pull_sources = self.bcore.plugins_by_type(PullSource)
- for plugin in pull_sources:
- try:
- (owner, group, mode, contents) = \
- plugin.GetCurrentEntry(client, etype, ename)
- break
- except Bcfg2.Server.Plugin.PluginExecutionError:
- if plugin == pull_sources[-1]:
- print("Pull Source failure; could not fetch current state")
- raise SystemExit(1)
-
- try:
- data = {'owner': owner,
- 'group': group,
- 'mode': mode,
- 'text': contents}
- except UnboundLocalError:
- print("Unable to build entry. "
- "Do you have a statistics plugin enabled?")
- raise SystemExit(1)
- for key, val in list(data.items()):
- if val:
- new_entry[key] = val
- return new_entry
-
- def Choose(self, choices):
- """Determine where to put pull data."""
- if self.mode == 'interactive':
- for choice in choices:
- print("Plugin returned choice:")
- if id(choice) == id(choices[0]):
- print("(current entry) ")
- if choice.all:
- print(" => global entry")
- elif choice.group:
- print(" => group entry: %s (prio %d)" %
- (choice.group, choice.prio))
- else:
- print(" => host entry: %s" % (choice.hostname))
-
- # flush input buffer
- while len(select.select([sys.stdin.fileno()], [], [],
- 0.0)[0]) > 0:
- os.read(sys.stdin.fileno(), 4096)
- ans = input("Use this entry? [yN]: ") in ['y', 'Y']
- if ans:
- return choice
- return False
- else:
- # mode == 'force'
- if not choices:
- return False
- return choices[0]
-
- def PullEntry(self, client, etype, ename):
- """Make currently recorded client state correct for entry."""
- new_entry = self.BuildNewEntry(client, etype, ename)
-
- meta = self.bcore.build_metadata(client)
- # Find appropriate plugin in bcore
- glist = [gen for gen in self.bcore.plugins_by_type(Generator)
- if ename in gen.Entries.get(etype, {})]
- if len(glist) != 1:
- self.errExit("Got wrong numbers of matching generators for entry:"
- "%s" % ([g.name for g in glist]))
- plugin = glist[0]
- if not isinstance(plugin, Bcfg2.Server.Plugin.PullTarget):
- self.errExit("Configuration upload not supported by plugin %s" %
- plugin.name)
- try:
- choices = plugin.AcceptChoices(new_entry, meta)
- specific = self.Choose(choices)
- if specific:
- plugin.AcceptPullData(specific, new_entry, self.log)
- except Bcfg2.Server.Plugin.PluginExecutionError:
- self.errExit("Configuration upload not supported by plugin %s" %
- plugin.name)
- # Commit if running under a VCS
- for vcsplugin in list(self.bcore.plugins.values()):
- if isinstance(vcsplugin, Bcfg2.Server.Plugin.Version):
- files = "%s/%s" % (plugin.data, ename)
- comment = 'file "%s" pulled from host %s' % (files, client)
- vcsplugin.commit_data([files], comment)
diff --git a/src/lib/Bcfg2/Server/Admin/Reports.py b/src/lib/Bcfg2/Server/Admin/Reports.py
deleted file mode 100644
index eb97123f7..000000000
--- a/src/lib/Bcfg2/Server/Admin/Reports.py
+++ /dev/null
@@ -1,262 +0,0 @@
-'''Admin interface for dynamic reports'''
-import Bcfg2.Logger
-import Bcfg2.Server.Admin
-import datetime
-import os
-import sys
-import traceback
-from Bcfg2 import settings
-
-# Load django and reports stuff _after_ we know we can load settings
-from django.core import management
-from Bcfg2.Reporting.utils import *
-
-project_directory = os.path.dirname(settings.__file__)
-project_name = os.path.basename(project_directory)
-sys.path.append(os.path.join(project_directory, '..'))
-project_module = __import__(project_name, '', '', [''])
-sys.path.pop()
-
-# Set DJANGO_SETTINGS_MODULE appropriately.
-os.environ['DJANGO_SETTINGS_MODULE'] = '%s.settings' % project_name
-
-from Bcfg2.Reporting.models import Client, Interaction, \
- Performance, Bundle, Group, FailureEntry, PathEntry, \
- PackageEntry, ServiceEntry, ActionEntry
-from Bcfg2.Reporting.Compat import transaction
-
-
-def printStats(fn):
- """
- Print db stats.
-
- Decorator for purging. Prints database statistics after a run.
- """
- def print_stats(self, *data):
- classes = (Client, Interaction, Performance, \
- FailureEntry, ActionEntry, PathEntry, PackageEntry, \
- ServiceEntry, Group, Bundle)
-
- starts = {}
- for cls in classes:
- starts[cls] = cls.objects.count()
-
- fn(self, *data)
-
- for cls in classes:
- print("%s removed: %s" % (cls().__class__.__name__,
- starts[cls] - cls.objects.count()))
-
- return print_stats
-
-
-class Reports(Bcfg2.Server.Admin.Mode):
- """ Manage dynamic reports """
- django_commands = ['dbshell', 'shell', 'sqlall', 'validate']
- __usage__ = ("[command] [options]\n"
- " Commands:\n"
- " init Initialize the database\n"
- " purge Purge records\n"
- " --client [n] Client to operate on\n"
- " --days [n] Records older then n days\n"
- " --expired Expired clients only\n"
- " scrub Scrub the database for duplicate "
- "reasons and orphaned entries\n"
- " stats print database statistics\n"
- " update Apply any updates to the reporting "
- "database\n"
- "\n"
- " Django commands:\n " \
- + "\n ".join(django_commands))
-
- def __init__(self, setup):
- Bcfg2.Server.Admin.Mode.__init__(self, setup)
- try:
- import south
- except ImportError:
- print("Django south is required for Reporting")
- raise SystemExit(-3)
-
- def __call__(self, args):
- if len(args) == 0 or args[0] == '-h':
- self.errExit(self.__usage__)
-
- # FIXME - dry run
-
- if args[0] in self.django_commands:
- self.django_command_proxy(args[0])
- elif args[0] == 'scrub':
- self.scrub()
- elif args[0] == 'stats':
- self.stats()
- elif args[0] in ['init', 'update', 'syncdb']:
- if self.setup['debug']:
- vrb = 2
- elif self.setup['verbose']:
- vrb = 1
- else:
- vrb = 0
- try:
- management.call_command("syncdb", verbosity=vrb)
- management.call_command("migrate", verbosity=vrb)
- except:
- self.errExit("Update failed: %s" % sys.exc_info()[1])
- elif args[0] == 'purge':
- expired = False
- client = None
- maxdate = None
- state = None
- i = 1
- while i < len(args):
- if args[i] == '-c' or args[i] == '--client':
- if client:
- self.errExit("Only one client per run")
- client = args[i + 1]
- print(client)
- i = i + 1
- elif args[i] == '--days':
- if maxdate:
- self.errExit("Max date specified multiple times")
- try:
- maxdate = datetime.datetime.now() - \
- datetime.timedelta(days=int(args[i + 1]))
- except:
- self.errExit("Invalid number of days: %s" %
- args[i + 1])
- i = i + 1
- elif args[i] == '--expired':
- expired = True
- i = i + 1
- if expired:
- if state:
- self.errExit("--state is not valid with --expired")
- self.purge_expired(maxdate)
- else:
- self.purge(client, maxdate, state)
- else:
- self.errExit("Unknown command: %s" % args[0])
-
- @transaction.atomic
- def scrub(self):
- ''' Perform a thorough scrub and cleanup of the database '''
-
- # Cleanup unused entries
- for cls in (Group, Bundle, FailureEntry, ActionEntry, PathEntry,
- PackageEntry, PathEntry):
- try:
- start_count = cls.objects.count()
- cls.prune_orphans()
- self.log.info("Pruned %d %s records" % \
- (start_count - cls.objects.count(), cls.__class__.__name__))
- except:
- print("Failed to prune %s: %s" %
- (cls.__class__.__name__, sys.exc_info()[1]))
-
- def django_command_proxy(self, command):
- '''Call a django command'''
- if command == 'sqlall':
- management.call_command(command, 'Reporting')
- else:
- management.call_command(command)
-
- @printStats
- def purge(self, client=None, maxdate=None, state=None):
- '''Purge historical data from the database'''
-
- filtered = False # indicates whether or not a client should be deleted
-
- if not client and not maxdate and not state:
- self.errExit("Reports.prune: Refusing to prune all data")
-
- ipurge = Interaction.objects
- if client:
- try:
- cobj = Client.objects.get(name=client)
- ipurge = ipurge.filter(client=cobj)
- except Client.DoesNotExist:
- self.errExit("Client %s not in database" % client)
- self.log.debug("Filtering by client: %s" % client)
-
- if maxdate:
- filtered = True
- if not isinstance(maxdate, datetime.datetime):
- raise TypeError("maxdate is not a DateTime object")
- self.log.debug("Filtering by maxdate: %s" % maxdate)
- ipurge = ipurge.filter(timestamp__lt=maxdate)
-
- if settings.DATABASES['default']['ENGINE'] == \
- 'django.db.backends.sqlite3':
- grp_limit = 100
- else:
- grp_limit = 1000
- if state:
- filtered = True
- if state not in ('dirty', 'clean', 'modified'):
- raise TypeError("state is not one of the following values: "
- "dirty, clean, modified")
- self.log.debug("Filtering by state: %s" % state)
- ipurge = ipurge.filter(state=state)
-
- count = ipurge.count()
- rnum = 0
- try:
- while rnum < count:
- grp = list(ipurge[:grp_limit].values("id"))
- # just in case...
- if not grp:
- break
- Interaction.objects.filter(id__in=[x['id']
- for x in grp]).delete()
- rnum += len(grp)
- self.log.debug("Deleted %s of %s" % (rnum, count))
- except:
- self.log.error("Failed to remove interactions")
- (a, b, c) = sys.exc_info()
- msg = traceback.format_exception(a, b, c, limit=2)[-1][:-1]
- del a, b, c
- self.log.error(msg)
-
- # Prune any orphaned ManyToMany relations
- for m2m in (ActionEntry, PackageEntry, PathEntry, ServiceEntry, \
- FailureEntry, Group, Bundle):
- self.log.debug("Pruning any orphaned %s objects" % \
- m2m().__class__.__name__)
- m2m.prune_orphans()
-
- if client and not filtered:
- # Delete the client, ping data is automatic
- try:
- self.log.debug("Purging client %s" % client)
- cobj.delete()
- except:
- self.log.error("Failed to delete client %s" % client)
- (a, b, c) = sys.exc_info()
- msg = traceback.format_exception(a, b, c, limit=2)[-1][:-1]
- del a, b, c
- self.log.error(msg)
-
- @printStats
- def purge_expired(self, maxdate=None):
- '''Purge expired clients from the database'''
-
- if maxdate:
- if not isinstance(maxdate, datetime.datetime):
- raise TypeError("maxdate is not a DateTime object")
- self.log.debug("Filtering by maxdate: %s" % maxdate)
- clients = Client.objects.filter(expiration__lt=maxdate)
- else:
- clients = Client.objects.filter(expiration__isnull=False)
-
- for client in clients:
- self.log.debug("Purging client %s" % client)
- Interaction.objects.filter(client=client).delete()
- client.delete()
-
- def stats(self):
- classes = (Client, Interaction, Performance, \
- FailureEntry, ActionEntry, PathEntry, PackageEntry, \
- ServiceEntry, Group, Bundle)
-
- for cls in classes:
- print("%s has %s records" % (cls().__class__.__name__,
- cls.objects.count()))
diff --git a/src/lib/Bcfg2/Server/Admin/Snapshots.py b/src/lib/Bcfg2/Server/Admin/Snapshots.py
deleted file mode 100644
index fcb240352..000000000
--- a/src/lib/Bcfg2/Server/Admin/Snapshots.py
+++ /dev/null
@@ -1,162 +0,0 @@
-from datetime import date
-import sys
-
-# Prereq issues can be signaled with ImportError, so no try needed
-import sqlalchemy, sqlalchemy.orm
-import Bcfg2.Server.Admin
-import Bcfg2.Server.Snapshots
-import Bcfg2.Server.Snapshots.model
-from Bcfg2.Server.Snapshots.model import Snapshot, Client, Metadata, Base, \
- File, Group, Package, Service
-# Compatibility import
-from Bcfg2.Compat import u_str
-
-class Snapshots(Bcfg2.Server.Admin.Mode):
- """ Interact with the Snapshots system """
- __usage__ = "[init|query qtype]"
-
- q_dispatch = {'client': Client,
- 'group': Group,
- 'metadata': Metadata,
- 'package': Package,
- 'snapshot': Snapshot}
-
- def __init__(self, setup):
- Bcfg2.Server.Admin.Mode.__init__(self, setup)
- self.session = Bcfg2.Server.Snapshots.setup_session(self.configfile)
- self.cfile = self.configfile
-
- def __call__(self, args):
- if len(args) == 0 or args[0] == '-h':
- print(self.__usage__)
- raise SystemExit(0)
-
- if args[0] == 'query':
- if args[1] in self.q_dispatch:
- q_obj = self.q_dispatch[args[1]]
- if q_obj == Client:
- rows = []
- labels = ('Client', 'Active')
- for host in \
- self.session.query(q_obj).filter(q_obj.active == False):
- rows.append([host.name, 'No'])
- for host in \
- self.session.query(q_obj).filter(q_obj.active == True):
- rows.append([host.name, 'Yes'])
- self.print_table([labels]+rows,
- justify='left',
- hdr=True,
- vdelim=" ",
- padding=1)
- elif q_obj == Group:
- print("Groups:")
- for group in self.session.query(q_obj).all():
- print(" %s" % group.name)
- else:
- results = self.session.query(q_obj).all()
- else:
- print('error')
- raise SystemExit(1)
- elif args[0] == 'init':
- # Initialize the Snapshots database
- dbpath = Bcfg2.Server.Snapshots.db_from_config(self.cfile)
- engine = sqlalchemy.create_engine(dbpath, echo=True)
- metadata = Base.metadata
- metadata.create_all(engine)
- Session = sqlalchemy.orm.sessionmaker()
- Session.configure(bind=engine)
- session = Session()
- session.commit()
- elif args[0] == 'dump':
- client = args[1]
- snap = Snapshot.get_current(self.session, u_str(client))
- if not snap:
- print("Current snapshot for %s not found" % client)
- sys.exit(1)
- print("Client %s last run at %s" % (client, snap.timestamp))
- for pkg in snap.packages:
- print("C:", pkg.correct, 'M:', pkg.modified)
- print("start", pkg.start.name, pkg.start.version)
- print("end", pkg.end.name, pkg.end.version)
- elif args[0] == 'reports':
- # bcfg2-admin reporting interface for Snapshots
- if '-a' in args[1:]:
- # Query all hosts for Name, Status, Revision, Timestamp
- q = self.session.query(Client.name,
- Snapshot.correct,
- Snapshot.revision,
- Snapshot.timestamp)\
- .filter(Client.id==Snapshot.client_id)\
- .group_by(Client.id)
- rows = []
- labels = ('Client', 'Correct', 'Revision', 'Time')
- for item in q.all():
- cli, cor, time, rev = item
- rows.append([cli, cor, time, rev])
- self.print_table([labels]+rows,
- justify='left',
- hdr=True, vdelim=" ",
- padding=1)
- elif '-b' in args[1:]:
- # Query a single host for bad entries
- if len(args) < 3:
- print("Usage: bcfg2-admin snapshots -b <client>")
- return
- client = args[2]
- snap = Snapshot.get_current(self.session, u_str(client))
- if not snap:
- print("Current snapshot for %s not found" % client)
- sys.exit(1)
- print("Bad entries:")
- bad_pkgs = [self.session.query(Package)
- .filter(Package.id==p.start_id).one().name \
- for p in snap.packages if p.correct == False]
- for p in bad_pkgs:
- print(" Package:%s" % p)
- bad_files = [self.session.query(File)
- .filter(File.id==f.start_id).one().name \
- for f in snap.files if f.correct == False]
- for filename in bad_files:
- print(" File:%s" % filename)
- bad_svcs = [self.session.query(Service)
- .filter(Service.id==s.start_id).one().name \
- for s in snap.services if s.correct == False]
- for svc in bad_svcs:
- print(" Service:%s" % svc)
- elif '-e' in args[1:]:
- # Query a single host for extra entries
- client = args[2]
- snap = Snapshot.get_current(self.session, u_str(client))
- if not snap:
- print("Current snapshot for %s not found" % client)
- sys.exit(1)
- print("Extra entries:")
- for pkg in snap.extra_packages:
- print(" Package:%s" % pkg.name)
- # FIXME: Do we know about extra files yet?
- for f in snap.extra_files:
- print(" File:%s" % f.name)
- for svc in snap.extra_services:
- print(" Service:%s" % svc.name)
- elif '--date' in args[1:]:
- year, month, day = args[2:]
- timestamp = date(int(year), int(month), int(day))
- snaps = []
- for client in self.session.query(Client).filter(Client.active == True):
- snaps.append(Snapshot.get_by_date(self.session,
- client.name,
- timestamp))
- rows = []
- labels = ('Client', 'Correct', 'Revision', 'Time')
- for snap in snaps:
- rows.append([snap.client.name,
- snap.correct,
- snap.revision,
- snap.timestamp])
- self.print_table([labels]+rows,
- justify='left',
- hdr=True,
- vdelim=" ",
- padding=1)
- else:
- print("Unknown options: ", args[1:])
diff --git a/src/lib/Bcfg2/Server/Admin/Syncdb.py b/src/lib/Bcfg2/Server/Admin/Syncdb.py
deleted file mode 100644
index eb417966d..000000000
--- a/src/lib/Bcfg2/Server/Admin/Syncdb.py
+++ /dev/null
@@ -1,29 +0,0 @@
-import sys
-import Bcfg2.settings
-import Bcfg2.Options
-import Bcfg2.Server.Admin
-import Bcfg2.Server.models
-from django.core.exceptions import ImproperlyConfigured
-from django.core.management import setup_environ, call_command
-
-
-class Syncdb(Bcfg2.Server.Admin.Mode):
- """ Sync the Django ORM with the configured database """
- options = {'configfile': Bcfg2.Options.WEB_CFILE}
-
- def __call__(self, args):
- # Parse options
- opts = Bcfg2.Options.OptionParser(self.options)
- opts.parse(args)
-
- setup_environ(Bcfg2.settings)
- Bcfg2.Server.models.load_models(cfile=opts['configfile'])
-
- try:
- call_command("syncdb", interactive=False, verbosity=0)
- self._database_available = True
- except ImproperlyConfigured:
- self.errExit("Django configuration problem: %s" %
- sys.exc_info()[1])
- except:
- self.errExit("Database update failed: %s" % sys.exc_info()[1])
diff --git a/src/lib/Bcfg2/Server/Admin/Viz.py b/src/lib/Bcfg2/Server/Admin/Viz.py
deleted file mode 100644
index cb2470e17..000000000
--- a/src/lib/Bcfg2/Server/Admin/Viz.py
+++ /dev/null
@@ -1,113 +0,0 @@
-""" Produce graphviz diagrams of metadata structures """
-
-import getopt
-from subprocess import Popen, PIPE
-import pipes
-import Bcfg2.Server.Admin
-
-
-class Viz(Bcfg2.Server.Admin.MetadataCore):
- """ Produce graphviz diagrams of metadata structures """
- __usage__ = ("[options]\n\n"
- " %-32s%s\n"
- " %-32s%s\n"
- " %-32s%s\n"
- " %-32s%s\n"
- " %-32s%s\n" %
- ("-H, --includehosts",
- "include hosts in the viz output",
- "-b, --includebundles",
- "include bundles in the viz output",
- "-k, --includekey",
- "show a key for different digraph shapes",
- "-c, --only-client <clientname>",
- "show only the groups, bundles for the named client",
- "-o, --outfile <file>",
- "write viz output to an output file"))
-
- colors = ['steelblue1', 'chartreuse', 'gold', 'magenta',
- 'indianred1', 'limegreen', 'orange1', 'lightblue2',
- 'green1', 'blue1', 'yellow1', 'darkturquoise', 'gray66']
-
- __plugin_blacklist__ = ['DBStats', 'Snapshots', 'Cfg', 'Pkgmgr',
- 'Packages', 'Rules', 'Account', 'Decisions',
- 'Deps', 'Git', 'Svn', 'Fossil', 'Bzr', 'Bundler',
- 'TGenshi', 'Base']
-
- def __call__(self, args):
- # First get options to the 'viz' subcommand
- try:
- opts, args = getopt.getopt(args, 'Hbkc:o:',
- ['includehosts', 'includebundles',
- 'includekey', 'only-client=',
- 'outfile='])
- except getopt.GetoptError:
- self.usage()
-
- hset = False
- bset = False
- kset = False
- only_client = None
- outputfile = False
- for opt, arg in opts:
- if opt in ("-H", "--includehosts"):
- hset = True
- elif opt in ("-b", "--includebundles"):
- bset = True
- elif opt in ("-k", "--includekey"):
- kset = True
- elif opt in ("-c", "--only-client"):
- only_client = arg
- elif opt in ("-o", "--outfile"):
- outputfile = arg
-
- data = self.Visualize(hset, bset, kset, only_client, outputfile)
- if data:
- print(data)
-
- def Visualize(self, hosts=False, bundles=False, key=False,
- only_client=None, output=None):
- """Build visualization of groups file."""
- if output:
- fmt = output.split('.')[-1]
- else:
- fmt = 'png'
-
- cmd = ["dot", "-T", fmt]
- if output:
- cmd.extend(["-o", output])
- try:
- dotpipe = Popen(cmd, stdin=PIPE, stdout=PIPE, close_fds=True)
- except OSError:
- # on some systems (RHEL 6), you cannot run dot with
- # shell=True. on others (Gentoo with Python 2.7), you
- # must. In yet others (RHEL 5), either way works. I have
- # no idea what the difference is, but it's kind of a PITA.
- cmd = ["dot", "-T", pipes.quote(fmt)]
- if output:
- cmd.extend(["-o", pipes.quote(output)])
- dotpipe = Popen(cmd, shell=True,
- stdin=PIPE, stdout=PIPE, close_fds=True)
- try:
- dotpipe.stdin.write("digraph groups {\n")
- except:
- print("write to dot process failed. Is graphviz installed?")
- raise SystemExit(1)
- dotpipe.stdin.write('\trankdir="LR";\n')
- dotpipe.stdin.write(self.metadata.viz(hosts, bundles,
- key, only_client, self.colors))
- if key:
- dotpipe.stdin.write("\tsubgraph cluster_key {\n")
- dotpipe.stdin.write('\tstyle="filled";\n')
- dotpipe.stdin.write('\tcolor="lightblue";\n')
- dotpipe.stdin.write('\tBundle [ shape="septagon" ];\n')
- dotpipe.stdin.write('\tGroup [shape="ellipse"];\n')
- dotpipe.stdin.write('\tGroup Category [shape="trapezium"];\n')
- dotpipe.stdin.write('\tProfile [style="bold", shape="ellipse"];\n')
- dotpipe.stdin.write('\tHblock [label="Host1|Host2|Host3", '
- 'shape="record"];\n')
- dotpipe.stdin.write('\tlabel="Key";\n')
- dotpipe.stdin.write("\t}\n")
- dotpipe.stdin.write("}\n")
- dotpipe.stdin.close()
- return dotpipe.stdout.read()
diff --git a/src/lib/Bcfg2/Server/Admin/Xcmd.py b/src/lib/Bcfg2/Server/Admin/Xcmd.py
deleted file mode 100644
index 036129a1b..000000000
--- a/src/lib/Bcfg2/Server/Admin/Xcmd.py
+++ /dev/null
@@ -1,44 +0,0 @@
-""" XML-RPC Command Interface for bcfg2-admin"""
-
-import sys
-import Bcfg2.Options
-import Bcfg2.Proxy
-import Bcfg2.Server.Admin
-
-
-class Xcmd(Bcfg2.Server.Admin.Mode):
- """ XML-RPC Command Interface """
- __usage__ = "<command>"
-
- def __call__(self, args):
- optinfo = {
- 'server': Bcfg2.Options.SERVER_LOCATION,
- 'user': Bcfg2.Options.CLIENT_USER,
- 'password': Bcfg2.Options.SERVER_PASSWORD,
- 'key': Bcfg2.Options.SERVER_KEY,
- 'certificate': Bcfg2.Options.CLIENT_CERT,
- 'ca': Bcfg2.Options.CLIENT_CA,
- 'timeout': Bcfg2.Options.CLIENT_TIMEOUT}
- setup = Bcfg2.Options.OptionParser(optinfo)
- setup.parse(args)
- Bcfg2.Proxy.RetryMethod.max_retries = 1
- proxy = Bcfg2.Proxy.ComponentProxy(setup['server'],
- setup['user'],
- setup['password'],
- key=setup['key'],
- cert=setup['certificate'],
- ca=setup['ca'],
- timeout=setup['timeout'])
- if len(setup['args']) == 0:
- self.errExit("Usage: xcmd <xmlrpc method> <optional arguments>")
- cmd = setup['args'][0]
- args = ()
- if len(setup['args']) > 1:
- args = tuple(setup['args'][1:])
- try:
- data = getattr(proxy, cmd)(*args)
- except Bcfg2.Proxy.ProxyError:
- self.errExit("Proxy Error: %s" % sys.exc_info()[1])
-
- if data is not None:
- print(data)
diff --git a/src/lib/Bcfg2/Server/Admin/__init__.py b/src/lib/Bcfg2/Server/Admin/__init__.py
deleted file mode 100644
index ef5b2a08c..000000000
--- a/src/lib/Bcfg2/Server/Admin/__init__.py
+++ /dev/null
@@ -1,142 +0,0 @@
-""" Base classes for admin modes """
-
-import re
-import sys
-import logging
-import lxml.etree
-import Bcfg2.Server.Core
-import Bcfg2.Options
-from Bcfg2.Compat import ConfigParser, walk_packages
-
-__all__ = [m[1] for m in walk_packages(path=__path__)]
-
-
-class Mode(object):
- """ Base object for admin modes. Docstrings are used as help
- messages, so if you are seeing this, a help message has not yet
- been added for this mode. """
- __usage__ = None
- __args__ = []
-
- def __init__(self, setup):
- self.setup = setup
- self.configfile = setup['configfile']
- self.__cfp = False
- self.log = logging.getLogger('Bcfg2.Server.Admin.Mode')
- usage = "bcfg2-admin %s" % self.__class__.__name__.lower()
- if self.__usage__ is not None:
- usage += " " + self.__usage__
- setup.hm = usage
-
- def getCFP(self):
- """ get a config parser for the Bcfg2 config file """
- if not self.__cfp:
- self.__cfp = ConfigParser.ConfigParser()
- self.__cfp.read(self.configfile)
- return self.__cfp
-
- cfp = property(getCFP)
-
- def __call__(self, args):
- raise NotImplementedError
-
- @classmethod
- def usage(cls, rv=1):
- """ Exit with a long usage message """
- print(re.sub(r'\s{2,}', ' ', cls.__doc__.strip()))
- print("")
- print("Usage:")
- usage = "bcfg2-admin %s" % cls.__name__.lower()
- if cls.__usage__ is not None:
- usage += " " + cls.__usage__
- print(" %s" % usage)
- raise SystemExit(rv)
-
- def shutdown(self):
- """ Perform any necessary shtudown tasks for this mode """
- pass
-
- def errExit(self, emsg):
- """ exit with an error """
- sys.stderr.write('%s\n' % emsg)
- raise SystemExit(1)
-
- def load_stats(self, client):
- """ Load static statistics from the repository """
- stats = lxml.etree.parse("%s/etc/statistics.xml" % self.setup['repo'])
- hostent = stats.xpath('//Node[@name="%s"]' % client)
- if not hostent:
- self.errExit("Could not find stats for client %s" % (client))
- return hostent[0]
-
- def print_table(self, rows, justify='left', hdr=True, vdelim=" ",
- padding=1):
- """Pretty print a table
-
- rows - list of rows ([[row 1], [row 2], ..., [row n]])
- hdr - if True the first row is treated as a table header
- vdelim - vertical delimiter between columns
- padding - # of spaces around the longest element in the column
- justify - may be left,center,right
-
- """
- hdelim = "="
- justify = {'left': str.ljust,
- 'center': str.center,
- 'right': str.rjust}[justify.lower()]
-
- # Calculate column widths (longest item in each column
- # plus padding on both sides)
- cols = list(zip(*rows))
- col_widths = [max([len(str(item)) + 2 * padding
- for item in col]) for col in cols]
- borderline = vdelim.join([w * hdelim for w in col_widths])
-
- # Print out the table
- print(borderline)
- for row in rows:
- print(vdelim.join([justify(str(item), width)
- for (item, width) in zip(row, col_widths)]))
- if hdr:
- print(borderline)
- hdr = False
-
-
-# pylint wants MetadataCore and StructureMode to be concrete classes
-# and implement __call__, but they aren't and they don't, so we
-# disable that warning
-# pylint: disable=W0223
-
-class MetadataCore(Mode):
- """Base class for admin-modes that handle metadata."""
- __plugin_whitelist__ = None
- __plugin_blacklist__ = None
-
- def __init__(self, setup):
- Mode.__init__(self, setup)
- if self.__plugin_whitelist__ is not None:
- setup['plugins'] = [p for p in setup['plugins']
- if p in self.__plugin_whitelist__]
- elif self.__plugin_blacklist__ is not None:
- setup['plugins'] = [p for p in setup['plugins']
- if p not in self.__plugin_blacklist__]
-
- # admin modes don't need to watch for changes. one shot is fine here.
- setup['filemonitor'] = 'pseudo'
- try:
- self.bcore = Bcfg2.Server.Core.BaseCore(setup)
- except Bcfg2.Server.Core.CoreInitError:
- msg = sys.exc_info()[1]
- self.errExit("Core load failed: %s" % msg)
- self.bcore.load_plugins()
- self.bcore.fam.handle_event_set()
- self.metadata = self.bcore.metadata
-
- def shutdown(self):
- if hasattr(self, 'bcore'):
- self.bcore.shutdown()
-
-
-class StructureMode(MetadataCore): # pylint: disable=W0223
- """ Base class for admin modes that handle structure plugins """
- pass
diff --git a/src/lib/Bcfg2/Server/BuiltinCore.py b/src/lib/Bcfg2/Server/BuiltinCore.py
index 29beb35d5..e138c57e4 100644
--- a/src/lib/Bcfg2/Server/BuiltinCore.py
+++ b/src/lib/Bcfg2/Server/BuiltinCore.py
@@ -5,10 +5,11 @@ import sys
import time
import socket
import daemon
-import Bcfg2.Statistics
-from Bcfg2.Server.Core import BaseCore, NoExposedMethod
+import Bcfg2.Options
+import Bcfg2.Server.Statistics
+from Bcfg2.Server.Core import NetworkCore, NoExposedMethod
from Bcfg2.Compat import xmlrpclib, urlparse
-from Bcfg2.SSLServer import XMLRPCServer
+from Bcfg2.Server.SSLServer import XMLRPCServer
from lockfile import LockFailed, LockTimeout
# pylint: disable=E0611
@@ -19,29 +20,29 @@ except ImportError:
# pylint: enable=E0611
-class Core(BaseCore):
+class BuiltinCore(NetworkCore):
""" The built-in server core """
name = 'bcfg2-server'
- def __init__(self, setup):
- BaseCore.__init__(self, setup)
+ def __init__(self):
+ NetworkCore.__init__(self)
- #: The :class:`Bcfg2.SSLServer.XMLRPCServer` instance powering
- #: this server core
+ #: The :class:`Bcfg2.Server.SSLServer.XMLRPCServer` instance
+ #: powering this server core
self.server = None
- daemon_args = dict(uid=self.setup['daemon_uid'],
- gid=self.setup['daemon_gid'],
- umask=int(self.setup['umask'], 8),
+ daemon_args = dict(uid=Bcfg2.Options.setup.daemon_uid,
+ gid=Bcfg2.Options.setup.daemon_gid,
+ umask=int(Bcfg2.Options.setup.umask, 8),
detach_process=True)
- if self.setup['daemon']:
- daemon_args['pidfile'] = TimeoutPIDLockFile(self.setup['daemon'],
- acquire_timeout=5)
+ if Bcfg2.Options.setup.daemon:
+ daemon_args['pidfile'] = TimeoutPIDLockFile(
+ Bcfg2.Options.setup.daemon, acquire_timeout=5)
#: The :class:`daemon.DaemonContext` used to drop
#: privileges, write the PID file (with :class:`PidFile`),
#: and daemonize this core.
self.context = daemon.DaemonContext(**daemon_args)
- __init__.__doc__ = BaseCore.__init__.__doc__.split('.. -----')[0]
+ __init__.__doc__ = NetworkCore.__init__.__doc__.split('.. -----')[0]
def _dispatch(self, method, args, dispatch_dict):
""" Dispatch XML-RPC method calls
@@ -71,8 +72,9 @@ class Core(BaseCore):
try:
return method_func(*args)
finally:
- Bcfg2.Statistics.stats.add_value(method,
- time.time() - method_start)
+ Bcfg2.Server.Statistics.stats.add_value(
+ method,
+ time.time() - method_start)
except xmlrpclib.Fault:
raise
except Exception:
@@ -100,7 +102,7 @@ class Core(BaseCore):
err = sys.exc_info()[1]
self.logger.error("Failed to daemonize %s: Failed to acquire"
"lock on %s" % (self.name,
- self.setup['daemon']))
+ Bcfg2.Options.setup.daemon))
return False
else:
self.context.pidfile.release()
@@ -111,19 +113,19 @@ class Core(BaseCore):
def _run(self):
""" Create :attr:`server` to start the server listening. """
- hostname, port = urlparse(self.setup['location'])[1].split(':')
+ hostname, port = urlparse(Bcfg2.Options.setup.server)[1].split(':')
server_address = socket.getaddrinfo(hostname,
port,
socket.AF_UNSPEC,
socket.SOCK_STREAM)[0][4]
try:
- self.server = XMLRPCServer(self.setup['listen_all'],
+ self.server = XMLRPCServer(Bcfg2.Options.setup.listen_all,
server_address,
- keyfile=self.setup['key'],
- certfile=self.setup['cert'],
+ keyfile=Bcfg2.Options.setup.key,
+ certfile=Bcfg2.Options.setup.cert,
register=False,
- ca=self.setup['ca'],
- protocol=self.setup['protocol'])
+ ca=Bcfg2.Options.setup.ca,
+ protocol=Bcfg2.Options.setup.protocol)
except: # pylint: disable=W0702
err = sys.exc_info()[1]
self.logger.error("Server startup failed: %s" % err)
diff --git a/src/lib/Bcfg2/Server/Cache.py b/src/lib/Bcfg2/Server/Cache.py
new file mode 100644
index 000000000..d05eb0bf6
--- /dev/null
+++ b/src/lib/Bcfg2/Server/Cache.py
@@ -0,0 +1,180 @@
+""" ``Bcfg2.Server.Cache`` is an implementation of a simple
+memory-backed cache. Right now this doesn't provide many features, but
+more (time-based expiration, etc.) can be added as necessary.
+
+The normal workflow is to get a Cache object, which is simply a dict
+interface to the unified cache that automatically uses a certain tag
+set. For instance:
+
+.. code-block:: python
+
+ groupcache = Bcfg2.Server.Cache.Cache("Probes", "probegroups")
+ groupcache['foo.example.com'] = ['group1', 'group2']
+
+This would create a Cache object that automatically tags its entries
+with ``frozenset(["Probes", "probegroups"])``, and store the list
+``['group1', 'group2']`` with the *additional* tag
+``foo.example.com``. So the unified backend cache would then contain
+a single entry:
+
+.. code-block:: python
+
+ {frozenset(["Probes", "probegroups", "foo.example.com"]):
+ ['group1', 'group2']}
+
+In addition to the dict interface, Cache objects (returned from
+:func:`Bcfg2.Server.Cache.Cache`) have one additional method,
+``expire()``, which is mostly identical to
+:func:`Bcfg2.Server.Cache.expire`, except that it is specific to the
+tag set of the cache object. E.g., to expire all ``foo.example.com``
+records for a given cache, you could do:
+
+.. code-block:: python
+
+ groupcache = Bcfg2.Server.Cache.Cache("Probes", "probegroups")
+ groupcache.expire("foo.example.com")
+
+This is mostly functionally identical to:
+
+.. code-block:: python
+
+ Bcfg2.Server.Cache.expire("Probes", "probegroups", "foo.example.com")
+
+It's not completely identical, though; the first example will expire,
+at most, exactly one item from the cache. The second example will
+expire all items that are tagged with a superset of the given tags.
+To illustrate the difference, consider the following two examples:
+
+.. code-block:: python
+
+ groupcache = Bcfg2.Server.Cache.Cache("Probes")
+ groupcache.expire("probegroups")
+
+ Bcfg2.Server.Cache.expire("Probes", "probegroups")
+
+The former will not expire any data, because there is no single datum
+tagged with ``"Probes", "probegroups"``. The latter will expire *all*
+items tagged with ``"Probes", "probegroups"`` -- i.e., the entire
+cache. In this case, the latter call is equivalent to:
+
+.. code-block:: python
+
+ groupcache = Bcfg2.Server.Cache.Cache("Probes", "probegroups")
+ groupcache.expire()
+
+"""
+
+from Bcfg2.Compat import MutableMapping
+
+
+class _Cache(MutableMapping):
+    """ The object returned by :func:`Bcfg2.Server.Cache.Cache` that
+    presents a dict-like interface to the portion of the unified cache
+    that uses the specified tags. """
+    def __init__(self, registry, tags):
+        self._registry = registry  # the shared _CacheRegistry backend
+        self._tags = tags  # frozenset of tags implied on every key
+
+    def __getitem__(self, key):
+        return self._registry[self._tags | set([key])]
+
+    def __setitem__(self, key, value):
+        self._registry[self._tags | set([key])] = value
+
+    def __delitem__(self, key):
+        del self._registry[self._tags | set([key])]
+
+    def __iter__(self):
+        for item in self._registry.iterate(*self._tags):
+            yield list(item.difference(self._tags))[0]  # the one extra tag
+
+    def keys(self):
+        """ List cache keys """
+        return list(iter(self))
+
+    def __len__(self):
+        return len(list(iter(self)))
+
+    def expire(self, key=None):
+        """ expire all items, or a specific item, from the cache """
+        if key is None:
+            expire(*self._tags)
+        else:
+            tags = self._tags | set([key])
+            # py 2.5 doesn't support mixing *args and explicit keyword
+            # args
+            kwargs = dict(exact=True)
+            expire(*tags, **kwargs)
+
+    def __repr__(self):
+        return repr(dict(self))
+
+    def __str__(self):
+        return str(dict(self))
+
+
+class _CacheRegistry(dict):
+    """ The grand unified cache backend which contains all cache
+    items. """
+
+    def iterate(self, *tags):
+        """ Iterate over all items that match the given tags *and*
+        have exactly one additional tag. This is used to get items
+        for :class:`Bcfg2.Server.Cache._Cache` objects that have been
+        instantiated via :func:`Bcfg2.Server.Cache.Cache`. """
+        tags = frozenset(tags)
+        for key in self.keys():
+            if key.issuperset(tags) and len(key.difference(tags)) == 1:
+                yield key
+
+    def iter_all(self, *tags):
+        """ Iterate over all items that match the given tags,
+        regardless of how many additional tags they have (or don't
+        have). This is used to expire all cache data that matches a
+        set of tags. """
+        tags = frozenset(tags)
+        for key in list(self.keys()):  # snapshot: callers delete keys mid-loop
+            if key.issuperset(tags):
+                yield key
+
+
+_cache = _CacheRegistry() # pylint: disable=C0103
+_hooks = [] # pylint: disable=C0103
+
+
+def Cache(*tags): # pylint: disable=C0103
+    """ Return a dict-like view of the cache data tagged with the
+    given tags. """
+    return _Cache(_cache, frozenset(tags))
+
+
+def expire(*tags, **kwargs):
+    """ Expire all items, a set of items, or one specific item from
+    the cache. If ``exact`` is set to True, then if the given tag set
+    doesn't match exactly one item in the cache, nothing will be
+    expired. """
+    exact = kwargs.pop("exact", False)
+    count = 0  # number of items removed; reported to expiration hooks
+    if not tags:
+        count = len(_cache)
+        _cache.clear()
+    elif exact:
+        if frozenset(tags) in _cache:
+            count = 1
+            del _cache[frozenset(tags)]
+    else:
+        for match in _cache.iter_all(*tags):
+            count += 1
+            del _cache[match]
+
+    for hook in _hooks:
+        hook(tags, exact, count)
+
+
+def add_expire_hook(func):
+    """ Add a hook that will be called when an item is expired from
+    the cache. The callable passed in must take three arguments: the
+    first will be the tag set that was expired; the second will be the
+    state of the ``exact`` flag (True or False); and the third will be
+    the number of items that were expired from the cache. """
+    _hooks.append(func)
diff --git a/src/lib/Bcfg2/Server/CherryPyCore.py b/src/lib/Bcfg2/Server/CherrypyCore.py
index c1581679c..05c6c5a94 100644
--- a/src/lib/Bcfg2/Server/CherryPyCore.py
+++ b/src/lib/Bcfg2/Server/CherrypyCore.py
@@ -3,9 +3,9 @@ server. """
import sys
import time
-import Bcfg2.Statistics
+import Bcfg2.Server.Statistics
from Bcfg2.Compat import urlparse, xmlrpclib, b64decode
-from Bcfg2.Server.Core import BaseCore
+from Bcfg2.Server.Core import NetworkCore
import cherrypy
from cherrypy.lib import xmlrpcutil
from cherrypy._cptools import ErrorTool
@@ -27,7 +27,7 @@ def on_error(*args, **kwargs): # pylint: disable=W0613
cherrypy.tools.xmlrpc_error = ErrorTool(on_error)
-class Core(BaseCore):
+class CherrypyCore(NetworkCore):
""" The CherryPy-based server core. """
#: Base CherryPy config for this class. We enable the
@@ -36,8 +36,8 @@ class Core(BaseCore):
_cp_config = {'tools.xmlrpc_error.on': True,
'tools.bcfg2_authn.on': True}
- def __init__(self, setup):
- BaseCore.__init__(self, setup)
+ def __init__(self):
+ NetworkCore.__init__(self)
cherrypy.tools.bcfg2_authn = cherrypy.Tool('on_start_resource',
self.do_authn)
@@ -45,11 +45,11 @@ class Core(BaseCore):
#: List of exposed plugin RMI
self.rmi = self._get_rmi()
cherrypy.engine.subscribe('stop', self.shutdown)
- __init__.__doc__ = BaseCore.__init__.__doc__.split('.. -----')[0]
+ __init__.__doc__ = NetworkCore.__init__.__doc__.split('.. -----')[0]
def do_authn(self):
""" Perform authentication by calling
- :func:`Bcfg2.Server.Core.BaseCore.authenticate`. This is
+ :func:`Bcfg2.Server.Core.NetworkCore.authenticate`. This is
implemented as a CherryPy tool."""
try:
header = cherrypy.request.headers['Authorization']
@@ -65,8 +65,15 @@ class Core(BaseCore):
# FIXME: Get client cert
cert = None
- address = (cherrypy.request.remote.ip, cherrypy.request.remote.name)
- return self.authenticate(cert, username, password, address)
+ address = (cherrypy.request.remote.ip, cherrypy.request.remote.port)
+
+ rpcmethod = xmlrpcutil.process_body()[1]
+ if rpcmethod == 'ERRORMETHOD':
+ raise Exception("Unknown error processing XML-RPC request body")
+
+ if (not self.check_acls(address[0], rpcmethod) or
+ not self.authenticate(cert, username, password, address)):
+ raise cherrypy.HTTPError(401)
@cherrypy.expose
def default(self, *args, **params): # pylint: disable=W0613
@@ -96,8 +103,8 @@ class Core(BaseCore):
try:
body = handler(*rpcparams, **params)
finally:
- Bcfg2.Statistics.stats.add_value(rpcmethod,
- time.time() - method_start)
+ Bcfg2.Server.Statistics.stats.add_value(rpcmethod,
+ time.time() - method_start)
xmlrpcutil.respond(body, 'utf-8', True)
return cherrypy.serving.response.body
@@ -108,40 +115,40 @@ class Core(BaseCore):
PID file with :class:`cherrypy.process.plugins.PIDFile`. """
self._drop_privileges()
Daemonizer(cherrypy.engine).subscribe()
- PIDFile(cherrypy.engine, self.setup['daemon']).subscribe()
+ PIDFile(cherrypy.engine, Bcfg2.Options.setup.daemon).subscribe()
return True
def _drop_privileges(self):
""" Drop privileges with
:class:`cherrypy.process.plugins.DropPrivileges` """
DropPrivileges(cherrypy.engine,
- uid=self.setup['daemon_uid'],
- gid=self.setup['daemon_gid'],
- umask=int(self.setup['umask'], 8)).subscribe()
+ uid=Bcfg2.Options.setup.daemon_uid,
+ gid=Bcfg2.Options.setup.daemon_gid,
+ umask=int(Bcfg2.Options.setup.umask, 8)).subscribe()
def _run(self):
""" Start the server listening. """
- hostname, port = urlparse(self.setup['location'])[1].split(':')
- if self.setup['listen_all']:
+ hostname, port = urlparse(Bcfg2.Options.setup.server)[1].split(':')
+ if Bcfg2.Options.setup.listen_all:
hostname = '0.0.0.0'
config = {'engine.autoreload.on': False,
'server.socket_port': int(port),
'server.socket_host': hostname}
- if self.setup['cert'] and self.setup['key']:
+ if Bcfg2.Options.setup.cert and Bcfg2.Options.setup.key:
config.update({'server.ssl_module': 'pyopenssl',
- 'server.ssl_certificate': self.setup['cert'],
- 'server.ssl_private_key': self.setup['key']})
- if self.setup['debug']:
+ 'server.ssl_certificate': Bcfg2.Options.setup.cert,
+ 'server.ssl_private_key': Bcfg2.Options.setup.key})
+ if Bcfg2.Options.setup.debug:
config['log.screen'] = True
cherrypy.config.update(config)
- cherrypy.tree.mount(self, '/', {'/': self.setup})
+ cherrypy.tree.mount(self, '/', {'/': Bcfg2.Options.setup})
cherrypy.engine.start()
return True
def _block(self):
""" Enter the blocking infinite server
- loop. :func:`Bcfg2.Server.Core.BaseCore.shutdown` is called on
+ loop. :func:`Bcfg2.Server.Core.NetworkCore.shutdown` is called on
exit by a :meth:`subscription
<cherrypy.process.wspbus.Bus.subscribe>` on the top-level
CherryPy engine."""
diff --git a/src/lib/Bcfg2/Server/Core.py b/src/lib/Bcfg2/Server/Core.py
index 0369da8f2..dc9c91556 100644
--- a/src/lib/Bcfg2/Server/Core.py
+++ b/src/lib/Bcfg2/Server/Core.py
@@ -12,17 +12,25 @@ import time
import inspect
import lxml.etree
import daemon
-import Bcfg2.settings
import Bcfg2.Server
import Bcfg2.Logger
+import Bcfg2.Options
+import Bcfg2.DBSettings
+import Bcfg2.Server.Statistics
import Bcfg2.Server.FileMonitor
-from Bcfg2.Cache import Cache
-import Bcfg2.Statistics
from itertools import chain
+from Bcfg2.Server.Cache import Cache
from Bcfg2.Compat import xmlrpclib, wraps # pylint: disable=W0622
from Bcfg2.Server.Plugin.exceptions import * # pylint: disable=W0401,W0614
from Bcfg2.Server.Plugin.interfaces import * # pylint: disable=W0401,W0614
-from Bcfg2.Server.Plugin import track_statistics
+from Bcfg2.Server.Statistics import track_statistics
+
+try:
+ from django.core.exceptions import ImproperlyConfigured
+ import django.conf
+ HAS_DJANGO = True
+except ImportError:
+ HAS_DJANGO = False
try:
import psyco
@@ -30,8 +38,6 @@ try:
except ImportError:
pass
-os.environ['DJANGO_SETTINGS_MODULE'] = 'Bcfg2.settings'
-
def exposed(func):
""" Decorator that sets the ``exposed`` attribute of a function to
@@ -96,47 +102,65 @@ class NoExposedMethod (Exception):
method exposed with the given name. """
-# pylint: disable=W0702
+class DefaultACL(Plugin, ClientACLs):
+ """ Default ACL 'plugin' that provides security by default. This
+ is only loaded if no other ClientACLs plugin is enabled. """
+ create = False
+
+ def __init__(self, core):
+ Bcfg2.Server.Plugin.Plugin.__init__(self, core)
+ Bcfg2.Server.Plugin.ClientACLs.__init__(self)
+
+ def check_acl_ip(self, address, rmi):
+ return (("." not in rmi and
+ not rmi.endswith("_debug") and
+ rmi != 'get_statistics') or
+ address[0] == "127.0.0.1")
+
# in core we frequently want to catch all exceptions, regardless of
# type, so disable the pylint rule that catches that.
+# pylint: disable=W0702
-class BaseCore(object):
+class Core(object):
""" The server core is the container for all Bcfg2 server logic
and modules. All core implementations must inherit from
- ``BaseCore``. """
- name = "core"
-
- def __init__(self, setup): # pylint: disable=R0912,R0915
+ ``Core``. """
+
+ options = [
+ Bcfg2.Options.Common.plugins,
+ Bcfg2.Options.Common.repository,
+ Bcfg2.Options.Common.filemonitor,
+ Bcfg2.Options.BooleanOption(
+ "--no-fam-blocking", cf=('server', 'fam_blocking'),
+ dest="fam_blocking", default=True,
+ help='FAM blocks on startup until all events are processed'),
+ Bcfg2.Options.BooleanOption(
+ cf=('logging', 'performance'), dest="perflog",
+ help="Periodically log performance statistics"),
+ Bcfg2.Options.Option(
+ cf=('logging', 'performance_interval'), default=300.0,
+ type=Bcfg2.Options.Types.timeout,
+ help="Performance statistics logging interval in seconds"),
+ Bcfg2.Options.Option(
+ cf=('caching', 'client_metadata'), dest='client_metadata_cache',
+ default='off',
+ choices=['off', 'on', 'initial', 'cautious', 'aggressive'])]
+
+ #: The name of this server core. This can be overridden by core
+ #: implementations to provide a more specific name.
+ name = "Core"
+
+ def __init__(self): # pylint: disable=R0912,R0915
"""
- :param setup: A Bcfg2 options dict
- :type setup: Bcfg2.Options.OptionParser
-
- .. automethod:: _daemonize
- .. automethod:: _drop_privileges
.. automethod:: _run
.. automethod:: _block
.. -----
.. automethod:: _file_monitor_thread
.. automethod:: _perflog_thread
"""
- #: The Bcfg2 repository directory
- self.datastore = setup['repo']
-
- if setup['verbose']:
- level = logging.INFO
- else:
- level = logging.WARNING
- # we set a higher log level for the console by default. we
- # assume that if someone is running bcfg2-server in such a way
- # that it _can_ log to console, they want more output. if
- # level is set to DEBUG, that will get handled by
- # setup_logging and the console will get DEBUG output.
- Bcfg2.Logger.setup_logging('bcfg2-server',
- to_console=logging.INFO,
- to_syslog=setup['syslog'],
- to_file=setup['logging'],
- level=level)
+ # Record the core as a module variable
+ Bcfg2.Server.core = self
#: A :class:`logging.Logger` object for use by the core
self.logger = logging.getLogger('bcfg2-server')
@@ -148,43 +172,32 @@ class BaseCore(object):
#: special, and will be used for any log handlers whose name
#: does not appear elsewhere in the dict. At a minimum,
#: ``default`` must be provided.
- self._loglevels = {True: dict(default=logging.DEBUG),
- False: dict(console=logging.INFO,
- default=level)}
+ self._loglevels = {
+ True: dict(default=logging.DEBUG),
+ False: dict(console=logging.INFO,
+ default=Bcfg2.Logger.default_log_level())}
#: Used to keep track of the current debug state of the core.
self.debug_flag = False
# enable debugging on the core now. debugging is enabled on
# everything else later
- if setup['debug']:
- self.set_core_debug(None, setup['debug'])
-
- try:
- filemonitor = \
- Bcfg2.Server.FileMonitor.available[setup['filemonitor']]
- except KeyError:
- self.logger.error("File monitor driver %s not available; "
- "forcing to default" % setup['filemonitor'])
- filemonitor = Bcfg2.Server.FileMonitor.available['default']
- famargs = dict(ignore=[], debug=False)
- if 'ignore' in setup:
- famargs['ignore'] = setup['ignore']
- if 'debug' in setup:
- famargs['debug'] = setup['debug']
+ if Bcfg2.Options.setup.debug:
+ self.set_core_debug(None, Bcfg2.Options.setup.debug)
try:
#: The :class:`Bcfg2.Server.FileMonitor.FileMonitor`
#: object used by the core to monitor for Bcfg2 data
#: changes.
- self.fam = filemonitor(**famargs)
+ self.fam = Bcfg2.Server.FileMonitor.get_fam()
except IOError:
- msg = "Failed to instantiate fam driver %s" % setup['filemonitor']
+ msg = "Failed to instantiate fam driver %s" % \
+ Bcfg2.Options.setup.filemonitor
self.logger.error(msg, exc_info=1)
raise CoreInitError(msg)
#: Path to bcfg2.conf
- self.cfile = setup['configfile']
+ self.cfile = Bcfg2.Options.setup.config
#: Dict of plugins that are enabled. Keys are the plugin
#: names (just the plugin name, in the correct case; e.g.,
@@ -205,9 +218,6 @@ class BaseCore(object):
#: :class:`Bcfg2.Server.Plugin.interfaces.Version` plugin.
self.revision = '-1'
- #: The Bcfg2 options dict
- self.setup = setup
-
atexit.register(self.shutdown)
#: if :func:`Bcfg2.Server.Core.shutdown` is called explicitly,
#: then :mod:`atexit` calls it *again*, so it gets called
@@ -223,65 +233,21 @@ class BaseCore(object):
#: RLock to be held on writes to the backend db
self.db_write_lock = threading.RLock()
- # generate Django ORM settings. this must be done _before_ we
- # load plugins
- Bcfg2.settings.read_config(repo=self.datastore)
-
# mapping of group name => plugin name to record where groups
# that are created by Connector plugins came from
self._dynamic_groups = dict()
- #: Whether or not it's possible to use the Django database
- #: backend for plugins that have that capability
- self._database_available = False
- if Bcfg2.settings.HAS_DJANGO:
- db_settings = Bcfg2.settings.DATABASES['default']
- if ('daemon' in self.setup and 'daemon_uid' in self.setup and
- self.setup['daemon'] and self.setup['daemon_uid'] and
- db_settings['ENGINE'].endswith(".sqlite3") and
- not os.path.exists(db_settings['NAME'])):
- # syncdb will create the sqlite database, and we're
- # going to daemonize, dropping privs to a non-root
- # user, so we need to chown the database after
- # creating it
- do_chown = True
- else:
- do_chown = False
-
- from django.core.exceptions import ImproperlyConfigured
- from django.core import management
- try:
- management.call_command("syncdb", interactive=False,
- verbosity=0)
- self._database_available = True
- except ImproperlyConfigured:
- self.logger.error("Django configuration problem: %s" %
- sys.exc_info()[1])
- except:
- self.logger.error("Database update failed: %s" %
- sys.exc_info()[1])
-
- if do_chown and self._database_available:
- try:
- os.chown(db_settings['NAME'],
- self.setup['daemon_uid'],
- self.setup['daemon_gid'])
- except OSError:
- err = sys.exc_info()[1]
- self.logger.error("Failed to set ownership of database "
- "at %s: %s" % (db_settings['NAME'], err))
-
- #: The CA that signed the server cert
- self.ca = setup['ca']
-
#: The FAM :class:`threading.Thread`,
#: :func:`_file_monitor_thread`
self.fam_thread = \
- threading.Thread(name="%sFAMThread" % setup['filemonitor'],
+ threading.Thread(name="%sFAMThread" %
+ Bcfg2.Options.setup.filemonitor.__name__,
target=self._file_monitor_thread)
+ #: The :class:`threading.Thread` that reports performance
+ #: statistics to syslog.
self.perflog_thread = None
- if self.setup['perflog']:
+ if Bcfg2.Options.setup.perflog:
self.perflog_thread = \
threading.Thread(name="PerformanceLoggingThread",
target=self._perflog_thread)
@@ -290,23 +256,28 @@ class BaseCore(object):
#: :func:`Bcfg2.Server.FileMonitor.FileMonitor.handle_event_set`
self.lock = threading.Lock()
- #: A :class:`Bcfg2.Cache.Cache` object for caching client
+ #: A :class:`Bcfg2.Server.Cache.Cache` object for caching client
#: metadata
- self.metadata_cache = Cache()
+ self.metadata_cache = Cache("Metadata")
- def expire_caches_by_type(self, base_cls, key=None):
- """ Expire caches for all
- :class:`Bcfg2.Server.Plugin.interfaces.Caching` plugins that
- are instances of ``base_cls``.
+ #: Whether or not it's possible to use the Django database
+ #: backend for plugins that have that capability
+ self._database_available = False
+ if HAS_DJANGO:
+ try:
+ Bcfg2.DBSettings.sync_databases(interactive=False,
+ verbosity=0)
+ self._database_available = True
+ except ImproperlyConfigured:
+ self.logger.error("Django configuration problem: %s" %
+ sys.exc_info()[1])
+ except:
+ self.logger.error("Updating database %s failed: %s" %
+ (Bcfg2.Options.setup.db_name,
+ sys.exc_info()[1]))
- :param base_cls: The base plugin interface class to match (see
- :mod:`Bcfg2.Server.Plugin.interfaces`)
- :type base_cls: type
- :param key: The cache key to expire
- """
- for plugin in self.plugins_by_type(base_cls):
- if isinstance(plugin, Bcfg2.Server.Plugin.Caching):
- plugin.expire_cache(key)
+ def __str__(self):
+ return self.__class__.__name__
def plugins_by_type(self, base_cls):
""" Return a list of loaded plugins that match the passed type.
@@ -333,7 +304,7 @@ class BaseCore(object):
to syslog. """
self.logger.debug("Performance logging thread starting")
while not self.terminate.isSet():
- self.terminate.wait(self.setup['perflog_interval'])
+ self.terminate.wait(Bcfg2.Options.setup.performance_interval)
if not self.terminate.isSet():
for name, stats in self.get_statistics(None).items():
self.logger.info("Performance statistics: "
@@ -350,17 +321,21 @@ class BaseCore(object):
famfd = self.fam.fileno()
terminate = self.terminate
while not terminate.isSet():
- try:
- if famfd:
- select.select([famfd], [], [], 2)
- else:
- if not self.fam.pending():
- terminate.wait(15)
- if self.fam.pending():
+ if famfd:
+ select.select([famfd], [], [], 2)
+ elif not self.fam.pending():
+ terminate.wait(15)
+ if self.fam.pending():
+ try:
self._update_vcs_revision()
+ except:
+ self.logger.error("Error updating VCS revision: %s" %
+ sys.exc_info()[1])
+ try:
self.fam.handle_event_set(self.lock)
except:
- continue
+ self.logger.error("Error handling event set: %s" %
+ sys.exc_info()[1])
self.logger.info("File monitor thread terminated")
@track_statistics()
@@ -385,10 +360,7 @@ class BaseCore(object):
:attr:`Bcfg2.Server.Core.BaseCore.metadata` as side effects.
This does not start plugin threads; that is done later, in
:func:`Bcfg2.Server.Core.BaseCore.run` """
- while '' in self.setup['plugins']:
- self.setup['plugins'].remove('')
-
- for plugin in self.setup['plugins']:
+ for plugin in Bcfg2.Options.setup.plugins:
if plugin not in self.plugins:
self.init_plugin(plugin)
@@ -400,17 +372,18 @@ class BaseCore(object):
for plug in blacklist:
del self.plugins[plug]
- # Log experimental plugins
- expl = [plug for plug in list(self.plugins.values())
- if plug.experimental]
+ # Log deprecated and experimental plugins
+ expl = []
+ depr = []
+ for plug in list(self.plugins.values()):
+ if plug.experimental:
+ expl.append(plug)
+ if plug.deprecated:
+ depr.append(plug)
if expl:
self.logger.info("Loading experimental plugin(s): %s" %
(" ".join([x.name for x in expl])))
self.logger.info("NOTE: Interfaces subject to change")
-
- # Log deprecated plugins
- depr = [plug for plug in list(self.plugins.values())
- if plug.deprecated]
if depr:
self.logger.info("Loading deprecated plugin(s): %s" %
(" ".join([x.name for x in depr])))
@@ -427,43 +400,25 @@ class BaseCore(object):
"failed to instantiate Core")
raise CoreInitError("No Metadata Plugin")
- if self.debug_flag:
- # enable debugging on plugins
- self.plugins[plugin].set_debug(self.debug_flag)
+ # ensure that an ACL plugin is loaded
+ if not self.plugins_by_type(Bcfg2.Server.Plugin.ClientACLs):
+ self.init_plugin(DefaultACL)
def init_plugin(self, plugin):
""" Import and instantiate a single plugin. The plugin is
stored to :attr:`plugins`.
- :param plugin: The name of the plugin. This is just the name
- of the plugin, in the appropriate case. I.e.,
- ``Cfg``, not ``Bcfg2.Server.Plugins.Cfg``.
- :type plugin: string
+ :param plugin: The plugin class to load.
+ :type plugin: type
:returns: None
"""
- self.logger.debug("%s: Loading plugin %s" % (self.name, plugin))
- try:
- mod = getattr(__import__("Bcfg2.Server.Plugins.%s" %
- (plugin)).Server.Plugins, plugin)
- except ImportError:
- try:
- mod = __import__(plugin, globals(), locals(),
- [plugin.split('.')[-1]])
- except:
- self.logger.error("Failed to load plugin %s" % plugin)
- return
- try:
- plug = getattr(mod, plugin.split('.')[-1])
- except AttributeError:
- self.logger.error("Failed to load plugin %s: %s" %
- (plugin, sys.exc_info()[1]))
- return
+ self.logger.debug("Loading plugin %s" % plugin.name)
# Blacklist conflicting plugins
- cplugs = [conflict for conflict in plug.conflicts
+ cplugs = [conflict for conflict in plugin.conflicts
if conflict in self.plugins]
- self.plugin_blacklist[plug.name] = cplugs
+ self.plugin_blacklist[plugin.name] = cplugs
try:
- self.plugins[plugin] = plug(self, self.datastore)
+ self.plugins[plugin.name] = plugin(self)
except PluginInitError:
self.logger.error("Failed to instantiate plugin %s" % plugin,
exc_info=1)
@@ -499,8 +454,7 @@ class BaseCore(object):
""" Get the client :attr:`metadata_cache` mode. Options are
off, initial, cautious, aggressive, on (synonym for
cautious). See :ref:`server-caching` for more details. """
- mode = self.setup.cfp.get("caching", "client_metadata",
- default="off").lower()
+ mode = Bcfg2.Options.setup.client_metadata_cache
if mode == "on":
return "cautious"
else:
@@ -537,9 +491,10 @@ class BaseCore(object):
self.logger.error("%s: Error invoking hook %s: %s" %
(plugin, hook, err))
finally:
- Bcfg2.Statistics.stats.add_value("%s:client_run_hook:%s" %
- (self.__class__.__name__, hook),
- time.time() - start)
+ Bcfg2.Server.Statistics.stats.add_value(
+ "%s:client_run_hook:%s" %
+ (self.__class__.__name__, hook),
+ time.time() - start)
@track_statistics()
def validate_structures(self, metadata, data):
@@ -606,7 +561,8 @@ class BaseCore(object):
structures = list(
chain(*[struct.BuildStructures(metadata)
for struct in self.plugins_by_type(Structure)]))
- sbundles = [b.get('name') for b in structures if b.tag == 'Bundle']
+ sbundles = [b.get('name') for b in structures
+ if b.tag == 'Bundle' or b.tag == 'Independent']
missing = [b for b in metadata.bundles if b not in sbundles]
if missing:
self.logger.error("Client %s configuration missing bundles: %s" %
@@ -710,10 +666,10 @@ class BaseCore(object):
raise PluginExecutionError("No matching generator: %s:%s" %
(entry.tag, entry.get('name')))
finally:
- Bcfg2.Statistics.stats.add_value("%s:Bind:%s" %
- (self.__class__.__name__,
- entry.tag),
- time.time() - start)
+ Bcfg2.Server.Statistics.stats.add_value("%s:Bind:%s" %
+ (self.__class__.__name__,
+ entry.tag),
+ time.time() - start)
def BuildConfiguration(self, client):
""" Build the complete configuration for a client.
@@ -779,10 +735,13 @@ class BaseCore(object):
self.logger.error("Got event for unknown file: %s" %
event.filename)
return
- if event.code2str() == 'deleted':
+ if event.code2str() in ['deleted', 'exists']:
+ # ignore config file deletion, and ignore the initial
+ # 'exists' event as well. we've already parsed options on
+ # startup, we don't need to parse them twice.
return
- self.setup.reparse()
- self.expire_caches_by_type(Bcfg2.Server.Plugin.Metadata)
+ Bcfg2.Options.get_parser().reparse()
+ self.metadata_cache.expire()
def block_for_fam_events(self, handle_events=False):
""" Block until all fam events have been handleed, optionally
@@ -794,7 +753,7 @@ class BaseCore(object):
if handle_events:
self.fam.handle_events_in_interval(1)
slept += 1
- if self.setup['fam_blocking']:
+ if Bcfg2.Options.setup.fam_blocking:
time.sleep(1)
slept += 1
while self.fam.pending() != 0:
@@ -805,38 +764,12 @@ class BaseCore(object):
self.logger.debug("Slept %s seconds while handling FAM events" % slept)
def run(self):
- """ Run the server core. This calls :func:`_daemonize`
- (or :func:`_drop_privileges` if not in daemon mode),
- :func:`_run`, starts the :attr:`fam_thread`, and calls
- :func:`_block`, but note that it is the responsibility of the
- server core implementation to call :func:`shutdown` under
- normal operation. This also handles creation of the directory
- containing the pidfile, if necessary. """
- if self.setup['daemon']:
- # if we're dropping privs, then the pidfile is likely
- # /var/run/bcfg2-server/bcfg2-server.pid or similar.
- # since some OSes clean directories out of /var/run on
- # reboot, we need to ensure that the directory containing
- # the pidfile exists and has the appropriate permissions
- piddir = os.path.dirname(self.setup['daemon'])
- if not os.path.exists(piddir):
- os.makedirs(piddir)
- os.chown(piddir,
- self.setup['daemon_uid'],
- self.setup['daemon_gid'])
- os.chmod(piddir, 493) # 0775
- if not self._daemonize():
- return False
-
- # rewrite $HOME. pulp stores its auth creds in ~/.pulp, so
- # this is necessary to make that work when privileges are
- # dropped
- os.environ['HOME'] = pwd.getpwuid(self.setup['daemon_uid'])[5]
- else:
- if os.getuid() == 0:
- self._drop_privileges()
- os.umask(int(self.setup['umask'], 8))
-
+ """ Run the server core. This calls :func:`_run`, starts the
+ :attr:`fam_thread`, and calls :func:`_block`, but note that it
+ is the responsibility of the server core implementation to
+ call :func:`shutdown` under normal operation. This also
+ handles creation of the directory containing the pidfile, if
+ necessary."""
if not self._run():
self.shutdown()
return False
@@ -853,29 +786,12 @@ class BaseCore(object):
for plug in self.plugins_by_type(Threaded):
plug.start_threads()
- if self.debug_flag:
- self.set_debug(None, self.debug_flag)
self.block_for_fam_events()
self._block()
except:
self.shutdown()
raise
- def _daemonize(self):
- """ Daemonize the server and write the pidfile. This must be
- overridden by a core implementation. """
- raise NotImplementedError
-
- def _drop_privileges(self):
- """ This is called if not daemonized and running as root to
- drop the privileges to the configured daemon_uid and daemon_gid.
- """
- daemon.daemon.change_process_owner(
- self.setup['daemon_uid'],
- self.setup['daemon_gid'])
- self.logger.debug("Dropped privileges to %s:%s." %
- (os.getuid(), os.getgid()))
-
def _run(self):
""" Start up the server; this method should return
immediately. This must be overridden by a core
@@ -908,6 +824,61 @@ class BaseCore(object):
return result
@track_statistics()
+ def check_acls(self, address, rmi):
+ """ Check client IP address and metadata object against all
+ :class:`Bcfg2.Server.Plugin.interfaces.ClientACLs` plugins.
+ If any ACL plugin denies access, then access is denied. ACLs
+ are checked in two phases: First, with the client IP address;
+ and second, with the client metadata object. This lets an ACL
+ interface do a quick rejection based on IP before metadata is
+ ever built.
+
+ :param address: The address pair of the client to check ACLs for
+ :type address: tuple of (<ip address>, <port>)
+ :param rmi: The fully-qualified name of the RPC call
+        :type rmi: string
+ :returns: bool
+ """
+ plugins = self.plugins_by_type(Bcfg2.Server.Plugin.ClientACLs)
+ try:
+ ip_checks = [p.check_acl_ip(address, rmi) for p in plugins]
+ except:
+ self.logger.error("Unexpected error checking ACLs for %s for %s: "
+ "%s" % (address[0], rmi, sys.exc_info()[1]))
+ return False # failsafe
+
+ if all(ip_checks):
+ # if all ACL plugins return True (allow), then allow
+ self.logger.debug("Client %s passed IP-based ACL checks for %s" %
+ (address[0], rmi))
+ return True
+ elif False in ip_checks:
+ # if any ACL plugin returned False (deny), then deny
+ self.logger.warning("Client %s failed IP-based ACL checks for %s" %
+ (address[0], rmi))
+ return False
+ # else, no plugins returned False, but not all plugins
+ # returned True, so some plugin returned None (defer), so
+ # defer.
+
+ client, metadata = self.resolve_client(address)
+ try:
+ rv = all(p.check_acl_metadata(metadata, rmi) for p in plugins)
+ if rv:
+ self.logger.debug(
+ "Client %s passed metadata ACL checks for %s" %
+ (metadata.hostname, rmi))
+ else:
+ self.logger.warning(
+ "Client %s failed metadata ACL checks for %s" %
+ (metadata.hostname, rmi))
+ return rv
+ except:
+ self.logger.error("Unexpected error checking ACLs for %s for %s: "
+ "%s" % (client, rmi, sys.exc_info()[1]))
+ return False # failsafe
+
+ @track_statistics()
def build_metadata(self, client_name):
""" Build initial client metadata for a client
@@ -1007,7 +978,7 @@ class BaseCore(object):
:param address: The address pair of the client to get the
canonical hostname for.
- :type address: tuple of (<ip address>, <hostname>)
+ :type address: tuple of (<ip address>, <port>)
:param cleanup_cache: Tell the
:class:`Bcfg2.Server.Plugin.interfaces.Metadata`
plugin in :attr:`metadata` to clean up
@@ -1090,21 +1061,23 @@ class BaseCore(object):
def listMethods(self, address): # pylint: disable=W0613
""" List all exposed methods, including plugin RMI.
- :param address: Client (address, hostname) pair
+ :param address: Client (address, port) pair
:type address: tuple
:returns: list of exposed method names
"""
methods = [name
for name, func in inspect.getmembers(self, callable)
- if getattr(func, "exposed", False)]
- methods.extend(self._get_rmi().keys())
+ if (getattr(func, "exposed", False) and
+ self.check_acls(address, name))]
+ methods.extend([m for m in self._get_rmi().keys()
+ if self.check_acls(address, m)])
return methods
@exposed
def methodHelp(self, address, method_name): # pylint: disable=W0613
""" Get help from the docstring of an exposed method
- :param address: Client (address, hostname) pair
+ :param address: Client (address, port) pair
:type address: tuple
:param method_name: The name of the method to get help on
:type method_name: string
@@ -1122,7 +1095,7 @@ class BaseCore(object):
def DeclareVersion(self, address, version):
""" Declare the client version.
- :param address: Client (address, hostname) pair
+ :param address: Client (address, port) pair
:type address: tuple
:param version: The client's declared version
:type version: string
@@ -1145,7 +1118,7 @@ class BaseCore(object):
def GetProbes(self, address):
""" Fetch probes for the client.
- :param address: Client (address, hostname) pair
+ :param address: Client (address, port) pair
:type address: tuple
:returns: lxml.etree._Element - XML tree describing probes for
this client
@@ -1171,7 +1144,7 @@ class BaseCore(object):
def RecvProbeData(self, address, probedata):
""" Receive probe data from clients.
- :param address: Client (address, hostname) pair
+ :param address: Client (address, port) pair
:type address: tuple
:returns: bool - True on success
:raises: :exc:`xmlrpclib.Fault`
@@ -1185,7 +1158,7 @@ class BaseCore(object):
# that's created for RecvProbeData doesn't get cached.
# I.e., the next metadata object that's built, after probe
# data is processed, is cached.
- self.expire_caches_by_type(Bcfg2.Server.Plugin.Metadata)
+ self.metadata_cache.expire(client)
try:
xpdata = lxml.etree.XML(probedata.encode('utf-8'),
parser=Bcfg2.Server.XMLParser)
@@ -1219,7 +1192,7 @@ class BaseCore(object):
def AssertProfile(self, address, profile):
""" Set profile for a client.
- :param address: Client (address, hostname) pair
+ :param address: Client (address, port) pair
:type address: tuple
:returns: bool - True on success
:raises: :exc:`xmlrpclib.Fault`
@@ -1240,7 +1213,7 @@ class BaseCore(object):
""" Build config for a client by calling
:func:`BuildConfiguration`.
- :param address: Client (address, hostname) pair
+ :param address: Client (address, port) pair
:type address: tuple
:returns: lxml.etree._Element - The full configuration
document for the client
@@ -1259,7 +1232,7 @@ class BaseCore(object):
def RecvStats(self, address, stats):
""" Act on statistics upload with :func:`process_statistics`.
- :param address: Client (address, hostname) pair
+ :param address: Client (address, port) pair
:type address: tuple
:returns: bool - True on success
:raises: :exc:`xmlrpclib.Fault`
@@ -1270,34 +1243,12 @@ class BaseCore(object):
self.process_statistics(client, sdata)
return True
- def authenticate(self, cert, user, password, address):
- """ Authenticate a client connection with
- :func:`Bcfg2.Server.Plugin.interfaces.Metadata.AuthenticateConnection`.
-
- :param cert: an x509 certificate
- :type cert: dict
- :param user: The username of the user trying to authenticate
- :type user: string
- :param password: The password supplied by the client
- :type password: string
- :param address: An address pair of ``(<ip address>, <hostname>)``
- :type address: tuple
- :return: bool - True if the authenticate succeeds, False otherwise
- """
- if self.ca:
- acert = cert
- else:
- # No ca, so no cert validation can be done
- acert = None
- return self.metadata.AuthenticateConnection(acert, user, password,
- address)
-
@exposed
@close_db_connection
def GetDecisionList(self, address, mode):
""" Get the decision list for the client with :func:`GetDecisions`.
- :param address: Client (address, hostname) pair
+ :param address: Client (address, port) pair
:type address: tuple
:returns: list of decision tuples
:raises: :exc:`xmlrpclib.Fault`
@@ -1314,17 +1265,17 @@ class BaseCore(object):
@exposed
def get_statistics(self, _):
""" Get current statistics about component execution from
- :attr:`Bcfg2.Statistics.stats`.
+ :attr:`Bcfg2.Server.Statistics.stats`.
:returns: dict - The statistics data as returned by
- :func:`Bcfg2.Statistics.Statistics.display` """
- return Bcfg2.Statistics.stats.display()
+ :func:`Bcfg2.Server.Statistics.Statistics.display` """
+ return Bcfg2.Server.Statistics.stats.display()
@exposed
def toggle_debug(self, address):
""" Toggle debug status of the FAM and all plugins
- :param address: Client (address, hostname) pair
+ :param address: Client (address, port) pair
:type address: tuple
:returns: bool - The new debug state of the FAM
"""
@@ -1414,3 +1365,135 @@ class BaseCore(object):
address[0])
return "This method is deprecated and will be removed in a future " + \
"release\n%s" % self.fam.set_debug(debug)
+
+
+class NetworkCore(Core):
+ """ A server core that actually listens on the network, can be
+ daemonized, etc."""
+ options = Core.options + [
+ Bcfg2.Options.Common.daemon, Bcfg2.Options.Common.syslog,
+ Bcfg2.Options.Common.location, Bcfg2.Options.Common.ssl_ca,
+ Bcfg2.Options.Common.protocol,
+ Bcfg2.Options.PathOption(
+ '--ssl-key', cf=('communication', 'key'), dest="key",
+ help='Path to SSL key',
+ default="/etc/pki/tls/private/bcfg2.key"),
+ Bcfg2.Options.PathOption(
+ cf=('communication', 'certificate'), dest="cert",
+ help='Path to SSL certificate',
+ default="/etc/pki/tls/certs/bcfg2.crt"),
+ Bcfg2.Options.BooleanOption(
+ '--listen-all', cf=('server', 'listen_all'), default=False,
+ help="Listen on all interfaces"),
+ Bcfg2.Options.Option(
+ cf=('server', 'umask'), default='0077', help='Server umask',
+ type=Bcfg2.Options.Types.octal),
+ Bcfg2.Options.Option(
+ cf=('server', 'user'), default=0, dest='daemon_uid',
+ type=Bcfg2.Options.Types.username,
+ help="User to run the server daemon as"),
+ Bcfg2.Options.Option(
+ cf=('server', 'group'), default=0, dest='daemon_gid',
+ type=Bcfg2.Options.Types.groupname,
+ help="Group to run the server daemon as")]
+
+ def __init__(self):
+ Core.__init__(self)
+
+ #: The CA that signed the server cert
+ self.ca = Bcfg2.Options.setup.ca
+
+ if self._database_available:
+ db_settings = django.conf.settings.DATABASES['default']
+ if (Bcfg2.Options.setup.daemon and
+ Bcfg2.Options.setup.daemon_uid and
+ db_settings['ENGINE'].endswith(".sqlite3") and
+ not os.path.exists(db_settings['NAME'])):
+ # syncdb will create the sqlite database, and we're
+ # going to daemonize, dropping privs to a non-root
+ # user, so we need to chown the database after
+ # creating it
+ try:
+ os.chown(db_settings['NAME'],
+ Bcfg2.Options.setup.daemon_uid,
+ Bcfg2.Options.setup.daemon_gid)
+ except OSError:
+ err = sys.exc_info()[1]
+ self.logger.error("Failed to set ownership of database "
+ "at %s: %s" % (db_settings['NAME'], err))
+ __init__.__doc__ = Core.__init__.__doc__.split(".. -----")[0] + \
+ "\n.. automethod:: _daemonize\n"
+
+ def __str__(self):
+ if hasattr(Bcfg2.Options.setup, "location"):
+ return "%s(%s)" % (self.__class__.__name__,
+ Bcfg2.Options.setup.location)
+ else:
+ return Core.__str__(self)
+
+ def run(self):
+ """ Run the server core. This calls :func:`_daemonize` before
+ calling :func:`Bcfg2.Server.Core.Core.run` to run the server
+ core. """
+ if Bcfg2.Options.setup.daemon:
+ # if we're dropping privs, then the pidfile is likely
+ # /var/run/bcfg2-server/bcfg2-server.pid or similar.
+ # since some OSes clean directories out of /var/run on
+ # reboot, we need to ensure that the directory containing
+ # the pidfile exists and has the appropriate permissions
+ piddir = os.path.dirname(Bcfg2.Options.setup.daemon)
+ if not os.path.exists(piddir):
+ os.makedirs(piddir)
+ os.chown(piddir,
+ Bcfg2.Options.setup.daemon_uid,
+ Bcfg2.Options.setup.daemon_gid)
+ os.chmod(piddir, 493) # 0775
+ if not self._daemonize():
+ return False
+
+ # rewrite $HOME. pulp stores its auth creds in ~/.pulp, so
+ # this is necessary to make that work when privileges are
+ # dropped
+ os.environ['HOME'] = \
+ pwd.getpwuid(Bcfg2.Options.setup.daemon_uid)[5]
+ else:
+ os.umask(int(Bcfg2.Options.setup.umask, 8))
+
+ Core.run(self)
+
+ def authenticate(self, cert, user, password, address):
+ """ Authenticate a client connection with
+ :func:`Bcfg2.Server.Plugin.interfaces.Metadata.AuthenticateConnection`.
+
+ :param cert: an x509 certificate
+ :type cert: dict
+ :param user: The username of the user trying to authenticate
+ :type user: string
+ :param password: The password supplied by the client
+ :type password: string
+ :param address: An address pair of ``(<ip address>, <port>)``
+ :type address: tuple
+ :return: bool - True if the authenticate succeeds, False otherwise
+ """
+ if self.ca:
+ acert = cert
+ else:
+ # No ca, so no cert validation can be done
+ acert = None
+ return self.metadata.AuthenticateConnection(acert, user, password,
+ address)
+
+ def _daemonize(self):
+ """ Daemonize the server and write the pidfile. This must be
+ overridden by a core implementation. """
+ raise NotImplementedError
+
+ def _drop_privileges(self):
+ """ This is called if not daemonized and running as root to
+ drop the privileges to the configured daemon_uid and daemon_gid.
+ """
+ daemon.daemon.change_process_owner(
+ Bcfg2.Options.setup.daemon_uid,
+ Bcfg2.Options.setup.daemon_gid)
+ self.logger.debug("Dropped privileges to %s:%s." %
+ (os.getuid(), os.getgid()))
diff --git a/src/lib/Bcfg2/Server/Encryption.py b/src/lib/Bcfg2/Server/Encryption.py
new file mode 100755
index 000000000..b60302871
--- /dev/null
+++ b/src/lib/Bcfg2/Server/Encryption.py
@@ -0,0 +1,681 @@
+""" Bcfg2.Server.Encryption provides a number of convenience methods
+for handling encryption in Bcfg2. See :ref:`server-encryption` for
+more details. """
+
+import os
+import sys
+import copy
+import logging
+import lxml.etree
+import Bcfg2.Logger
+import Bcfg2.Options
+from M2Crypto import Rand
+from M2Crypto.EVP import Cipher, EVPError
+from Bcfg2.Utils import safe_input
+from Bcfg2.Server import XMLParser
+from Bcfg2.Compat import md5, b64encode, b64decode, StringIO
+
+#: Constant representing the encryption operation for
+#: :class:`M2Crypto.EVP.Cipher`, which uses a simple integer. This
+#: makes our code more readable.
+ENCRYPT = 1
+
+#: Constant representing the decryption operation for
+#: :class:`M2Crypto.EVP.Cipher`, which uses a simple integer. This
+#: makes our code more readable.
+DECRYPT = 0
+
+#: Default initialization vector. For best security, you should use a
+#: unique IV for each message. :func:`ssl_encrypt` does this in an
+#: automated fashion.
+IV = r'\0' * 16
+
+
+class _OptionContainer(object):
+ """ Container for options loaded at import-time to configure
+ encryption """
+ options = [
+ Bcfg2.Options.BooleanOption(
+ cf=("encryption", "lax_decryption"),
+ help="Decryption failures should cause warnings, not errors"),
+ Bcfg2.Options.Option(
+ cf=("encryption", "algorithm"), default="aes_256_cbc",
+ type=lambda v: v.lower().replace("-", "_"),
+ help="The encryption algorithm to use"),
+ Bcfg2.Options.Option(
+ cf=("encryption", "*"), dest='passphrases', default=dict(),
+ help="Encryption passphrases")]
+
+
+Bcfg2.Options.get_parser().add_component(_OptionContainer)
+
+Rand.rand_seed(os.urandom(1024))
+
+
+def _cipher_filter(cipher, instr):
+ """ M2Crypto reads and writes file-like objects, so this uses
+ StringIO to pass data through it """
+ inbuf = StringIO(instr)
+ outbuf = StringIO()
+ while 1:
+ buf = inbuf.read()
+ if not buf:
+ break
+ outbuf.write(cipher.update(buf))
+ outbuf.write(cipher.final())
+ rv = outbuf.getvalue()
+ inbuf.close()
+ outbuf.close()
+ return rv
+
+
+def str_encrypt(plaintext, key, iv=IV, algorithm=None, salt=None):
+ """ Encrypt a string with a key. For a higher-level encryption
+ interface, see :func:`ssl_encrypt`.
+
+ :param plaintext: The plaintext data to encrypt
+ :type plaintext: string
+ :param key: The key to encrypt the data with
+ :type key: string
+ :param iv: The initialization vector
+ :type iv: string
+ :param algorithm: The cipher algorithm to use
+ :type algorithm: string
+ :param salt: The salt to use
+ :type salt: string
+    :returns: string - The encrypted data
+ """
+ if algorithm is None:
+ algorithm = Bcfg2.Options.setup.algorithm
+ cipher = Cipher(alg=algorithm, key=key, iv=iv, op=ENCRYPT, salt=salt)
+ return _cipher_filter(cipher, plaintext)
+
+
+def str_decrypt(crypted, key, iv=IV, algorithm=None):
+ """ Decrypt a string with a key. For a higher-level decryption
+ interface, see :func:`ssl_decrypt`.
+
+ :param crypted: The raw binary encrypted data
+ :type crypted: string
+ :param key: The encryption key to decrypt with
+ :type key: string
+ :param iv: The initialization vector
+ :type iv: string
+ :param algorithm: The cipher algorithm to use
+ :type algorithm: string
+ :returns: string - The decrypted data
+ """
+ if algorithm is None:
+ algorithm = Bcfg2.Options.setup.algorithm
+ cipher = Cipher(alg=algorithm, key=key, iv=iv, op=DECRYPT)
+ return _cipher_filter(cipher, crypted)
+
+
+def ssl_decrypt(data, passwd, algorithm=None):
+ """ Decrypt openssl-encrypted data. This can decrypt data
+ encrypted by :func:`ssl_encrypt`, or ``openssl enc``. It performs
+ a base64 decode first if the data is base64 encoded, and
+ automatically determines the salt and initialization vector (both
+ of which are embedded in the encrypted data).
+
+ :param data: The encrypted data (either base64-encoded or raw
+ binary) to decrypt
+ :type data: string
+ :param passwd: The password to use to decrypt the data
+ :type passwd: string
+ :param algorithm: The cipher algorithm to use
+ :type algorithm: string
+ :returns: string - The decrypted data
+ """
+ # base64-decode the data
+ data = b64decode(data)
+ salt = data[8:16]
+ # pylint: disable=E1101,E1121
+ hashes = [md5(passwd + salt).digest()]
+ for i in range(1, 3):
+ hashes.append(md5(hashes[i - 1] + passwd + salt).digest())
+ # pylint: enable=E1101,E1121
+ key = hashes[0] + hashes[1]
+ iv = hashes[2]
+
+ return str_decrypt(data[16:], key=key, iv=iv, algorithm=algorithm)
+
+
+def ssl_encrypt(plaintext, passwd, algorithm=None, salt=None):
+ """ Encrypt data in a format that is openssl compatible.
+
+ :param plaintext: The plaintext data to encrypt
+ :type plaintext: string
+ :param passwd: The password to use to encrypt the data
+ :type passwd: string
+ :param algorithm: The cipher algorithm to use
+ :type algorithm: string
+ :param salt: The salt to use. If none is provided, one will be
+ randomly generated.
+ :type salt: bytes
+ :returns: string - The base64-encoded, salted, encrypted string.
+ The string includes a trailing newline to make it fully
+ compatible with openssl command-line tools.
+ """
+ if salt is None:
+ salt = Rand.rand_bytes(8)
+
+ # pylint: disable=E1101,E1121
+ hashes = [md5(passwd + salt).digest()]
+ for i in range(1, 3):
+ hashes.append(md5(hashes[i - 1] + passwd + salt).digest())
+ # pylint: enable=E1101,E1121
+ key = hashes[0] + hashes[1]
+ iv = hashes[2]
+
+ crypted = str_encrypt(plaintext, key=key, salt=salt, iv=iv,
+ algorithm=algorithm)
+ return b64encode("Salted__" + salt + crypted) + "\n"
+
+
+def is_encrypted(val):
+ """ Make a best guess if the value is encrypted or not. This just
+ checks to see if ``val`` is a base64-encoded string whose content
+ starts with "Salted__", so it may have (rare) false positives. It
+ will not have false negatives. """
+ try:
+ return b64decode(val).startswith("Salted__")
+ except: # pylint: disable=W0702
+ return False
+
+
+def bruteforce_decrypt(crypted, passphrases=None, algorithm=None):
+ """ Convenience method to decrypt the given encrypted string by
+ trying the given passphrases or all passphrases sequentially until
+ one is found that works.
+
+ :param crypted: The data to decrypt
+ :type crypted: string
+ :param passphrases: The passphrases to try.
+ :type passphrases: list
+ :param algorithm: The cipher algorithm to use
+ :type algorithm: string
+ :returns: string - The decrypted data
+ :raises: :class:`M2Crypto.EVP.EVPError`, if the data cannot be decrypted
+ """
+ if passphrases is None:
+ passphrases = Bcfg2.Options.setup.passphrases.values()
+ for passwd in passphrases:
+ try:
+ return ssl_decrypt(crypted, passwd, algorithm=algorithm)
+ except EVPError:
+ pass
+ raise EVPError("Failed to decrypt")
+
+
+def print_xml(element, keep_text=False):
+ """ Render an XML element for error output. This prefixes the
+ line number and removes children for nicer display.
+
+ :param element: The element to render
+ :type element: lxml.etree._Element
+ :param keep_text: Do not discard text content from the element for
+ display
+ :type keep_text: boolean
+ """
+ xml = None
+ if len(element) or element.text:
+ el = copy.copy(element)
+ if el.text and not keep_text:
+ el.text = '...'
+ for child in el.iterchildren():
+ el.remove(child)
+ xml = lxml.etree.tostring(
+ el,
+ xml_declaration=False).decode("UTF-8").strip()
+ else:
+ xml = lxml.etree.tostring(
+ element,
+ xml_declaration=False).decode("UTF-8").strip()
+ return "%s (line %s)" % (xml, element.sourceline)
+
+
+class PassphraseError(Exception):
+ """ Exception raised when there's a problem determining the
+ passphrase to encrypt or decrypt with """
+
+
+class DecryptError(Exception):
+ """ Exception raised when decryption fails. """
+
+
+class EncryptError(Exception):
+ """ Exception raised when encryption fails. """
+
+
+class CryptoTool(object):
+ """ Generic decryption/encryption interface base object """
+
+ def __init__(self, filename):
+ self.logger = logging.getLogger(self.__class__.__name__)
+ self.filename = filename
+ self.data = open(self.filename).read()
+ self.pname, self.passphrase = self._get_passphrase()
+
+ def _get_passphrase(self):
+ """ get the passphrase for the current file """
+ if not Bcfg2.Options.setup.passphrases:
+ raise PassphraseError("No passphrases available in %s" %
+ Bcfg2.Options.setup.config)
+
+ pname = None
+ if Bcfg2.Options.setup.passphrase:
+ pname = Bcfg2.Options.setup.passphrase
+
+ if pname:
+ try:
+ passphrase = Bcfg2.Options.setup.passphrases[pname]
+ self.logger.debug("Using passphrase %s specified on command "
+ "line" % pname)
+ return (pname, passphrase)
+ except KeyError:
+ raise PassphraseError("Could not find passphrase %s in %s" %
+ (pname, Bcfg2.Options.setup.config))
+ else:
+ if len(Bcfg2.Options.setup.passphrases) == 1:
+ pname, passphrase = Bcfg2.Options.setup.passphrases.items()[0]
+ self.logger.info("Using passphrase %s" % pname)
+ return (pname, passphrase)
+ elif len(Bcfg2.Options.setup.passphrases) > 1:
+ return (None, None)
+ raise PassphraseError("No passphrase could be determined")
+
+ def get_destination_filename(self, original_filename):
+ """ Get the filename where data should be written """
+ return original_filename
+
+ def write(self, data):
+ """ write data to disk """
+ new_fname = self.get_destination_filename(self.filename)
+ try:
+ self._write(new_fname, data)
+ self.logger.info("Wrote data to %s" % new_fname)
+ return True
+ except IOError:
+ err = sys.exc_info()[1]
+ self.logger.error("Error writing data from %s to %s: %s" %
+ (self.filename, new_fname, err))
+ return False
+
+ def _write(self, filename, data):
+ """ Perform the actual write of data. This is separate from
+ :func:`CryptoTool.write` so it can be easily
+ overridden. """
+ open(filename, "wb").write(data)
+
+
+class Decryptor(CryptoTool):
+ """ Decryptor interface """
+ def decrypt(self):
+ """ decrypt the file, returning the encrypted data """
+ raise NotImplementedError
+
+
+class Encryptor(CryptoTool):
+ """ encryptor interface """
+ def encrypt(self):
+ """ encrypt the file, returning the encrypted data """
+ raise NotImplementedError
+
+
+class CfgEncryptor(Encryptor):
+ """ encryptor class for Cfg files """
+
+ def __init__(self, filename):
+ Encryptor.__init__(self, filename)
+ if self.passphrase is None:
+ raise PassphraseError("Multiple passphrases found in %s, "
+ "specify one on the command line with -p" %
+ Bcfg2.Options.setup.config)
+
+ def encrypt(self):
+ if is_encrypted(self.data):
+            raise EncryptError("Data is already encrypted")
+ return ssl_encrypt(self.data, self.passphrase)
+
+ def get_destination_filename(self, original_filename):
+ return original_filename + ".crypt"
+
+
+class CfgDecryptor(Decryptor):
+ """ Decrypt Cfg files """
+
+ def decrypt(self):
+ """ decrypt the given file, returning the plaintext data """
+ if self.passphrase:
+ try:
+ return ssl_decrypt(self.data, self.passphrase)
+ except EVPError:
+ raise DecryptError("Could not decrypt %s with the "
+ "specified passphrase" % self.filename)
+ except:
+ raise DecryptError("Error decrypting %s: %s" %
+ (self.filename, sys.exc_info()[1]))
+ else: # no passphrase given, brute force
+ try:
+ return bruteforce_decrypt(self.data)
+ except EVPError:
+ raise DecryptError("Could not decrypt %s with any passphrase" %
+ self.filename)
+
+ def get_destination_filename(self, original_filename):
+ if original_filename.endswith(".crypt"):
+ return original_filename[:-6]
+ else:
+ return Decryptor.get_destination_filename(self, original_filename)
+
+
+class PropertiesCryptoMixin(object):
+ """ Mixin to provide some common methods for Properties crypto """
+ default_xpath = '//*[@encrypted]'
+
+ def _get_elements(self, xdata):
+ """ Get the list of elements to encrypt or decrypt """
+ if Bcfg2.Options.setup.xpath:
+ elements = xdata.xpath(Bcfg2.Options.setup.xpath)
+ if not elements:
+ self.logger.warning("XPath expression %s matched no elements" %
+ Bcfg2.Options.setup.xpath)
+ else:
+ elements = xdata.xpath(self.default_xpath)
+ if not elements:
+ elements = list(xdata.getiterator(tag=lxml.etree.Element))
+
+ # filter out elements without text data
+ for el in elements[:]:
+ if not el.text:
+ elements.remove(el)
+
+ if Bcfg2.Options.setup.interactive:
+ for element in elements[:]:
+ if len(element):
+ elt = copy.copy(element)
+ for child in elt.iterchildren():
+ elt.remove(child)
+ else:
+ elt = element
+ print(lxml.etree.tostring(
+ elt,
+ xml_declaration=False).decode("UTF-8").strip())
+ ans = safe_input("Encrypt this element? [y/N] ")
+ if not ans.lower().startswith("y"):
+ elements.remove(element)
+ return elements
+
+ def _get_element_passphrase(self, element):
+ """ Get the passphrase to use to encrypt or decrypt a given
+ element """
+ pname = element.get("encrypted")
+ if pname in Bcfg2.Options.setup.passphrases:
+ passphrase = Bcfg2.Options.setup.passphrases[pname]
+ else:
+ if pname:
+ self.logger.warning("Passphrase %s not found in %s, "
+ "using passphrase given on command line" %
+ (pname, Bcfg2.Options.setup.config))
+ if self.passphrase:
+ passphrase = self.passphrase
+ pname = self.pname
+ else:
+ self.logger.warning("No passphrase specified for %s element" %
+ element.tag)
+ raise PassphraseError("Multiple passphrases found in %s, "
+ "specify one on the command line with "
+ "-p" % Bcfg2.Options.setup.config)
+ return (pname, passphrase)
+
+ def _write(self, filename, data):
+ """ Write the data """
+ data.getroottree().write(filename,
+ xml_declaration=False,
+ pretty_print=True)
+
+
+class PropertiesEncryptor(Encryptor, PropertiesCryptoMixin):
+ """ encryptor class for Properties files """
+
+ def encrypt(self):
+ xdata = lxml.etree.XML(self.data, parser=XMLParser)
+ for elt in self._get_elements(xdata):
+ if is_encrypted(elt.text):
+ raise EncryptError("Element is already encrypted: %s" %
+ print_xml(elt))
+ try:
+ pname, passphrase = self._get_element_passphrase(elt)
+ except PassphraseError:
+ raise EncryptError(str(sys.exc_info()[1]))
+ self.logger.debug("Encrypting %s" % print_xml(elt))
+ elt.text = ssl_encrypt(elt.text, passphrase).strip()
+ elt.set("encrypted", pname)
+ return xdata
+
+ def _write(self, filename, data):
+ PropertiesCryptoMixin._write(self, filename, data)
+
+
+class PropertiesDecryptor(Decryptor, PropertiesCryptoMixin):
+ """ decryptor class for Properties files """
+
+ def decrypt(self):
+ decrypted_any = False
+ xdata = lxml.etree.XML(self.data, parser=XMLParser)
+ for elt in self._get_elements(xdata):
+ try:
+ pname, passphrase = self._get_element_passphrase(elt)
+ except PassphraseError:
+ raise DecryptError(str(sys.exc_info()[1]))
+ self.logger.debug("Decrypting %s" % print_xml(elt))
+ try:
+ decrypted = ssl_decrypt(elt.text, passphrase).strip()
+ elt.text = decrypted.encode('ascii', 'xmlcharrefreplace')
+ elt.set("encrypted", pname)
+ decrypted_any = True
+ except (EVPError, TypeError):
+ self.logger.error("Could not decrypt %s, skipping" %
+ print_xml(elt))
+ except UnicodeDecodeError:
+ # we managed to decrypt the value, but it contains
+ # content that can't even be encoded into xml
+ # entities. what probably happened here is that we
+ # coincidentally could decrypt a value encrypted with
+ # a different key, and wound up with gibberish.
+ self.logger.warning("Decrypted %s to gibberish, skipping" %
+ elt.tag)
+ if decrypted_any:
+ return xdata
+ else:
+ raise DecryptError("Failed to decrypt any data in %s" %
+ self.filename)
+
+ def _write(self, filename, data):
+ PropertiesCryptoMixin._write(self, filename, data)
+
+
+class CLI(object):
+ """ The bcfg2-crypt CLI """
+
+ options = [
+ Bcfg2.Options.ExclusiveOptionGroup(
+ Bcfg2.Options.BooleanOption(
+ "--encrypt", help='Encrypt the specified file'),
+ Bcfg2.Options.BooleanOption(
+ "--decrypt", help='Decrypt the specified file')),
+ Bcfg2.Options.BooleanOption(
+ "--stdout",
+ help='Decrypt or encrypt the specified file to stdout'),
+ Bcfg2.Options.Option(
+ "-p", "--passphrase", metavar="NAME",
+ help='Encryption passphrase name'),
+ Bcfg2.Options.ExclusiveOptionGroup(
+ Bcfg2.Options.BooleanOption(
+ "--properties",
+ help='Encrypt the specified file as a Properties file'),
+ Bcfg2.Options.BooleanOption(
+ "--cfg", help='Encrypt the specified file as a Cfg file')),
+ Bcfg2.Options.OptionGroup(
+ Bcfg2.Options.Common.interactive,
+ Bcfg2.Options.Option(
+ "--xpath",
+ help='XPath expression to select elements to encrypt'),
+ title="Options for handling Properties files"),
+ Bcfg2.Options.OptionGroup(
+ Bcfg2.Options.BooleanOption(
+ "--remove", help='Remove the plaintext file after encrypting'),
+ title="Options for handling Cfg files"),
+ Bcfg2.Options.PathOption(
+ "files", help="File(s) to encrypt or decrypt", nargs='+')]
+
+ def __init__(self, argv=None):
+ parser = Bcfg2.Options.get_parser(
+ description="Encrypt and decrypt Bcfg2 data",
+ components=[self, _OptionContainer])
+ parser.parse(argv=argv)
+ self.logger = logging.getLogger(parser.prog)
+
+ if Bcfg2.Options.setup.decrypt:
+ if Bcfg2.Options.setup.remove:
+ self.logger.error("--remove cannot be used with --decrypt, "
+ "ignoring --remove")
+ Bcfg2.Options.setup.remove = False
+ elif Bcfg2.Options.setup.interactive:
+ self.logger.error("Cannot decrypt interactively")
+ Bcfg2.Options.setup.interactive = False
+
+ def _is_properties(self, filename):
+ """ Determine if a given file is a Properties file or not """
+ if Bcfg2.Options.setup.properties:
+ return True
+ elif Bcfg2.Options.setup.cfg:
+ return False
+ elif filename.endswith(".xml"):
+ try:
+ xroot = lxml.etree.parse(filename).getroot()
+ return xroot.tag == "Properties"
+ except lxml.etree.XMLSyntaxError:
+ return False
+ else:
+ return False
+
+ def run(self): # pylint: disable=R0912,R0915
+ """ Run bcfg2-crypt """
+ for fname in Bcfg2.Options.setup.files:
+ if not os.path.exists(fname):
+ self.logger.error("%s does not exist, skipping" % fname)
+ continue
+
+ # figure out if we need to encrypt this as a Properties file
+ # or as a Cfg file
+ try:
+ props = self._is_properties(fname)
+ except IOError:
+ err = sys.exc_info()[1]
+ self.logger.error("Error reading %s, skipping: %s" %
+ (fname, err))
+ continue
+
+ if props:
+ ftype = "Properties"
+ if Bcfg2.Options.setup.remove:
+ self.logger.info("Cannot use --remove with Properties "
+ "file %s, ignoring for this file" % fname)
+ tools = (PropertiesEncryptor, PropertiesDecryptor)
+ else:
+ ftype = "Cfg"
+ if Bcfg2.Options.setup.xpath:
+ self.logger.error("Specifying --xpath with --cfg is "
+ "nonsensical, ignoring --xpath")
+ Bcfg2.Options.setup.xpath = None
+ if Bcfg2.Options.setup.interactive:
+ self.logger.error("Cannot use interactive mode with "
+ "--cfg, ignoring --interactive")
+ Bcfg2.Options.setup.interactive = False
+ tools = (CfgEncryptor, CfgDecryptor)
+
+ data = None
+ mode = None
+ if Bcfg2.Options.setup.encrypt:
+ try:
+ tool = tools[0](fname)
+ except PassphraseError:
+ self.logger.error(str(sys.exc_info()[1]))
+ continue
+ except IOError:
+ self.logger.error("Error reading %s, skipping: %s" %
+ (fname, err))
+ continue
+ mode = "encrypt"
+ self.logger.debug("Encrypting %s file %s" % (ftype, fname))
+ elif Bcfg2.Options.setup.decrypt:
+ try:
+ tool = tools[1](fname)
+ except PassphraseError:
+ self.logger.error(str(sys.exc_info()[1]))
+ continue
+ except IOError:
+ self.logger.error("Error reading %s, skipping: %s" %
+ (fname, err))
+ continue
+ mode = "decrypt"
+ self.logger.debug("Decrypting %s file %s" % (ftype, fname))
+ else:
+ self.logger.info("Neither --encrypt nor --decrypt specified, "
+ "determining mode")
+ try:
+ tool = tools[1](fname)
+ except PassphraseError:
+ self.logger.error(str(sys.exc_info()[1]))
+ continue
+ except IOError:
+ self.logger.error("Error reading %s, skipping: %s" %
+ (fname, err))
+ continue
+ try:
+ self.logger.debug("Trying to decrypt %s file %s" % (ftype,
+ fname))
+ data = tool.decrypt()
+ mode = "decrypt"
+ self.logger.debug("Decrypted %s file %s" % (ftype, fname))
+ except DecryptError:
+ self.logger.info("Failed to decrypt %s, trying encryption"
+ % fname)
+ try:
+ tool = tools[0](fname)
+ except PassphraseError:
+ self.logger.error(str(sys.exc_info()[1]))
+ continue
+ except IOError:
+ self.logger.error("Error reading %s, skipping: %s" %
+ (fname, err))
+ continue
+ mode = "encrypt"
+ self.logger.debug("Encrypting %s file %s" % (ftype, fname))
+
+ if data is None:
+ try:
+ data = getattr(tool, mode)()
+ except (EncryptError, DecryptError):
+ self.logger.error("Failed to %s %s, skipping: %s" %
+ (mode, fname, sys.exc_info()[1]))
+ continue
+ if Bcfg2.Options.setup.stdout:
+ if len(Bcfg2.Options.setup.files) > 1:
+ print("----- %s -----" % fname)
+ print(data)
+ if len(Bcfg2.Options.setup.files) > 1:
+ print("")
+ else:
+ tool.write(data)
+
+ if (Bcfg2.Options.setup.remove and
+ tool.get_destination_filename(fname) != fname):
+ try:
+ os.unlink(fname)
+ except IOError:
+ err = sys.exc_info()[1]
+ self.logger.error("Error removing %s: %s" % (fname, err))
+ continue
diff --git a/src/lib/Bcfg2/Server/FileMonitor/Fam.py b/src/lib/Bcfg2/Server/FileMonitor/Fam.py
deleted file mode 100644
index 09d41038e..000000000
--- a/src/lib/Bcfg2/Server/FileMonitor/Fam.py
+++ /dev/null
@@ -1,105 +0,0 @@
-""" File monitor backend with support for the `File Alteration Monitor
-<http://oss.sgi.com/projects/fam/>`_. The FAM backend is deprecated. """
-
-import os
-import _fam
-import stat
-import logging
-from time import time
-from Bcfg2.Server.FileMonitor import FileMonitor
-
-LOGGER = logging.getLogger(__name__)
-
-
-class Fam(FileMonitor):
- """ **Deprecated** file monitor backend with support for the `File
- Alteration Monitor <http://oss.sgi.com/projects/fam/>`_ (also
- abbreviated "FAM")."""
-
- #: FAM is the worst actual monitor backend, so give it a low
- #: priority.
- __priority__ = 10
-
- def __init__(self, ignore=None, debug=False):
- FileMonitor.__init__(self, ignore=ignore, debug=debug)
- self.filemonitor = _fam.open()
- self.users = {}
- LOGGER.warning("The Fam file monitor backend is deprecated. Please "
- "switch to a supported file monitor.")
- __init__.__doc__ = FileMonitor.__init__.__doc__
-
- def fileno(self):
- return self.filemonitor.fileno()
- fileno.__doc__ = FileMonitor.fileno.__doc__
-
- def handle_event_set(self, _=None):
- self.Service()
- handle_event_set.__doc__ = FileMonitor.handle_event_set.__doc__
-
- def handle_events_in_interval(self, interval):
- now = time()
- while (time() - now) < interval:
- if self.Service():
- now = time()
- handle_events_in_interval.__doc__ = \
- FileMonitor.handle_events_in_interval.__doc__
-
- def AddMonitor(self, path, obj, _=None):
- mode = os.stat(path)[stat.ST_MODE]
- if stat.S_ISDIR(mode):
- handle = self.filemonitor.monitorDirectory(path, None)
- else:
- handle = self.filemonitor.monitorFile(path, None)
- self.handles[handle.requestID()] = handle
- if obj is not None:
- self.users[handle.requestID()] = obj
- return handle.requestID()
- AddMonitor.__doc__ = FileMonitor.AddMonitor.__doc__
-
- def Service(self, interval=0.50):
- """ Handle events for the specified period of time (in
- seconds). This call will block for ``interval`` seconds.
-
- :param interval: The interval, in seconds, during which events
- should be handled. Any events that are
- already pending when :func:`Service` is
- called will also be handled.
- :type interval: int
- :returns: None
- """
- count = 0
- collapsed = 0
- rawevents = []
- start = time()
- now = time()
- while (time() - now) < interval:
- if self.filemonitor.pending():
- while self.filemonitor.pending():
- count += 1
- rawevents.append(self.filemonitor.nextEvent())
- now = time()
- unique = []
- bookkeeping = []
- for event in rawevents:
- if self.should_ignore(event):
- continue
- if event.code2str() != 'changed':
- # process all non-change events
- unique.append(event)
- else:
- if (event.filename, event.requestID) not in bookkeeping:
- bookkeeping.append((event.filename, event.requestID))
- unique.append(event)
- else:
- collapsed += 1
- for event in unique:
- if event.requestID in self.users:
- try:
- self.users[event.requestID].HandleEvent(event)
- except: # pylint: disable=W0702
- LOGGER.error("Handling event for file %s" % event.filename,
- exc_info=1)
- end = time()
- LOGGER.info("Processed %s fam events in %03.03f seconds. "
- "%s coalesced" % (count, (end - start), collapsed))
- return count
diff --git a/src/lib/Bcfg2/Server/FileMonitor/Gamin.py b/src/lib/Bcfg2/Server/FileMonitor/Gamin.py
index 9134758b8..b349d20fd 100644
--- a/src/lib/Bcfg2/Server/FileMonitor/Gamin.py
+++ b/src/lib/Bcfg2/Server/FileMonitor/Gamin.py
@@ -27,14 +27,14 @@ class GaminEvent(Event):
class Gamin(FileMonitor):
""" File monitor backend with `Gamin
- <http://people.gnome.org/~veillard/gamin/>`_ support. """
+ <http://people.gnome.org/~veillard/gamin/>`_ support. **Deprecated.** """
- #: The Gamin backend is fairly decent, particularly newer
- #: releases, so it has a fairly high priority.
- __priority__ = 90
+ #: The Gamin backend is deprecated, but better than pseudo, so it
+ #: has a medium priority.
+ __priority__ = 50
- def __init__(self, ignore=None, debug=False):
- FileMonitor.__init__(self, ignore=ignore, debug=debug)
+ def __init__(self):
+ FileMonitor.__init__(self)
#: The :class:`Gamin.WatchMonitor` object for this monitor.
self.mon = None
@@ -46,6 +46,9 @@ class Gamin(FileMonitor):
#: The queue used to record monitors that are added before
#: :func:`start` has been called and :attr:`mon` is created.
self.add_q = []
+
+ self.logger.warning("The Gamin file monitor backend is deprecated. "
+ "Please switch to a supported file monitor.")
__init__.__doc__ = FileMonitor.__init__.__doc__
def start(self):
diff --git a/src/lib/Bcfg2/Server/FileMonitor/Inotify.py b/src/lib/Bcfg2/Server/FileMonitor/Inotify.py
index bce7ce7c2..c4b34a469 100644
--- a/src/lib/Bcfg2/Server/FileMonitor/Inotify.py
+++ b/src/lib/Bcfg2/Server/FileMonitor/Inotify.py
@@ -34,8 +34,8 @@ class Inotify(Pseudo, pyinotify.ProcessEvent):
#: listed in :attr:`action_map`
mask = reduce(lambda x, y: x | y, action_map.keys())
- def __init__(self, ignore=None, debug=False):
- Pseudo.__init__(self, ignore=ignore, debug=debug)
+ def __init__(self):
+ Pseudo.__init__(self)
pyinotify.ProcessEvent.__init__(self)
#: inotify can't set useful monitors directly on files, only
@@ -149,7 +149,7 @@ class Inotify(Pseudo, pyinotify.ProcessEvent):
evt = Event(handleID, path, action)
if (ievent.wd not in self.event_filter or
- ievent.pathname in self.event_filter[ievent.wd]):
+ ievent.pathname in self.event_filter[ievent.wd]):
self.events.append(evt)
def AddMonitor(self, path, obj, handleID=None):
diff --git a/src/lib/Bcfg2/Server/FileMonitor/Pseudo.py b/src/lib/Bcfg2/Server/FileMonitor/Pseudo.py
index b1e1adab7..9781e5b75 100644
--- a/src/lib/Bcfg2/Server/FileMonitor/Pseudo.py
+++ b/src/lib/Bcfg2/Server/FileMonitor/Pseudo.py
@@ -17,7 +17,8 @@ class Pseudo(FileMonitor):
def AddMonitor(self, path, obj, handleID=None):
if handleID is None:
handleID = len(list(self.handles.keys()))
- self.events.append(Event(handleID, path, 'exists'))
+ if os.path.exists(path):
+ self.events.append(Event(handleID, path, 'exists'))
if os.path.isdir(path):
dirlist = os.listdir(path)
for fname in dirlist:
diff --git a/src/lib/Bcfg2/Server/FileMonitor/__init__.py b/src/lib/Bcfg2/Server/FileMonitor/__init__.py
index 083e50fe6..c10677804 100644
--- a/src/lib/Bcfg2/Server/FileMonitor/__init__.py
+++ b/src/lib/Bcfg2/Server/FileMonitor/__init__.py
@@ -48,11 +48,9 @@ Base Classes
import os
import sys
import fnmatch
-import logging
+import Bcfg2.Options
from time import sleep, time
-from Bcfg2.Server.Plugin import Debuggable
-
-LOGGER = logging.getLogger(__name__)
+from Bcfg2.Logger import Debuggable
class Event(object):
@@ -112,6 +110,14 @@ class FileMonitor(Debuggable):
monitor objects to :attr:`handles` and received events to
:attr:`events`; the basic interface will handle the rest. """
+ options = [
+ Bcfg2.Options.Option(
+ cf=('server', 'ignore_files'),
+ help='File globs to ignore',
+ type=Bcfg2.Options.Types.comma_list,
+ default=['*~', '*#', '.#*', '*.swp', '*.swpx', '.*.swx',
+ 'SCCS', '.svn', '4913', '.gitignore'])]
+
#: The relative priority of this FAM backend. Better backends
#: should have higher priorities.
__priority__ = -1
@@ -119,7 +125,7 @@ class FileMonitor(Debuggable):
#: List of names of methods to be exposed as XML-RPC functions
__rmi__ = Debuggable.__rmi__ + ["list_event_handlers"]
- def __init__(self, ignore=None, debug=False):
+ def __init__(self):
"""
:param ignore: A list of filename globs describing events that
should be ignored (i.e., not processed by any
@@ -133,7 +139,6 @@ class FileMonitor(Debuggable):
.. autoattribute:: __priority__
"""
Debuggable.__init__(self)
- self.debug_flag = debug
#: A dict that records which objects handle which events.
#: Keys are monitor handle IDs and values are objects whose
@@ -143,12 +148,10 @@ class FileMonitor(Debuggable):
#: Queue of events to handle
self.events = []
- if ignore is None:
- ignore = []
#: List of filename globs to ignore events for. For events
#: that include the full path, both the full path and the bare
#: filename will be checked against ``ignore``.
- self.ignore = ignore
+ self.ignore = Bcfg2.Options.setup.ignore_files
#: Whether or not the FAM has been started. See :func:`start`.
self.started = False
@@ -185,7 +188,8 @@ class FileMonitor(Debuggable):
"""
for pattern in self.ignore:
if (fnmatch.fnmatch(event.filename, pattern) or
- fnmatch.fnmatch(os.path.split(event.filename)[-1], pattern)):
+ fnmatch.fnmatch(os.path.split(event.filename)[-1],
+ pattern)):
self.debug_log("Ignoring %s" % event)
return True
return False
@@ -226,8 +230,8 @@ class FileMonitor(Debuggable):
if self.should_ignore(event):
return
if event.requestID not in self.handles:
- LOGGER.info("Got event for unexpected id %s, file %s" %
- (event.requestID, event.filename))
+ self.logger.info("Got event for unexpected id %s, file %s" %
+ (event.requestID, event.filename))
return
self.debug_log("Dispatching event %s %s to obj %s" %
(event.code2str(), event.filename,
@@ -238,9 +242,8 @@ class FileMonitor(Debuggable):
raise
except: # pylint: disable=W0702
err = sys.exc_info()[1]
- LOGGER.error("Error in handling of event %s for %s: %s" %
- (event.code2str(), event.filename, err),
- exc_info=1)
+ self.logger.error("Error in handling of event %s for %s: %s" %
+ (event.code2str(), event.filename, err))
def handle_event_set(self, lock=None):
""" Handle all pending events.
@@ -266,7 +269,8 @@ class FileMonitor(Debuggable):
lock.release()
end = time()
if count > 0:
- LOGGER.info("Handled %d events in %.03fs" % (count, (end - start)))
+ self.logger.info("Handled %d events in %.03fs" % (count,
+ (end - start)))
def handle_events_in_interval(self, interval):
""" Handle events for the specified period of time (in
@@ -328,6 +332,25 @@ class FileMonitor(Debuggable):
return rv
+#: A module-level FAM object that all plugins, etc., can use. This
+#: should not be used directly, but retrieved via :func:`get_fam`.
+_FAM = None
+
+
+def get_fam():
+ """ Get a
+ :class:`Bcfg2.Server.FileMonitor.FileMonitor` object. If
+ :attr:`_FAM` has not been populated, then a new default
+ FileMonitor will be created.
+
+ :returns: :class:`Bcfg2.Server.FileMonitor.FileMonitor`
+ """
+ global _FAM # pylint: disable=W0603
+ if _FAM is None:
+ _FAM = Bcfg2.Options.setup.filemonitor()
+ return _FAM
+
+
#: A dict of all available FAM backends. Keys are the human-readable
#: names of the backends, which are used in bcfg2.conf to select a
#: backend; values are the backend classes. In addition, the
@@ -343,12 +366,6 @@ except ImportError:
pass
try:
- from Bcfg2.Server.FileMonitor.Fam import Fam
- available['fam'] = Fam
-except ImportError:
- pass
-
-try:
from Bcfg2.Server.FileMonitor.Gamin import Gamin
available['gamin'] = Gamin
except ImportError:
diff --git a/src/lib/Bcfg2/Server/Hostbase/.gitignore b/src/lib/Bcfg2/Server/Hostbase/.gitignore
deleted file mode 100644
index 8e15b5395..000000000
--- a/src/lib/Bcfg2/Server/Hostbase/.gitignore
+++ /dev/null
@@ -1,3 +0,0 @@
-*.pyc
-dev.db
-bcfg2.conf
diff --git a/src/lib/Bcfg2/Server/Hostbase/__init__.py b/src/lib/Bcfg2/Server/Hostbase/__init__.py
deleted file mode 100644
index e69de29bb..000000000
--- a/src/lib/Bcfg2/Server/Hostbase/__init__.py
+++ /dev/null
diff --git a/src/lib/Bcfg2/Server/Hostbase/backends.py b/src/lib/Bcfg2/Server/Hostbase/backends.py
deleted file mode 100644
index cfa9e1e16..000000000
--- a/src/lib/Bcfg2/Server/Hostbase/backends.py
+++ /dev/null
@@ -1,63 +0,0 @@
-from django.contrib.auth.models import User
-#from ldapauth import *
-from nisauth import *
-
-## class LDAPBackend(object):
-
-## def authenticate(self,username=None,password=None):
-## try:
-
-## l = ldapauth(username,password)
-## temp_pass = User.objects.make_random_password(100)
-## ldap_user = dict(username=l.sAMAccountName,
-## )
-## user_session_obj = dict(
-## email=l.email,
-## first_name=l.name_f,
-## last_name=l.name_l,
-## uid=l.badge_no
-## )
-## #fixme: need to add this user session obj to session
-## user,created = User.objects.get_or_create(username=username)
-## return user
-
-## except LDAPAUTHError,e:
-## return None
-
-## def get_user(self,user_id):
-## try:
-## return User.objects.get(pk=user_id)
-## except User.DoesNotExist, e:
-## return None
-
-
-class NISBackend(object):
-
- def authenticate(self, username=None, password=None):
- try:
- n = nisauth(username, password)
- temp_pass = User.objects.make_random_password(100)
- nis_user = dict(username=username,
- )
-
- user_session_obj = dict(
- email = username + "@mcs.anl.gov",
- first_name = None,
- last_name = None,
- uid = n.uid
- )
- user, created = User.objects.get_or_create(username=username)
-
- return user
-
- except NISAUTHError:
- e = sys.exc_info()[1]
- return None
-
-
- def get_user(self, user_id):
- try:
- return User.objects.get(pk=user_id)
- except User.DoesNotExist:
- e = sys.exc_info()[1]
- return None
diff --git a/src/lib/Bcfg2/Server/Hostbase/hostbase/__init__.py b/src/lib/Bcfg2/Server/Hostbase/hostbase/__init__.py
deleted file mode 100644
index e69de29bb..000000000
--- a/src/lib/Bcfg2/Server/Hostbase/hostbase/__init__.py
+++ /dev/null
diff --git a/src/lib/Bcfg2/Server/Hostbase/hostbase/admin.py b/src/lib/Bcfg2/Server/Hostbase/hostbase/admin.py
deleted file mode 100644
index 70a2233cc..000000000
--- a/src/lib/Bcfg2/Server/Hostbase/hostbase/admin.py
+++ /dev/null
@@ -1,15 +0,0 @@
-from django.contrib import admin
-
-from models import Host, Interface, IP, MX, Name, CName, Nameserver, ZoneAddress, Zone, Log, ZoneLog
-
-admin.site.register(Host)
-admin.site.register(Interface)
-admin.site.register(IP)
-admin.site.register(MX)
-admin.site.register(Name)
-admin.site.register(CName)
-admin.site.register(Nameserver)
-admin.site.register(ZoneAddress)
-admin.site.register(Zone)
-admin.site.register(Log)
-admin.site.register(ZoneLog)
diff --git a/src/lib/Bcfg2/Server/Hostbase/hostbase/models.py b/src/lib/Bcfg2/Server/Hostbase/hostbase/models.py
deleted file mode 100644
index 3f08a09a0..000000000
--- a/src/lib/Bcfg2/Server/Hostbase/hostbase/models.py
+++ /dev/null
@@ -1,210 +0,0 @@
-from django.db import models
-
-# Create your models here.
-class Host(models.Model):
- NETGROUP_CHOICES = (
- ('none', 'none'),('cave', 'cave'),('ccst', 'ccst'),('mcs', 'mcs'),
- ('mmlab', 'mmlab'),('sp', 'sp'),('red', 'red'),('virtual', 'virtual'),
- ('win', 'win'),('xterm', 'xterm'),('lcrc', 'lcrc'),('anlext', 'anlext'),
- ('teragrid', 'teragrid')
- )
- STATUS_CHOICES = (
- ('active','active'),('dormant','dormant')
- )
- SUPPORT_CHOICES = (
- ('green','green'),('yellow','yellow'),('red','red')
- )
- CLASS_CHOICES = (
- ('scientific','scientific'),
- ('operations','operations'),('guest','guest'),
- ('confidential','confidential'),('public','public')
- )
- WHATAMI_CHOICES = (
- ('aix-3', 'aix-3'), ('aix-4', 'aix-4'),
- ('aix-5', 'aix-5'), ('baytech', 'baytech'),
- ('decserver', 'decserver'), ('dialup', 'dialup'),
- ('dos', 'dos'), ('freebsd', 'freebsd'),
- ('hpux', 'hpux'), ('irix-5', 'irix-5'),
- ('irix-6', 'irix-6'), ('linux', 'linux'),
- ('linux-2', 'linux-2'), ('linux-rh73', 'linux-rh73'),
- ('linux-rh8', 'linux-rh8'), ('linux-sles8', 'linux-sles8'),
- ('linux-sles8-64', 'linux-sles8-64'), ('linux-sles8-ia32', 'linux-sles8-ia32'),
- ('linux-sles8-ia64', 'linux-sles8-ia64'), ('mac', 'mac'),
- ('network', 'network'), ('next', 'next'),
- ('none', 'none'), ('osf', 'osf'), ('printer', 'printer'),
- ('robot', 'robot'), ('solaris-2', 'solaris-2'),
- ('sun4', 'sun4'), ('unknown', 'unknown'), ('virtual', 'virtual'),
- ('win31', 'win31'), ('win95', 'win95'),
- ('winNTs', 'winNTs'), ('winNTw', 'winNTw'),
- ('win2k', 'win2k'), ('winXP', 'winXP'), ('xterm', 'xterm')
- )
- hostname = models.CharField(max_length=64)
- whatami = models.CharField(max_length=16)
- netgroup = models.CharField(max_length=32, choices=NETGROUP_CHOICES)
- security_class = models.CharField('class', max_length=16)
- support = models.CharField(max_length=8, choices=SUPPORT_CHOICES)
- csi = models.CharField(max_length=32, blank=True)
- printq = models.CharField(max_length=32, blank=True)
- outbound_smtp = models.BooleanField()
- primary_user = models.EmailField()
- administrator = models.EmailField(blank=True)
- location = models.CharField(max_length=16)
- comments = models.TextField(blank=True)
- expiration_date = models.DateField(null=True, blank=True)
- last = models.DateField(auto_now=True, auto_now_add=True)
- status = models.CharField(max_length=7, choices=STATUS_CHOICES)
- dirty = models.BooleanField()
-
- class Admin:
- list_display = ('hostname', 'last')
- search_fields = ['hostname']
-
- def __str__(self):
- return self.hostname
-
- def get_logs(self):
- """
- Get host's log.
- """
- return Log.objects.filter(hostname=self.hostname)
-
-class Interface(models.Model):
- TYPE_CHOICES = (
- ('eth', 'ethernet'), ('wl', 'wireless'), ('virtual', 'virtual'), ('myr', 'myr'),
- ('mgmt', 'mgmt'), ('tape', 'tape'), ('fe', 'fe'), ('ge', 'ge'),
- )
- # FIXME: The new admin interface has change a lot.
- #host = models.ForeignKey(Host, edit_inline=models.TABULAR, num_in_admin=2)
- host = models.ForeignKey(Host)
- # FIXME: The new admin interface has change a lot.
- #mac_addr = models.CharField(max_length=32, core=True)
- mac_addr = models.CharField(max_length=32)
- hdwr_type = models.CharField('type', max_length=16, choices=TYPE_CHOICES, blank=True)
- # FIXME: The new admin interface has change a lot.
- # radio_admin=True, blank=True)
- dhcp = models.BooleanField()
-
- def __str__(self):
- return self.mac_addr
-
- class Admin:
- list_display = ('mac_addr', 'host')
- search_fields = ['mac_addr']
-
-class IP(models.Model):
- interface = models.ForeignKey(Interface)
- # FIXME: The new admin interface has change a lot.
- # edit_inline=models.TABULAR, num_in_admin=1)
- #ip_addr = models.IPAddressField(core=True)
- ip_addr = models.IPAddressField()
-
- def __str__(self):
- return self.ip_addr
-
- class Admin:
- pass
-
- class Meta:
- ordering = ('ip_addr', )
-
-class MX(models.Model):
- priority = models.IntegerField(blank=True)
- # FIXME: The new admin interface has change a lot.
- #mx = models.CharField(max_length=64, blank=True, core=True)
- mx = models.CharField(max_length=64, blank=True)
-
- def __str__(self):
- return (" ".join([str(self.priority), self.mx]))
-
- class Admin:
- pass
-
-class Name(models.Model):
- DNS_CHOICES = (
- ('global','global'),('internal','ANL internal'),
- ('private','private')
- )
- # FIXME: The new admin interface has change a lot.
- #ip = models.ForeignKey(IP, edit_inline=models.TABULAR, num_in_admin=1)
- ip = models.ForeignKey(IP)
- # FIXME: The new admin interface has change a lot.
- #name = models.CharField(max_length=64, core=True)
- name = models.CharField(max_length=64)
- dns_view = models.CharField(max_length=16, choices=DNS_CHOICES)
- only = models.BooleanField(blank=True)
- mxs = models.ManyToManyField(MX)
-
- def __str__(self):
- return self.name
-
- class Admin:
- pass
-
-class CName(models.Model):
- # FIXME: The new admin interface has change a lot.
- #name = models.ForeignKey(Name, edit_inline=models.TABULAR, num_in_admin=1)
- name = models.ForeignKey(Name)
- # FIXME: The new admin interface has change a lot.
- #cname = models.CharField(max_length=64, core=True)
- cname = models.CharField(max_length=64)
-
- def __str__(self):
- return self.cname
-
- class Admin:
- pass
-
-class Nameserver(models.Model):
- name = models.CharField(max_length=64, blank=True)
-
- def __str__(self):
- return self.name
-
- class Admin:
- pass
-
-class ZoneAddress(models.Model):
- ip_addr = models.IPAddressField(blank=True)
-
- def __str__(self):
- return self.ip_addr
-
- class Admin:
- pass
-
-class Zone(models.Model):
- zone = models.CharField(max_length=64)
- serial = models.IntegerField()
- admin = models.CharField(max_length=64)
- primary_master = models.CharField(max_length=64)
- expire = models.IntegerField()
- retry = models.IntegerField()
- refresh = models.IntegerField()
- ttl = models.IntegerField()
- nameservers = models.ManyToManyField(Nameserver, blank=True)
- mxs = models.ManyToManyField(MX, blank=True)
- addresses = models.ManyToManyField(ZoneAddress, blank=True)
- aux = models.TextField(blank=True)
-
- def __str__(self):
- return self.zone
-
- class Admin:
- pass
-
-class Log(models.Model):
- # FIXME: Proposal hostname = models.ForeignKey(Host)
- hostname = models.CharField(max_length=64)
- date = models.DateTimeField(auto_now=True, auto_now_add=True)
- log = models.TextField()
-
- def __str__(self):
- return self.hostname
-
-class ZoneLog(models.Model):
- zone = models.CharField(max_length=64)
- date = models.DateTimeField(auto_now=True, auto_now_add=True)
- log = models.TextField()
-
- def __str__(self):
- return self.zone
diff --git a/src/lib/Bcfg2/Server/Hostbase/hostbase/sql/zone.sql b/src/lib/Bcfg2/Server/Hostbase/hostbase/sql/zone.sql
deleted file mode 100644
index b78187ab2..000000000
--- a/src/lib/Bcfg2/Server/Hostbase/hostbase/sql/zone.sql
+++ /dev/null
@@ -1,2 +0,0 @@
-INSERT INTO hostbase_zone (zone, serial, admin, primary_master, expire, retry, refresh, ttl, aux)
-VALUES ('.rev', 0, '', '', 1209600, 1800, 7200, 7200, ''); \ No newline at end of file
diff --git a/src/lib/Bcfg2/Server/Hostbase/hostbase/urls.py b/src/lib/Bcfg2/Server/Hostbase/hostbase/urls.py
deleted file mode 100644
index a03d2c919..000000000
--- a/src/lib/Bcfg2/Server/Hostbase/hostbase/urls.py
+++ /dev/null
@@ -1,68 +0,0 @@
-# -*- coding: utf-8 -*-
-from Bcfg2.Reporting.Compat.django_urls import *
-from django.contrib.auth.decorators import login_required
-from django.core.urlresolvers import reverse
-from django.views.generic.create_update import create_object, update_object, delete_object
-from django.views.generic.list_detail import object_detail, object_list
-
-from models import Host, Zone, Log
-
-host_detail_dict = {
- 'queryset':Host.objects.all(),
- 'template_name':'host.html',
- 'template_object_name':'host',
-}
-
-host_delete_dict = {
- 'model':Host,
- 'post_delete_redirect':'/',
-}
-
-host_log_detail_dict = host_detail_dict.copy()
-host_log_detail_dict['template_name'] = 'logviewer.html'
-
-host_dns_detail_dict = host_detail_dict.copy()
-host_dns_detail_dict['template_name'] = 'dns.html'
-
-zone_new_dict = {
- 'model':Zone,
- 'template_name':'zonenew.html',
- 'post_save_redirect':'../%(id)s',
-}
-
-zones_list_dict = {
- 'queryset':Zone.objects.all(),
- 'template_name':'zones.html',
- 'template_object_name':'zone',
-}
-
-zone_detail_dict = {
- 'queryset':Zone.objects.all(),
- 'template_name':'zoneview.html',
- 'template_object_name':'zone',
-}
-
-urlpatterns = patterns('',
- (r'^(?P<object_id>\d+)/$', object_detail, host_detail_dict, 'host_detail'),
- (r'^zones/new/$', login_required(create_object), zone_new_dict, 'zone_new'),
- (r'^zones/(?P<object_id>\d+)/edit', login_required(update_object), zone_new_dict, 'zone_edit'),
- (r'^zones/$', object_list, zones_list_dict, 'zone_list'),
- (r'^zones/(?P<object_id>\d+)/$', object_detail, zone_detail_dict, 'zone_detail'),
- (r'^zones/(?P<object_id>\d+)/$', object_detail, zone_detail_dict, 'zone_detail'),
- (r'^\d+/logs/(?P<object_id>\d+)/', object_detail, { 'queryset':Log.objects.all() }, 'log_detail'),
- (r'^(?P<object_id>\d+)/logs/', object_detail, host_log_detail_dict, 'host_log_list'),
- (r'^(?P<object_id>\d+)/dns', object_detail, host_dns_detail_dict, 'host_dns_list'),
- (r'^(?P<object_id>\d+)/remove', login_required(delete_object), host_delete_dict, 'host_delete'),
-)
-
-urlpatterns += patterns('Bcfg2.Server.Hostbase.hostbase.views',
- (r'^$', 'search'),
- (r'^(?P<host_id>\d+)/edit', 'edit'),
- (r'^(?P<host_id>\d+)/(?P<item>\D+)/(?P<item_id>\d+)/confirm', 'confirm'),
- (r'^(?P<host_id>\d+)/(?P<item>\D+)/(?P<item_id>\d+)/(?P<name_id>\d+)/confirm', 'confirm'),
- (r'^(?P<host_id>\d+)/dns/edit', 'dnsedit'),
- (r'^new', 'new'),
- (r'^(?P<host_id>\d+)/copy', 'copy'),
-# (r'^hostinfo', 'hostinfo'),
- (r'^zones/(?P<zone_id>\d+)/(?P<item>\D+)/(?P<item_id>\d+)/confirm', 'confirm'),
-)
diff --git a/src/lib/Bcfg2/Server/Hostbase/hostbase/views.py b/src/lib/Bcfg2/Server/Hostbase/hostbase/views.py
deleted file mode 100644
index 57ef5eff8..000000000
--- a/src/lib/Bcfg2/Server/Hostbase/hostbase/views.py
+++ /dev/null
@@ -1,970 +0,0 @@
-"""Views.py
-Contains all the views associated with the hostbase app
-Also has does form validation
-"""
-from django.http import HttpResponse, HttpResponseRedirect
-
-from django.contrib.auth.decorators import login_required
-from django.contrib.auth import logout
-from django.template import RequestContext
-from Bcfg2.Server.Hostbase.hostbase.models import *
-from datetime import date
-from django.db import connection
-from django.shortcuts import render_to_response
-from django import forms
-from Bcfg2.Server.Hostbase import settings, regex
-import re, copy
-
-attribs = ['hostname', 'whatami', 'netgroup', 'security_class', 'support',
- 'csi', 'printq', 'primary_user', 'administrator', 'location',
- 'status', 'comments']
-
-zoneattribs = ['zone', 'admin', 'primary_master', 'expire', 'retry',
- 'refresh', 'ttl', 'aux']
-
-dispatch = {'mac_addr':'i.mac_addr LIKE \'%%%%%s%%%%\'',
- 'ip_addr':'p.ip_addr LIKE \'%%%%%s%%%%\'',
- 'name':'n.name LIKE \'%%%%%s%%%%\'',
-## 'hostname':'n.name LIKE \'%%%%%s%%%%\'',
-## 'cname':'n.name LIKE \'%%%%%s%%%%\'',
- 'mx':'m.mx LIKE \'%%%%%s%%%%\'',
- 'dns_view':'n.dns_view = \'%s\'',
- 'hdwr_type':'i.hdwr_type = \'%s\'',
- 'dhcp':'i.dhcp = \'%s\''}
-
-def search(request):
- """Search for hosts in the database
- If more than one field is entered, logical AND is used
- """
- if 'sub' in request.GET:
- querystring = """SELECT DISTINCT h.hostname, h.id, h.status
- FROM (((((hostbase_host h
- INNER JOIN hostbase_interface i ON h.id = i.host_id)
- INNER JOIN hostbase_ip p ON i.id = p.interface_id)
- INNER JOIN hostbase_name n ON p.id = n.ip_id)
- INNER JOIN hostbase_name_mxs x ON n.id = x.name_id)
- INNER JOIN hostbase_mx m ON m.id = x.mx_id)
- LEFT JOIN hostbase_cname c ON n.id = c.name_id
- WHERE """
-
- _and = False
- for field in request.POST:
- if request.POST[field] and field == 'hostname':
- if _and:
- querystring += ' AND '
- querystring += 'n.name LIKE \'%%%%%s%%%%\' or c.cname LIKE \'%%%%%s%%%%\'' % (request.POST[field], request.POST[field])
- _and = True
- elif request.POST[field] and field in dispatch:
- if _and:
- querystring += ' AND '
- querystring += dispatch[field] % request.POST[field]
- _and = True
- elif request.POST[field]:
- if _and:
- querystring += ' AND '
- querystring += "h.%s LIKE \'%%%%%s%%%%\'" % (field, request.POST[field])
- _and = True
-
- if not _and:
- cursor = connection.cursor()
- cursor.execute("""SELECT hostname, id, status
- FROM hostbase_host ORDER BY hostname""")
- results = cursor.fetchall()
- else:
- querystring += " ORDER BY h.hostname"
- cursor = connection.cursor()
- cursor.execute(querystring)
- results = cursor.fetchall()
-
- return render_to_response('results.html',
- {'hosts': results,
- 'logged_in': request.session.get('_auth_user_id', False)},
- context_instance = RequestContext(request))
- else:
- return render_to_response('search.html',
- {'TYPE_CHOICES': Interface.TYPE_CHOICES,
- 'DNS_CHOICES': Name.DNS_CHOICES,
- 'yesno': [(1, 'yes'), (0, 'no')],
- 'logged_in': request.session.get('_auth_user_id', False)},
- context_instance = RequestContext(request))
-
-
-def gethostdata(host_id, dnsdata=False):
- """Grabs the necessary data about a host
- Replaces a lot of repeated code"""
- hostdata = {}
- hostdata['ips'] = {}
- hostdata['names'] = {}
- hostdata['cnames'] = {}
- hostdata['mxs'] = {}
- hostdata['host'] = Host.objects.get(id=host_id)
- hostdata['interfaces'] = hostdata['host'].interface_set.all()
- for interface in hostdata['interfaces']:
- hostdata['ips'][interface.id] = interface.ip_set.all()
- if dnsdata:
- for ip in hostdata['ips'][interface.id]:
- hostdata['names'][ip.id] = ip.name_set.all()
- for name in hostdata['names'][ip.id]:
- hostdata['cnames'][name.id] = name.cname_set.all()
- hostdata['mxs'][name.id] = name.mxs.all()
- return hostdata
-
-def fill(template, hostdata, dnsdata=False):
- """Fills a generic template
- Replaces a lot of repeated code"""
- if dnsdata:
- template.names = hostdata['names']
- template.cnames = hostdata['cnames']
- template.mxs = hostdata['mxs']
- template.host = hostdata['host']
- template.interfaces = hostdata['interfaces']
- template.ips = hostdata['ips']
- return template
-
-def edit(request, host_id):
- """edit general host information"""
- manipulator = Host.ChangeManipulator(host_id)
- changename = False
- if request.method == 'POST':
- host = Host.objects.get(id=host_id)
- before = host.__dict__.copy()
- if request.POST['hostname'] != host.hostname:
- oldhostname = host.hostname.split(".")[0]
- changename = True
- interfaces = host.interface_set.all()
- old_interfaces = [interface.__dict__.copy() for interface in interfaces]
-
- new_data = request.POST.copy()
-
- errors = manipulator.get_validation_errors(new_data)
- if not errors:
-
- # somehow keep track of multiple interface change manipulators
- # as well as multiple ip chnage manipulators??? (add manipulators???)
- # change to many-to-many??????
-
- # dynamically look up mx records?
- text = ''
-
- for attrib in attribs:
- if host.__dict__[attrib] != request.POST[attrib]:
- text = do_log(text, attrib, host.__dict__[attrib], request.POST[attrib])
- host.__dict__[attrib] = request.POST[attrib]
-
- if 'expiration_date' in request.POST:
- ymd = request.POST['expiration_date'].split("-")
- if date(int(ymd[0]), int(ymd[1]), int(ymd[2])) != host.__dict__['expiration_date']:
- text = do_log(text, 'expiration_date', host.__dict__['expiration_date'],
- request.POST['expiration_date'])
- host.__dict__['expiration_date'] = date(int(ymd[0]), int(ymd[1]), int(ymd[2]))
-
- for inter in interfaces:
- changetype = False
- ips = IP.objects.filter(interface=inter.id)
- if inter.mac_addr != request.POST['mac_addr%d' % inter.id]:
- text = do_log(text, 'mac_addr', inter.mac_addr, request.POST['mac_addr%d' % inter.id])
- inter.mac_addr = request.POST['mac_addr%d' % inter.id].lower().replace('-',':')
- if inter.hdwr_type != request.POST['hdwr_type%d' % inter.id]:
- oldtype = inter.hdwr_type
- text = do_log(text, 'hdwr_type', oldtype, request.POST['hdwr_type%d' % inter.id])
- inter.hdwr_type = request.POST['hdwr_type%d' % inter.id]
- changetype = True
- if (('dhcp%d' % inter.id) in request.POST and not inter.dhcp or
- not ('dhcp%d' % inter.id) in request.POST and inter.dhcp):
- text = do_log(text, 'dhcp', inter.dhcp, int(not inter.dhcp))
- inter.dhcp = not inter.dhcp
- for ip in ips:
- names = ip.name_set.all()
- if not ip.ip_addr == request.POST['ip_addr%d' % ip.id]:
- oldip = ip.ip_addr
- oldsubnet = oldip.split(".")[2]
- ip.ip_addr = request.POST['ip_addr%d' % ip.id]
- ip.save()
- text = do_log(text, 'ip_addr', oldip, ip.ip_addr)
- for name in names:
- if name.name.split(".")[0].endswith('-%s' % oldsubnet):
- name.name = name.name.replace('-%s' % oldsubnet, '-%s' % ip.ip_addr.split(".")[2])
- name.save()
- if changetype:
- for name in names:
- if name.name.split(".")[0].endswith('-%s' % oldtype):
- name.name = name.name.replace('-%s' % oldtype, '-%s' % inter.hdwr_type)
- name.save()
- if changename:
- for name in names:
- if name.name.startswith(oldhostname):
- name.name = name.name.replace(oldhostname, host.hostname.split(".")[0])
- name.save()
- if request.POST['%dip_addr' % inter.id]:
- mx, created = MX.objects.get_or_create(priority=settings.PRIORITY, mx=settings.DEFAULT_MX)
- if created:
- mx.save()
- new_ip = IP(interface=inter, ip_addr=request.POST['%dip_addr' % inter.id])
- new_ip.save()
- text = do_log(text, '*new*', 'ip_addr', new_ip.ip_addr)
- new_name = "-".join([host.hostname.split(".")[0],
- new_ip.ip_addr.split(".")[2]])
- new_name += "." + host.hostname.split(".", 1)[1]
- name = Name(ip=new_ip, name=new_name,
- dns_view='global', only=False)
- name.save()
- name.mxs.add(mx)
- new_name = "-".join([host.hostname.split(".")[0],
- inter.hdwr_type])
- new_name += "." + host.hostname.split(".", 1)[1]
- name = Name(ip=new_ip, name=new_name,
- dns_view='global', only=False)
- name.save()
- name.mxs.add(mx)
- name = Name(ip=new_ip, name=host.hostname,
- dns_view='global', only=False)
- name.save()
- name.mxs.add(mx)
- inter.save()
- if request.POST['mac_addr_new']:
- new_inter = Interface(host=host,
- mac_addr=request.POST['mac_addr_new'].lower().replace('-',':'),
- hdwr_type=request.POST['hdwr_type_new'],
- dhcp=request.POST['dhcp_new'])
- text = do_log(text, '*new*', 'mac_addr', new_inter.mac_addr)
- new_inter.save()
- if request.POST['mac_addr_new'] and request.POST['ip_addr_new']:
- mx, created = MX.objects.get_or_create(priority=settings.PRIORITY, mx=settings.DEFAULT_MX)
- if created:
- mx.save()
- new_ip = IP(interface=new_inter, ip_addr=request.POST['ip_addr_new'])
- new_ip.save()
- text = do_log(text, '*new*', 'ip_addr', new_ip.ip_addr)
- new_name = "-".join([host.hostname.split(".")[0],
- new_ip.ip_addr.split(".")[2]])
- new_name += "." + host.hostname.split(".", 1)[1]
- name = Name(ip=new_ip, name=new_name,
- dns_view='global', only=False)
- name.save()
- name.mxs.add(mx)
- new_name = "-".join([host.hostname.split(".")[0],
- new_inter.hdwr_type])
- new_name += "." + host.hostname.split(".", 1)[1]
- name = Name(ip=new_ip, name=new_name,
- dns_view='global', only=False)
- name.save()
- name.mxs.add(mx)
- name = Name(ip=new_ip, name=host.hostname,
- dns_view='global', only=False)
- name.save()
- name.mxs.add(mx)
- if request.POST['ip_addr_new'] and not request.POST['mac_addr_new']:
- mx, created = MX.objects.get_or_create(priority=settings.PRIORITY, mx=settings.DEFAULT_MX)
- if created:
- mx.save()
- new_inter = Interface(host=host, mac_addr="",
- hdwr_type=request.POST['hdwr_type_new'],
- dhcp=False)
- new_inter.save()
- new_ip = IP(interface=new_inter, ip_addr=request.POST['ip_addr_new'])
- new_ip.save()
- text = do_log(text, '*new*', 'ip_addr', new_ip.ip_addr)
- new_name = "-".join([host.hostname.split(".")[0],
- new_ip.ip_addr.split(".")[2]])
- new_name += "." + host.hostname.split(".", 1)[1]
- name = Name(ip=new_ip, name=new_name,
- dns_view='global', only=False)
- name.save()
- name.mxs.add(mx)
- new_name = "-".join([host.hostname.split(".")[0],
- new_inter.hdwr_type])
- new_name += "." + host.hostname.split(".", 1)[1]
- name = Name(ip=new_ip, name=new_name,
- dns_view='global', only=False)
- name.save()
- name.mxs.add(mx)
- name = Name(ip=new_ip, name=host.hostname,
- dns_view='global', only=False)
- name.save()
- name.mxs.add(mx)
- if text:
- log = Log(hostname=host.hostname, log=text)
- log.save()
- host.save()
- return HttpResponseRedirect('/hostbase/%s/' % host.id)
- else:
- return render_to_response('errors.html',
- {'failures': errors,
- 'logged_in': request.session.get('_auth_user_id', False)},
- context_instance = RequestContext(request))
- else:
- host = Host.objects.get(id=host_id)
- interfaces = []
- for interface in host.interface_set.all():
- interfaces.append([interface, interface.ip_set.all()])
- return render_to_response('edit.html',
- {'host': host,
- 'interfaces': interfaces,
- 'TYPE_CHOICES': Interface.TYPE_CHOICES,
- 'logged_in': request.session.get('_auth_user_id', False)},
- context_instance = RequestContext(request))
-
-def confirm(request, item, item_id, host_id=None, name_id=None, zone_id=None):
- """Asks if the user is sure he/she wants to remove an item"""
- if 'sub' in request.GET:
- if item == 'interface':
- for ip in Interface.objects.get(id=item_id).ip_set.all():
- for name in ip.name_set.all():
- name.cname_set.all().delete()
- ip.name_set.all().delete()
- Interface.objects.get(id=item_id).ip_set.all().delete()
- Interface.objects.get(id=item_id).delete()
- elif item=='ip':
- for name in IP.objects.get(id=item_id).name_set.all():
- name.cname_set.all().delete()
- IP.objects.get(id=item_id).name_set.all().delete()
- IP.objects.get(id=item_id).delete()
- elif item=='cname':
- CName.objects.get(id=item_id).delete()
- elif item=='mx':
- mx = MX.objects.get(id=item_id)
- Name.objects.get(id=name_id).mxs.remove(mx)
- elif item=='name':
- Name.objects.get(id=item_id).cname_set.all().delete()
- Name.objects.get(id=item_id).delete()
- elif item=='nameserver':
- nameserver = Nameserver.objects.get(id=item_id)
- Zone.objects.get(id=zone_id).nameservers.remove(nameserver)
- elif item=='zonemx':
- mx = MX.objects.get(id=item_id)
- Zone.objects.get(id=zone_id).mxs.remove(mx)
- elif item=='address':
- address = ZoneAddress.objects.get(id=item_id)
- Zone.objects.get(id=zone_id).addresses.remove(address)
- if item == 'cname' or item == 'mx' or item == 'name':
- return HttpResponseRedirect('/hostbase/%s/dns/edit' % host_id)
- elif item == 'nameserver' or item == 'zonemx' or item == 'address':
- return HttpResponseRedirect('/hostbase/zones/%s/edit' % zone_id)
- else:
- return HttpResponseRedirect('/hostbase/%s/edit' % host_id)
- else:
- interface = None
- ips = []
- names = []
- cnames = []
- mxs = []
- zonemx = None
- nameserver = None
- address = None
- if item == 'interface':
- interface = Interface.objects.get(id=item_id)
- ips = interface.ip_set.all()
- for ip in ips:
- for name in ip.name_set.all():
- names.append((ip.id, name))
- for cname in name.cname_set.all():
- cnames.append((name.id, cname))
- for mx in name.mxs.all():
- mxs.append((name.id, mx))
- elif item=='ip':
- ips = [IP.objects.get(id=item_id)]
- for name in ips[0].name_set.all():
- names.append((ips[0].id, name))
- for cname in name.cname_set.all():
- cnames.append((name.id, cname))
- for mx in name.mxs.all():
- mxs.append((name.id, mx))
- elif item=='name':
- names = [Name.objects.get(id=item_id)]
- for cname in names[0].cname_set.all():
- cnames.append((names[0].id, cname))
- for mx in names[0].mxs.all():
- mxs.append((names[0].id, mx))
- elif item=='cname':
- cnames = [CName.objects.get(id=item_id)]
- elif item=='mx':
- mxs = [MX.objects.get(id=item_id)]
- elif item=='zonemx':
- zonemx = MX.objects.get(id=item_id)
- elif item=='nameserver':
- nameserver = Nameserver.objects.get(id=item_id)
- elif item=='address':
- address = ZoneAddress.objects.get(id=item_id)
- return render_to_response('confirm.html',
- {'interface': interface,
- 'ips': ips,
- 'names': names,
- 'cnames': cnames,
- 'id': item_id,
- 'type': item,
- 'host_id': host_id,
- 'mxs': mxs,
- 'zonemx': zonemx,
- 'nameserver': nameserver,
- 'address': address,
- 'zone_id': zone_id,
- 'logged_in': request.session.get('_auth_user_id', False)},
- context_instance = RequestContext(request))
-
-def dnsedit(request, host_id):
- """Edits specific DNS information
- Data is validated before committed to the database"""
- text = ''
- if 'sub' in request.GET:
- hostdata = gethostdata(host_id, True)
- for ip in hostdata['names']:
- ipaddr = IP.objects.get(id=ip)
- ipaddrstr = ipaddr.__str__()
- for name in hostdata['cnames']:
- for cname in hostdata['cnames'][name]:
- if regex.host.match(request.POST['cname%d' % cname.id]):
- text = do_log(text, 'cname', cname.cname, request.POST['cname%d' % cname.id])
- cname.cname = request.POST['cname%d' % cname.id]
- cname.save()
- for name in hostdata['mxs']:
- for mx in hostdata['mxs'][name]:
- if (mx.priority != request.POST['priority%d' % mx.id] and mx.mx != request.POST['mx%d' % mx.id]):
- text = do_log(text, 'mx', ' '.join([str(mx.priority), str(mx.mx)]),
- ' '.join([request.POST['priority%d' % mx.id], request.POST['mx%d' % mx.id]]))
- nameobject = Name.objects.get(id=name)
- nameobject.mxs.remove(mx)
- newmx, created = MX.objects.get_or_create(priority=request.POST['priority%d' % mx.id], mx=request.POST['mx%d' % mx.id])
- if created:
- newmx.save()
- nameobject.mxs.add(newmx)
- nameobject.save()
- for name in hostdata['names'][ip]:
- name.name = request.POST['name%d' % name.id]
- name.dns_view = request.POST['dns_view%d' % name.id]
- if (request.POST['%dcname' % name.id] and
- regex.host.match(request.POST['%dcname' % name.id])):
- cname = CName(name=name,
- cname=request.POST['%dcname' % name.id])
- text = do_log(text, '*new*', 'cname', cname.cname)
- cname.save()
- if (request.POST['%dpriority' % name.id] and
- request.POST['%dmx' % name.id]):
- mx, created = MX.objects.get_or_create(priority=request.POST['%dpriority' % name.id],
- mx=request.POST['%dmx' % name.id])
- if created:
- mx.save()
- text = do_log(text, '*new*', 'mx',
- ' '.join([request.POST['%dpriority' % name.id],
- request.POST['%dmx' % name.id]]))
- name.mxs.add(mx)
- name.save()
- if request.POST['%sname' % ipaddrstr]:
- name = Name(ip=ipaddr,
- dns_view=request.POST['%sdns_view' % ipaddrstr],
- name=request.POST['%sname' % ipaddrstr], only=False)
- text = do_log(text, '*new*', 'name', name.name)
- name.save()
- if (request.POST['%scname' % ipaddrstr] and
- regex.host.match(request.POST['%scname' % ipaddrstr])):
- cname = CName(name=name,
- cname=request.POST['%scname' % ipaddrstr])
- text = do_log(text, '*new*', 'cname', cname.cname)
- cname.save()
- if (request.POST['%smx' % ipaddrstr] and
- request.POST['%spriority' % ipaddrstr]):
- mx, created = MX.objects.get_or_create(priority=request.POST['%spriority' % ipaddrstr],
- mx=request.POST['%smx' % ipaddrstr])
- if created:
- mx.save()
- text = do_log(text, '*new*', 'mx',
- ' '.join([request.POST['%spriority' % ipaddrstr], request.POST['%smx' % ipaddrstr]]))
- name.mxs.add(mx)
- if text:
- log = Log(hostname=hostdata['host'].hostname, log=text)
- log.save()
- return HttpResponseRedirect('/hostbase/%s/dns' % host_id)
- else:
- host = Host.objects.get(id=host_id)
- ips = []
- info = []
- cnames = []
- mxs = []
- interfaces = host.interface_set.all()
- for interface in host.interface_set.all():
- ips.extend(interface.ip_set.all())
- for ip in ips:
- info.append([ip, ip.name_set.all()])
- for name in ip.name_set.all():
- cnames.extend(name.cname_set.all())
- mxs.append((name.id, name.mxs.all()))
- return render_to_response('dnsedit.html',
- {'host': host,
- 'info': info,
- 'cnames': cnames,
- 'mxs': mxs,
- 'request': request,
- 'interfaces': interfaces,
- 'DNS_CHOICES': Name.DNS_CHOICES,
- 'logged_in': request.session.get('_auth_user_id', False)},
- context_instance = RequestContext(request))
-
-def new(request):
- """Function for creating a new host in hostbase
- Data is validated before committed to the database"""
- if 'sub' in request.GET:
- try:
- Host.objects.get(hostname=request.POST['hostname'].lower())
- return render_to_response('errors.html',
- {'failures': ['%s already exists in hostbase' % request.POST['hostname']],
- 'logged_in': request.session.get('_auth_user_id', False)},
- context_instance = RequestContext(request))
- except:
- pass
- if not validate(request, True):
- if not request.POST['ip_addr_new'] and not request.POST['ip_addr_new2']:
- return render_to_response('errors.html',
- {'failures': ['ip_addr: You must enter an ip address'],
- 'logged_in': request.session.get('_auth_user_id', False)},
- context_instance = RequestContext(request))
- host = Host()
- # this is the stuff that validate() should take care of
- # examine the check boxes for any changes
- host.outbound_smtp = 'outbound_smtp' in request.POST
- for attrib in attribs:
- if attrib in request.POST:
- host.__dict__[attrib] = request.POST[attrib].lower()
- if 'comments' in request.POST:
- host.comments = request.POST['comments']
- if 'expiration_date' in request.POST:
-# ymd = request.POST['expiration_date'].split("-")
-# host.__dict__['expiration_date'] = date(int(ymd[0]), int(ymd[1]), int(ymd[2]))
- host.__dict__['expiration_date'] = date(2000, 1, 1)
- host.status = 'active'
- host.save()
- else:
- return render_to_response('errors.html',
- {'failures': validate(request, True),
- 'logged_in': request.session.get('_auth_user_id', False)},
- context_instance = RequestContext(request))
-
- if request.POST['mac_addr_new']:
- new_inter = Interface(host=host,
- mac_addr = request.POST['mac_addr_new'].lower().replace('-',':'),
- hdwr_type = request.POST['hdwr_type_new'],
- dhcp = 'dhcp_new' in request.POST)
- new_inter.save()
- if request.POST['mac_addr_new'] and request.POST['ip_addr_new']:
- new_ip = IP(interface=new_inter, ip_addr=request.POST['ip_addr_new'])
-# Change all this things. Use a "post_save" signal handler for model Host to create all sociate models
-# and use a generi view.
- new_ip.save()
- mx, created = MX.objects.get_or_create(priority=settings.PRIORITY, mx=settings.DEFAULT_MX)
- if created:
- mx.save()
- new_name = "-".join([host.hostname.split(".")[0],
- new_ip.ip_addr.split(".")[2]])
- new_name += "." + host.hostname.split(".", 1)[1]
- name = Name(ip=new_ip, name=new_name, dns_view='global', only=False)
- name.save()
- name.mxs.add(mx)
- new_name = "-".join([host.hostname.split(".")[0],
- new_inter.hdwr_type])
- new_name += "." + host.hostname.split(".", 1)[1]
- name = Name(ip=new_ip, name=new_name,
- dns_view='global', only=False)
- name.save()
- name.mxs.add(mx)
- name = Name(ip=new_ip, name=host.hostname,
- dns_view='global', only=False)
- name.save()
- name.mxs.add(mx)
- if request.POST['ip_addr_new'] and not request.POST['mac_addr_new']:
- new_inter = Interface(host=host,
- mac_addr="",
- hdwr_type=request.POST['hdwr_type_new'],
- dhcp=False)
- new_inter.save()
- new_ip = IP(interface=new_inter, ip_addr=request.POST['ip_addr_new'])
- new_ip.save()
- mx, created = MX.objects.get_or_create(priority=settings.PRIORITY, mx=settings.DEFAULT_MX)
- if created:
- mx.save()
- new_name = "-".join([host.hostname.split(".")[0],
- new_ip.ip_addr.split(".")[2]])
- new_name += "." + host.hostname.split(".", 1)[1]
- name = Name(ip=new_ip, name=new_name,
- dns_view='global', only=False)
- name.save()
- name.mxs.add(mx)
- new_name = "-".join([host.hostname.split(".")[0],
- new_inter.hdwr_type])
- new_name += "." + host.hostname.split(".", 1)[1]
- name = Name(ip=new_ip, name=new_name,
- dns_view='global', only=False)
- name.save()
- name.mxs.add(mx)
- name = Name(ip=new_ip, name=host.hostname,
- dns_view='global', only=False)
- name.save()
- name.mxs.add(mx)
- if request.POST['mac_addr_new2']:
- new_inter = Interface(host=host,
- mac_addr = request.POST['mac_addr_new2'].lower().replace('-',':'),
- hdwr_type = request.POST['hdwr_type_new2'],
- dhcp = 'dhcp_new2' in request.POST)
- new_inter.save()
- if request.POST['mac_addr_new2'] and request.POST['ip_addr_new2']:
- new_ip = IP(interface=new_inter, ip_addr=request.POST['ip_addr_new2'])
- new_ip.save()
- mx, created = MX.objects.get_or_create(priority=settings.PRIORITY, mx=settings.DEFAULT_MX)
- if created:
- mx.save()
- new_name = "-".join([host.hostname.split(".")[0],
- new_ip.ip_addr.split(".")[2]])
- new_name += "." + host.hostname.split(".", 1)[1]
- name = Name(ip=new_ip, name=new_name,
- dns_view='global', only=False)
- name.save()
- name.mxs.add(mx)
- new_name = "-".join([host.hostname.split(".")[0],
- new_inter.hdwr_type])
- new_name += "." + host.hostname.split(".", 1)[1]
- name = Name(ip=new_ip, name=new_name,
- dns_view='global', only=False)
- name.save()
- name.mxs.add(mx)
- name = Name(ip=new_ip, name=host.hostname,
- dns_view='global', only=False)
- name.save()
- name.mxs.add(mx)
- if request.POST['ip_addr_new2'] and not request.POST['mac_addr_new2']:
- new_inter = Interface(host=host,
- mac_addr="",
- hdwr_type=request.POST['hdwr_type_new2'],
- dhcp=False)
- new_inter.save()
- new_ip = IP(interface=new_inter, ip_addr=request.POST['ip_addr_new2'])
- new_ip.save()
- mx, created = MX.objects.get_or_create(priority=settings.PRIORITY, mx=settings.DEFAULT_MX)
- if created:
- mx.save()
- new_name = "-".join([host.hostname.split(".")[0],
- new_ip.ip_addr.split(".")[2]])
- new_name += "." + host.hostname.split(".", 1)[1]
- name = Name(ip=new_ip, name=new_name,
- dns_view='global', only=False)
- name.save()
- name.mxs.add(mx)
- new_name = "-".join([host.hostname.split(".")[0],
- new_inter.hdwr_type])
- new_name += "." + host.hostname.split(".", 1)[1]
- name = Name(ip=new_ip, name=new_name,
- dns_view='global', only=False)
- name.save()
- name.mxs.add(mx)
- name = Name(ip=new_ip, name=host.hostname,
- dns_view='global', only=False)
- name.save()
- name.mxs.add(mx)
- host.save()
- return HttpResponseRedirect('/hostbase/%s/' % host.id)
- else:
- return render_to_response('new.html',
- {'TYPE_CHOICES': Interface.TYPE_CHOICES,
- 'NETGROUP_CHOICES': Host.NETGROUP_CHOICES,
- 'CLASS_CHOICES': Host.CLASS_CHOICES,
- 'SUPPORT_CHOICES': Host.SUPPORT_CHOICES,
- 'WHATAMI_CHOICES': Host.WHATAMI_CHOICES,
- 'logged_in': request.session.get('_auth_user_id', False)},
- context_instance = RequestContext(request))
-
-def copy(request, host_id):
- """Function for creating a new host in hostbase
- Data is validated before committed to the database"""
- if 'sub' in request.GET:
- try:
- Host.objects.get(hostname=request.POST['hostname'].lower())
- return render_to_response('errors.html',
- {'failures': ['%s already exists in hostbase' % request.POST['hostname']],
- 'logged_in': request.session.get('_auth_user_id', False)},
- context_instance = RequestContext(request))
- except:
- pass
- if not validate(request, True):
- if not request.POST['ip_addr_new'] and not request.POST['ip_addr_new2']:
- return render_to_response('errors.html',
- {'failures': ['ip_addr: You must enter an ip address'],
- 'logged_in': request.session.get('_auth_user_id', False)},
- context_instance = RequestContext(request))
- host = Host()
- # this is the stuff that validate() should take care of
- # examine the check boxes for any changes
- host.outbound_smtp = 'outbound_smtp' in request.POST
- for attrib in attribs:
- if attrib in request.POST:
- host.__dict__[attrib] = request.POST[attrib].lower()
- if 'comments' in request.POST:
- host.comments = request.POST['comments']
- if 'expiration_date' in request.POST:
-# ymd = request.POST['expiration_date'].split("-")
-# host.__dict__['expiration_date'] = date(int(ymd[0]), int(ymd[1]), int(ymd[2]))
- host.__dict__['expiration_date'] = date(2000, 1, 1)
- host.status = 'active'
- host.save()
- else:
- return render_to_response('errors.html',
- {'failures': validate(request, True),
- 'logged_in': request.session.get('_auth_user_id', False)},
- context_instance = RequestContext(request))
-
- if request.POST['mac_addr_new']:
- new_inter = Interface(host=host,
- mac_addr = request.POST['mac_addr_new'].lower().replace('-',':'),
- hdwr_type = request.POST['hdwr_type_new'],
- dhcp = 'dhcp_new' in request.POST)
- new_inter.save()
- if request.POST['mac_addr_new'] and request.POST['ip_addr_new']:
- new_ip = IP(interface=new_inter, ip_addr=request.POST['ip_addr_new'])
- new_ip.save()
- mx, created = MX.objects.get_or_create(priority=settings.PRIORITY, mx=settings.DEFAULT_MX)
- if created:
- mx.save()
- new_name = "-".join([host.hostname.split(".")[0],
- new_ip.ip_addr.split(".")[2]])
- new_name += "." + host.hostname.split(".", 1)[1]
- name = Name(ip=new_ip, name=new_name, dns_view='global', only=False)
- name.save()
- name.mxs.add(mx)
- new_name = "-".join([host.hostname.split(".")[0],
- new_inter.hdwr_type])
- new_name += "." + host.hostname.split(".", 1)[1]
- name = Name(ip=new_ip, name=new_name,
- dns_view='global', only=False)
- name.save()
- name.mxs.add(mx)
- name = Name(ip=new_ip, name=host.hostname,
- dns_view='global', only=False)
- name.save()
- name.mxs.add(mx)
- if request.POST['ip_addr_new'] and not request.POST['mac_addr_new']:
- new_inter = Interface(host=host,
- mac_addr="",
- hdwr_type=request.POST['hdwr_type_new'],
- dhcp=False)
- new_inter.save()
- new_ip = IP(interface=new_inter, ip_addr=request.POST['ip_addr_new'])
- new_ip.save()
- mx, created = MX.objects.get_or_create(priority=settings.PRIORITY, mx=settings.DEFAULT_MX)
- if created:
- mx.save()
- new_name = "-".join([host.hostname.split(".")[0],
- new_ip.ip_addr.split(".")[2]])
- new_name += "." + host.hostname.split(".", 1)[1]
- name = Name(ip=new_ip, name=new_name,
- dns_view='global', only=False)
- name.save()
- name.mxs.add(mx)
- new_name = "-".join([host.hostname.split(".")[0],
- new_inter.hdwr_type])
- new_name += "." + host.hostname.split(".", 1)[1]
- name = Name(ip=new_ip, name=new_name,
- dns_view='global', only=False)
- name.save()
- name.mxs.add(mx)
- name = Name(ip=new_ip, name=host.hostname,
- dns_view='global', only=False)
- name.save()
- name.mxs.add(mx)
- if request.POST['mac_addr_new2']:
- new_inter = Interface(host=host,
- mac_addr = request.POST['mac_addr_new2'].lower().replace('-',':'),
- hdwr_type = request.POST['hdwr_type_new2'],
- dhcp = 'dhcp_new2' in request.POST)
- new_inter.save()
- if request.POST['mac_addr_new2'] and request.POST['ip_addr_new2']:
- new_ip = IP(interface=new_inter, ip_addr=request.POST['ip_addr_new2'])
- new_ip.save()
- mx, created = MX.objects.get_or_create(priority=settings.PRIORITY, mx=settings.DEFAULT_MX)
- if created:
- mx.save()
- new_name = "-".join([host.hostname.split(".")[0],
- new_ip.ip_addr.split(".")[2]])
- new_name += "." + host.hostname.split(".", 1)[1]
- name = Name(ip=new_ip, name=new_name,
- dns_view='global', only=False)
- name.save()
- name.mxs.add(mx)
- new_name = "-".join([host.hostname.split(".")[0],
- new_inter.hdwr_type])
- new_name += "." + host.hostname.split(".", 1)[1]
- name = Name(ip=new_ip, name=new_name,
- dns_view='global', only=False)
- name.save()
- name.mxs.add(mx)
- name = Name(ip=new_ip, name=host.hostname,
- dns_view='global', only=False)
- name.save()
- name.mxs.add(mx)
- if request.POST['ip_addr_new2'] and not request.POST['mac_addr_new2']:
- new_inter = Interface(host=host,
- mac_addr="",
- hdwr_type=request.POST['hdwr_type_new2'],
- dhcp=False)
- new_inter.save()
- new_ip = IP(interface=new_inter, ip_addr=request.POST['ip_addr_new2'])
- new_ip.save()
- mx, created = MX.objects.get_or_create(priority=settings.PRIORITY, mx=settings.DEFAULT_MX)
- if created:
- mx.save()
- new_name = "-".join([host.hostname.split(".")[0],
- new_ip.ip_addr.split(".")[2]])
- new_name += "." + host.hostname.split(".", 1)[1]
- name = Name(ip=new_ip, name=new_name,
- dns_view='global', only=False)
- name.save()
- name.mxs.add(mx)
- new_name = "-".join([host.hostname.split(".")[0],
- new_inter.hdwr_type])
- new_name += "." + host.hostname.split(".", 1)[1]
- name = Name(ip=new_ip, name=new_name,
- dns_view='global', only=False)
- name.save()
- name.mxs.add(mx)
- name = Name(ip=new_ip, name=host.hostname,
- dns_view='global', only=False)
- name.save()
- name.mxs.add(mx)
- host.save()
- return HttpResponseRedirect('/hostbase/%s/' % host.id)
- else:
- host = Host.objects.get(id=host_id)
- return render_to_response('copy.html',
- {'host': host,
- 'TYPE_CHOICES': Interface.TYPE_CHOICES,
- 'NETGROUP_CHOICES': Host.NETGROUP_CHOICES,
- 'CLASS_CHOICES': Host.CLASS_CHOICES,
- 'SUPPORT_CHOICES': Host.SUPPORT_CHOICES,
- 'WHATAMI_CHOICES': Host.WHATAMI_CHOICES,
- 'logged_in': request.session.get('_auth_user_id', False)},
- context_instance = RequestContext(request))
-
-# FIXME: delete all this things in a signal handler "pre_delete"
-#def remove(request, host_id):
-# host = Host.objects.get(id=host_id)
-# if 'sub' in request:
-# for interface in host.interface_set.all():
-# for ip in interface.ip_set.all():
-# for name in ip.name_set.all():
-# name.cname_set.all().delete()
-# ip.name_set.all().delete()
-# interface.ip_set.all().delete()
-# interface.delete()
-# host.delete()
-
-def validate(request, new=False, host_id=None):
- """Function for checking form data"""
- failures = []
- if (request.POST['expiration_date']
- and regex.date.match(request.POST['expiration_date'])):
- try:
- (year, month, day) = request.POST['expiration_date'].split("-")
- date(int(year), int(month), int(day))
- except (ValueError):
- failures.append('expiration_date')
- elif request.POST['expiration_date']:
- failures.append('expiration_date')
-
- if not (request.POST['hostname']
- and regex.host.match(request.POST['hostname'])):
- failures.append('hostname')
-
-## if not regex.printq.match(request.POST['printq']) and request.POST['printq']:
-## failures.append('printq')
-
-## if not regex.user.match(request.POST['primary_user']):
-## failures.append('primary_user')
-
-## if (not regex.user.match(request.POST['administrator'])
-## and request.POST['administrator']):
-## failures.append('administrator')
-
-## if not (request.POST['location']
-## and regex.location.match(request.POST['location'])):
-## failures.append('location')
-
- if new:
- if (not regex.macaddr.match(request.POST['mac_addr_new'])
- and request.POST['mac_addr_new']):
- failures.append('mac_addr (#1)')
- if ((request.POST['mac_addr_new'] or request.POST['ip_addr_new']) and
- not 'hdwr_type_new' in request.REQUEST):
- failures.append('hdwr_type (#1)')
- if ((request.POST['mac_addr_new2'] or request.POST['ip_addr_new2']) and
- not 'hdwr_type_new2' in request.REQUEST):
- failures.append('hdwr_type (#2)')
-
- if (not regex.macaddr.match(request.POST['mac_addr_new2'])
- and request.POST['mac_addr_new2']):
- failures.append('mac_addr (#2)')
-
- if (not regex.ipaddr.match(request.POST['ip_addr_new'])
- and request.POST['ip_addr_new']):
- failures.append('ip_addr (#1)')
- if (not regex. ipaddr.match(request.POST['ip_addr_new2'])
- and request.POST['ip_addr_new2']):
- failures.append('ip_addr (#2)')
-
- [failures.append('ip_addr (#1)') for number in
- request.POST['ip_addr_new'].split(".")
- if number.isdigit() and int(number) > 255
- and 'ip_addr (#1)' not in failures]
- [failures.append('ip_addr (#2)') for number in
- request.POST['ip_addr_new2'].split(".")
- if number.isdigit() and int(number) > 255
- and 'ip_addr (#2)' not in failures]
-
- elif host_id:
- interfaces = Interface.objects.filter(host=host_id)
- for interface in interfaces:
- if (not regex.macaddr.match(request.POST['mac_addr%d' % interface.id])
- and request.POST['mac_addr%d' % interface.id]):
- failures.append('mac_addr (%s)' % request.POST['mac_addr%d' % interface.id])
- for ip in interface.ip_set.all():
- if not regex.ipaddr.match(request.POST['ip_addr%d' % ip.id]):
- failures.append('ip_addr (%s)' % request.POST['ip_addr%d' % ip.id])
- [failures.append('ip_addr (%s)' % request.POST['ip_addr%d' % ip.id])
- for number in request.POST['ip_addr%d' % ip.id].split(".")
- if (number.isdigit() and int(number) > 255 and
- 'ip_addr (%s)' % request.POST['ip_addr%d' % ip.id] not in failures)]
- if (request.POST['%dip_addr' % interface.id]
- and not regex.ipaddr.match(request.POST['%dip_addr' % interface.id])):
- failures.append('ip_addr (%s)' % request.POST['%dip_addr' % interface.id])
- if (request.POST['mac_addr_new']
- and not regex.macaddr.match(request.POST['mac_addr_new'])):
- failures.append('mac_addr (%s)' % request.POST['mac_addr_new'])
- if (request.POST['ip_addr_new']
- and not regex.ipaddr.match(request.POST['ip_addr_new'])):
- failures.append('ip_addr (%s)' % request.POST['ip_addr_new'])
-
- if not failures:
- return 0
- return failures
-
-def do_log(text, attribute, previous, new):
- if previous != new:
- text += "%-20s%-20s -> %s\n" % (attribute, previous, new)
- return text
-
-## login required stuff
-## uncomment the views below that you would like to restrict access to
-
-## uncomment the lines below this point to restrict access to pages that modify the database
-## anonymous users can still view data in Hostbase
-
-edit = login_required(edit)
-confirm = login_required(confirm)
-dnsedit = login_required(dnsedit)
-new = login_required(new)
-copy = login_required(copy)
-#remove = login_required(remove)
-#zoneedit = login_required(zoneedit)
-#zonenew = login_required(zonenew)
-
-## uncomment the lines below this point to restrict access to all of hostbase
-
-## search = login_required(search)
-## look = login_required(look)
-## dns = login_required(dns)
-## zones = login_required(zones)
-## zoneview = login_required(zoneview)
-
diff --git a/src/lib/Bcfg2/Server/Hostbase/hostbase/webtemplates/base.html b/src/lib/Bcfg2/Server/Hostbase/hostbase/webtemplates/base.html
deleted file mode 100644
index 1d7c5565b..000000000
--- a/src/lib/Bcfg2/Server/Hostbase/hostbase/webtemplates/base.html
+++ /dev/null
@@ -1,34 +0,0 @@
-<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"
- "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
-<html xmlns="http://www.w3.org/1999/xhtml" xml:lang="en" lang="en">
-<head>
- <title>{% block title %}BCFG2 - Hostbase{% endblock %}</title>
- <link rel="stylesheet" type="text/css" href="{{ MEDIA_URL }}/boxypastel.css" />
- <link rel="stylesheet" type="text/css" href="{{ MEDIA_URL }}/base.css" />
- <!--<script type="text/javascript" src="http://hostbase.mcs.anl.gov/site_media/main.js"> -->
- {% block extra_header_info %}{% endblock %}
-</head>
-
-<body>
- <div id="header">
- <div id="branding">
- <h1>BCFG2</h1>
- </div>
- <div id="user-tools">...Change is Coming...</div>
- </div>
- <div id="sidebar">
- {% block sidebar %}
- <ul class="sidebar">
- </ul>
- {% endblock %}
- </div>
-
- <div id="content-main">
- <div id="container">
- {% block pagebanner %}{% endblock %}
- {% block content %}{% endblock %}
-
- </div>
- </div>
-</body>
-</html>
diff --git a/src/lib/Bcfg2/Server/Hostbase/hostbase/webtemplates/confirm.html b/src/lib/Bcfg2/Server/Hostbase/hostbase/webtemplates/confirm.html
deleted file mode 100644
index ca8b0cc07..000000000
--- a/src/lib/Bcfg2/Server/Hostbase/hostbase/webtemplates/confirm.html
+++ /dev/null
@@ -1,117 +0,0 @@
-{% extends "base.html" %}
-
-{% block pagebanner %}
- <div class="header">
- <h2>Confirm Removal</h2>
- </div>
- <br/>
-{% endblock %}
-
-{% block sidebar %}
-{% include "navbar.tmpl" %}
-{% include "logout.tmpl" %}
-{% endblock %}
-
-{% block content %}
-
-<form name="input" action="confirm.html?sub=true" method="post">
-Are you sure you want to remove these items?
-
-{% if interface %}
-<ul>
-<li> interface: {{ interface.mac_addr }} </li>
-{% endif %}
-
-
-{% if ips %}
-<ul>
-{% for ip in ips %}
-<li> ip: {{ ip.ip_addr }} </li>
-<ul>
-{% for name in names %}
-{% ifequal name.0 ip.id %}
-<li> name: {{ name.1.name }} </li>
-<ul>
-{% endifequal %}
-{% for cname in cnames %}
-{% ifequal cname.0 name.1.id %}
-<li> cname: {{ cname.1.name }} </li>
-{% endifequal %}
-{% endfor %}
-</ul>
-<ul>
-{% for mx in mxs %}
-{% ifequal mx.0 name.1.id %}
-<li> mx: {{ mx.1.priority }} {{ mx.1.mx }} </li>
-{% endifequal %}
-{% endfor %}
-</ul>
-{% endfor %}
-</ul>
-{% endfor %}
-</ul>
-{% endif %}
-
-{% if names and not ips %}
-<ul>
-{% for name in names %}
-<li> name: {{ name.name }} </li>
-<ul>
-{% for cname in cnames %}
-{% ifequal cname.0 name.id %}
-<li> cname: {{ cname.1.cname }} </li>
-{% endifequal %}
-{% endfor %}
-</ul>
-<ul>
-{% for mx in mxs %}
-{% ifequal mx.0 name.id %}
-<li> mx: {{ mx.1.priority }} {{ mx.1.mx }} </li>
-{% endifequal %}
-{% endfor %}
-</ul>
-{% endfor %}
-</ul>
-{% endif %}
-
-{% if cnames and not names %}
-<ul>
-{% for cname in cnames %}
-<li> cname: {{ cname.cname }} </li>
-{% endfor %}
-</ul>
-{% endif %}
-
-{% if mxs and not names %}
-<ul>
-{% for mx in mxs %}
-<li> mx: {{ mx.priority }} {{ mx.mx }} </li>
-{% endfor %}
-</ul>
-{% endif %}
-
-{% if interface %}
-</ul>
-{% endif %}
-
-{% if zone_id %}
-<ul>
-{% ifequal type 'zonemx' %}
-<li> mx: {{ zonemx.priority }} {{ zonemx.mx }} </li>
-{% endifequal %}
-
-{% ifequal type 'nameserver' %}
-<li> nameserver: {{ nameserver.name }} </li>
-{% endifequal %}
-
-{% ifequal type 'address' %}
-<li> address: {{ address.ip_addr }} </li>
-{% endifequal %}
-</ul>
-{% endif %}
-
-<input type="submit" value="confirm">
-<input type="reset" value="cancel" onclick="history.back()">
-</form>
-
-{% endblock %}
diff --git a/src/lib/Bcfg2/Server/Hostbase/hostbase/webtemplates/copy.html b/src/lib/Bcfg2/Server/Hostbase/hostbase/webtemplates/copy.html
deleted file mode 100644
index 400ef58f2..000000000
--- a/src/lib/Bcfg2/Server/Hostbase/hostbase/webtemplates/copy.html
+++ /dev/null
@@ -1,122 +0,0 @@
-{% extends "base.html" %}
-
-{% block pagebanner %}
- <div class="header">
- <h2>new host information</h2>
- </div>
- <br/>
-{% endblock %}
-
-{% block sidebar %}
-<a href="/hostbase/" class="sidebar">search hostbase</a>
-{% include "logout.tmpl" %}
-{% endblock %}
-
-{% block content %}
-
-<form name="hostdata" action="?sub=true" method="post">
-<input type="hidden" name="host">
-<table border="0" width="100%">
- <colgroup>
- <col width="150">
- <col width="*">
- <tr> <td> <b>hostname</b></td>
- <td> <input name="hostname" type="text" value="{{ host.hostname }}" ></td></tr>
- <tr> <td> <b>whatami</b></td>
- <td>
- <select name="whatami">
- {% for choice in WHATAMI_CHOICES %}
- {% ifequal host.whatami choice.0 %}
- <option value="{{ choice.0 }}" selected="selected" >{{ choice.1 }}
- {% else %}
- <option value="{{ choice.0 }}">{{ choice.1 }}
- {% endifequal %}
- {% endfor %}
- </select>
- </td></tr>
- <tr> <td> <b>netgroup</b></td>
- <td>
- <select name="netgroup">
- {% for choice in NETGROUP_CHOICES %}
- {% ifequal host.netgroup choice.0 %}
- <option value="{{ choice.0 }}" selected="selected" >{{ choice.1 }}
- {% else %}
- <option value="{{ choice.0 }}">{{ choice.1 }}
- {% endifequal %}
- {% endfor %}
- </select>
- </td></tr>
- <tr> <td> <b>class</b></td>
- <td>
- <select name="security_class">
- {% for choice in CLASS_CHOICES %}
- {% ifequal host.security_class choice.0 %}
- <option value="{{ choice.0 }}" selected="selected" >{{ choice.1 }}
- {% else %}
- <option value="{{ choice.0 }}">{{ choice.1 }}
- {% endifequal %}
- {% endfor %}
- </select></td></tr>
- <tr> <td> <b>support</b></td>
- <td>
- <select name="support">
- {% for choice in SUPPORT_CHOICES %}
- {% ifequal host.support choice.0 %}
- <option value="{{ choice.0 }}" selected="selected" >{{ choice.1 }}
- {% else %}
- <option value="{{ choice.0 }}">{{ choice.1 }}
- {% endifequal %}
- {% endfor %}
- </select></td></tr>
- <tr> <td> <b>csi</b></td>
- <td> <input name="csi" type="text" value="{{ host.csi }}" ></td></tr>
- <tr> <td> <b>printq</b></td>
- <td> <input name="printq" type="text" value="{{ host.printq }}" ></td></tr>
- <tr> <td> <b>outbound_smtp</b></td>
- <td>
- {% if host.outbound_smtp %}
- <input type="checkbox" name="outbound_smtp" checked="checked" ></td></tr>
- {% else %}
- <input type="checkbox" name="outbound_smtp" ></td></tr>
- {% endif %}
- <tr> <td> <b>primary_user</b></td>
- <td> <input name="primary_user" type="text" size="32" value="{{ host.primary_user }}"> (email address)</td></tr>
- <tr> <td> <b>administrator</b></td>
- <td> <input name="administrator" type="text" size="32" value="{{ host.administrator }}"> (email address)</td></tr>
- <tr> <td> <b>location</b></td>
- <td> <input name="location" type="text" value="{{ host.location }}"></td></tr>
- <tr> <td> <b>expiration_date</b></td>
- <td> <input name="expiration_date" type="text" size="10" value="{{ host.expiration_date }}">YYYY-MM-DD</td></tr>
- <tr> <td><br><b>Interface</b></td><td><br>
- {% for choice in TYPE_CHOICES %}
- <input type="radio" name="hdwr_type_new" value="{{ choice.0 }}" >{{ choice.1 }}
- {% endfor %}
- </td></tr>
- <tr> <td> <b>dhcp</b></td>
- <td>
- <input type="checkbox" name="dhcp_new"></td></tr>
- <tr> <td> <b>mac_addr</b></td>
- <td> <input name="mac_addr_new" type="text"></td></tr>
- <tr> <td> <b>ip_addr</b></td>
- <td> <input name="ip_addr_new" type="text"></td></tr>
- <tr> <td><br><b>Interface</b></td><td><br>
- {% for choice in TYPE_CHOICES %}
- <input type="radio" name="hdwr_type_new2" value="{{ choice.0 }}" >{{ choice.1 }}
- {% endfor %}
- </td></tr>
- <tr> <td> <b>dhcp</b></td>
- <td>
- <input type="checkbox" name="dhcp_new2"></td></tr>
- <tr> <td> <b>mac_addr</b></td>
- <td> <input name="mac_addr_new2" type="text"></td></tr>
- <tr> <td> <b>ip_addr</b></td>
- <td> <input name="ip_addr_new2" type="text"></td></tr>
- <tr> <td> <b>comments</b></td>
- <td> <textarea rows="10" cols="50" name="comments"></textarea></td></tr>
-</table>
-<br>
-<p><input type="submit" value="Submit">
-</form>
-
-{% endblock %}
-
diff --git a/src/lib/Bcfg2/Server/Hostbase/hostbase/webtemplates/dns.html b/src/lib/Bcfg2/Server/Hostbase/hostbase/webtemplates/dns.html
deleted file mode 100644
index da179e5a1..000000000
--- a/src/lib/Bcfg2/Server/Hostbase/hostbase/webtemplates/dns.html
+++ /dev/null
@@ -1,40 +0,0 @@
-{% extends "base.html" %}
-
-{% block pagebanner %}
- <div class="header">
- <h2>dns info for {{ host.hostname }}</h2>
- </div>
- <br/>
-{% endblock %}
-
-{% block sidebar %}
-{% include "navbar.tmpl" %}
-<ul class="sidebar">
- <li><a href="/hostbase/{{ host.id }}/" class="sidebar">host info</a></li>
- <li><a href="/hostbase/{{ host.id }}/edit/" class="sidebar">edit host info</a></li>
- <li><a href="edit/" class="sidebar">edit dns info</a></li>
- <li><a href="/hostbase/{{ host.id }}/logs/" class="sidebar">change logs</a></li>
-</ul>
-{% include "logout.tmpl" %}
-{% endblock %}
-
-{% block content %}
-
-{% for interface in host.interface_set.all %}
- {% for ip in interface.ip_set.all %}
- <ul><li> <b>ip_addr:</b> {{ ip.ip_addr }}</li>
- {% for name in ip.name_set.all %}
- <ul> <li><b>name:</b> {{ name.name }}</li> <ul>
- {% for cname in name.cname_set.all %}
- <li> <b>cname:</b> {{ cname.cname }}</li>
- {% endfor %}
- {% for mx in name.mxs.all %}
- <li> <b>mx:</b> {{ mx.priority }} {{ mx.mx }}</li>
- {% endfor %}
- </ul></ul>
- {% endfor %}
- </ul>
- {% endfor %}
-{% endfor %}
-{% endblock %}
-
diff --git a/src/lib/Bcfg2/Server/Hostbase/hostbase/webtemplates/dnsedit.html b/src/lib/Bcfg2/Server/Hostbase/hostbase/webtemplates/dnsedit.html
deleted file mode 100644
index b1b71ab67..000000000
--- a/src/lib/Bcfg2/Server/Hostbase/hostbase/webtemplates/dnsedit.html
+++ /dev/null
@@ -1,98 +0,0 @@
-{% extends "base.html" %}
-
-{% block pagebanner %}
- <div class="header">
- <h2>dns info for {{ host.hostname }}</h2>
- </div>
- <br/>
-{% endblock %}
-
-{% block sidebar %}
-{% include "navbar.tmpl" %}
-<ul class="sidebar">
- <li><a href="/hostbase/{{ host.id }}/" class="sidebar">host info</a></li>
- <li><a href="/hostbase/{{ host.id }}/edit/" class="sidebar">edit host info</a></li>
- <li><a href="/hostbase/{{ host.id }}/dns/" class="sidebar">see dns info</a></li>
- <li><a href="/hostbase/{{ host.id }}/logs/" class="sidebar">change logs</a></li>
-</ul>
-{% include "logout.tmpl" %}
-{% endblock %}
-
-{% block content %}
-
-<form name="dns" action="?sub=true" method="post">
-<input type="hidden" name="host" value="{{ host.id }}">
-<table border="0" width="100%">
- <colgroup>
- <col width="150">
- <col width="*">
- {% for interface in interfaces %}
- <tr><td><br></td></tr>
- <tr> <td> <b>interface type</b> </td>
- <td> {{ interface.hdwr_type }} </td></tr>
- <tr> <td> <b>mac_addr</b> </td>
- <td> {{ interface.mac_addr }} </td></tr>
- <tr><td><hr></td><td><hr></td></tr>
- {% for ip in info %}
- {% ifequal ip.0.interface interface %}
- <tr> <td> <b>ip_addr</b></td>
- <td>{{ ip.0.ip_addr }}</td></tr>
- {% for name in ip.1 %}
- <tr> <td><b>name(dns)</b></td>
- <td> <input name="name{{ name.id }}" type="text" value="{{ name.name }}">
- <select name="dns_view{{ name.id }}">
- {% for choice in DNS_CHOICES %}
- {% ifequal name.dns_view choice.0 %}
- <option value="{{ choice.0 }}" selected="selected">{{ choice.1 }}
- {% else %}
- <option value="{{ choice.0 }}">{{ choice.1 }}
- {% endifequal %}
- {% endfor %}
- </select>
- <a style="font-size:75%" href="/hostbase/{{ host.id }}/name/{{ name.id }}/confirm">remove</a></td></tr>
- {% for cname in cnames %}
- {% ifequal name cname.name %}
- <tr> <td> <b>cname</b></td>
- <td> <input name="cname{{ cname.id }}" type="text" value="{{ cname.cname }}">
- <a style="font-size:75%" href="/hostbase/{{ host.id }}/cname/{{ cname.id }}/confirm">remove</a></td></tr>
- {% endifequal %}
- {% endfor %}
- <tr> <td> <b>cname</b></td>
- <td> <input name="{{ name.id }}cname" type="text"></td></tr>
- {% for mx in mxs %}
- {% ifequal mx.0 name.id %}
- {% for record in mx.1 %}
- <tr> <td> <b>mx</b></td>
- <td> <input name="priority{{ record.id }}" type="text" size="6" value="{{ record.priority }}">
- <input name="mx{{ record.id }}" type="text" value="{{ record.mx }}">
- <a style="font-size:75%" href="/hostbase/{{ host.id }}/mx/{{ record.id }}/{{ name.id }}/confirm">remove</a></td></tr>
- {% endfor %}
- {% endifequal %}
- {% endfor %}
- <tr> <td> <b>mx</b></td>
- <td> <input name="{{ name.id }}priority" type="text" size="6">
- <input name="{{ name.id }}mx" type="text"></td></tr>
- {% endfor %}
- <tr> <td> <b>name</b></td>
- <td> <input name="{{ ip.0.ip_addr }}name" type="text">
- <select name="{{ ip.0.ip_addr }}dns_view">
- {% for choice in DNS_CHOICES %}
- <option value="{{ choice.0 }}">{{ choice.1 }}
- {% endfor %}
- </select></td></tr>
- <tr> <td> <b>cname</b></td>
- <td> <input name="{{ ip.0.ip_addr }}cname" type="text"></td></tr>
- <tr> <td> <b>mx</b></td>
- <td> <input name="{{ ip.0.ip_addr }}priority" type="text" size="6">
- <input name="{{ ip.0.ip_addr }}mx" type="text"></td></tr>
- <tr><td></td></tr>
- <tr><td><hr></td><td><hr></td></tr>
- {% endifequal %}
- {% endfor %}
- {% endfor %}
- </table>
-
-<p><input type="submit" value="Submit">
-</form>
-
-{% endblock %}
diff --git a/src/lib/Bcfg2/Server/Hostbase/hostbase/webtemplates/edit.html b/src/lib/Bcfg2/Server/Hostbase/hostbase/webtemplates/edit.html
deleted file mode 100644
index 961c9d143..000000000
--- a/src/lib/Bcfg2/Server/Hostbase/hostbase/webtemplates/edit.html
+++ /dev/null
@@ -1,191 +0,0 @@
-{% extends "base.html" %}
-
-{% block pagebanner %}
- <div class="header">
- <h2>{{ host.hostname }}</h2>
- </div>
- <br/>
-{% endblock %}
-
-{% block sidebar %}
-{% include "navbar.tmpl" %}
-<ul class="sidebar">
-<li><a href="/hostbase/{{ host.id }}/" class="sidebar">host info</a></li>
-<li><a href="/hostbase/{{ host.id }}/dns/" class="sidebar">detailed dns info</a></li>
-<li><a href="/hostbase/{{ host.id }}/dns/edit/" class="sidebar">edit dns info</a></li>
-<li><a href="/hostbase/{{ host.id }}/logs/" class="sidebar">change logs</a></li>
-</ul>
-{% include "logout.tmpl" %}
-{% endblock %}
-
-{% block content %}
-
-<script language="JavaScript" type="text/Javascript">
-function toggleAddr(interface_id){
- if(document.getElementById){
- var style = document.getElementById('ipaddr'+interface_id).style;
- style.display = style.display? "":"block";
- }
-}
-function toggleInter(){
- if(document.getElementById){
- var style = document.getElementById('interface').style;
- style.display = style.display? "":"block";
- }
-}
-</script>
-
-<style type=text/css>
-{% for interface in interfaces %}
-div#ipaddr{{ interface.0.id }}{
- display: none;
-}
-{% endfor %}
-div#interface{
- display: none;
-}
-</style>
-
-<form name="hostdata" action="" method="post">
-<fieldset class="module aligned ()">
-<input type="hidden" name="host" value="{{ host.id }}">
- <label for="id_hostname">hostname:</label>
- <input name="hostname" value="{{ host.hostname }}"><br>
- <label for="id_whatami">whatami:</label>
- <select name="whatami">
- {% for choice in host.WHATAMI_CHOICES %}
- {% ifequal host.whatami choice.0 %}
- <option value="{{ choice.0 }}" selected="selected">{{ choice.1 }}
- {% else %}
- <option value="{{ choice.0 }}">{{ choice.1 }}
- {% endifequal %}
- {% endfor %}
- </select><br>
- <label for="id_netgroup">netgroup:</label>
- <select name="netgroup">
- {% for choice in host.NETGROUP_CHOICES %}
- {% ifequal host.netgroup choice.0 %}
- <option value="{{ choice.0 }}" selected="selected">{{ choice.1 }}
- {% else %}
- <option value="{{ choice.0 }}">{{ choice.1 }}
- {% endifequal %}
- {% endfor %}
- </select><br>
- <label for="id_security_class">class:</label>
- <select name="security_class">
- {% for choice in host.CLASS_CHOICES %}
- {% ifequal host.security_class choice.0 %}
- <option value="{{ choice.0 }}" selected="selected">{{ choice.1 }}
- {% else %}
- <option value="{{ choice.0 }}">{{ choice.1 }}
- {% endifequal %}
- {% endfor %}
- </select><br>
- <label for="id_support">support:</label>
- <select name="support">
- {% for choice in host.SUPPORT_CHOICES %}
- {% ifequal host.support choice.0 %}
- <option value="{{ choice.0 }}" selected="selected">{{ choice.1 }}
- {% else %}
- <option value="{{ choice.0 }}">{{ choice.1 }}
- {% endifequal %}
- {% endfor %}
- </select><br>
- <label for="id_csi">csi:</label>
- <input name="csi" type="text" value="{{ host.csi }}"><br>
- <label for="id_printq">printq:</label>
- <input name="printq" type="text" value="{{ host.printq }}"><br>
- <label for="id_outbound_smtp">outbound_smtp:</label>
- {% if host.outbound_smtp %}
- <input type="checkbox" checked="checked" name="outbound_smtp">
- {% else %}
- <input type="checkbox" name="outbound_smtp">
- {% endif %}<br>
- <label for="id_primary_user">primary_user:</label>
- <input name="primary_user" type="text" size="32" value="{{ host.primary_user }}"><br>
- <label for="id_administrator">administrator:</label>
- <input name="administrator" type="text" size="32" value="{{ host.administrator }}"><br>
- <label for="id_location">location:</label>
- <input name="location" type="text" value="{{ host.location }}"><br>
- <label for="id_expiration_date">expiration_date:</label>
- <input name="expiration_date" type="text" value="{{ host.expiration_date }}"> YYYY-MM-DD<br>
- {% for interface in interfaces %}
- <label for="id_interface">Interface:</label>
- <select name="hdwr_type{{ interface.0.id }}">
- {% for choice in interface.0.TYPE_CHOICES %}
- {% ifequal interface.0.hdwr_type choice.0 %}
- <option value="{{ choice.0 }}" selected="selected">{{ choice.1 }}
- {% else %}
- <option value="{{ choice.0 }}">{{ choice.1 }}
- {% endifequal %}
- {% endfor %}
- </select><br>
- <label for="id_dhcp">dhcp:</label>
- {% if interface.0.dhcp %}
- <input type="checkbox" checked="checked" name="dhcp{{ interface.0.id }}">
- {% else %}
- <input type="checkbox" name="dhcp{{ interface.0.id }}">
- {% endif %}<br>
- <label for="id_mac_addr">mac_addr:</label>
- <input name="mac_addr{{ interface.0.id }}" type="text" value="{{ interface.0.mac_addr }}">
- <a style="font-size:75%" href="/hostbase/{{ host.id }}/interface/{{ interface.0.id }}/confirm">remove</a><br>
- {% for ip in interface.1 %}
- <label for="id_ip_addr">ip_addr:</label>
- <input name="ip_addr{{ ip.id }}" type="text" value="{{ ip.ip_addr }}">
- <a style="font-size:75%" href="/hostbase/{{ host.id }}/ip/{{ ip.id }}/confirm">remove</a><br>
- {% endfor %}
-
-<!-- Section for adding a new IP address to an existing interface -->
-<!-- By default, section is hidden -->
- <div id=ipaddr{{ interface.0.id }}>
- <label for="id_ip_addr">ip_addr:</label>
- <input name="{{ interface.0.id }}ip_addr" type="text"><br>
- </div>
- <a style="font-size:75%" href=# onclick="toggleAddr({{ interface.0.id }})">Add a New IP Address</a><br>
- {% endfor %}
-<!-- End section for new IP address -->
-
-<!-- Section for add an entirely new interface to a host -->
-<!-- By default, section is hidden -->
- <div id=interface>
- <label for="id_interface">Interface:</label>
- <select name="hdwr_type_new">
- {% for choice in TYPE_CHOICES %}
- <option value="{{ choice.0 }}">{{ choice.1 }}
- {% endfor %}
- </select><br>
- <label for="id_dhcp">dhcp:</label>
- {% if host.dhcp %}
- <input type="checkbox" checked="checked" name="dhcp_new">
- {% else %}
- <input type="checkbox" name="dhcp_new">
- {% endif %}<br>
- <label for="id_mac_addr">mac_addr:</label>
- <td> <input name="mac_addr_new" type="text"><br>
- <label for="id_ip_addr">ip_addr:</label>
- <td> <input name="ip_addr_new" type="text"><br>
-</div>
-<a style="font-size:75%" href=# onclick="toggleInter()">Add a New Interface</a><br>
-<!-- End new interface section -->
-
-
-<label for="id_comments">comments:</label>
-<textarea rows="10" cols="50" name="comments">{{ host.comments }}</textarea><br>
-<a style="font-size:75%" href="/hostbase/{{ host.id }}/dns/edit">edit detailed DNS information for this host</a>
-<br>
-this host is
-<select name="status">
-{% for choice in host.STATUS_CHOICES %}
-{% ifequal host.status choice.0 %}
-<option value="{{ choice.0 }}" selected="selected">{{ choice.1 }}
-{% else %}
-<option value="{{ choice.0 }}">{{ choice.1 }}
-{% endifequal %}
-{% endfor %}
-</select><br>
-last update on {{ host.last }}<br>
-<input type="submit" value="submit">
-<input type="reset" value="cancel" onclick="history.back()">
-</form>
-
-{% endblock %}
diff --git a/src/lib/Bcfg2/Server/Hostbase/hostbase/webtemplates/errors.html b/src/lib/Bcfg2/Server/Hostbase/hostbase/webtemplates/errors.html
deleted file mode 100644
index e5429b86c..000000000
--- a/src/lib/Bcfg2/Server/Hostbase/hostbase/webtemplates/errors.html
+++ /dev/null
@@ -1,31 +0,0 @@
-{% extends "base.html" %}
-
-{% block pagebanner %}
- <div class="header">
- <h2>Search Results</h2>
- </div>
- <br/>
-{% endblock %}
-
-{% block sidebar %}
-{% include "navbar.tmpl" %}
-{% include "logout.tmpl" %}
-{% endblock %}
-
-{% block content %}
-
-{% if failures %}
-There were errors in the following fields<br><br>
-{% for failure in failures %}
-
-<font color="#FF0000">{{ failure }}</font><br>
-{% comment %}
-{{ failure.1|join:", " }}
-{% endcomment %}
-
-{% endfor %}
-{% endif %}
-<br>
-Press the back button on your browser and edit those field(s)
-
-{% endblock %}
diff --git a/src/lib/Bcfg2/Server/Hostbase/hostbase/webtemplates/host.html b/src/lib/Bcfg2/Server/Hostbase/hostbase/webtemplates/host.html
deleted file mode 100644
index d6b8873bc..000000000
--- a/src/lib/Bcfg2/Server/Hostbase/hostbase/webtemplates/host.html
+++ /dev/null
@@ -1,80 +0,0 @@
-{% extends "base.html" %}
-
-{% block pagebanner %}
- <div class="header">
- <h2>{{ host.hostname }}</h2>
- </div>
- <br/>
-{% endblock %}
-
-{% block sidebar %}
-{% include "navbar.tmpl" %}
-<ul class="sidebar">
- <li><a href="dns/" class="sidebar">detailed dns info</a></li>
- <li><a href="edit/" class="sidebar">edit host info</a></li>
- <li><a href="dns/edit/" class="sidebar">edit dns info</a></li>
- <li><a href="logs/" class="sidebar">change logs</a></li>
-</ul>
-{% include "logout.tmpl" %}
-{% endblock %}
-
-{% block content %}
-
-<table border="0" width="100%">
- <colgroup>
- <col width="150">
- <col width="*">
- <tr> <td> <b>hostname</b></td>
- <td> {{ host.hostname }}</td></tr>
- <tr> <td> <b>whatami</b></td>
- <td> {{ host.whatami }}</td></tr>
- <tr> <td> <b>netgroup</b></td>
- <td> {{ host.netgroup }}</td></tr>
- <tr> <td> <b>class</b></td>
- <td> {{ host.security_class }}</td></tr>
- <tr> <td> <b>support</b></td>
- <td> {{ host.support }}</td></tr>
- <tr> <td> <b>csi</b></td>
- <td> {{ host.csi }}</td></tr>
- <tr> <td> <b>printq</b></td>
- <td> {{ host.printq }}</td></tr>
- <tr> <td> <b>outbound_smtp</b></td>
- {% if host.outbound_smtp %}
- <td> y </td></tr>
- {% else %}
- <td> n </td></tr>
- {% endif %}
- <tr> <td> <b>primary_user</b></td>
- <td> {{ host.primary_user }}</td></tr>
- <tr> <td> <b>administrator</b></td>
- <td> {{ host.administrator }}</td></tr>
- <tr> <td> <b>location</b></td>
- <td> {{ host.location }}</td></tr>
- <tr> <td> <b>expiration_date</b></td>
- <td> {{ host.expiration_date }}</td></tr>
- {% for interface in host.inserface_set.all %}
- <tr> <td><br><b>Interface</b></td>
- {% ifnotequal interface.0.hdwr_type 'no' %}
- <td><br>{{ interface.0.hdwr_type }}</td></tr>
- {% endifnotequal %}
- {% if interface.0.dhcp %}
- <tr> <td> <b>mac_addr</b></td>
- <td> {{ interface.0.mac_addr }}</b></td></tr>
- {% endif %}
- {% for ip in interface.1 %}
- <tr> <td> <b>ip_addr</b></td>
- <td> {{ ip.ip_addr }}</td></tr>
- {% endfor %}
- {% endfor %}
- <tr> <td valign="top"> <b>comments</b></td>
- <td>
- {{ host.comments|linebreaksbr }}<br>
- </td></tr>
-
-</table>
-<a style="font-size:75%" href="/hostbase/{{ host.id }}/dns/">see detailed DNS information for this host</a>
-<br><br>
-this host is {{ host.status }}<br>
-last update on {{ host.last }}<br>
-
-{% endblock %}
diff --git a/src/lib/Bcfg2/Server/Hostbase/hostbase/webtemplates/hostbase/host_confirm_delete.html b/src/lib/Bcfg2/Server/Hostbase/hostbase/webtemplates/hostbase/host_confirm_delete.html
deleted file mode 100644
index b5d794b50..000000000
--- a/src/lib/Bcfg2/Server/Hostbase/hostbase/webtemplates/hostbase/host_confirm_delete.html
+++ /dev/null
@@ -1,89 +0,0 @@
-{% extends "base.html" %}
-
-{% block pagebanner %}
- <div class="header">
- <h2>Are you sure you want to remove {{ object.hostname }}?</h2>
- </div>
- <br/>
-{% endblock %}
-
-{% block sidebar %}
-{% include "navbar.tmpl" %}
-<ul class="sidebar">
- <li><a href="dns/" class="sidebar">detailed dns info</a></li>
- <li><a href="edit/" class="sidebar">edit host info</a></li>
- <li><a href="dns/edit/" class="sidebar">edit dns info</a></li>
-</ul>
-{% include "logout.tmpl" %}
-{% endblock %}
-
-{% block content %}
-
-<table border="0" width="100%">
- <colgroup>
- <col width="150">
- <col width="*">
- <tr> <td> <b>hostname</b></td>
- <td> {{ object.hostname }}</td></tr>
- <tr> <td> <b>whatami</b></td>
- <td> {{ object.whatami }}</td></tr>
- <tr> <td> <b>netgroup</b></td>
- <td> {{ object.netgroup }}</td></tr>
- <tr> <td> <b>class</b></td>
- <td> {{ object.security_class }}</td></tr>
- <tr> <td> <b>support</b></td>
- <td> {{ object.support }}</td></tr>
- <tr> <td> <b>csi</b></td>
- <td> {{ object.csi }}</td></tr>
- <tr> <td> <b>printq</b></td>
- <td> {{ object.printq }}</td></tr>
- <tr> <td> <b>dhcp</b></td>
- {% if host.dhcp %}
- <td> y </td></tr>
- {% else %}
- <td> n </td></tr>
- {% endif %}
- <tr> <td> <b>outbound_smtp</b></td>
- {% if host.outbound_smtp %}
- <td> y </td></tr>
- {% else %}
- <td> n </td></tr>
- {% endif %}
- <tr> <td> <b>primary_user</b></td>
- <td> {{ object.primary_user }}</td></tr>
- <tr> <td> <b>administrator</b></td>
- <td> {{ object.administrator }}</td></tr>
- <tr> <td> <b>location</b></td>
- <td> {{ object.location }}</td></tr>
- <tr> <td> <b>expiration_date</b></td>
- <td> {{ object.expiration_date }}</td></tr>
- {% for interface in interfaces %}
- <tr> <td><br><b>Interface</b></td>
- {% ifnotequal interface.0.hdwr_type 'no' %}
- <td><br>{{ interface.0.hdwr_type }}</td></tr>
- {% endifnotequal %}
- <tr> <td> <b>mac_addr</b></td>
- <td> {{ interface.0.mac_addr }}</b></td></tr>
- {% for ip in interface.1 %}
- <tr> <td> <b>ip_addr</b></td>
- <td> {{ ip.ip_addr }}</td></tr>
- {% endfor %}
- {% endfor %}
- <tr> <td valign="top"> <b>comments</b></td>
- <td>
- {{ object.comments|linebreaksbr }}<br>
- </td></tr>
-
-</table>
-<a style="font-size:75%" href="/hostbase/{{ object.id }}/dns/">see detailed DNS information for this host</a>
-<br><br>
-this host is {{ object.status }}<br>
-last update on {{ object.last }}<br>
-
-<form name="input" action="remove.html?sub=true" method="post">
-<input type="submit" value="remove">
-<input type="reset" value="cancel" onclick="history.back()">
-</form>
-
-{% endblock %}
-
diff --git a/src/lib/Bcfg2/Server/Hostbase/hostbase/webtemplates/hostbase/log_detail.html b/src/lib/Bcfg2/Server/Hostbase/hostbase/webtemplates/hostbase/log_detail.html
deleted file mode 100644
index aa9679cbd..000000000
--- a/src/lib/Bcfg2/Server/Hostbase/hostbase/webtemplates/hostbase/log_detail.html
+++ /dev/null
@@ -1,23 +0,0 @@
-{% extends "base.html" %}
-
-{% block pagebanner %}
- <div class="header">
- <h2>Change Logs for {{ object.hostname }}</h2>
- </div>
- <br/>
-{% endblock %}
-
-{% block sidebar %}
-{% include "navbar.tmpl" %}
-{% include "logout.tmpl" %}
-{% endblock %}
-
-{% block content %}
-
-<ul>
-<li><b>Hostname:</b>{{ object.hostname }}</li>
-<li><b>Date:</b>{{ object.date }}</li>
-<li><b>Log:</b>{{ object.log }}</li>
-</ul>
-
-{% endblock %}
diff --git a/src/lib/Bcfg2/Server/Hostbase/hostbase/webtemplates/index.html b/src/lib/Bcfg2/Server/Hostbase/hostbase/webtemplates/index.html
deleted file mode 100644
index 92258b648..000000000
--- a/src/lib/Bcfg2/Server/Hostbase/hostbase/webtemplates/index.html
+++ /dev/null
@@ -1,16 +0,0 @@
-{% extends "base.html" %}
-{% block pagebanner %}
- <div class="header">
- <h2>Welcome to Hostbase!</h2>
- <p>Hostbase is a web based management tools for Bcfg2 Hosts</p>
- </div>
- <br/>
-{% endblock %}
-{% block sidebar %}
-<a href="/login/" class="sidebar">login to hostbase</a><br>
-<a href="/hostbase/" class="sidebar">search for hosts</a><br>
-<a href="hostbase/zones/" class="sidebar">zone file information</a>
-{% endblock %}
-{% block content %}
-{% endblock %}
-
diff --git a/src/lib/Bcfg2/Server/Hostbase/hostbase/webtemplates/login.html b/src/lib/Bcfg2/Server/Hostbase/hostbase/webtemplates/login.html
deleted file mode 100644
index ec24a0fc0..000000000
--- a/src/lib/Bcfg2/Server/Hostbase/hostbase/webtemplates/login.html
+++ /dev/null
@@ -1,37 +0,0 @@
-{% extends "base.html" %}
-{% block pagebanner %}
- <div class="header">
- <h2>Login to Hostbase!</h2>
- <p>You must login to manage hosts</p>
- </div>
- <br/>
-{% endblock %}
-{% block sidebar %}
-<a href="/hostbase/" class="sidebar">search for hosts</a><br>
-<a href="/hostbase/new" class="sidebar">add a new host</a><br>
-<a href="hostbase/zones/" class="sidebar">zone file information</a>
-{% endblock %}
-{% block content %}
- {% if form.has_errors %}
- {{ form.username.errors|join:", " }}
- <p>Login Failed.</p>
- {% endif %}
- {% if user.is_authenticated %}
- <p>Welcome, {{ user.username }}. Thanks for logging in.</p>
- {% else %}
- <p>Welcome, user. Please log in.</p>
- <form name="input" action="." method="post">
- <input name="username" type="text">
- <br />
- <input name="password" type="password">
- <br />
- <input type="submit" value="Login">
- {% if next %}
- <input type="hidden" name="next" value="{{ next }}" />
- {% else %}
- <input type="hidden" name="next" value="/hostbase/" />
- {% endif %}
-
- </form>
- {% endif %}
-{% endblock %}
diff --git a/src/lib/Bcfg2/Server/Hostbase/hostbase/webtemplates/logout.html b/src/lib/Bcfg2/Server/Hostbase/hostbase/webtemplates/logout.html
deleted file mode 100644
index 994f631a8..000000000
--- a/src/lib/Bcfg2/Server/Hostbase/hostbase/webtemplates/logout.html
+++ /dev/null
@@ -1,13 +0,0 @@
-{% extends "base.html" %}
-{% block pagebanner %}
- <div class="header">
- <h2>You are logged out of Hostbase!</h2>
- </div>
- <br/>
-{% endblock %}
-{% block sidebar %}
-<a href="/login/" class="sidebar">Login to Hostbase</a>
-{% endblock %}
-{% block content %}
-{% endblock %}
-
diff --git a/src/lib/Bcfg2/Server/Hostbase/hostbase/webtemplates/logout.tmpl b/src/lib/Bcfg2/Server/Hostbase/hostbase/webtemplates/logout.tmpl
deleted file mode 100644
index e71e90e76..000000000
--- a/src/lib/Bcfg2/Server/Hostbase/hostbase/webtemplates/logout.tmpl
+++ /dev/null
@@ -1,6 +0,0 @@
-<p>
-{% if logged_in %}
-<a href="/logout/" class="sidebar">logout</a>
-{% else %}
-<a href="/login/" class="sidebar">login</a>
-{% endif %}
diff --git a/src/lib/Bcfg2/Server/Hostbase/hostbase/webtemplates/logviewer.html b/src/lib/Bcfg2/Server/Hostbase/hostbase/webtemplates/logviewer.html
deleted file mode 100644
index 806ccd63d..000000000
--- a/src/lib/Bcfg2/Server/Hostbase/hostbase/webtemplates/logviewer.html
+++ /dev/null
@@ -1,27 +0,0 @@
-{% extends "base.html" %}
-
-{% block pagebanner %}
- <div class="header">
- <h2>Change Logs for {{ hostname }}</h2>
- </div>
- <br/>
-{% endblock %}
-
-{% block sidebar %}
-{% include "navbar.tmpl" %}
-{% include "logout.tmpl" %}
-{% endblock %}
-
-{% block content %}
-
-{% if host.get_logs %}
-<ul>
-{% for log in host.get_logs %}
-<li><a href="{{ log.id }}/">{{ log.date }}</li>
-{% endfor %}
-</ul>
-{% else %}
-There are no logs for this host<br>
-{% endif %}
-
-{% endblock %}
diff --git a/src/lib/Bcfg2/Server/Hostbase/hostbase/webtemplates/navbar.tmpl b/src/lib/Bcfg2/Server/Hostbase/hostbase/webtemplates/navbar.tmpl
deleted file mode 100644
index 877d427d0..000000000
--- a/src/lib/Bcfg2/Server/Hostbase/hostbase/webtemplates/navbar.tmpl
+++ /dev/null
@@ -1,5 +0,0 @@
-<a href="/hostbase/" class="sidebar">host search</a><br>
-<a href="/hostbase/new" class="sidebar">add a new host</a><br>
-<a href="/hostbase/zones" class="sidebar">zone file information</a><br>
-<a href="/hostbase/zones/new" class="sidebar">add a zone</a><br>
-
diff --git a/src/lib/Bcfg2/Server/Hostbase/hostbase/webtemplates/new.html b/src/lib/Bcfg2/Server/Hostbase/hostbase/webtemplates/new.html
deleted file mode 100644
index 2dcd6271f..000000000
--- a/src/lib/Bcfg2/Server/Hostbase/hostbase/webtemplates/new.html
+++ /dev/null
@@ -1,102 +0,0 @@
-{% extends "base.html" %}
-
-{% block pagebanner %}
- <div class="header">
- <h2>new host information</h2>
- </div>
- <br/>
-{% endblock %}
-
-{% block sidebar %}
-<a href="/hostbase/" class="sidebar">search hostbase</a>
-{% include "logout.tmpl" %}
-{% endblock %}
-
-{% block content %}
-
-<form name="hostdata" action="?sub=true" method="post">
-<input type="hidden" name="host">
-<table border="0" width="100%">
- <colgroup>
- <col width="150">
- <col width="*">
- <tr> <td> <b>hostname</b></td>
- <td> <input name="hostname" type="text" ></td></tr>
- <tr> <td> <b>whatami</b></td>
- <td>
- <select name="whatami">
- {% for choice in WHATAMI_CHOICES %}
- <option value="{{ choice.0 }}">{{ choice.1 }}
- {% endfor %}
- </select>
- </td></tr>
- <tr> <td> <b>netgroup</b></td>
- <td>
- <select name="netgroup">
- {% for choice in NETGROUP_CHOICES %}
- <option value="{{ choice.0 }}">{{ choice.1 }}
- {% endfor %}
- </select>
- </td></tr>
- <tr> <td> <b>class</b></td>
- <td>
- <select name="security_class">
- {% for choice in CLASS_CHOICES %}
- <option value="{{ choice.0 }}">{{ choice.1 }}
- {% endfor %}
- </select></td></tr>
- <tr> <td> <b>support</b></td>
- <td>
- <select name="support">
- {% for choice in SUPPORT_CHOICES %}
- <option value="{{ choice.0 }}">{{ choice.1 }}
- {% endfor %}
- </select></td></tr>
- <tr> <td> <b>csi</b></td>
- <td> <input name="csi" type="text" ></td></tr>
- <tr> <td> <b>printq</b></td>
- <td> <input name="printq" type="text" ></td></tr>
- <tr> <td> <b>outbound_smtp</b></td>
- <td>
- <input type="checkbox" name="outbound_smtp"></td></tr>
- <tr> <td> <b>primary_user</b></td>
- <td> <input name="primary_user" type="text" size="32" > (email address)</td></tr>
- <tr> <td> <b>administrator</b></td>
- <td> <input name="administrator" type="text" size="32" > (email address)</td></tr>
- <tr> <td> <b>location</b></td>
- <td> <input name="location" type="text" ></td></tr>
- <tr> <td> <b>expiration_date</b></td>
- <td> <input name="expiration_date" type="text" size="10" >YYYY-MM-DD</td></tr>
- <tr> <td><br><b>Interface</b></td><td><br>
- {% for choice in TYPE_CHOICES %}
- <input type="radio" name="hdwr_type_new" value="{{ choice.0 }}" >{{ choice.1 }}
- {% endfor %}
- </td></tr>
- <tr> <td> <b>dhcp</b></td>
- <td>
- <input type="checkbox" name="dhcp_new"></td></tr>
- <tr> <td> <b>mac_addr</b></td>
- <td> <input name="mac_addr_new" type="text"></td></tr>
- <tr> <td> <b>ip_addr</b></td>
- <td> <input name="ip_addr_new" type="text"></td></tr>
- <tr> <td><br><b>Interface</b></td><td><br>
- {% for choice in TYPE_CHOICES %}
- <input type="radio" name="hdwr_type_new2" value="{{ choice.0 }}" >{{ choice.1 }}
- {% endfor %}
- </td></tr>
- <tr> <td> <b>dhcp</b></td>
- <td>
- <input type="checkbox" name="dhcp_new2"></td></tr>
- <tr> <td> <b>mac_addr</b></td>
- <td> <input name="mac_addr_new2" type="text"></td></tr>
- <tr> <td> <b>ip_addr</b></td>
- <td> <input name="ip_addr_new2" type="text"></td></tr>
- <tr> <td> <b>comments</b></td>
- <td> <textarea rows="10" cols="50" name="comments"></textarea></td></tr>
-</table>
-<br>
-<p><input type="submit" value="Submit">
-</form>
-
-{% endblock %}
-
diff --git a/src/lib/Bcfg2/Server/Hostbase/hostbase/webtemplates/remove.html b/src/lib/Bcfg2/Server/Hostbase/hostbase/webtemplates/remove.html
deleted file mode 100644
index 4329200dd..000000000
--- a/src/lib/Bcfg2/Server/Hostbase/hostbase/webtemplates/remove.html
+++ /dev/null
@@ -1,89 +0,0 @@
-{% extends "base.html" %}
-
-{% block pagebanner %}
- <div class="header">
- <h2>Are you sure you want to remove {{ host.hostname }}?</h2>
- </div>
- <br/>
-{% endblock %}
-
-{% block sidebar %}
-{% include "navbar.tmpl" %}
-<ul class="sidebar">
- <li><a href="dns/" class="sidebar">detailed dns info</a></li>
- <li><a href="edit/" class="sidebar">edit host info</a></li>
- <li><a href="dns/edit/" class="sidebar">edit dns info</a></li>
-</ul>
-{% include "logout.tmpl" %}
-{% endblock %}
-
-{% block content %}
-
-<table border="0" width="100%">
- <colgroup>
- <col width="150">
- <col width="*">
- <tr> <td> <b>hostname</b></td>
- <td> {{ host.hostname }}</td></tr>
- <tr> <td> <b>whatami</b></td>
- <td> {{ host.whatami }}</td></tr>
- <tr> <td> <b>netgroup</b></td>
- <td> {{ host.netgroup }}</td></tr>
- <tr> <td> <b>class</b></td>
- <td> {{ host.security_class }}</td></tr>
- <tr> <td> <b>support</b></td>
- <td> {{ host.support }}</td></tr>
- <tr> <td> <b>csi</b></td>
- <td> {{ host.csi }}</td></tr>
- <tr> <td> <b>printq</b></td>
- <td> {{ host.printq }}</td></tr>
- <tr> <td> <b>dhcp</b></td>
- {% if host.dhcp %}
- <td> y </td></tr>
- {% else %}
- <td> n </td></tr>
- {% endif %}
- <tr> <td> <b>outbound_smtp</b></td>
- {% if host.outbound_smtp %}
- <td> y </td></tr>
- {% else %}
- <td> n </td></tr>
- {% endif %}
- <tr> <td> <b>primary_user</b></td>
- <td> {{ host.primary_user }}</td></tr>
- <tr> <td> <b>administrator</b></td>
- <td> {{ host.administrator }}</td></tr>
- <tr> <td> <b>location</b></td>
- <td> {{ host.location }}</td></tr>
- <tr> <td> <b>expiration_date</b></td>
- <td> {{ host.expiration_date }}</td></tr>
- {% for interface in interfaces %}
- <tr> <td><br><b>Interface</b></td>
- {% ifnotequal interface.0.hdwr_type 'no' %}
- <td><br>{{ interface.0.hdwr_type }}</td></tr>
- {% endifnotequal %}
- <tr> <td> <b>mac_addr</b></td>
- <td> {{ interface.0.mac_addr }}</b></td></tr>
- {% for ip in interface.1 %}
- <tr> <td> <b>ip_addr</b></td>
- <td> {{ ip.ip_addr }}</td></tr>
- {% endfor %}
- {% endfor %}
- <tr> <td valign="top"> <b>comments</b></td>
- <td>
- {{ host.comments|linebreaksbr }}<br>
- </td></tr>
-
-</table>
-<a style="font-size:75%" href="/hostbase/{{ host.id }}/dns/">see detailed DNS information for this host</a>
-<br><br>
-this host is {{ host.status }}<br>
-last update on {{ host.last }}<br>
-
-<form name="input" action="remove.html?sub=true" method="post">
-<input type="submit" value="remove">
-<input type="reset" value="cancel" onclick="history.back()">
-</form>
-
-{% endblock %}
-
diff --git a/src/lib/Bcfg2/Server/Hostbase/hostbase/webtemplates/results.html b/src/lib/Bcfg2/Server/Hostbase/hostbase/webtemplates/results.html
deleted file mode 100644
index 45b22058d..000000000
--- a/src/lib/Bcfg2/Server/Hostbase/hostbase/webtemplates/results.html
+++ /dev/null
@@ -1,45 +0,0 @@
-{% extends "base.html" %}
-
-{% block pagebanner %}
- <div class="header">
- <h2>Search Results</h2>
- </div>
- <br/>
-{% endblock %}
-
-{% block sidebar %}
-{% include "navbar.tmpl" %}
-{% include "logout.tmpl" %}
-{% endblock %}
-
-{% block content %}
-
-{% if hosts %}
-<table border="0" width="100%">
- <colgroup>
- <col width="200">
- <col width="75">
- <col width="50">
- <col width="50">
- <col width="50">
- <col width="*">
- <tr> <td><b>hostname</b></td>
- <td> <b>status</b> </td>
- </tr>
- {% for host in hosts %}
- <tr> <td>{{ host.0 }}</td>
- <td> {{ host.2 }} </td>
- <td> <a href="{{ host.1 }}">view</a> </td>
- <td> <a href="{{ host.1 }}/edit">edit</a> </td>
- <td> <a href="{{ host.1 }}/copy">copy</a> </td>
- <td> <a href="{{ host.1 }}/logs">logs</a> </td>
-<!-- <td> <a href="{{ host.1 }}/remove">remove</a> </td> -->
- </tr>
- {% endfor %}
-</table>
-{% else %}
-No hosts matched your query<br>
-Click the back button on your browser to edit your search
-{% endif %}
-
-{% endblock %}
diff --git a/src/lib/Bcfg2/Server/Hostbase/hostbase/webtemplates/search.html b/src/lib/Bcfg2/Server/Hostbase/hostbase/webtemplates/search.html
deleted file mode 100644
index 409d418fe..000000000
--- a/src/lib/Bcfg2/Server/Hostbase/hostbase/webtemplates/search.html
+++ /dev/null
@@ -1,57 +0,0 @@
-{% extends "base.html" %}
-
-{% block pagebanner %}
- <div class="header">
- <h2>Welcome to Hostbase!</h2>
- <p>search for hosts using one or more of the fields below
- </div>
- <br/>
-{% endblock %}
-
-{% block sidebar %}
-<a href="/hostbase/new" class="sidebar">add a new host</a><br>
-<a href="/hostbase/zones" class="sidebar">zone file information</a><br>
-{% include "logout.tmpl" %}
-{% endblock %}
-
-{% block content %}
-{% comment %}
- ...or go to <a href="hostinfo">this</a>
- page to enter hostinfo-like queries<br><br>
-{% endcomment %}
-
-<form name="input" action="?sub=true" method="post">
- <fieldset class="module aligned ()">
- <label for="hostname">hostname:</label><input name="hostname" type="text" ><br>
- <label for="netgroup">netgroup:</label><input name="netgroup" type="text" ><br>
- <label for="security_class">class:</label><input name="security_class" type="text" ><br>
- <label for="support">support:</label><input name="support" type="text" ><br>
- <label for="csi">csi:</label><input name="csi" type="text" ><br>
- <label for="printq">printq:</label><input name="printq" type="text" ><br>
- <label for="outbound_smtp">outbound_smtp:</label>
- {% for choice in yesno %}
- <input type="radio" name="outbound_smtp" value="{{ choice.0 }}" >{{ choice.1 }}
- {% endfor %}<br>
- <label for="primary_user">primary_user:</label><input name="primary_user" type="text" ><br>
- <label for="administrator">administrator:</label><input name="administrator" type="text" ><br>
- <label for="location">location:</label><input name="location" type="text" ><br>
- <label for="expiration_date">expiration_date:</label><input name="expiration_date" type="text" ><br>
- <br><label for="Interface">Interface:</label>
- {% for choice in TYPE_CHOICES %}
- <input type="radio" name="hdwr_type" value="{{ choice.0 }}" >{{ choice.1 }}
- {% endfor %}<br>
- <label for="dhcp">dhcp:</label>
- {% for choice in yesno %}
- <input type="radio" name="dhcp" value="{{ choice.0 }}" >{{ choice.1 }}
- {% endfor %}<br>
- <label for="mac_addr">mac_addr:</label><input name="mac_addr" type="text" ><br>
- <label for="ip_addr">ip_addr:</label><input name="ip_addr" type="text" ><br>
- <label for="dns_view">dns_viewer:</label>
- {% for choice in DNS_CHOICES %}
- <input type="radio" name="dns_view" value="{{ choice.0 }}" >{{ choice.1 }}
- {% endfor %}<br>
- <label for="mx">mx:</label><input name="mx" type="text" ><br>
-<p>
-<input type="submit" value="Search">
-</form>
-{% endblock %}
diff --git a/src/lib/Bcfg2/Server/Hostbase/hostbase/webtemplates/zoneedit.html b/src/lib/Bcfg2/Server/Hostbase/hostbase/webtemplates/zoneedit.html
deleted file mode 100644
index ee355ee87..000000000
--- a/src/lib/Bcfg2/Server/Hostbase/hostbase/webtemplates/zoneedit.html
+++ /dev/null
@@ -1,81 +0,0 @@
-{% extends "base.html" %}
-
-{% block pagebanner %}
- <div class="header">
- <h2>Zones</h2>
- <p>Edit information for {{ zone }}
- </div>
- <br/>
-{% endblock %}
-
-{% block sidebar %}
-{% include "navbar.tmpl" %}
-<ul>
-<li><a href="/hostbase/zones/{{ zone_id }}/" class="sidebar">view zone</a><br>
-</li>
-</ul>
-{% include "logout.tmpl" %}
-{% endblock %}
-
-{% block content %}
-
-<script language="JavaScript" type="text/Javascript">
-function toggleField(fieldname){
- if(document.getElementById){
- var style = document.getElementById(fieldname).style;
- style.display = style.display? "":"block";
- }
-}
-</script>
-
-<style type=text/css>
-div#nameserver{
- display: none;
-}
-div#mx{
- display: none;
-}
-div#address{
- display: none;
-}
-</style>
-
-<form name="zonedata" action="" method="post">
- <fieldset class="module aligned ()">
-<label for="id_zone">zone:</label></td> <td>{{ form.zone }}<br>
-<label for="id_admin">admin:</label></td> <td>{{ form.admin }}<br>
-<label for="id_primary_master">primary_master:</label></td> <td>{{ form.primary_master }}<br>
-<label for="id_expire">expire:</label></td> <td>{{ form.expire }}<br>
-<label for="id_retry">retry:</label></td> <td>{{ form.retry }}<br>
-<label for="id_refresh">refresh:</label></td> <td>{{ form.refresh }}<br>
-<label for="id_ttl">ttl:</label></td> <td>{{ form.ttl }}<br>
-{% for ns in nsforms %}
-<label for="id_name">nameserver:</label></td> <td>{{ ns.name }}<br>
-{% endfor %}
-</table>
-<div id=nameserver>
- <label for="id_name">nameserver:</label></td> <td>{{ nsadd.name }}<br>
- <label for="id_name">nameserver:</label></td> <td>{{ nsadd.name }}<br>
-</div>
-<a style="font-size:75%" href=# onclick="toggleField('nameserver')">Add NS records</a><br>
-{% for mx in mxforms %}
-<label for="id_mx">mx:</label></td> <td>{{ mx.priority }} {{ mx.mx }}<br>
-{% endfor %}
-<div id=mx>
- <label for="id_mx">mx:</label></td> <td>{{ mxadd.priority }} {{ mxadd.mx }}<br>
- <label for="id_mx">mx:</label></td> <td>{{ mxadd.priority }} {{ mxadd.mx }}<br>
-</div>
-<a style="font-size:75%" href=# onclick="toggleField('mx')">Add MX records</a><br>
-{% for a in aforms %}
-<label for="id_address">ip address:</label></td> <td>{{ a.ip_addr }}<br>
-{% endfor %}
-<div id=address>
- <label for="id_address">ip address:</label></td> <td>{{ addadd.ip_addr }}<br>
- <label for="id_address">ip address:</label></td> <td>{{ addadd.ip_addr }}<br>
-</div>
-<a style="font-size:75%" href=# onclick="toggleField('address')">Add A records</a><br>
-<label for="id_aux">aux:</label></td> <td>{{ form.aux }}<br>
-<p><input type="submit" value="Submit">
-</form>
-
-{% endblock %}
diff --git a/src/lib/Bcfg2/Server/Hostbase/hostbase/webtemplates/zonenew.html b/src/lib/Bcfg2/Server/Hostbase/hostbase/webtemplates/zonenew.html
deleted file mode 100644
index b59fa9e3c..000000000
--- a/src/lib/Bcfg2/Server/Hostbase/hostbase/webtemplates/zonenew.html
+++ /dev/null
@@ -1,43 +0,0 @@
-{% extends "base.html" %}
-
-{% block pagebanner %}
- <div class="header">
- <h2>Zones</h2>
- <p>Enter information for a new zone to be generated by Hostbase
- </div>
- <br/>
-{% endblock %}
-
-{% block sidebar %}
-{% include "navbar.tmpl" %}
-{% include "logout.tmpl" %}
-{% endblock %}
-
-{% block content %}
-<form name="zonedata" action="" method="post">
- <fieldset class="module aligned ()">
- {{ form.as_p}}
-<!--
- <label for="id_zone">zone:</label>{{ form.zone }}<br>
- <label for="id_admin">admin:</label>{{ form.admin }}<br>
- <label for="id_primary_master">primary_master:</label>{{ form.primary_master }}<br>
- <label for="id_expire">expire:</label>{{ form.expire }}<br>
- <label for="id_retry">retry:</label>{{ form.retry }}<br>
- <label for="id_refresh">refresh:</label>{{ form.refresh }}<br>
- <label for="id_ttl">ttl:</label>{{ form.ttl }}<br>
- <label for="id_name">nameserver:</label>{{ nsform.name }}<br>
- <label for="id_name">nameserver:</label>{{ nsform.name }}<br>
- <label for="id_name">nameserver:</label>{{ nsform.name }}<br>
- <label for="id_name">nameserver:</label>{{ nsform.name }}<br>
- <label for="id_mx">mx:</label>{{ mxform.priority }} {{ mxform.mx }}<br>
- <label for="id_mx">mx:</label>{{ mxform.priority }} {{ mxform.mx }}<br>
- <label for="id_mx">ip address:</label>{{ aform.ip_addr }}<br>
- <label for="id_mx">ip address:</label>{{ aform.ip_addr }}<br>
- <label for="id_aux">aux:
-(information not generated from Hostbase)</label>{{ form.aux }}<br>
---!>
- <p><input type="submit" value="Submit">
- </fieldset>
-</form>
-{% endblock %}
-
diff --git a/src/lib/Bcfg2/Server/Hostbase/hostbase/webtemplates/zones.html b/src/lib/Bcfg2/Server/Hostbase/hostbase/webtemplates/zones.html
deleted file mode 100644
index c773e7922..000000000
--- a/src/lib/Bcfg2/Server/Hostbase/hostbase/webtemplates/zones.html
+++ /dev/null
@@ -1,37 +0,0 @@
-{% extends "base.html" %}
-
-{% block pagebanner %}
- <div class="header">
- <h2>Zones</h2>
- <p>Hostbase generates DNS zone files for the following zones.
- </div>
- <br/>
-{% endblock %}
-
-{% block sidebar %}
-{% include "navbar.tmpl" %}
-{% include "logout.tmpl" %}
-{% endblock %}
-
-{% block content %}
-{% if zone_list %}
-<table border="0" width="100%">
- <colgroup>
- <col width="200">
- <col width="75">
- <col width="50">
- <col width="*">
- <tr> <td><b>zone</b></td>
- </tr>
- {% for zone in zone_list|dictsort:"zone" %}
- <tr> <td> {{ zone.zone }}</td>
- <td> <a href="{{ zone.id }}">view</a> </td>
- <td> <a href="{{ zone.id }}/edit">edit</a> </td>
- </tr>
- {% endfor %}
-</table>
-{% else %}
-There is no zone data currently in the database<br>
-{% endif %}
-{% endblock %}
-
diff --git a/src/lib/Bcfg2/Server/Hostbase/hostbase/webtemplates/zoneview.html b/src/lib/Bcfg2/Server/Hostbase/hostbase/webtemplates/zoneview.html
deleted file mode 100644
index fa12e3ec5..000000000
--- a/src/lib/Bcfg2/Server/Hostbase/hostbase/webtemplates/zoneview.html
+++ /dev/null
@@ -1,71 +0,0 @@
-{% extends "base.html" %}
-
-{% block pagebanner %}
- <div class="header">
- <h2>Zones</h2>
- <p>Hostbase generates DNS zone files for the following zones.
- </div>
- <br/>
-{% endblock %}
-
-{% block sidebar %}
-{% include "navbar.tmpl" %}
-<ul class="sidebar">
-<li><a href="/hostbase/zones/{{ zone.id }}/edit/" class="sidebar">edit zone</a><br>
-</li>
-</ul>
-{% include "logout.tmpl" %}
-{% endblock %}
-
-{% block content %}
-<table border="0" width="100%">
- <colgroup>
- <col width="200">
- <col width="*">
- <tr> <td> <b>zone</b></td>
- <td> {{ zone.zone }}</td></tr>
- <tr> <td> <b>serial</b></td>
- <td> {{ zone.serial }}</td></tr>
- <tr> <td> <b>admin</b></td>
- <td> {{ zone.admin }}</td></tr>
- <tr> <td> <b>primary_master</b></td>
- <td> {{ zone.primary_master }}</td></tr>
- <tr> <td> <b>expire</b></td>
- <td> {{ zone.expire }}</td></tr>
- <tr> <td> <b>retry</b></td>
- <td> {{ zone.retry }}</td></tr>
- <tr> <td> <b>refresh</b></td>
- <td> {{ zone.refresh }}</td></tr>
- <tr> <td> <b>ttl</b></td>
- <td> {{ zone.ttl }}</td></tr>
-
- <tr><td valign="top"> <b>nameservers</b></td>
- <td>
- {% for nameserver in zone.nameservers.all %}
- {{ nameserver.name }}<br>
- {% endfor %}
- </td></tr>
- <tr><td valign="top"> <b>mxs</b></td>
- <td>
- {% for mx in zone.mxs.all %}
- {{ mx.priority }} {{ mx.mx }}<br>
- {% endfor %}
- </td></tr>
- {% if addresses %}
- <tr><td valign="top"> <b>A records</b></td>
- <td>
- {% for address in sof.addresses.all %}
- {{ address.ip_addr }}<br>
- {% endfor %}
- </td></tr>
- {% endif %}
-
- <tr> <td valign="top"> <b>aux</b></td>
- <td>
- {{ zone.aux|linebreaksbr }}
- </td></tr>
-
-</table>
-<br><br>
-{% endblock %}
-
diff --git a/src/lib/Bcfg2/Server/Hostbase/ldapauth.py b/src/lib/Bcfg2/Server/Hostbase/ldapauth.py
deleted file mode 100644
index fc2ca1bf1..000000000
--- a/src/lib/Bcfg2/Server/Hostbase/ldapauth.py
+++ /dev/null
@@ -1,179 +0,0 @@
-"""
-Checks with LDAP (ActiveDirectory) to see if the current user is an LDAP(AD)
-user, and returns a subset of the user's profile that is needed by Argonne/CIS
-to set user level privleges in Django
-"""
-
-import os
-import ldap
-
-
-class LDAPAUTHError(Exception):
- """LDAPAUTHError is raised when somehting goes boom."""
- pass
-
-
-class ldapauth(object):
- group_test = False
- check_member_of = os.environ['LDAP_CHECK_MBR_OF_GRP']
- securitylevel = 0
- distinguishedName = None
- sAMAccountName = None
- telephoneNumber = None
- title = None
- memberOf = None
- department = None # this will be a list
- mail = None
- extensionAttribute1 = None # badgenumber
- badge_no = None
-
- def __init__(self, login, passwd):
- """get username (if using ldap as auth the
- apache env var REMOTE_USER should be used)
- from username get user profile from AD/LDAP
- """
- #p = self.user_profile(login,passwd)
- d = self.user_dn(login) # success, distname
- print(d[1])
- if d[0] == 'success':
- pass
- p = self.user_bind(d[1], passwd)
- if p[0] == 'success':
- #parse results
- parsed = self.parse_results(p[2])
- print(self.department)
- self.group_test = self.member_of()
- securitylevel = self.security_level()
- print("ACCESS LEVEL: " + str(securitylevel))
- else:
- raise LDAPAUTHError(p[2])
- else:
- raise LDAPAUTHError(p[2])
-
- def user_profile(self, login, passwd=None):
- """NOT USED RIGHT NOW"""
- ldap_login = "CN=%s" % login
- svc_acct = os.environ['LDAP_SVC_ACCT_NAME']
- svc_pass = os.environ['LDAP_SVC_ACCT_PASS']
- #svc_acct = 'CN=%s,DC=anl,DC=gov' % login
- #svc_pass = passwd
-
- search_pth = os.environ['LDAP_SEARCH_PTH']
-
- try:
- conn = ldap.initialize(os.environ['LDAP_URI'])
- conn.bind(svc_acct, svc_pass, ldap.AUTH_SIMPLE)
- result_id = conn.search(search_pth,
- ldap.SCOPE_SUBTREE,
- ldap_login,
- None)
- result_type, result_data = conn.result(result_id, 0)
- return ('success', 'User profile found', result_data,)
- except ldap.LDAPError:
- e = sys.exc_info()[1]
- #connection failed
- return ('error', 'LDAP connect failed', e,)
-
- def user_bind(self, distinguishedName, passwd):
- """Binds to LDAP Server"""
- search_pth = os.environ['LDAP_SEARCH_PTH']
- try:
- conn = ldap.initialize(os.environ['LDAP_URI'])
- conn.bind(distinguishedName, passwd, ldap.AUTH_SIMPLE)
- cn = distinguishedName.split(",")
- result_id = conn.search(search_pth,
- ldap.SCOPE_SUBTREE,
- cn[0],
- None)
- result_type, result_data = conn.result(result_id, 0)
- return ('success', 'User profile found', result_data,)
- except ldap.LDAPError:
- e = sys.exc_info()[1]
- #connection failed
- return ('error', 'LDAP connect failed', e,)
-
- def user_dn(self, cn):
- """Uses Service Account to get distinguishedName"""
- ldap_login = "CN=%s" % cn
- svc_acct = os.environ['LDAP_SVC_ACCT_NAME']
- svc_pass = os.environ['LDAP_SVC_ACCT_PASS']
- search_pth = os.environ['LDAP_SEARCH_PTH']
-
- try:
- conn = ldap.initialize(os.environ['LDAP_URI'])
- conn.bind(svc_acct, svc_pass, ldap.AUTH_SIMPLE)
- result_id = conn.search(search_pth,
- ldap.SCOPE_SUBTREE,
- ldap_login,
- None)
- result_type, result_data = conn.result(result_id, 0)
- raw_obj = result_data[0][1]
- distinguishedName = raw_obj['distinguishedName']
- return ('success', distinguishedName[0],)
- except ldap.LDAPError:
- e = sys.exc_info()[1]
- #connection failed
- return ('error', 'LDAP connect failed', e,)
-
- def parse_results(self, user_obj):
- """Clean up the huge ugly object handed to us in the LDAP query"""
- #user_obj is a list formatted like this:
- #[('LDAP_DN',{user_dict},),]
- try:
- raw_obj = user_obj[0][1]
- self.memberOf = raw_obj['memberOf']
- self.sAMAccountName = raw_obj['sAMAccountName'][0]
- self.distinguishedName = raw_obj['distinguishedName'][0]
- self.telephoneNumber = raw_obj['telephoneNumber'][0]
- self.title = raw_obj['title'][0]
- self.department = raw_obj['department'][0]
- self.mail = raw_obj['mail'][0]
- self.badge_no = raw_obj['extensionAttribute1'][0]
- self.email = raw_obj['extensionAttribute2'][0]
- display_name = raw_obj['displayName'][0].split(",")
- self.name_f = raw_obj['givenName'][0]
- self.name_l = display_name[0]
- self.is_staff = False
- self.is_superuser = False
-
- return
- except KeyError:
- e = sys.exc_info()[1]
- raise LDAPAUTHError("Portions of the LDAP User profile not present")
-
- def member_of(self):
- """See if this user is in our group that is allowed to login"""
- m = [g for g in self.memberOf if g == self.check_member_of]
- if len(m) == 1:
- return True
- else:
- return False
-
- def security_level(self):
- level = self.securitylevel
-
- user = os.environ['LDAP_GROUP_USER']
- m = [g for g in self.memberOf if g == user]
- if len(m) == 1:
- if level < 1:
- level = 1
-
- cspr = os.environ['LDAP_GROUP_SECURITY_LOW']
- m = [g for g in self.memberOf if g == cspr]
- if len(m) == 1:
- if level < 2:
- level = 2
-
- cspo = os.environ['LDAP_GROUP_SECURITY_HIGH']
- m = [g for g in self.memberOf if g == cspo]
- if len(m) == 1:
- if level < 3:
- level = 3
-
- admin = os.environ['LDAP_GROUP_ADMIN']
- m = [g for g in self.memberOf if g == admin]
- if len(m) == 1:
- if level < 4:
- level = 4
-
- return level
diff --git a/src/lib/Bcfg2/Server/Hostbase/manage.py b/src/lib/Bcfg2/Server/Hostbase/manage.py
deleted file mode 100755
index 5e78ea979..000000000
--- a/src/lib/Bcfg2/Server/Hostbase/manage.py
+++ /dev/null
@@ -1,11 +0,0 @@
-#!/usr/bin/env python
-from django.core.management import execute_manager
-try:
- import settings # Assumed to be in the same directory.
-except ImportError:
- import sys
- sys.stderr.write("Error: Can't find the file 'settings.py' in the directory containing %r. It appears you've customized things.\nYou'll have to run django-admin.py, passing it your settings module.\n(If the file settings.py does indeed exist, it's causing an ImportError somehow.)\n" % __file__)
- sys.exit(1)
-
-if __name__ == "__main__":
- execute_manager(settings)
diff --git a/src/lib/Bcfg2/Server/Hostbase/media/base.css b/src/lib/Bcfg2/Server/Hostbase/media/base.css
deleted file mode 100644
index ddbf02165..000000000
--- a/src/lib/Bcfg2/Server/Hostbase/media/base.css
+++ /dev/null
@@ -1,5 +0,0 @@
-
-/* Import other styles */
-@import url('global.css');
-@import url('layout.css');
-@import url('boxypastel.css');
diff --git a/src/lib/Bcfg2/Server/Hostbase/media/boxypastel.css b/src/lib/Bcfg2/Server/Hostbase/media/boxypastel.css
deleted file mode 100644
index 7ae0684ef..000000000
--- a/src/lib/Bcfg2/Server/Hostbase/media/boxypastel.css
+++ /dev/null
@@ -1,179 +0,0 @@
-body {
- background-color: #fff;
- color: #000;
- font: 12px 'Lucida Grande', Arial, Helvetica, sans-serif;
- margin-left:0px;
- margin-right:100px;
-}
-/* links */
-a:link {
- color: #00f;
- text-decoration: none;
-}
-a:visited {
- color: #00a;
- text-decoration: none;
-}
-a:hover {
- color: #00a;
- text-decoration: underline;
-}
-a:active {
- color: #00a;
- text-decoration: underline;
-}
-/* divs*/
-div.bad {
- border: 1px solid #660000;
- background: #FF6A6A;
- margin: 10px 0;
- padding: 8px;
- text-align: left;
- margin-left:50px;
- margin-right:50px;
-}
-div.modified {
- border: 1px solid #CC9900;
- background: #FFEC8B;
- margin: 10px 0;
- padding: 8px;
- text-align: left;
- margin-left:50px;
- margin-right:50px;
-}
-div.clean {
- border: 1px solid #006600;
- background: #9AFF9A;
- margin: 10px 0;
- padding: 8px;
- text-align: left;
- margin-left:50px;
- margin-right:50px;
-}
-div.extra {
- border: 1px solid #006600;
- background: #6699CC;
- margin: 10px 0;
- padding: 8px;
- text-align: left;
- margin-left:50px;
- margin-right:50px;
-}
-div.warning {
- border: 1px
- solid #CC3300;
- background: #FF9933;
- margin: 10px 0;
- padding: 8px;
- text-align: left;
- margin-left:50px;
- margin-right:50px;
-}
-div.all-warning {
- border: 1px solid #DD5544;
- background: #FFD9A2;
- margin: 10px 0;
- padding: 8px;
- text-align: left;
- margin-left:50px;
- margin-right:50px;
-}
-div.down {
- border: 1px
- solid #999;
- background-color: #DDD;
- margin: 10px 0;
- padding: 8px;
- text-align: left;
- margin-left:50px;
- margin-right:50px;
-}
-div.items{
- display: none;
-}
-div.nodebox {
- border: 1px solid #c7cfd5;
- background: #f1f5f9;
- margin: 20px 0;
- padding: 8px 8px 16px 8px;
- text-align: left;
- position:relative;
-}
-div.header {
- background-color: #DDD;
- padding: 8px;
- text-indent:50px;
- position:relative;
-}
-
-/*Spans*/
-.nodename {
- font-style: italic;
-}
-.nodelisttitle {
- font-size: 14px;
-}
-
-h2{
- font-size: 16px;
- color: #000;
-}
-
-ul.plain {
- list-style-type:none;
- text-align: left;
-}
-
-.notebox {
- position: absolute;
- top: 0px;
- right: 0px;
- padding: 1px;
- text-indent:0px;
- border: 1px solid #FFF;
- background: #999;
- color: #FFF;
-}
-
-.configbox {
- position: absolute;
- bottom: 0px;
- right: 0px;
- padding: 1px;
- text-indent:0px;
- border: 1px solid #999;
- background: #FFF;
- color: #999;
-}
-
-p.indented{
- text-indent: 50px
-}
-
-/*
- Sortable tables */
-table.sortable a.sortheader {
- background-color:#dfd;
- font-weight: bold;
- text-decoration: none;
- display: block;
-}
-table.sortable {
- padding: 2px 4px 2px 4px;
- border: 1px solid #000000;
- border-spacing: 0px
-}
-td.sortable{
- padding: 2px 8px 2px 8px;
-}
-
-th.sortable{
- background-color:#F3DD91;
- border: 1px solid #FFFFFF;
-}
-tr.tablelist {
- background-color:#EDF3FE;
-}
-tr.tablelist-alt{
- background-color:#FFFFFF;
-}
diff --git a/src/lib/Bcfg2/Server/Hostbase/media/global.css b/src/lib/Bcfg2/Server/Hostbase/media/global.css
deleted file mode 100644
index 73451e1bc..000000000
--- a/src/lib/Bcfg2/Server/Hostbase/media/global.css
+++ /dev/null
@@ -1,8 +0,0 @@
-body {
- margin:0;
- padding:0;
- font-size:12px;
- font-family:"Lucida Grande","Bitstream Vera Sans",Verdana,Arial,sans-serif;
- color:#000;
- background:#fff;
- }
diff --git a/src/lib/Bcfg2/Server/Hostbase/media/layout.css b/src/lib/Bcfg2/Server/Hostbase/media/layout.css
deleted file mode 100644
index 9085cc220..000000000
--- a/src/lib/Bcfg2/Server/Hostbase/media/layout.css
+++ /dev/null
@@ -1,62 +0,0 @@
-/* Page Structure */
-#container { position:absolute; top: 3em; margin-left:1em; margin-right:2em; padding:0; margin-top:1.5em; min-width:
- 650px; }
-#header { width:100%; }
-#content-main { float:left; }
-
-/* HEADER */
-#header {
-background:#000;
-color:#ffc;
-position:absolute;
-}
-#header a:link, #header a:visited { color:white; }
-#header a:hover { text-decoration:underline; }
-#branding h1 { padding:0 10px; font-size:18px; margin:8px 0; font-weight:normal; color:#f4f379; }
-#branding h2 { padding:0 10px; font-size:14px; margin:-8px 0 8px 0; font-weight:normal; color:#ffc; }
-#user-tools { position:absolute; top:0; right:0; padding:1.2em 10px; font-size:11px; text-align:right; }
-
-/*SIDEBAR*/
-#sidebar {
- float:left;
- position: relative;
- width: auto;
- height: 100%;
- margin-top: 3em;
- padding-right: 1.5em;
- padding-left: 1.5em;
- padding-top: 1em;
- padding-bottom:3em;
- background: #000;
- color:ffc;
-}
-
-a.sidebar:link {color: #fff;}
-a.sidebar:active {color: #fff;}
-a.sidebar:visited {color: #fff;}
-a.sidebar:hover {color: #fff;}
-
-ul.sidebar {
- color: #ffc;
- text-decoration: none;
- list-style-type: none;
- text-indent: -1em;
-}
-ul.sidebar-level2 {
- text-indent: -2em;
- list-style-type: none;
- font-size: 11px;
-}
-
-/* ALIGNED FIELDSETS */
-.aligned label { display:block; padding:0 1em 3px 0; float:left; width:8em; }
-.aligned label.inline { display:inline; float:none; }
-.colMS .aligned .vLargeTextField, .colMS .aligned .vXMLLargeTextField { width:350px; }
-form .aligned p, form .aligned ul { margin-left:7em; padding-left:30px; }
-form .aligned table p { margin-left:0; padding-left:0; }
-form .aligned p.help { padding-left:38px; }
-.aligned .vCheckboxLabel { float:none !important; display:inline; padding-left:4px; }
-.colM .aligned .vLargeTextField, colM .aligned .vXMLLargeTextField { width:610px; }
-.checkbox-row p.help { margin-left:0; padding-left:0 !important; }
-
-
diff --git a/src/lib/Bcfg2/Server/Hostbase/nisauth.py b/src/lib/Bcfg2/Server/Hostbase/nisauth.py
deleted file mode 100644
index ae4c6c021..000000000
--- a/src/lib/Bcfg2/Server/Hostbase/nisauth.py
+++ /dev/null
@@ -1,40 +0,0 @@
-"""Checks with NIS to see if the current user is in the support group"""
-import os
-import crypt, nis
-from Bcfg2.Server.Hostbase.settings import AUTHORIZED_GROUP
-
-
-class NISAUTHError(Exception):
- """NISAUTHError is raised when somehting goes boom."""
- pass
-
-class nisauth(object):
- group_test = False
-# check_member_of = os.environ['LDAP_CHECK_MBR_OF_GRP']
- samAcctName = None
- distinguishedName = None
- sAMAccountName = None
- telephoneNumber = None
- title = None
- memberOf = None
- department = None #this will be a list
- mail = None
- extensionAttribute1 = None #badgenumber
- badge_no = None
- uid = None
-
- def __init__(self,login,passwd=None):
- """get user profile from NIS"""
- try:
- p = nis.match(login, 'passwd.byname').split(":")
- except:
- raise NISAUTHError('username')
- # check user password using crypt and 2 character salt from passwd file
- if p[1] == crypt.crypt(passwd, p[1][:2]):
- # check to see if user is in valid support groups
- # will have to include these groups in a settings file eventually
- if not login in nis.match(AUTHORIZED_GROUP, 'group.byname').split(':')[-1].split(',') and p[3] != nis.match(AUTHORIZED_GROUP, 'group.byname').split(':')[2]:
- raise NISAUTHError('group')
- self.uid = p[2]
- else:
- raise NISAUTHError('password')
diff --git a/src/lib/Bcfg2/Server/Hostbase/regex.py b/src/lib/Bcfg2/Server/Hostbase/regex.py
deleted file mode 100644
index 41cc0f6f0..000000000
--- a/src/lib/Bcfg2/Server/Hostbase/regex.py
+++ /dev/null
@@ -1,6 +0,0 @@
-import re
-
-date = re.compile('^[0-9]{4}-[0-9]{2}-[0-9]{2}$')
-host = re.compile('^[a-z0-9-_]+(\.[a-z0-9-_]+)+$')
-macaddr = re.compile('^[0-9abcdefABCDEF]{2}(:[0-9abcdefABCDEF]{2}){5}$|virtual')
-ipaddr = re.compile('^[0-9]{1,3}(\.[0-9]{1,3}){3}$')
diff --git a/src/lib/Bcfg2/Server/Hostbase/settings.py b/src/lib/Bcfg2/Server/Hostbase/settings.py
deleted file mode 100644
index 7660e1bdc..000000000
--- a/src/lib/Bcfg2/Server/Hostbase/settings.py
+++ /dev/null
@@ -1,143 +0,0 @@
-import os.path
-# Compatibility import
-from Bcfg2.Compat import ConfigParser
-
-PROJECT_ROOT = os.path.abspath(os.path.dirname(__file__))
-
-c = ConfigParser.ConfigParser()
-#This needs to be configurable one day somehow
-c.read(['./bcfg2.conf'])
-
-defaults = {'database_engine':'sqlite3',
- 'database_name':'./dev.db',
- 'database_user':'',
- 'database_password':'',
- 'database_host':'',
- 'database_port':3306,
- 'default_mx':'localhost',
- 'priority':10,
- 'authorized_group':'admins',
- }
-
-if c.has_section('hostbase'):
- options = dict(c.items('hostbase'))
-else:
- options = defaults
-
-# Django settings for Hostbase project.
-DEBUG = True
-TEMPLATE_DEBUG = DEBUG
-ADMINS = (
- ('Root', 'root'),
-)
-MANAGERS = ADMINS
-
-# 'postgresql', 'mysql', 'sqlite3' or 'ado_mssql'.
-DATABASE_ENGINE = options['database_engine']
-# Or path to database file if using sqlite3.
-DATABASE_NAME = options['database_name']
-# Not used with sqlite3.
-DATABASE_USER = options['database_user']
-# Not used with sqlite3.
-DATABASE_PASSWORD = options['database_password']
-# Set to empty string for localhost. Not used with sqlite3.
-DATABASE_HOST = options['database_host']
-# Set to empty string for default. Not used with sqlite3.
-DATABASE_PORT = int(options['database_port'])
-# Local time zone for this installation. All choices can be found here:
-# http://docs.djangoproject.com/en/dev/ref/settings/#time-zone
-try:
- TIME_ZONE = c.get('statistics', 'time_zone')
-except:
- TIME_ZONE = None
-
-# enter the defauly MX record machines will get in Hostbase
-# this setting may move elsewhere eventually
-DEFAULT_MX = options['default_mx']
-PRIORITY = int(options['priority'])
-
-SESSION_EXPIRE_AT_BROWSER_CLOSE = True
-
-# Uncomment a backend below if you would like to use it for authentication
-AUTHENTICATION_BACKENDS = ('django.contrib.auth.backends.ModelBackend',
- 'Bcfg2.Server.Hostbase.backends.NISBackend',
- #'Bcfg2.Server.Hostbase.backends.LDAPBacken',
- )
-# enter an NIS group name you'd like to give access to edit hostbase records
-AUTHORIZED_GROUP = options['authorized_group']
-
-#create login url area:
-import django.contrib.auth
-django.contrib.auth.LOGIN_URL = '/login'
-# Absolute path to the directory that holds media.
-# Example: "/home/media/media.lawrence.com/"
-MEDIA_ROOT = os.path.join(PROJECT_ROOT, 'media')
-# Just for development
-SERVE_MEDIA = DEBUG
-
-# Language code for this installation. All choices can be found here:
-# http://www.w3.org/TR/REC-html40/struct/dirlang.html#langcodes
-# http://blogs.law.harvard.edu/tech/stories/storyReader$15
-LANGUAGE_CODE = 'en-us'
-SITE_ID = 1
-# URL that handles the media served from MEDIA_ROOT.
-# Example: "http://media.lawrence.com"
-MEDIA_URL = '/site_media/'
-# URL prefix for admin media -- CSS, JavaScript and images. Make sure to use a
-# trailing slash.
-# Examples: "http://foo.com/media/", "/media/".
-ADMIN_MEDIA_PREFIX = '/media/'
-# Make this unique, and don't share it with anybody.
-SECRET_KEY = '*%=fv=yh9zur&gvt4&*d#84o(cy^-*$ox-v1e9%32pzf2*qu#s'
-# List of callables that know how to import templates from various sources.
-TEMPLATE_LOADERS = (
- 'django.template.loaders.filesystem.load_template_source',
- 'django.template.loaders.app_directories.load_template_source',
-# 'django.template.loaders.eggs.load_template_source',
-)
-
-TEMPLATE_CONTEXT_PROCESSORS = (
- "django.core.context_processors.auth",
- "django.core.context_processors.debug",
- "django.core.context_processors.i18n",
- "django.core.context_processors.request",
- "django.core.context_processors.media",
-# Django development version.
-# "django.core.context_processors.csrf",
-)
-
-
-MIDDLEWARE_CLASSES = (
- 'django.middleware.common.CommonMiddleware',
- 'django.contrib.sessions.middleware.SessionMiddleware',
- 'django.middleware.locale.LocaleMiddleware',
- 'django.contrib.auth.middleware.AuthenticationMiddleware',
- 'django.middleware.doc.XViewMiddleware',
-)
-
-ROOT_URLCONF = 'Bcfg2.Server.Hostbase.urls'
-
-TEMPLATE_DIRS = (
- # Put strings here, like "/home/html/django_templates".
- # Always use forward slashes, even on Windows.
- '/usr/lib/python2.3/site-packages/Bcfg2/Server/Hostbase/hostbase/webtemplates',
- '/usr/lib/python2.4/site-packages/Bcfg2/Server/Hostbase/hostbase/webtemplates',
- '/usr/lib/python2.3/site-packages/Bcfg2/Server/Hostbase/templates',
- '/usr/lib/python2.4/site-packages/Bcfg2/Server/Hostbase/templates',
- '/usr/share/bcfg2/Hostbase/templates',
- os.path.join(PROJECT_ROOT, 'templates'),
- os.path.join(PROJECT_ROOT, 'hostbase/webtemplates'),
-)
-
-INSTALLED_APPS = (
- 'django.contrib.admin',
- 'django.contrib.admindocs',
- 'django.contrib.auth',
- 'django.contrib.contenttypes',
- 'django.contrib.sessions',
- 'django.contrib.sites',
- 'django.contrib.humanize',
- 'Bcfg2.Server.Hostbase.hostbase',
-)
-
-LOGIN_URL = '/login/'
diff --git a/src/lib/Bcfg2/Server/Hostbase/templates/batchadd.tmpl b/src/lib/Bcfg2/Server/Hostbase/templates/batchadd.tmpl
deleted file mode 100644
index 74ea3c047..000000000
--- a/src/lib/Bcfg2/Server/Hostbase/templates/batchadd.tmpl
+++ /dev/null
@@ -1,29 +0,0 @@
-#mx ->
-#priority ->
-
-hostname ->
-whatami ->
-netgroup ->
-security_class ->
-support ->
-csi ->
-printq ->
-dhcp ->
-outbound_smtp ->
-primary_user ->
-administrator ->
-location ->
-expiration_date -> YYYY-MM-DD
-comments ->
-
-mac_addr ->
-hdwr_type ->
-ip_addr ->
-#ip_addr ->
-cname ->
-#cname ->
-
-#mac_addr ->
-#hdwr_type ->
-#ip_addr ->
-#cname ->
diff --git a/src/lib/Bcfg2/Server/Hostbase/templates/dhcpd.conf.head b/src/lib/Bcfg2/Server/Hostbase/templates/dhcpd.conf.head
deleted file mode 100644
index a3d19547e..000000000
--- a/src/lib/Bcfg2/Server/Hostbase/templates/dhcpd.conf.head
+++ /dev/null
@@ -1,5 +0,0 @@
-#
-# dhcpd.conf
-#
-# Configuration file for ISC dhcpd
-#
diff --git a/src/lib/Bcfg2/Server/Hostbase/templates/dhcpd.tmpl b/src/lib/Bcfg2/Server/Hostbase/templates/dhcpd.tmpl
deleted file mode 100644
index 757b263cd..000000000
--- a/src/lib/Bcfg2/Server/Hostbase/templates/dhcpd.tmpl
+++ /dev/null
@@ -1,17 +0,0 @@
-#
-# This file is automatically generated.
-# DO NOT EDIT IT BY HAND!
-#
-# This file contains {{ numips }} IP addresses
-# Generated on: {% now "r" %}
-#
-
-{% include "dhcpd.conf.head" %}
-
-# Hosts which require special configuration options can be listed in
-# host statements. If no address is specified, the address will be
-# allocated dynamically (if possible), but the host-specific information
-# will still come from the host declaration.
-
-{% for host in hosts %}host {{ host.0 }} {hardware ethernet {{ host.1 }};fixed-address {{ host.2 }};}
-{% endfor %}
diff --git a/src/lib/Bcfg2/Server/Hostbase/templates/hosts.tmpl b/src/lib/Bcfg2/Server/Hostbase/templates/hosts.tmpl
deleted file mode 100644
index 251cb5a79..000000000
--- a/src/lib/Bcfg2/Server/Hostbase/templates/hosts.tmpl
+++ /dev/null
@@ -1,26 +0,0 @@
-##############################################################################
-# MCS hosts file
-#
-# This file is generated automatically - DO NOT EDIT IT.
-#
-# Generated on: {% now "r" %}
-#
-
-127.0.0.1 localhost.mcs.anl.gov localhost
-
-# This file lists hosts in these domains:
-{% for domain in domain_data %}# {{ domain.0 }}: {{ domain.1 }}
-{% endfor %}
-#
-# This file lists hosts on these networks:
-#
-# Network Hosts
-# ---------------------------------------------------------------------
-{% for octet in two_octets_data %}# {{ octet.0 }} {{octet.1 }}
-{% endfor %}
-#
-{% for octet in three_octets_data %}# {{ octet.0 }} {{ octet.1 }}
-{% endfor %}
-#
-# Total host interfaces (ip addresses) in this file: {{ num_ips }}
-
diff --git a/src/lib/Bcfg2/Server/Hostbase/templates/hostsappend.tmpl b/src/lib/Bcfg2/Server/Hostbase/templates/hostsappend.tmpl
deleted file mode 100644
index 00e0d5d04..000000000
--- a/src/lib/Bcfg2/Server/Hostbase/templates/hostsappend.tmpl
+++ /dev/null
@@ -1,5 +0,0 @@
-##########################################################################
-# Hosts on subnet: {{ subnet.0 }}
-# total hosts: {{ subnet.1 }}
-{% for ip in ips %}{{ ip.0 }} {{ ip.1 }}{% if ip.4 and not ip.3 %} # {{ ip.5 }}{% else %}{% for name in ip.2 %} {{ name }}{% endfor %}{% for cname in ip.3 %} {{ cname }}{% endfor %} # {{ ip.5 }}{% endif %}
-{% endfor %}
diff --git a/src/lib/Bcfg2/Server/Hostbase/templates/named.tmpl b/src/lib/Bcfg2/Server/Hostbase/templates/named.tmpl
deleted file mode 100644
index 03e054198..000000000
--- a/src/lib/Bcfg2/Server/Hostbase/templates/named.tmpl
+++ /dev/null
@@ -1,69 +0,0 @@
-// This is the primary configuration file for the BIND DNS server named.
-//
-// Please read /usr/share/doc/bind9/README.Debian.gz for information on the
-// structure of BIND configuration files in Debian, *BEFORE* you customize
-// this configuration file.
-//
-
-include "/etc/bind/named.conf.options";
-
-include "/etc/bind/rndc.key";
-
-// prime the server with knowledge of the root servers
-zone "." {
- type hint;
- file "/etc/bind/db.root";
-};
-
-// be authoritative for the localhost forward and reverse zones, and for
-// broadcast zones as per RFC 1912
-{% for zone in zones %}
-zone "{{ zone.1 }}" {
- type master;
- file "/etc/bind/hostbase/{{ zone.1 }}";
- notify no;
- also-notify { 140.221.9.6;140.221.8.10; };
-};{% endfor %}
-
-zone "localhost" {
- type master;
- file "/etc/bind/db.local";
-};
-
-zone "127.in-addr.arpa" {
- type master;
- file "/etc/bind/db.127";
-};
-
-zone "0.in-addr.arpa" {
- type master;
- file "/etc/bind/db.0";
-};
-
-zone "255.in-addr.arpa" {
- type master;
- file "/etc/bind/db.255";
-};
-{% for reverse in reverses %}
-zone "{{ reverse.0 }}.in-addr.arpa" {
- type master;
- file "/etc/bind/hostbase/{{ reverse.0 }}.rev";
- notify no;
- also-notify { 140.221.9.6;140.221.8.10; };
-};{% endfor %}
-
-// zone "com" { type delegation-only; };
-// zone "net" { type delegation-only; };
-
-// From the release notes:
-// Because many of our users are uncomfortable receiving undelegated answers
-// from root or top level domains, other than a few for whom that behaviour
-// has been trusted and expected for quite some length of time, we have now
-// introduced the "root-delegations-only" feature which applies delegation-only
-// logic to all top level domains, and to the root domain. An exception list
-// should be specified, including "MUSEUM" and "DE", and any other top level
-// domains from whom undelegated responses are expected and trusted.
-// root-delegation-only exclude { "DE"; "MUSEUM"; };
-
-include "/etc/bind/named.conf.local";
-include "/etc/bind/named.conf.static";
diff --git a/src/lib/Bcfg2/Server/Hostbase/templates/namedviews.tmpl b/src/lib/Bcfg2/Server/Hostbase/templates/namedviews.tmpl
deleted file mode 100644
index 52021620e..000000000
--- a/src/lib/Bcfg2/Server/Hostbase/templates/namedviews.tmpl
+++ /dev/null
@@ -1,92 +0,0 @@
-// This is the primary configuration file for the BIND DNS server named.
-//
-// Please read /usr/share/doc/bind9/README.Debian.gz for information on the
-// structure of BIND configuration files in Debian, *BEFORE* you customize
-// this configuration file.
-//
-
-include "/etc/bind/named.conf.options";
-
-include "/etc/bind/rndc.key";
-
-view "internal" {
- match-clients { 140.221.9.6;140.221.8.10;140.221.8.88;140.221.8.15; };
- recursion yes;
- // prime the server with knowledge of the root servers
- zone "." {
- type hint;
- file "/etc/bind/db.root";
- };
- {% for zone in zones %}
- zone "{{ zone.1 }}" {
- type master;
- file "/etc/bind/hostbase/{{ zone.1 }}";
- notify no;
- also-notify { 140.221.9.6;140.221.8.10;140.221.8.88;140.221.8.15; };
- };{% endfor %}
- // be authoritative for the localhost forward and reverse zones, and for
- // broadcast zones as per RFC 1912
-
- zone "localhost" {
- type master;
- file "/etc/bind/db.local";
- };
-
- zone "127.in-addr.arpa" {
- type master;
- file "/etc/bind/db.127";
- };
-
- zone "0.in-addr.arpa" {
- type master;
- file "/etc/bind/db.0";
- };
-
- zone "255.in-addr.arpa" {
- type master;
- file "/etc/bind/db.255";
- };
- {% for reverse in reverses %}
- zone "{{ reverse.0 }}.in-addr.arpa" {
- type master;
- file "/etc/bind/hostbase/{{ reverse.0 }}.rev";
- notify no;
- also-notify { 140.221.9.6;140.221.8.10;140.221.8.88; };
- };{% endfor %}
- include "/etc/bind/named.conf.static";
-};
-
-view "external" {
- match-clients { any; };
- recursion no;
- {% for zone in zones %}
- zone "{{ zone.1 }}" {
- type master;
- file "/etc/bind/hostbase/{{ zone.1 }}.external";
- notify no;
- };{% endfor %}
-
- {% for reverse in reverses %}
- zone "{{ reverse.0 }}.in-addr.arpa" {
- type master;
- file "/etc/bind/hostbase/{{ reverse.0 }}.rev.external";
- notify no;
- };{% endfor %}
- include "/etc/bind/named.conf.static";
-};
-
-
-// zone "com" { type delegation-only; };
-// zone "net" { type delegation-only; };
-
-// From the release notes:
-// Because many of our users are uncomfortable receiving undelegated answers
-// from root or top level domains, other than a few for whom that behaviour
-// has been trusted and expected for quite some length of time, we have now
-// introduced the "root-delegations-only" feature which applies delegation-only
-// logic to all top level domains, and to the root domain. An exception list
-// should be specified, including "MUSEUM" and "DE", and any other top level
-// domains from whom undelegated responses are expected and trusted.
-// root-delegation-only exclude { "DE"; "MUSEUM"; };
-
-include "/etc/bind/named.conf.local";
diff --git a/src/lib/Bcfg2/Server/Hostbase/templates/reverseappend.tmpl b/src/lib/Bcfg2/Server/Hostbase/templates/reverseappend.tmpl
deleted file mode 100644
index 6ed520c98..000000000
--- a/src/lib/Bcfg2/Server/Hostbase/templates/reverseappend.tmpl
+++ /dev/null
@@ -1,4 +0,0 @@
-{% if fileorigin %}$ORIGIN {{ fileorigin }}.in-addr.arpa.{% endif %}
-$ORIGIN {{ inaddr }}.in-addr.arpa.
-{% for host in hosts %}{{ host.0.3 }} PTR {{ host.1 }}.
-{% endfor %}
diff --git a/src/lib/Bcfg2/Server/Hostbase/templates/reversesoa.tmpl b/src/lib/Bcfg2/Server/Hostbase/templates/reversesoa.tmpl
deleted file mode 100644
index d142eaf7f..000000000
--- a/src/lib/Bcfg2/Server/Hostbase/templates/reversesoa.tmpl
+++ /dev/null
@@ -1,13 +0,0 @@
-$ORIGIN .
-$TTL {{ zone.8 }}
-{{ inaddr }}.in-addr.arpa IN SOA {{ zone.4 }}. {{ zone.3 }} (
- {{ zone.2 }} ; serial
- {{ zone.7 }} ; refresh interval
- {{ zone.6 }} ; retry interval
- {{ zone.5 }} ; expire interval
- {{ zone.8 }} ; min ttl
- )
-
- {% for ns in nameservers %}NS {{ ns.0 }}
- {% endfor %}
-
diff --git a/src/lib/Bcfg2/Server/Hostbase/templates/zone.tmpl b/src/lib/Bcfg2/Server/Hostbase/templates/zone.tmpl
deleted file mode 100644
index aad48d179..000000000
--- a/src/lib/Bcfg2/Server/Hostbase/templates/zone.tmpl
+++ /dev/null
@@ -1,18 +0,0 @@
-$ORIGIN .
-$TTL {{ zone.8 }}
-{{ zone.1 }}. IN SOA {{ zone.4 }}. {{ zone.3 }}. (
- {{ zone.2 }} ; serial
- {{ zone.7 }} ; refresh interval
- {{ zone.6 }} ; retry interval
- {{ zone.5 }} ; expire interval
- {{ zone.8 }} ; min ttl
- )
-
- {% for ns in nameservers %}NS {{ ns.0 }}
- {% endfor %}
- {% for a in addresses %}A {{ a.0 }}
- {% endfor %}
- {% for mx in mxs %}MX {{ mx.0 }} {{ mx.1 }}
- {% endfor %}
-$ORIGIN {{ zone.1 }}.
-localhost A 127.0.0.1
diff --git a/src/lib/Bcfg2/Server/Hostbase/urls.py b/src/lib/Bcfg2/Server/Hostbase/urls.py
deleted file mode 100644
index 4a0c33f98..000000000
--- a/src/lib/Bcfg2/Server/Hostbase/urls.py
+++ /dev/null
@@ -1,27 +0,0 @@
-from Bcfg2.Reporting.Compat.django_urls import *
-from django.conf import settings
-from django.views.generic.simple import direct_to_template
-from django.contrib import admin
-
-
-admin.autodiscover()
-
-
-urlpatterns = patterns('',
- # Uncomment the admin/doc line below and add 'django.contrib.admindocs'
- # to INSTALLED_APPS to enable admin documentation:
- (r'^admin/doc/', include('django.contrib.admindocs.urls')),
-
- # Uncomment the next line to enable the admin:
- (r'^admin/', include(admin.site.urls)),
-
- (r'^$',direct_to_template, {'template':'index.html'}, 'index'),
- (r'^hostbase/', include('hostbase.urls')),
- (r'^login/$', 'django.contrib.auth.views.login', {'template_name': 'login.html'}),
- (r'^logout/$', 'django.contrib.auth.views.logout', {'template_name': 'logout.html'})
-)
-
-if settings.SERVE_MEDIA:
- urlpatterns += patterns('',
- (r'^site_media/(?P<path>.*)$', 'django.views.static.serve',
- dict(document_root=settings.MEDIA_ROOT)),)
diff --git a/src/lib/Bcfg2/Server/Info.py b/src/lib/Bcfg2/Server/Info.py
new file mode 100644
index 000000000..6af561089
--- /dev/null
+++ b/src/lib/Bcfg2/Server/Info.py
@@ -0,0 +1,879 @@
+""" Subcommands and helpers for bcfg2-info """
+# -*- coding: utf-8 -*-
+
+import os
+import sys
+import cmd
+import math
+import time
+import copy
+import pipes
+import fnmatch
+import argparse
+import operator
+import lxml.etree
+from code import InteractiveConsole
+import Bcfg2.Logger
+import Bcfg2.Options
+import Bcfg2.Server.Core
+import Bcfg2.Server.Plugin
+import Bcfg2.Client.Tools.POSIX
+from Bcfg2.Compat import any # pylint: disable=W0622
+
+try:
+ try:
+ import cProfile as profile
+ except ImportError:
+ import profile
+ import pstats
+ HAS_PROFILE = True
+except ImportError:
+ HAS_PROFILE = False
+
+
def print_tabular(rows):
    """Print data in tabular format.

    ``rows`` is a non-empty sequence of equal-length tuples; the first
    tuple is treated as the header row.  Each column is sized to its
    longest value plus one space of padding.
    """
    cmax = tuple([max([len(str(row[index])) for row in rows]) + 1
                  for index in range(len(rows[0]))])
    # one " %-Ns " field per column, joined by "|".  (The original code
    # built a throwaway first format string here that was immediately
    # overwritten; the dead assignment has been removed.)
    fstring = ('|'.join([" %%-%ss "] * len(cmax))) % cmax
    print(fstring % rows[0])
    # separator width: column widths + 2 pad spaces each + the pipes
    print((sum(cmax) + (len(cmax) * 2) + (len(cmax) - 1)) * '=')
    for row in rows[1:]:
        print(fstring % row)
+
+
def display_trace(trace):
    """ Print the top 200 entries of a profile trace, ordered by
    cumulative time, then call count, then internal time. """
    profile_stats = pstats.Stats(trace)
    profile_stats.sort_stats('cumulative', 'calls', 'time')
    profile_stats.print_stats(200)
+
+
def load_interpreters():
    """ Build a dict of available interactive Python interpreters.

    Returns a tuple ``(interpreters, default)``: a mapping of
    interpreter name to a callable that starts a session with the given
    locals dict, and the name of the preferred interpreter. """
    interpreters = {"python": lambda v: InteractiveConsole(v).interact()}
    default = "python"

    try:
        import bpython.cli
    except ImportError:
        pass
    else:
        interpreters["bpython"] = lambda v: bpython.cli.main(args=[],
                                                             locals_=v)
        default = "bpython"

    try:
        # whether ipython is actually better than bpython is
        # up for debate, but this is the behavior that existed
        # before --interpreter was added, so we call IPython
        # better
        import IPython
    except ImportError:
        pass
    else:
        # pylint: disable=E1101
        if hasattr(IPython, "Shell"):
            interpreters["ipython"] = lambda v: \
                IPython.Shell.IPShell(argv=[], user_ns=v).mainloop()
            default = "ipython"
        elif hasattr(IPython, "embed"):
            interpreters["ipython"] = lambda v: IPython.embed(user_ns=v)
            default = "ipython"
        else:
            print("Unknown IPython API version")
        # pylint: enable=E1101

    return (interpreters, default)
+
+
class InfoCmd(Bcfg2.Options.Subcommand):  # pylint: disable=W0223
    """ Base class for bcfg2-info subcommands """

    def _expand_globs(self, globs, candidates):
        """ Select the items from ``candidates`` that match any of the
        shell-style patterns in ``globs``. """
        # fast path: no filter given, or an explicit match-everything glob
        if not globs or '*' in globs:
            return candidates
        # if no pattern contains a wildcard character, the globs are
        # literal names and can be returned directly
        if not any(set('*?[]') & set(pattern) for pattern in globs):
            return globs

        matched = set()
        remaining = set(candidates)
        for pattern in globs:
            hits = set(c for c in remaining if fnmatch.fnmatch(c, pattern))
            matched |= hits
            remaining -= hits
        return list(matched)

    def get_client_list(self, globs):
        """ given a list of host globs, get a list of clients that
        match them """
        return self._expand_globs(globs, self.core.metadata.clients)

    def get_group_list(self, globs):
        """ given a list of group globs, get a list of groups that
        match them """
        return self._expand_globs(globs,
                                  list(self.core.metadata.groups.keys()))
+
+
class Debug(InfoCmd):
    """ Shell out to a Python interpreter """
    interpreters, default_interpreter = load_interpreters()
    options = [
        Bcfg2.Options.BooleanOption(
            "-n", "--non-interactive",
            help="Do not enter the interactive debugger"),
        Bcfg2.Options.PathOption(
            "-f", dest="cmd_list", type=argparse.FileType('r'),
            help="File containing commands to run"),
        Bcfg2.Options.Option(
            "--interpreter", cf=("bcfg2-info", "interpreter"),
            env="BCFG2_INFO_INTERPRETER",
            choices=interpreters.keys(), default=default_interpreter)]

    def run(self, setup):
        """Replay any scripted commands, then (unless suppressed) drop
        into the configured interactive interpreter."""
        if setup.cmd_list:
            console = InteractiveConsole(locals())
            for line in setup.cmd_list.readlines():
                stripped = line.strip()
                if stripped:
                    console.push(stripped)
        if not setup.non_interactive:
            print("Dropping to interpreter; press ^D to resume")
            self.interpreters[setup.interpreter](self.core.get_locals())
+
+
class Build(InfoCmd):
    """ Build config for hostname, writing to filename """

    options = [Bcfg2.Options.PositionalArgument("hostname"),
               Bcfg2.Options.PositionalArgument("filename", nargs='?',
                                                default=sys.stdout,
                                                type=argparse.FileType('w'))]

    def run(self, setup):
        """Generate the complete configuration for a client and write it
        as pretty-printed XML to the given file (stdout by default)."""
        config = self.core.BuildConfiguration(setup.hostname)
        tree = lxml.etree.ElementTree(config)
        try:
            tree.write(setup.filename, encoding='UTF-8',
                       xml_declaration=True, pretty_print=True)
        except IOError:
            print("Failed to write %s: %s" % (setup.filename,
                                              sys.exc_info()[1]))
+
+
class Builddir(InfoCmd):
    """ Build config for hostname, writing separate files to directory
    """

    # never try to install entries of these types
    blacklisted_types = ["nonexistent", "permissions"]

    options = Bcfg2.Client.Tools.POSIX.POSIX.options + [
        Bcfg2.Options.PositionalArgument("hostname"),
        Bcfg2.Options.PathOption("directory")]

    help = """Generates a config for client <hostname> and writes the
individual configuration files out separately in a tree under <output
dir>. This only handles file entries, and does not respect 'owner' or
'group' attributes unless run as root. """

    def run(self, setup):
        setup.paranoid = False
        config = self.core.BuildConfiguration(setup.hostname)
        if config.tag == 'error':
            print("Building client configuration failed.")
            return 1

        # collect every installable Path entry, rewriting each name so
        # the file lands under the output directory
        installable = []
        for bundle in config:
            for entry in bundle:
                if entry.tag != 'Path':
                    continue
                if entry.get("type") in self.blacklisted_types:
                    continue
                failure = entry.get("failure")
                if failure is not None:
                    print("Skipping entry %s:%s with bind failure: %s" %
                          (entry.tag, entry.get("name"), failure))
                    continue
                entry.set('name',
                          os.path.join(setup.directory,
                                       entry.get('name').lstrip("/")))
                installable.append(entry)

        Bcfg2.Client.Tools.POSIX.POSIX(config).Install(installable)
+
+
class Buildfile(InfoCmd):
    """ Build config file for hostname """

    options = [
        Bcfg2.Options.Option("-f", "--outfile", metavar="<path>",
                             type=argparse.FileType('w'), default=sys.stdout),
        Bcfg2.Options.PathOption("--altsrc"),
        Bcfg2.Options.PathOption("filename"),
        Bcfg2.Options.PositionalArgument("hostname")]

    def run(self, setup):
        """Bind a single Path entry for the host and write the resulting
        XML to the output file (stdout by default)."""
        entry = lxml.etree.Element('Path', name=setup.filename)
        if setup.altsrc:
            entry.set("altsrc", setup.altsrc)
        try:
            self.core.Bind(entry, self.core.build_metadata(setup.hostname))
        except:  # pylint: disable=W0702
            # print a diagnostic but let the original traceback surface
            print("Failed to build entry %s for host %s" % (setup.filename,
                                                            setup.hostname))
            raise
        try:
            serialized = lxml.etree.tostring(
                entry, xml_declaration=False).decode('UTF-8')
            setup.outfile.write(serialized)
            setup.outfile.write("\n")
        except IOError:
            print("Failed to write %s: %s" % (setup.outfile.name,
                                              sys.exc_info()[1]))
+
+
class BuildAllMixin(object):
    """ InfoCmd mixin to make a version of an existing command that
    applies to multiple hosts"""

    directory_arg = Bcfg2.Options.PathOption("directory")
    hostname_arg = Bcfg2.Options.PositionalArgument("hostname", nargs='*',
                                                    default=[])
    options = [directory_arg, hostname_arg]

    @property
    def _parent(self):
        """ the parent command: the first class in the MRO that is an
        InfoCmd subclass other than InfoCmd and this class itself """
        for candidate in self.__class__.__mro__:
            if (candidate is not InfoCmd and
                    candidate is not self.__class__ and
                    issubclass(candidate, InfoCmd)):
                return candidate

    def run(self, setup):
        """ Run the parent command once for every matching client """
        try:
            os.makedirs(setup.directory)
        except OSError:
            err = sys.exc_info()[1]
            if err.errno != 17:  # 17 == EEXIST, which is fine
                print("Could not create %s: %s" % (setup.directory, err))
                return 1
        for client in self.get_client_list(setup.hostname):
            csetup = self._get_setup(client, copy.copy(setup))
            csetup.hostname = client
            self._parent.run(self, csetup)  # pylint: disable=E1101

    def _get_setup(self, client, setup):
        """ This can be overridden by children to populate individual
        setup options on a per-client basis """
        raise NotImplementedError
+
+
class Buildallfile(Buildfile, BuildAllMixin):
    """ Build config file for all clients in directory """

    options = [BuildAllMixin.directory_arg,
               Bcfg2.Options.PathOption("--altsrc"),
               Bcfg2.Options.PathOption("filename"),
               BuildAllMixin.hostname_arg]

    def run(self, setup):
        """Delegate to the multi-host driver in BuildAllMixin."""
        BuildAllMixin.run(self, setup)

    def _get_setup(self, client, setup):
        # each client writes to its own file, named after the client
        setup.outfile = open(os.path.join(setup.directory, client), 'w')
        return setup
+
+
class Buildall(Build, BuildAllMixin):
    """ Build configs for all clients in directory """

    options = BuildAllMixin.options

    def run(self, setup):
        """Delegate to the multi-host driver in BuildAllMixin."""
        BuildAllMixin.run(self, setup)

    def _get_setup(self, client, setup):
        # each client's full config goes to <directory>/<client>.xml
        setup.filename = os.path.join(setup.directory, client + ".xml")
        return setup
+
+
class Buildbundle(InfoCmd):
    """ Render a templated bundle for hostname """

    options = [Bcfg2.Options.PositionalArgument("bundle"),
               Bcfg2.Options.PositionalArgument("hostname")]

    def run(self, setup):
        """Look up the named bundle (with or without the ``.xml``
        suffix) and print it rendered for the given host.

        Returns 1 if no such bundle exists; rendering errors are printed
        and re-raised."""
        bundler = self.core.plugins['Bundler']
        bundle = None
        if setup.bundle in bundler.entries:
            bundle = bundler.entries[setup.bundle]
        elif not setup.bundle.endswith(".xml"):
            fname = setup.bundle + ".xml"
            if fname in bundler.entries:
                # bugfix: index by the computed filename; the original
                # indexed by ``bundle``, which is still None here
                bundle = bundler.entries[fname]
        if not bundle:
            print("No such bundle %s" % setup.bundle)
            return 1
        try:
            metadata = self.core.build_metadata(setup.hostname)
            print(lxml.etree.tostring(bundle.XMLMatch(metadata),
                                      xml_declaration=False,
                                      pretty_print=True).decode('UTF-8'))
        except:  # pylint: disable=W0702
            print("Failed to render bundle %s for host %s: %s" %
                  (setup.bundle, setup.hostname, sys.exc_info()[1]))
            raise
+
+
class Automatch(InfoCmd):
    """ Perform automatch on a Properties file """

    options = [
        Bcfg2.Options.BooleanOption(
            "-f", "--force",
            help="Force automatch even if it's disabled"),
        Bcfg2.Options.PositionalArgument("propertyfile"),
        Bcfg2.Options.PositionalArgument("hostname")]

    def run(self, setup):
        """Print the XMLMatch of a Properties file for the given host.

        Returns 1 if the Properties plugin is not enabled."""
        try:
            props = self.core.plugins['Properties']
        except KeyError:
            print("Properties plugin not enabled")
            return 1

        pfile = props.entries[setup.propertyfile]
        # automatch runs if forced, enabled globally, or enabled on the
        # file itself via its automatch attribute
        enabled = (Bcfg2.Options.setup.force or
                   Bcfg2.Options.setup.automatch or
                   pfile.xdata.get("automatch", "false").lower() == "true")
        if not enabled:
            print("Automatch not enabled on %s" % setup.propertyfile)
        else:
            metadata = self.core.build_metadata(setup.hostname)
            print(lxml.etree.tostring(pfile.XMLMatch(metadata),
                                      xml_declaration=False,
                                      pretty_print=True).decode('UTF-8'))
+
+
class ExpireCache(InfoCmd):
    """ Expire the metadata cache """

    options = [
        Bcfg2.Options.PositionalArgument(
            "hostname", nargs="*", default=[],
            help="Expire cache for the given host(s)")]

    def run(self, setup):
        """Expire cached metadata for the named hosts, or for all hosts
        when none are given."""
        # bugfix: the positional argument above is stored as
        # ``setup.hostname``; the original read the nonexistent
        # ``setup.clients``
        if setup.hostname:
            for client in self.get_client_list(setup.hostname):
                self.core.expire_caches_by_type(Bcfg2.Server.Plugin.Metadata,
                                                key=client)
        else:
            self.core.expire_caches_by_type(Bcfg2.Server.Plugin.Metadata)
+
+
class Bundles(InfoCmd):
    """ Print out group/bundle info """

    options = [Bcfg2.Options.PositionalArgument("group", nargs='*')]

    def run(self, setup):
        """Tabulate each matching group with its comma-joined bundle
        list."""
        rows = [('Group', 'Bundles')]
        for group in sorted(self.get_group_list(setup.group)):
            bundles = self.core.metadata.groups[group][0]
            rows.append((group, ','.join(bundles)))
        print_tabular(rows)
+
+
class Clients(InfoCmd):
    """ Print out client/profile info """

    options = [Bcfg2.Options.PositionalArgument("hostname", nargs='*',
                                                default=[])]

    def run(self, setup):
        """Tabulate each matching client with its profile group."""
        rows = [('Client', 'Profile')]
        for client in sorted(self.get_client_list(setup.hostname)):
            initial = self.core.metadata.get_initial_metadata(client)
            rows.append((client, initial.profile))
        print_tabular(rows)
+
+
class Config(InfoCmd):
    """ Print out the current configuration of Bcfg2"""

    options = [
        Bcfg2.Options.BooleanOption(
            "--raw",
            help="Produce more accurate but less readable raw output")]

    def run(self, setup):
        # Walk every option registered with the global parser and print
        # the value it currently has in the parsed ``setup`` namespace.
        parser = Bcfg2.Options.get_parser()
        data = [('Description', 'Value')]
        for option in parser.option_list:
            if hasattr(setup, option.dest):
                value = getattr(setup, option.dest)
                # ComponentAction options hold plugin/component classes;
                # show just the class name(s) unless --raw was given.
                # NOTE(review): this checks ``option.actions`` (plural)
                # but then reads ``option.action`` (singular) below --
                # confirm both attributes exist on Bcfg2 Option objects.
                if any(issubclass(a.__class__,
                                  Bcfg2.Options.ComponentAction)
                       for a in option.actions.values()):
                    if not setup.raw:
                        try:
                            if option.action.islist:
                                value = [v.__name__ for v in value]
                            else:
                                value = value.__name__
                        except AttributeError:
                            # just use the value as-is
                            pass
                if setup.raw:
                    value = repr(value)
                data.append((getattr(option, "help", option.dest), value))
        print_tabular(data)
+
+
class Probes(InfoCmd):
    """ Get probes for the given host """

    options = [
        Bcfg2.Options.BooleanOption("-p", "--pretty",
                                    help="Human-readable output"),
        Bcfg2.Options.PositionalArgument("hostname")]

    def run(self, setup):
        """Collect probes from every Probing plugin for the host and
        print them, as XML by default or human-readable with -p."""
        if setup.pretty:
            probes = []
        else:
            probes = lxml.etree.Element('probes')
        metadata = self.core.build_metadata(setup.hostname)
        for plugin in self.core.plugins_by_type(Bcfg2.Server.Plugin.Probing):
            for probe in plugin.GetProbes(metadata):
                probes.append(probe)
        if not setup.pretty:
            print(lxml.etree.tostring(probes, xml_declaration=False,
                                      pretty_print=True).decode('UTF-8'))
            return
        for probe in probes:
            banner = "=" * (len(probe.get("name")) + 2)
            print(banner)
            print(" %s" % probe.get("name"))
            print(banner)
            print("")
            print(probe.text)
            print("")
+
+
class Showentries(InfoCmd):
    """ Show abstract configuration entries for a given host """

    options = [Bcfg2.Options.PositionalArgument("hostname"),
               Bcfg2.Options.PositionalArgument("type", nargs='?')]

    def run(self, setup):
        """Tabulate the abstract structure entries for a host,
        optionally restricted to one entry type (and its Bound variant).

        Returns 1 if metadata for the host cannot be built."""
        try:
            metadata = self.core.build_metadata(setup.hostname)
        except Bcfg2.Server.Plugin.MetadataConsistencyError:
            print("Unable to build metadata for %s: %s" % (setup.hostname,
                                                           sys.exc_info()[1]))
            # bugfix: bail out here -- ``metadata`` is unbound after the
            # exception, so falling through raised NameError
            return 1
        structures = self.core.GetStructures(metadata)
        output = [('Entry Type', 'Name')]
        etypes = None
        if setup.type:
            etypes = [setup.type, "Bound%s" % setup.type]
        for item in structures:
            output.extend((child.tag, child.get('name'))
                          for child in item.getchildren()
                          if not etypes or child.tag in etypes)
        print_tabular(output)
+
+
class Groups(InfoCmd):
    """ Print out group info """
    options = [Bcfg2.Options.PositionalArgument("group", nargs='*')]

    def _profile_flag(self, group):
        """ 'yes' if the group is a profile group, else 'no' """
        return 'yes' if self.core.metadata.groups[group].is_profile else 'no'

    def run(self, setup):
        """Tabulate each matching group with its profile flag and
        category."""
        rows = [("Groups", "Profile", "Category")]
        for group in sorted(self.get_group_list(setup.group)):
            rows.append((group,
                         self._profile_flag(group),
                         self.core.metadata.groups[group].category))
        print_tabular(rows)
+
+
class Showclient(InfoCmd):
    """ Show metadata for the given hosts """

    options = [Bcfg2.Options.PositionalArgument("hostname", nargs='*')]

    def run(self, setup):
        """Print hostname, profile, groups (with categories), bundles,
        and connector data for each matching client."""
        fmt = "%-10s %s"
        group_fmt = "%-10s %-30s %s"
        for client in self.get_client_list(setup.hostname):
            try:
                metadata = self.core.build_metadata(client)
            except Bcfg2.Server.Plugin.MetadataConsistencyError:
                print("Could not build metadata for %s: %s" %
                      (client, sys.exc_info()[1]))
                continue
            print(fmt % ("Hostname:", metadata.hostname))
            print(fmt % ("Profile:", metadata.profile))

            # one row per group; the "Groups:" label only on the first
            label = "Groups:"
            for group in sorted(list(metadata.groups)):
                category = ""
                for cat, grp in metadata.categories.items():
                    if grp == group:
                        category = "Category: %s" % cat
                        break
                print(group_fmt % (label, group, category))
                label = ""

            if metadata.bundles:
                label = "Bundles:"
                for bnd in sorted(list(metadata.bundles)):
                    print(fmt % (label, bnd))
                    label = ""
            if metadata.connectors:
                print("Connector data")
                print("=" * 80)
                for conn in metadata.connectors:
                    if getattr(metadata, conn):
                        print(fmt % (conn + ":", getattr(metadata, conn)))
                print("=" * 80)
+
+
class Mappings(InfoCmd):
    """ Print generator mappings for optional type and name """

    options = [Bcfg2.Options.PositionalArgument("type", nargs='?'),
               Bcfg2.Options.PositionalArgument("name", nargs='?')]

    def run(self, setup):
        """Tabulate which Generator plugins supply which entries,
        optionally restricted to one entry type and/or entry name."""
        data = [('Plugin', 'Type', 'Name')]
        for generator in self.core.plugins_by_type(
                Bcfg2.Server.Plugin.Generator):
            # bugfix: setup.type is a single string, so the original
            # ``etypes = setup.type or ...`` iterated over its
            # *characters*; wrap it in a list instead
            if setup.type:
                etypes = [setup.type]
            else:
                etypes = list(generator.Entries.keys())
            if setup.name:
                interested = [(etype, [setup.name]) for etype in etypes]
            else:
                interested = [(etype, generator.Entries[etype])
                              for etype in etypes
                              if etype in generator.Entries]
            for etype, names in interested:
                data.extend((generator.name, etype, name)
                            for name in names
                            if name in generator.Entries.get(etype, {}))
        print_tabular(data)
+
+
class PackageResolve(InfoCmd):
    """ Resolve packages for the given host"""

    options = [Bcfg2.Options.PositionalArgument("hostname"),
               Bcfg2.Options.PositionalArgument("package", nargs="*")]

    def run(self, setup):
        # Returns 1 if the Packages plugin is not enabled.
        try:
            pkgs = self.core.plugins['Packages']
        except KeyError:
            print("Packages plugin not enabled")
            return 1

        metadata = self.core.build_metadata(setup.hostname)

        # newly resolved packages are collected under this element
        indep = lxml.etree.Element("Independent",
                                   name=self.__class__.__name__.lower())
        if setup.package:
            # resolve only the explicitly requested packages
            structures = [lxml.etree.Element("Bundle", name="packages")]
            for package in setup.package:
                lxml.etree.SubElement(structures[0], "Package", name=package)
        else:
            # no packages named: resolve everything in the host's config
            structures = self.core.GetStructures(metadata)

        # reaches into the plugin's private resolver deliberately
        pkgs._build_packages(metadata, indep,  # pylint: disable=W0212
                             structures)
        print("%d new packages added" % len(indep.getchildren()))
        if len(indep.getchildren()):
            # NOTE(review): lxml.etree.tostring returns bytes on
            # Python 3, so joining with a str separator may raise
            # TypeError there -- confirm and decode if needed
            print("    %s" % "\n    ".join(lxml.etree.tostring(p)
                                           for p in indep.getchildren()))
+
+
class Packagesources(InfoCmd):
    """ Show package sources """

    options = [Bcfg2.Options.PositionalArgument("hostname")]

    def run(self, setup):
        """Print the Packages source list that applies to the host.

        Returns 1 if the Packages plugin is disabled or metadata cannot
        be built."""
        try:
            pkgs = self.core.plugins['Packages']
        except KeyError:
            print("Packages plugin not enabled")
            return 1
        try:
            metadata = self.core.build_metadata(setup.hostname)
        except Bcfg2.Server.Plugin.MetadataConsistencyError:
            print("Unable to build metadata for %s: %s" % (setup.hostname,
                                                           sys.exc_info()[1]))
            return 1
        collection = pkgs.get_collection(metadata)
        print(collection.sourcelist())
+
+
class Query(InfoCmd):
    """ Query clients """

    options = [
        Bcfg2.Options.ExclusiveOptionGroup(
            Bcfg2.Options.Option(
                "-g", "--group", metavar="<group>", dest="querygroups",
                type=Bcfg2.Options.Types.comma_list),
            Bcfg2.Options.Option(
                "-p", "--profile", metavar="<profile>", dest="queryprofiles",
                type=Bcfg2.Options.Types.comma_list),
            Bcfg2.Options.Option(
                "-b", "--bundle", metavar="<bundle>", dest="querybundles",
                type=Bcfg2.Options.Types.comma_list),
            required=True)]

    def run(self, setup):
        """Print clients matching the given profiles, groups, or
        bundles; exactly one selector is required by the option group."""
        metadata = self.core.metadata
        if setup.queryprofiles:
            matches = metadata.get_client_names_by_profiles(
                setup.queryprofiles)
        elif setup.querygroups:
            matches = metadata.get_client_names_by_groups(setup.querygroups)
        elif setup.querybundles:
            matches = metadata.get_client_names_by_bundles(setup.querybundles)
        print("\n".join(matches))
+
+
class Shell(InfoCmd):
    """ Open an interactive shell to run multiple bcfg2-info commands """
    interactive = False

    def run(self, setup):
        """Enter the bcfg2-info command loop until EOF or Ctrl-C."""
        banner = ('Welcome to bcfg2-info\n'
                  'Type "help" for more information')
        try:
            self.core.cmdloop(banner)
        except KeyboardInterrupt:
            print("\nCtrl-C pressed, exiting...")
+
+
+class ProfileTemplates(InfoCmd):
+ """ Benchmark template rendering times """
+
+ options = [
+ Bcfg2.Options.Option(
+ "--clients", type=Bcfg2.Options.Types.comma_list,
+ help="Benchmark templates for the named clients"),
+ Bcfg2.Options.Option(
+ "--runs", help="Number of rendering passes per template",
+ default=5, type=int),
+ Bcfg2.Options.PositionalArgument(
+ "templates", nargs="*", default=[],
+ help="Profile the named templates instead of all templates")]
+
+ def profile_entry(self, entry, metadata, runs=5):
+ """ Profile a single entry """
+ times = []
+ for i in range(runs): # pylint: disable=W0612
+ start = time.time()
+ try:
+ self.core.Bind(entry, metadata)
+ times.append(time.time() - start)
+ except: # pylint: disable=W0702
+ break
+ if times:
+ avg = sum(times) / len(times)
+ if avg:
+ self.logger.debug(" %s: %.02f sec" %
+ (metadata.hostname, avg))
+ return times
+
+ def profile_struct(self, struct, metadata, templates=None, runs=5):
+ """ Profile all entries in a given structure """
+ times = dict()
+ entries = struct.xpath("//Path")
+ entry_count = 0
+ for entry in entries:
+ entry_count += 1
+ if templates is None or entry.get("name") in templates:
+ self.logger.info("Rendering Path:%s (%s/%s)..." %
+ (entry.get("name"), entry_count,
+ len(entries)))
+ times.setdefault(entry.get("name"),
+ self.profile_entry(entry, metadata,
+ runs=runs))
+ return times
+
+ def profile_client(self, metadata, templates=None, runs=5):
+ """ Profile all structures for a given client """
+ structs = self.core.GetStructures(metadata)
+ struct_count = 0
+ times = dict()
+ for struct in structs:
+ struct_count += 1
+ self.logger.info("Rendering templates from structure %s:%s "
+ "(%s/%s)" %
+ (struct.tag, struct.get("name"), struct_count,
+ len(structs)))
+ times.update(self.profile_struct(struct, metadata,
+ templates=templates, runs=runs))
+ return times
+
+ def stdev(self, nums):
+ """ Calculate the standard deviation of a list of numbers """
+ mean = float(sum(nums)) / len(nums)
+ return math.sqrt(sum((n - mean) ** 2 for n in nums) / float(len(nums)))
+
+    def run(self, setup):
+        clients = self.get_client_list(setup.clients)
+
+        times = dict()
+        client_count = 0
+        for client in clients:
+            client_count += 1
+            self.logger.info("Rendering templates for client %s (%s/%s)" %
+                             (client, client_count, len(clients)))
+            times.update(self.profile_client(self.core.build_metadata(client),
+                                             templates=setup.templates,
+                                             runs=setup.runs))
+
+        # print out per-file results
+        tmpltimes = []
+        for tmpl, ptimes in times.items():
+            try:
+                mean = float(sum(ptimes)) / len(ptimes)
+            except ZeroDivisionError:
+                continue
+            ptimes.sort()
+            median = ptimes[len(ptimes) // 2]  # floor division: int index on py3
+            std = self.stdev(ptimes)
+            if mean > 0.01 or median > 0.01 or std > 1 or setup.templates:
+                tmpltimes.append((tmpl, mean, median, std))
+        print("%-50s %-9s %-11s %6s" %
+              ("Template", "Mean Time", "Median Time", "σ"))
+        for info in reversed(sorted(tmpltimes, key=operator.itemgetter(1))):
+            print("%-50s %9.02f %11.02f %6.02f" % info)
+
+
+if HAS_PROFILE:
+ class Profile(InfoCmd):
+ """ Profile a single bcfg2-info command """
+
+ options = [Bcfg2.Options.PositionalArgument("command"),
+ Bcfg2.Options.PositionalArgument("args", nargs="*")]
+
+ def run(self, setup):
+ prof = profile.Profile()
+ cls = self.core.commands[setup.command]
+ prof.runcall(cls, " ".join(pipes.quote(a) for a in setup.args))
+ display_trace(prof)
+
+
+class InfoCore(cmd.Cmd, Bcfg2.Server.Core.Core):
+ """Main class for bcfg2-info."""
+
+ def __init__(self):
+ cmd.Cmd.__init__(self)
+ Bcfg2.Server.Core.Core.__init__(self)
+ self.prompt = 'bcfg2-info> '
+
+ def get_locals(self):
+ """ Expose the local variables of the core to subcommands that
+ need to reference them (i.e., the interactive interpreter) """
+ return locals()
+
+ def do_quit(self, _):
+ """ quit|exit - Exit program """
+ raise SystemExit(0)
+
+ do_EOF = do_quit
+ do_exit = do_quit
+
+ def do_eventdebug(self, _):
+ """ eventdebug - Enable debugging output for FAM events """
+ self.fam.set_debug(True)
+
+ do_event_debug = do_eventdebug
+
+ def do_update(self, _):
+ """ update - Process pending filesystem events """
+ self.fam.handle_events_in_interval(0.1)
+
+ def run(self):
+ self.load_plugins()
+ self.block_for_fam_events(handle_events=True)
+
+ def _run(self):
+ pass
+
+ def _block(self):
+ pass
+
+ def shutdown(self):
+ Bcfg2.Server.Core.Core.shutdown(self)
+
+
+class CLI(Bcfg2.Options.CommandRegistry):
+ """ The bcfg2-info CLI """
+ options = [Bcfg2.Options.BooleanOption("-p", "--profile", help="Profile")]
+
+ def __init__(self):
+ Bcfg2.Options.CommandRegistry.__init__(self)
+ self.register_commands(globals().values(), parent=InfoCmd)
+ parser = Bcfg2.Options.get_parser(
+ description="Inspect a running Bcfg2 server",
+ components=[self, InfoCore])
+ parser.add_options(self.subcommand_options)
+ parser.parse()
+
+ if Bcfg2.Options.setup.profile and HAS_PROFILE:
+ prof = profile.Profile()
+ self.core = prof.runcall(InfoCore)
+ display_trace(prof)
+ else:
+ if Bcfg2.Options.setup.profile:
+ print("Profiling functionality not available.")
+ self.core = InfoCore()
+
+ for command in self.commands.values():
+ command.core = self.core
+
+ def run(self):
+ """ Run bcfg2-info """
+ try:
+ if Bcfg2.Options.setup.subcommand != 'help':
+ self.core.run()
+ return self.runcommand()
+ finally:
+ self.shutdown()
+
+ def shutdown(self):
+ Bcfg2.Options.CommandRegistry.shutdown(self)
+ self.core.shutdown()
diff --git a/src/lib/Bcfg2/Server/Lint/AWSTags.py b/src/lib/Bcfg2/Server/Lint/AWSTags.py
new file mode 100644
index 000000000..c6d7a3a30
--- /dev/null
+++ b/src/lib/Bcfg2/Server/Lint/AWSTags.py
@@ -0,0 +1,33 @@
+""" ``bcfg2-lint`` plugin to check all given :ref:`AWSTags
+<server-plugins-connectors-awstags>` patterns for validity."""
+
+import re
+import sys
+import Bcfg2.Server.Lint
+
+
+class AWSTags(Bcfg2.Server.Lint.ServerPlugin):
+ """ ``bcfg2-lint`` plugin to check all given :ref:`AWSTags
+ <server-plugins-connectors-awstags>` patterns for validity. """
+ __serverplugin__ = 'AWSTags'
+
+ def Run(self):
+ cfg = self.core.plugins['AWSTags'].config
+ for entry in cfg.xdata.xpath('//Tag'):
+ self.check(entry, "name")
+ if entry.get("value"):
+ self.check(entry, "value")
+
+ @classmethod
+ def Errors(cls):
+ return {"pattern-fails-to-initialize": "error"}
+
+ def check(self, entry, attr):
+ """ Check a single attribute (``name`` or ``value``) of a
+ single entry for validity. """
+ try:
+ re.compile(entry.get(attr))
+ except re.error:
+ self.LintError("pattern-fails-to-initialize",
+ "'%s' regex could not be compiled: %s\n %s" %
+ (attr, sys.exc_info()[1], entry.get("name")))
diff --git a/src/lib/Bcfg2/Server/Lint/Bundler.py b/src/lib/Bcfg2/Server/Lint/Bundler.py
new file mode 100644
index 000000000..576e157ad
--- /dev/null
+++ b/src/lib/Bcfg2/Server/Lint/Bundler.py
@@ -0,0 +1,59 @@
+""" ``bcfg2-lint`` plugin for :ref:`Bundler
+<server-plugins-structures-bundler>` """
+
+from Bcfg2.Server.Lint import ServerPlugin
+
+
+class Bundler(ServerPlugin):
+ """ Perform various :ref:`Bundler
+ <server-plugins-structures-bundler>` checks. """
+ __serverplugin__ = 'Bundler'
+
+ def Run(self):
+ self.missing_bundles()
+ for bundle in self.core.plugins['Bundler'].entries.values():
+ if self.HandlesFile(bundle.name):
+ self.bundle_names(bundle)
+
+ @classmethod
+ def Errors(cls):
+ return {"bundle-not-found": "error",
+ "unused-bundle": "warning",
+ "explicit-bundle-name": "error",
+ "genshi-extension-bundle": "error"}
+
+ def missing_bundles(self):
+ """ Find bundles listed in Metadata but not implemented in
+ Bundler. """
+ if self.files is None:
+ # when given a list of files on stdin, this check is
+ # useless, so skip it
+ groupdata = self.metadata.groups_xml.xdata
+ ref_bundles = set([b.get("name")
+ for b in groupdata.findall("//Bundle")])
+
+ allbundles = self.core.plugins['Bundler'].bundles.keys()
+ for bundle in ref_bundles:
+ if bundle not in allbundles:
+ self.LintError("bundle-not-found",
+ "Bundle %s referenced, but does not exist" %
+ bundle)
+
+ for bundle in allbundles:
+ if bundle not in ref_bundles:
+ self.LintError("unused-bundle",
+ "Bundle %s defined, but is not referenced "
+ "in Metadata" % bundle)
+
+ def bundle_names(self, bundle):
+        """ Verify that deprecated .genshi bundles and explicit
+        bundle names aren't used """
+ if bundle.xdata.get('name'):
+ self.LintError("explicit-bundle-name",
+ "Deprecated explicit bundle name in %s" %
+ bundle.name)
+
+ if bundle.name.endswith(".genshi"):
+ self.LintError("genshi-extension-bundle",
+ "Bundle %s uses deprecated .genshi extension" %
+ bundle.name)
diff --git a/src/lib/Bcfg2/Server/Lint/Cfg.py b/src/lib/Bcfg2/Server/Lint/Cfg.py
new file mode 100644
index 000000000..13b04a6b8
--- /dev/null
+++ b/src/lib/Bcfg2/Server/Lint/Cfg.py
@@ -0,0 +1,118 @@
+""" ``bcfg2-lint`` plugin for :ref:`Cfg
+<server-plugins-generators-cfg>` """
+
+import os
+import Bcfg2.Options
+from fnmatch import fnmatch
+from Bcfg2.Server.Lint import ServerPlugin
+from Bcfg2.Server.Plugins.Cfg import CfgGenerator
+
+
+class Cfg(ServerPlugin):
+ """ warn about Cfg issues """
+ __serverplugin__ = 'Cfg'
+
+ def Run(self):
+ for basename, entry in list(self.core.plugins['Cfg'].entries.items()):
+ self.check_pubkey(basename, entry)
+ self.check_missing_files()
+ self.check_conflicting_handlers()
+
+ @classmethod
+ def Errors(cls):
+ return {"no-pubkey-xml": "warning",
+ "unknown-cfg-files": "error",
+ "extra-cfg-files": "error",
+ "multiple-global-handlers": "error"}
+
+ def check_conflicting_handlers(self):
+ """ Check that a single entryset doesn't have multiple
+ non-specific (i.e., 'all') handlers. """
+ cfg = self.core.plugins['Cfg']
+ for eset in cfg.entries.values():
+ alls = [e for e in eset.entries.values()
+ if (e.specific.all and
+ issubclass(e.__class__, CfgGenerator))]
+ if len(alls) > 1:
+ self.LintError("multiple-global-handlers",
+ "%s has multiple global handlers: %s" %
+ (eset.path, ", ".join(os.path.basename(e.name)
+ for e in alls)))
+
+ def check_pubkey(self, basename, entry):
+ """ check that privkey.xml files have corresponding pubkey.xml
+ files """
+ if "privkey.xml" not in entry.entries:
+ return
+ privkey = entry.entries["privkey.xml"]
+ if not self.HandlesFile(privkey.name):
+ return
+
+ pubkey = basename + ".pub"
+ if pubkey not in self.core.plugins['Cfg'].entries:
+ self.LintError("no-pubkey-xml",
+ "%s has no corresponding pubkey.xml at %s" %
+ (basename, pubkey))
+ else:
+ pubset = self.core.plugins['Cfg'].entries[pubkey]
+ if "pubkey.xml" not in pubset.entries:
+ self.LintError("no-pubkey-xml",
+ "%s has no corresponding pubkey.xml at %s" %
+ (basename, pubkey))
+
+ def _list_path_components(self, path):
+ """ Get a list of all components of a path. E.g.,
+ ``self._list_path_components("/foo/bar/foobaz")`` would return
+        ``["foobaz", "bar", "foo"]``. The list is not guaranteed
+ to be in order."""
+ rv = []
+ remaining, component = os.path.split(path)
+ while component != '':
+ rv.append(component)
+ remaining, component = os.path.split(remaining)
+ return rv
+
+ def check_missing_files(self):
+ """ check that all files on the filesystem are known to Cfg """
+ cfg = self.core.plugins['Cfg']
+
+ # first, collect ignore patterns from handlers
+ ignore = set()
+ for hdlr in Bcfg2.Options.setup.cfg_handlers:
+ ignore.update(hdlr.__ignore__)
+
+ # next, get a list of all non-ignored files on the filesystem
+ all_files = set()
+ for root, _, files in os.walk(cfg.data):
+ for fname in files:
+ fpath = os.path.join(root, fname)
+ # check against the handler ignore patterns and the
+ # global FAM ignore list
+ if (not any(fname.endswith("." + i) for i in ignore) and
+ not any(fnmatch(fpath, p)
+ for p in Bcfg2.Options.setup.ignore_files) and
+ not any(fnmatch(c, p)
+ for p in Bcfg2.Options.setup.ignore_files
+ for c in self._list_path_components(fpath))):
+ all_files.add(fpath)
+
+ # next, get a list of all files known to Cfg
+ cfg_files = set()
+ for root, eset in cfg.entries.items():
+ cfg_files.update(os.path.join(cfg.data, root.lstrip("/"), fname)
+ for fname in eset.entries.keys())
+
+ # finally, compare the two
+ unknown_files = all_files - cfg_files
+ extra_files = cfg_files - all_files
+ if unknown_files:
+ self.LintError(
+ "unknown-cfg-files",
+ "Files on the filesystem could not be understood by Cfg: %s" %
+ "; ".join(unknown_files))
+ if extra_files:
+ self.LintError(
+ "extra-cfg-files",
+ "Cfg has entries for files that do not exist on the "
+ "filesystem: %s\nThis is probably a bug." %
+ "; ".join(extra_files))
diff --git a/src/lib/Bcfg2/Server/Lint/Comments.py b/src/lib/Bcfg2/Server/Lint/Comments.py
index f028e225e..fbe84de87 100644
--- a/src/lib/Bcfg2/Server/Lint/Comments.py
+++ b/src/lib/Bcfg2/Server/Lint/Comments.py
@@ -2,12 +2,14 @@
import os
import lxml.etree
+import Bcfg2.Options
import Bcfg2.Server.Lint
from Bcfg2.Server import XI_NAMESPACE
from Bcfg2.Server.Plugins.Cfg.CfgPlaintextGenerator \
import CfgPlaintextGenerator
from Bcfg2.Server.Plugins.Cfg.CfgGenshiGenerator import CfgGenshiGenerator
from Bcfg2.Server.Plugins.Cfg.CfgCheetahGenerator import CfgCheetahGenerator
+from Bcfg2.Server.Plugins.Cfg.CfgJinja2Generator import CfgJinja2Generator
from Bcfg2.Server.Plugins.Cfg.CfgInfoXML import CfgInfoXML
@@ -16,6 +18,97 @@ class Comments(Bcfg2.Server.Lint.ServerPlugin):
give information about the files. For instance, you can require
SVN keywords in a comment, or require the name of the maintainer
of a Genshi template, and so on. """
+
+ options = Bcfg2.Server.Lint.ServerPlugin.options + [
+ Bcfg2.Options.Option(
+ cf=("Comments", "global_keywords"),
+ type=Bcfg2.Options.Types.comma_list, default=[],
+ help="Required keywords for all file types"),
+ Bcfg2.Options.Option(
+ cf=("Comments", "global_comments"),
+ type=Bcfg2.Options.Types.comma_list, default=[],
+ help="Required comments for all file types"),
+ Bcfg2.Options.Option(
+ cf=("Comments", "bundler_keywords"),
+ type=Bcfg2.Options.Types.comma_list, default=[],
+ help="Required keywords for non-templated bundles"),
+ Bcfg2.Options.Option(
+ cf=("Comments", "bundler_comments"),
+ type=Bcfg2.Options.Types.comma_list, default=[],
+ help="Required comments for non-templated bundles"),
+ Bcfg2.Options.Option(
+ cf=("Comments", "genshibundler_keywords"),
+ type=Bcfg2.Options.Types.comma_list, default=[],
+ help="Required keywords for templated bundles"),
+ Bcfg2.Options.Option(
+ cf=("Comments", "genshibundler_comments"),
+ type=Bcfg2.Options.Types.comma_list, default=[],
+ help="Required comments for templated bundles"),
+ Bcfg2.Options.Option(
+ cf=("Comments", "properties_keywords"),
+ type=Bcfg2.Options.Types.comma_list, default=[],
+ help="Required keywords for Properties files"),
+ Bcfg2.Options.Option(
+ cf=("Comments", "properties_comments"),
+ type=Bcfg2.Options.Types.comma_list, default=[],
+ help="Required comments for Properties files"),
+ Bcfg2.Options.Option(
+ cf=("Comments", "cfg_keywords"),
+ type=Bcfg2.Options.Types.comma_list, default=[],
+ help="Required keywords for non-templated Cfg files"),
+ Bcfg2.Options.Option(
+ cf=("Comments", "cfg_comments"),
+ type=Bcfg2.Options.Types.comma_list, default=[],
+ help="Required comments for non-templated Cfg files"),
+ Bcfg2.Options.Option(
+ cf=("Comments", "genshi_keywords"),
+ type=Bcfg2.Options.Types.comma_list, default=[],
+ help="Required keywords for Genshi-templated Cfg files"),
+ Bcfg2.Options.Option(
+ cf=("Comments", "genshi_comments"),
+ type=Bcfg2.Options.Types.comma_list, default=[],
+ help="Required comments for Genshi-templated Cfg files"),
+ Bcfg2.Options.Option(
+ cf=("Comments", "cheetah_keywords"),
+ type=Bcfg2.Options.Types.comma_list, default=[],
+ help="Required keywords for Cheetah-templated Cfg files"),
+ Bcfg2.Options.Option(
+ cf=("Comments", "cheetah_comments"),
+ type=Bcfg2.Options.Types.comma_list, default=[],
+ help="Required comments for Cheetah-templated Cfg files"),
+ Bcfg2.Options.Option(
+ cf=("Comments", "jinja2_keywords"),
+ type=Bcfg2.Options.Types.comma_list, default=[],
+ help="Required keywords for Jinja2-templated Cfg files"),
+ Bcfg2.Options.Option(
+ cf=("Comments", "jinja2_comments"),
+ type=Bcfg2.Options.Types.comma_list, default=[],
+ help="Required comments for Jinja2-templated Cfg files"),
+ Bcfg2.Options.Option(
+ cf=("Comments", "infoxml_keywords"),
+ type=Bcfg2.Options.Types.comma_list, default=[],
+ help="Required keywords for info.xml files"),
+ Bcfg2.Options.Option(
+ cf=("Comments", "infoxml_comments"),
+ type=Bcfg2.Options.Types.comma_list, default=[],
+ help="Required comments for info.xml files"),
+ Bcfg2.Options.Option(
+ cf=("Comments", "probes_keywords"),
+ type=Bcfg2.Options.Types.comma_list, default=[],
+ help="Required keywords for probes"),
+ Bcfg2.Options.Option(
+ cf=("Comments", "probes_comments"),
+ type=Bcfg2.Options.Types.comma_list, default=[],
+ help="Required comments for probes"),
+ Bcfg2.Options.Option(
+ cf=("Comments", "metadata_keywords"),
+ type=Bcfg2.Options.Types.comma_list, default=[],
+ help="Required keywords for metadata files"),
+ Bcfg2.Options.Option(
+ cf=("Comments", "metadata_comments"),
+ type=Bcfg2.Options.Types.comma_list, default=[],
+ help="Required comments for metadata files")]
+
def __init__(self, *args, **kwargs):
Bcfg2.Server.Lint.ServerPlugin.__init__(self, *args, **kwargs)
self.config_cache = {}
@@ -73,17 +166,14 @@ class Comments(Bcfg2.Server.Lint.ServerPlugin):
if rtype not in self.config_cache[itype]:
rv = []
- global_item = "global_%ss" % itype
- if global_item in self.config:
- rv.extend(self.config[global_item].split(","))
-
- item = "%s_%ss" % (rtype.lower(), itype)
- if item in self.config:
- if self.config[item]:
- rv.extend(self.config[item].split(","))
- else:
- # config explicitly specifies nothing
- rv = []
+ rv.extend(getattr(Bcfg2.Options.setup, "global_%ss" % itype))
+ local_reqs = getattr(Bcfg2.Options.setup,
+ "%s_%ss" % (rtype.lower(), itype))
+ if local_reqs == ['']:
+ # explicitly specified as empty
+ rv = []
+ else:
+ rv.extend(local_reqs)
self.config_cache[itype][rtype] = rv
return self.config_cache[itype][rtype]
@@ -162,9 +252,11 @@ class Comments(Bcfg2.Server.Lint.ServerPlugin):
rtype = "cfg"
elif isinstance(entry, CfgCheetahGenerator):
rtype = "cheetah"
+ elif isinstance(entry, CfgJinja2Generator):
+ rtype = "jinja2"
elif isinstance(entry, CfgInfoXML):
self.check_xml(entry.infoxml.name,
- entry.infoxml.pnode.data,
+ entry.infoxml.xdata,
"infoxml")
continue
if rtype:
diff --git a/src/lib/Bcfg2/Server/Lint/Crypto.py b/src/lib/Bcfg2/Server/Lint/Crypto.py
new file mode 100644
index 000000000..53a54031c
--- /dev/null
+++ b/src/lib/Bcfg2/Server/Lint/Crypto.py
@@ -0,0 +1,61 @@
+""" Check for data that claims to be encrypted, but is not. """
+
+import os
+import lxml.etree
+import Bcfg2.Options
+from Bcfg2.Server.Lint import ServerlessPlugin
+from Bcfg2.Server.Encryption import is_encrypted
+
+
+class Crypto(ServerlessPlugin):
+    """ Check for data that claims to be encrypted, but is not. """
+
+ def Run(self):
+ if os.path.exists(os.path.join(Bcfg2.Options.setup.repository, "Cfg")):
+ self.check_cfg()
+ if os.path.exists(os.path.join(Bcfg2.Options.setup.repository,
+ "Properties")):
+ self.check_properties()
+ # TODO: check all XML files
+
+ @classmethod
+ def Errors(cls):
+ return {"unencrypted-cfg": "error",
+ "empty-encrypted-properties": "error",
+ "unencrypted-properties": "error"}
+
+ def check_cfg(self):
+ """ Check for Cfg files that end in .crypt but aren't encrypted """
+ for root, _, files in os.walk(
+ os.path.join(Bcfg2.Options.setup.repository, "Cfg")):
+ for fname in files:
+ fpath = os.path.join(root, fname)
+ if self.HandlesFile(fpath) and fname.endswith(".crypt"):
+ if not is_encrypted(open(fpath).read()):
+ self.LintError(
+ "unencrypted-cfg",
+ "%s is a .crypt file, but it is not encrypted" %
+ fpath)
+
+ def check_properties(self):
+ """ Check for Properties data that has an ``encrypted`` attribute but
+ aren't encrypted """
+ for root, _, files in os.walk(
+ os.path.join(Bcfg2.Options.setup.repository, "Properties")):
+ for fname in files:
+ fpath = os.path.join(root, fname)
+ if self.HandlesFile(fpath) and fname.endswith(".xml"):
+ xdata = lxml.etree.parse(fpath)
+ for elt in xdata.xpath('//*[@encrypted]'):
+ if not elt.text:
+ self.LintError(
+ "empty-encrypted-properties",
+ "Element in %s has an 'encrypted' attribute, "
+ "but no text content: %s" %
+ (fpath, self.RenderXML(elt)))
+ elif not is_encrypted(elt.text):
+ self.LintError(
+ "unencrypted-properties",
+ "Element in %s has an 'encrypted' attribute, "
+ "but is not encrypted: %s" %
+ (fpath, self.RenderXML(elt)))
diff --git a/src/lib/Bcfg2/Server/Lint/Genshi.py b/src/lib/Bcfg2/Server/Lint/Genshi.py
index 1ecb6da42..a2581e70b 100755..100644
--- a/src/lib/Bcfg2/Server/Lint/Genshi.py
+++ b/src/lib/Bcfg2/Server/Lint/Genshi.py
@@ -4,7 +4,6 @@ import sys
import Bcfg2.Server.Lint
from genshi.template import TemplateLoader, NewTextTemplate, MarkupTemplate, \
TemplateSyntaxError
-from Bcfg2.Server.Plugins.Bundler import BundleTemplateFile
from Bcfg2.Server.Plugins.Cfg.CfgGenshiGenerator import CfgGenshiGenerator
@@ -14,60 +13,41 @@ class Genshi(Bcfg2.Server.Lint.ServerPlugin):
def Run(self):
if 'Cfg' in self.core.plugins:
self.check_cfg()
- if 'TGenshi' in self.core.plugins:
- self.check_tgenshi()
if 'Bundler' in self.core.plugins:
self.check_bundler()
@classmethod
def Errors(cls):
- return {"genshi-syntax-error": "error"}
+ return {"genshi-syntax-error": "error",
+ "unknown-genshi-error": "error"}
+
+ def check_template(self, loader, fname, cls=None):
+ """ Generic check for all genshi templates (XML and text) """
+ try:
+ loader.load(fname, cls=cls)
+ except TemplateSyntaxError:
+ err = sys.exc_info()[1]
+ self.LintError("genshi-syntax-error",
+ "Genshi syntax error in %s: %s" % (fname, err))
+ except:
+ err = sys.exc_info()[1]
+ self.LintError("unknown-genshi-error",
+ "Unknown Genshi error in %s: %s" % (fname, err))
def check_cfg(self):
""" Check genshi templates in Cfg for syntax errors. """
for entryset in self.core.plugins['Cfg'].entries.values():
for entry in entryset.entries.values():
if (self.HandlesFile(entry.name) and
- isinstance(entry, CfgGenshiGenerator) and
- not entry.template):
- try:
- entry.loader.load(entry.name,
- cls=NewTextTemplate)
- except TemplateSyntaxError:
- err = sys.exc_info()[1]
- self.LintError("genshi-syntax-error",
- "Genshi syntax error: %s" % err)
- except:
- etype, err = sys.exc_info()[:2]
- self.LintError(
- "genshi-syntax-error",
- "Unexpected Genshi error on %s: %s: %s" %
- (entry.name, etype.__name__, err))
-
- def check_tgenshi(self):
- """ Check templates in TGenshi for syntax errors. """
- loader = TemplateLoader()
-
- for eset in self.core.plugins['TGenshi'].entries.values():
- for fname, sdata in list(eset.entries.items()):
- if self.HandlesFile(fname):
- try:
- loader.load(sdata.name, cls=NewTextTemplate)
- except TemplateSyntaxError:
- err = sys.exc_info()[1]
- self.LintError("genshi-syntax-error",
- "Genshi syntax error: %s" % err)
+ isinstance(entry, CfgGenshiGenerator) and
+ not entry.template):
+ self.check_template(entry.loader, entry.name,
+ cls=NewTextTemplate)
def check_bundler(self):
""" Check templates in Bundler for syntax errors. """
loader = TemplateLoader()
-
for entry in self.core.plugins['Bundler'].entries.values():
if (self.HandlesFile(entry.name) and
- isinstance(entry, BundleTemplateFile)):
- try:
- loader.load(entry.name, cls=MarkupTemplate)
- except TemplateSyntaxError:
- err = sys.exc_info()[1]
- self.LintError("genshi-syntax-error",
- "Genshi syntax error: %s" % err)
+ entry.template is not None):
+ self.check_template(loader, entry.name, cls=MarkupTemplate)
diff --git a/src/lib/Bcfg2/Server/Lint/GroupNames.py b/src/lib/Bcfg2/Server/Lint/GroupNames.py
index b180083d5..e28080300 100644
--- a/src/lib/Bcfg2/Server/Lint/GroupNames.py
+++ b/src/lib/Bcfg2/Server/Lint/GroupNames.py
@@ -3,11 +3,6 @@
import os
import re
import Bcfg2.Server.Lint
-try:
- from Bcfg2.Server.Plugins.Bundler import BundleTemplateFile
- HAS_GENSHI = True
-except ImportError:
- HAS_GENSHI = False
class GroupNames(Bcfg2.Server.Lint.ServerPlugin):
@@ -44,14 +39,13 @@ class GroupNames(Bcfg2.Server.Lint.ServerPlugin):
continue
xdata = rules.pnode.data
self.check_entries(xdata.xpath("//Group"),
- os.path.join(self.config['repo'], rules.name))
+ os.path.join(Bcfg2.Options.setup.repository,
+ rules.name))
def check_bundles(self):
""" Check groups used in the Bundler plugin for validity. """
for bundle in self.core.plugins['Bundler'].entries.values():
- if (self.HandlesFile(bundle.name) and
- (not HAS_GENSHI or
- not isinstance(bundle, BundleTemplateFile))):
+ if self.HandlesFile(bundle.name) and bundle.template is None:
self.check_entries(bundle.xdata.xpath("//Group"),
bundle.name)
@@ -59,7 +53,7 @@ class GroupNames(Bcfg2.Server.Lint.ServerPlugin):
""" Check groups used or declared in the Metadata plugin for
validity. """
self.check_entries(self.metadata.groups_xml.xdata.xpath("//Group"),
- os.path.join(self.config['repo'],
+ os.path.join(Bcfg2.Options.setup.repository,
self.metadata.groups_xml.name))
def check_grouppatterns(self):
diff --git a/src/lib/Bcfg2/Server/Lint/GroupPatterns.py b/src/lib/Bcfg2/Server/Lint/GroupPatterns.py
new file mode 100644
index 000000000..e9813b7e9
--- /dev/null
+++ b/src/lib/Bcfg2/Server/Lint/GroupPatterns.py
@@ -0,0 +1,44 @@
+""" ``bcfg2-lint`` plugin for :ref:`GroupPatterns
+<server-plugins-grouping-grouppatterns>` """
+
+import sys
+
+from Bcfg2.Server.Lint import ServerPlugin
+from Bcfg2.Server.Plugins.GroupPatterns import PatternMap, \
+ PatternInitializationError
+
+
+class GroupPatterns(ServerPlugin):
+ """ ``bcfg2-lint`` plugin to check all given :ref:`GroupPatterns
+ <server-plugins-grouping-grouppatterns>` patterns for validity.
+ This is simply done by trying to create a
+ :class:`Bcfg2.Server.Plugins.GroupPatterns.PatternMap` object for
+ each pattern, and catching exceptions and presenting them as
+ ``bcfg2-lint`` errors."""
+ __serverplugin__ = 'GroupPatterns'
+
+ def Run(self):
+ cfg = self.core.plugins['GroupPatterns'].config
+ for entry in cfg.xdata.xpath('//GroupPattern'):
+ groups = [g.text for g in entry.findall('Group')]
+ self.check(entry, groups, ptype='NamePattern')
+ self.check(entry, groups, ptype='NameRange')
+
+ @classmethod
+ def Errors(cls):
+ return {"pattern-fails-to-initialize": "error"}
+
+ def check(self, entry, groups, ptype="NamePattern"):
+ """ Check a single pattern for validity """
+ for el in entry.findall(ptype):
+ pat = el.text
+ try:
+ if ptype == "NamePattern":
+ PatternMap(pat, None, groups)
+ else:
+ PatternMap(None, pat, groups)
+ except PatternInitializationError:
+ err = sys.exc_info()[1]
+ self.LintError("pattern-fails-to-initialize",
+ "Failed to initialize %s %s for %s: %s" %
+ (ptype, pat, entry.get('pattern'), err))
diff --git a/src/lib/Bcfg2/Server/Lint/InfoXML.py b/src/lib/Bcfg2/Server/Lint/InfoXML.py
index 95657317e..950a86f01 100644
--- a/src/lib/Bcfg2/Server/Lint/InfoXML.py
+++ b/src/lib/Bcfg2/Server/Lint/InfoXML.py
@@ -4,7 +4,6 @@ import os
import Bcfg2.Options
import Bcfg2.Server.Lint
from Bcfg2.Server.Plugins.Cfg.CfgInfoXML import CfgInfoXML
-from Bcfg2.Server.Plugins.Cfg.CfgLegacyInfo import CfgLegacyInfo
class InfoXML(Bcfg2.Server.Lint.ServerPlugin):
@@ -16,6 +15,16 @@ class InfoXML(Bcfg2.Server.Lint.ServerPlugin):
* Paranoid mode disabled in an ``info.xml`` file;
* Required attributes missing from ``info.xml``
"""
+ __serverplugin__ = 'Cfg'
+
+ options = Bcfg2.Server.Lint.ServerPlugin.options + [
+ Bcfg2.Options.Common.default_paranoid,
+ Bcfg2.Options.Option(
+ cf=("InfoXML", "required_attrs"),
+ type=Bcfg2.Options.Types.comma_list,
+ default=["owner", "group", "mode"],
+ help="Attributes to require on <Info> tags")]
+
def Run(self):
if 'Cfg' not in self.core.plugins:
return
@@ -27,25 +36,15 @@ class InfoXML(Bcfg2.Server.Lint.ServerPlugin):
for entry in entryset.entries.values():
if isinstance(entry, CfgInfoXML):
self.check_infoxml(infoxml_fname,
- entry.infoxml.pnode.data)
+ entry.infoxml.xdata)
found = True
if not found:
self.LintError("no-infoxml",
"No info.xml found for %s" % filename)
- for entry in entryset.entries.values():
- if isinstance(entry, CfgLegacyInfo):
- if not self.HandlesFile(entry.path):
- continue
- self.LintError("deprecated-info-file",
- "Deprecated %s file found at %s" %
- (os.path.basename(entry.name),
- entry.path))
-
@classmethod
def Errors(cls):
return {"no-infoxml": "warning",
- "deprecated-info-file": "warning",
"paranoid-false": "warning",
"required-infoxml-attrs-missing": "error"}
@@ -53,8 +52,7 @@ class InfoXML(Bcfg2.Server.Lint.ServerPlugin):
""" Verify that info.xml contains everything it should. """
for info in xdata.getroottree().findall("//Info"):
required = []
- if "required_attrs" in self.config:
- required = self.config["required_attrs"].split(",")
+ required = Bcfg2.Options.setup.required_attrs
missing = [attr for attr in required if info.get(attr) is None]
if missing:
@@ -63,10 +61,10 @@ class InfoXML(Bcfg2.Server.Lint.ServerPlugin):
(",".join(missing), fname,
self.RenderXML(info)))
- if ((Bcfg2.Options.MDATA_PARANOID.value and
+ if ((Bcfg2.Options.setup.default_paranoid == "true" and
info.get("paranoid") is not None and
info.get("paranoid").lower() == "false") or
- (not Bcfg2.Options.MDATA_PARANOID.value and
+ (Bcfg2.Options.setup.default_paranoid == "false" and
(info.get("paranoid") is None or
info.get("paranoid").lower() != "true"))):
self.LintError("paranoid-false",
diff --git a/src/lib/Bcfg2/Server/Lint/Jinja2.py b/src/lib/Bcfg2/Server/Lint/Jinja2.py
new file mode 100644
index 000000000..333249cc2
--- /dev/null
+++ b/src/lib/Bcfg2/Server/Lint/Jinja2.py
@@ -0,0 +1,41 @@
+""" Check Jinja2 templates for syntax errors. """
+
+import sys
+import Bcfg2.Server.Lint
+from jinja2 import Template, TemplateSyntaxError
+from Bcfg2.Server.Plugins.Cfg.CfgJinja2Generator import CfgJinja2Generator
+
+
+class Jinja2(Bcfg2.Server.Lint.ServerPlugin):
+ """ Check Jinja2 templates for syntax errors. """
+
+ def Run(self):
+ if 'Cfg' in self.core.plugins:
+ self.check_cfg()
+
+ @classmethod
+ def Errors(cls):
+ return {"jinja2-syntax-error": "error",
+ "unknown-jinja2-error": "error"}
+
+ def check_template(self, entry):
+ """ Generic check for all jinja2 templates """
+ try:
+ Template(entry.data.decode(entry.encoding))
+ except TemplateSyntaxError:
+ err = sys.exc_info()[1]
+ self.LintError("jinja2-syntax-error",
+ "Jinja2 syntax error in %s: %s" % (entry.name, err))
+ except:
+ err = sys.exc_info()[1]
+ self.LintError("unknown-jinja2-error",
+ "Unknown Jinja2 error in %s: %s" % (entry.name,
+ err))
+
+ def check_cfg(self):
+ """ Check jinja2 templates in Cfg for syntax errors. """
+ for entryset in self.core.plugins['Cfg'].entries.values():
+ for entry in entryset.entries.values():
+ if (self.HandlesFile(entry.name) and
+ isinstance(entry, CfgJinja2Generator)):
+ self.check_template(entry)
diff --git a/src/lib/Bcfg2/Server/Lint/MergeFiles.py b/src/lib/Bcfg2/Server/Lint/MergeFiles.py
index 2419c3d43..8e6a926ae 100644
--- a/src/lib/Bcfg2/Server/Lint/MergeFiles.py
+++ b/src/lib/Bcfg2/Server/Lint/MergeFiles.py
@@ -8,9 +8,24 @@ import Bcfg2.Server.Lint
from Bcfg2.Server.Plugins.Cfg import CfgGenerator
+def threshold(val):
+ """ Option type processor to accept either a percentage (e.g.,
+ "threshold=75") or a ratio (e.g., "threshold=.75") """
+ rv = float(val)
+ if rv > 1:
+ rv /= 100
+ return rv
+
+
class MergeFiles(Bcfg2.Server.Lint.ServerPlugin):
""" find Probes or Cfg files with multiple similar files that
might be merged into one """
+
+ options = Bcfg2.Server.Lint.ServerPlugin.options + [
+ Bcfg2.Options.Option(
+ cf=("MergeFiles", "threshold"), default="0.75", type=threshold,
+ help="The threshold at which to suggest merging files and probes")]
+
def Run(self):
if 'Cfg' in self.core.plugins:
self.check_cfg()
@@ -20,14 +35,25 @@ class MergeFiles(Bcfg2.Server.Lint.ServerPlugin):
@classmethod
def Errors(cls):
return {"merge-cfg": "warning",
- "merge-probes": "warning"}
+ "identical-cfg": "error",
+ "merge-probes": "warning",
+ "identical-probes": "error"}
def check_cfg(self):
""" check Cfg for similar files """
+ # ignore non-specific Cfg entries, e.g., privkey.xml
+ ignore = []
+ for hdlr in Bcfg2.Options.setup.cfg_handlers:
+ if not hdlr.__specific__:
+ ignore.extend(hdlr.__basenames__)
+
for filename, entryset in self.core.plugins['Cfg'].entries.items():
candidates = dict([(f, e) for f, e in entryset.entries.items()
- if isinstance(e, CfgGenerator)])
- for mset in self.get_similar(candidates):
+ if (isinstance(e, CfgGenerator) and
+ f not in ignore and
+ not f.endswith(".crypt"))])
+ similar, identical = self.get_similar(candidates)
+ for mset in similar:
self.LintError("merge-cfg",
"The following files are similar: %s. "
"Consider merging them into a single Genshi "
@@ -35,54 +61,69 @@ class MergeFiles(Bcfg2.Server.Lint.ServerPlugin):
", ".join([os.path.join(filename, p)
for p in mset]))
+ for mset in identical:
+ self.LintError("identical-cfg",
+ "The following files are identical: %s. "
+ "Strongly consider merging them into a single "
+ "Genshi template." %
+ ", ".join([os.path.join(filename, p)
+ for p in mset]))
+
def check_probes(self):
""" check Probes for similar files """
probes = self.core.plugins['Probes'].probes.entries
- for mset in self.get_similar(probes):
+ similar, identical = self.get_similar(probes)
+ for mset in similar:
self.LintError("merge-probes",
"The following probes are similar: %s. "
"Consider merging them into a single probe." %
", ".join([p for p in mset]))
+ for mset in identical:
+ self.LintError("identical-probes",
+ "The following probes are identical: %s. "
+ "Strongly consider merging them into a single "
+ "probe." %
+ ", ".join([p for p in mset]))
def get_similar(self, entries):
""" Get a list of similar files from the entry dict. Return
value is a list of lists, each of which gives the filenames of
similar files """
- if "threshold" in self.config:
- # accept threshold either as a percent (e.g., "threshold=75") or
- # as a ratio (e.g., "threshold=.75")
- threshold = float(self.config['threshold'])
- if threshold > 1:
- threshold /= 100
- else:
- threshold = 0.75
- rv = []
+ similar = []
+ identical = []
elist = list(entries.items())
while elist:
- result = self._find_similar(elist.pop(0), copy.copy(elist),
- threshold)
- if len(result) > 1:
- elist = [(fname, fdata)
- for fname, fdata in elist
- if fname not in result]
- rv.append(result)
- return rv
+ rv = self._find_similar(elist.pop(0), copy.copy(elist))
+ if rv[0]:
+ similar.append(rv[0])
+ if rv[1]:
+ identical.append(rv[1])
+ elist = [(fname, fdata)
+ for fname, fdata in elist
+ if fname not in rv[0] | rv[1]]
+ return similar, identical
- def _find_similar(self, ftuple, others, threshold):
+ def _find_similar(self, ftuple, others):
""" Find files similar to the one described by ftupe in the
list of other files. ftuple is a tuple of (filename, data);
others is a list of such tuples. threshold is a float between
0 and 1 that describes how similar two files much be to rate
as 'similar' """
fname, fdata = ftuple
- rv = [fname]
- while others:
- cname, cdata = others.pop(0)
+ similar = set()
+ identical = set()
+ for cname, cdata in others:
seqmatch = SequenceMatcher(None, fdata.data, cdata.data)
# perform progressively more expensive comparisons
- if (seqmatch.real_quick_ratio() > threshold and
- seqmatch.quick_ratio() > threshold and
- seqmatch.ratio() > threshold):
- rv.extend(self._find_similar((cname, cdata), copy.copy(others),
- threshold))
- return rv
+ if seqmatch.real_quick_ratio() == 1.0:
+ identical.add(cname)
+ elif (
+ seqmatch.real_quick_ratio() > Bcfg2.Options.setup.threshold and
+ seqmatch.quick_ratio() > Bcfg2.Options.setup.threshold and
+ seqmatch.ratio() > Bcfg2.Options.setup.threshold):
+ similar.add(cname)
+ if similar:
+ similar.add(fname)
+ if identical:
+ identical.add(fname)
+ return (similar, identical)
diff --git a/src/lib/Bcfg2/Server/Lint/Metadata.py b/src/lib/Bcfg2/Server/Lint/Metadata.py
new file mode 100644
index 000000000..e445892d1
--- /dev/null
+++ b/src/lib/Bcfg2/Server/Lint/Metadata.py
@@ -0,0 +1,172 @@
+""" ``bcfg2-lint`` plugin for :ref:`Metadata
+<server-plugins-grouping-metadata>` """
+
+from Bcfg2.Server.Lint import ServerPlugin
+
+
+class Metadata(ServerPlugin):
+ """ ``bcfg2-lint`` plugin for :ref:`Metadata
+ <server-plugins-grouping-metadata>`. This checks for several things:
+
+ * ``<Client>`` tags nested inside other ``<Client>`` tags;
+ * Deprecated options (like ``location="floating"``);
+ * Profiles that don't exist, or that aren't profile groups;
+ * Groups or clients that are defined multiple times;
+ * Multiple default groups or a default group that isn't a profile
+ group.
+ """
+ __serverplugin__ = 'Metadata'
+
+ def Run(self):
+ self.nested_clients()
+ self.deprecated_options()
+ self.bogus_profiles()
+ self.duplicate_groups()
+ self.duplicate_default_groups()
+ self.duplicate_clients()
+ self.default_is_profile()
+
+ @classmethod
+ def Errors(cls):
+ return {"nested-client-tags": "warning",
+ "deprecated-clients-options": "warning",
+ "nonexistent-profile-group": "error",
+ "non-profile-set-as-profile": "error",
+ "duplicate-group": "error",
+ "duplicate-client": "error",
+ "multiple-default-groups": "error",
+ "default-is-not-profile": "error"}
+
+ def deprecated_options(self):
+ """ Check for the ``location='floating'`` option, which has
+ been deprecated in favor of ``floating='true'``. """
+ if not hasattr(self.metadata, "clients_xml"):
+ # using metadata database
+ return
+ clientdata = self.metadata.clients_xml.xdata
+ for el in clientdata.xpath("//Client"):
+ loc = el.get("location")
+ if loc:
+ if loc == "floating":
+ floating = True
+ else:
+ floating = False
+ self.LintError("deprecated-clients-options",
+ "The location='%s' option is deprecated. "
+ "Please use floating='%s' instead:\n%s" %
+ (loc, floating, self.RenderXML(el)))
+
+ def nested_clients(self):
+ """ Check for a ``<Client/>`` tag inside a ``<Client/>`` tag,
+ which is either redundant or will never match. """
+ groupdata = self.metadata.groups_xml.xdata
+ for el in groupdata.xpath("//Client//Client"):
+ self.LintError("nested-client-tags",
+ "Client %s nested within Client tag: %s" %
+ (el.get("name"), self.RenderXML(el)))
+
+ def bogus_profiles(self):
+ """ Check for clients that have profiles that are either not
+ flagged as profile groups in ``groups.xml``, or don't exist. """
+ if not hasattr(self.metadata, "clients_xml"):
+ # using metadata database
+ return
+ for client in self.metadata.clients_xml.xdata.findall('.//Client'):
+ profile = client.get("profile")
+ if profile not in self.metadata.groups:
+ self.LintError("nonexistent-profile-group",
+ "%s has nonexistent profile group %s:\n%s" %
+ (client.get("name"), profile,
+ self.RenderXML(client)))
+ elif not self.metadata.groups[profile].is_profile:
+ self.LintError("non-profile-set-as-profile",
+ "%s is set as profile for %s, but %s is not a "
+ "profile group:\n%s" %
+ (profile, client.get("name"), profile,
+ self.RenderXML(client)))
+
+ def duplicate_default_groups(self):
+ """ Check for multiple default groups. """
+ defaults = []
+ for grp in self.metadata.groups_xml.xdata.xpath("//Groups/Group") + \
+ self.metadata.groups_xml.xdata.xpath("//Groups/Group//Group"):
+ if grp.get("default", "false").lower() == "true":
+ defaults.append(self.RenderXML(grp))
+ if len(defaults) > 1:
+ self.LintError("multiple-default-groups",
+ "Multiple default groups defined:\n%s" %
+ "\n".join(defaults))
+
+ def duplicate_clients(self):
+ """ Check for clients that are defined more than once. """
+ if not hasattr(self.metadata, "clients_xml"):
+ # using metadata database
+ return
+ self.duplicate_entries(
+ self.metadata.clients_xml.xdata.xpath("//Client"),
+ "client")
+
+ def duplicate_groups(self):
+ """ Check for groups that are defined more than once. There
+ are two ways this can happen:
+
+ 1. The group is listed twice with contradictory options.
+ 2. The group is listed with no options *first*, and then with
+ options later.
+
+ In this context, 'first' refers to the order in which groups
+ are parsed; see the loop condition below and
+ _handle_groups_xml_event above for details. """
+ groups = dict()
+ duplicates = dict()
+ for grp in self.metadata.groups_xml.xdata.xpath("//Groups/Group") + \
+ self.metadata.groups_xml.xdata.xpath("//Groups/Group//Group"):
+ grpname = grp.get("name")
+ if grpname in duplicates:
+ duplicates[grpname].append(grp)
+ elif len(grp.attrib) > 1: # group has options
+ if grpname in groups:
+ duplicates[grpname] = [grp, groups[grpname]]
+ else:
+ groups[grpname] = grp
+ else: # group has no options
+ groups[grpname] = grp
+ for grpname, grps in duplicates.items():
+ self.LintError("duplicate-group",
+ "Group %s is defined multiple times:\n%s" %
+ (grpname,
+ "\n".join(self.RenderXML(g) for g in grps)))
+
+ def duplicate_entries(self, allentries, etype):
+ """ Generic duplicate entry finder.
+
+ :param allentries: A list of all entries to check for
+ duplicates.
+ :type allentries: list of lxml.etree._Element
+ :param etype: The entry type. This will be used to determine
+ the error name (``duplicate-<etype>``) and for
+ display to the end user.
+ :type etype: string
+ """
+ entries = dict()
+ for el in allentries:
+ if el.get("name") in entries:
+ entries[el.get("name")].append(self.RenderXML(el))
+ else:
+ entries[el.get("name")] = [self.RenderXML(el)]
+ for ename, els in entries.items():
+ if len(els) > 1:
+ self.LintError("duplicate-%s" % etype,
+ "%s %s is defined multiple times:\n%s" %
+ (etype.title(), ename, "\n".join(els)))
+
+ def default_is_profile(self):
+ """ Ensure that the default group is a profile group. """
+ if (self.metadata.default and
+ not self.metadata.groups[self.metadata.default].is_profile):
+ xdata = \
+ self.metadata.groups_xml.xdata.xpath("//Group[@name='%s']" %
+ self.metadata.default)[0]
+ self.LintError("default-is-not-profile",
+ "Default group is not a profile group:\n%s" %
+ self.RenderXML(xdata))
diff --git a/src/lib/Bcfg2/Server/Lint/Pkgmgr.py b/src/lib/Bcfg2/Server/Lint/Pkgmgr.py
new file mode 100644
index 000000000..eed6d4c19
--- /dev/null
+++ b/src/lib/Bcfg2/Server/Lint/Pkgmgr.py
@@ -0,0 +1,50 @@
+""" ``bcfg2-lint`` plugin for :ref:`Pkgmgr
+<server-plugins-generators-pkgmgr>` """
+
+import os
+import glob
+import lxml.etree
+import Bcfg2.Options
+from Bcfg2.Server.Lint import ServerlessPlugin
+
+
+class Pkgmgr(ServerlessPlugin):
+ """ Find duplicate :ref:`Pkgmgr
+ <server-plugins-generators-pkgmgr>` entries with the same
+ priority. """
+ __serverplugin__ = 'Pkgmgr'
+
+ def Run(self):
+ pset = set()
+ for pfile in glob.glob(os.path.join(Bcfg2.Options.setup.repository,
+ 'Pkgmgr', '*.xml')):
+ if self.HandlesFile(pfile):
+ xdata = lxml.etree.parse(pfile).getroot()
+ # get priority, type, group
+ priority = xdata.get('priority')
+ ptype = xdata.get('type')
+ for pkg in xdata.xpath("//Package"):
+ if pkg.getparent().tag == 'Group':
+ grp = pkg.getparent().get('name')
+ if (type(grp) is not str and
+ grp.getparent().tag == 'Group'):
+ pgrp = grp.getparent().get('name')
+ else:
+ pgrp = 'none'
+ else:
+ grp = 'none'
+ pgrp = 'none'
+ ptuple = (pkg.get('name'), priority, ptype, grp, pgrp)
+ # check if package is already listed with same
+ # priority, type, grp
+ if ptuple in pset:
+ self.LintError(
+ "duplicate-package",
+ "Duplicate Package %s, priority:%s, type:%s" %
+ (pkg.get('name'), priority, ptype))
+ else:
+ pset.add(ptuple)
+
+ @classmethod
+ def Errors(cls):
+        return {"duplicate-package": "error"}
diff --git a/src/lib/Bcfg2/Server/Lint/RequiredAttrs.py b/src/lib/Bcfg2/Server/Lint/RequiredAttrs.py
index ce8b237b9..ebf4c4954 100644
--- a/src/lib/Bcfg2/Server/Lint/RequiredAttrs.py
+++ b/src/lib/Bcfg2/Server/Lint/RequiredAttrs.py
@@ -3,16 +3,10 @@ verified with an XML schema alone. """
import os
import re
-import lxml.etree
import Bcfg2.Server.Lint
import Bcfg2.Client.Tools.VCS
from Bcfg2.Server.Plugins.Packages import Apt, Yum
from Bcfg2.Client.Tools.POSIX.base import device_map
-try:
- from Bcfg2.Server.Plugins.Bundler import BundleTemplateFile
- HAS_GENSHI = True
-except ImportError:
- HAS_GENSHI = False
# format verifying functions. TODO: These should be moved into XML
@@ -162,7 +156,7 @@ class RequiredAttrs(Bcfg2.Server.Lint.ServerPlugin):
for source in self.core.plugins['Packages'].sources:
if isinstance(source, Yum.YumSource):
if (not source.pulp_id and not source.url and
- not source.rawurl):
+ not source.rawurl):
self.LintError(
"required-attrs-missing",
"A %s source must have either a url, rawurl, or "
@@ -176,7 +170,7 @@ class RequiredAttrs(Bcfg2.Server.Lint.ServerPlugin):
(source.ptype, self.RenderXML(source.xsource)))
if (not isinstance(source, Apt.AptSource) and
- source.recommended):
+ source.recommended):
self.LintError(
"extra-attrs",
"The recommended attribute is not supported on %s sources:"
@@ -191,32 +185,34 @@ class RequiredAttrs(Bcfg2.Server.Lint.ServerPlugin):
for rules in self.core.plugins['Rules'].entries.values():
xdata = rules.pnode.data
for path in xdata.xpath("//Path"):
- self.check_entry(path, os.path.join(self.config['repo'],
- rules.name))
+ self.check_entry(path,
+ os.path.join(Bcfg2.Options.setup.repository,
+ rules.name))
def check_bundles(self):
- """ Check bundles for BoundPath entries with missing
+ """ Check bundles for BoundPath and BoundPackage entries with missing
attrs. """
if 'Bundler' not in self.core.plugins:
return
for bundle in self.core.plugins['Bundler'].entries.values():
- if (self.HandlesFile(bundle.name) and
- (not HAS_GENSHI or
- not isinstance(bundle, BundleTemplateFile))):
- try:
- xdata = lxml.etree.XML(bundle.data)
- except (lxml.etree.XMLSyntaxError, AttributeError):
- xdata = \
- lxml.etree.parse(bundle.template.filepath).getroot()
-
- for path in \
- xdata.xpath("//*[substring(name(), 1, 5) = 'Bound']"):
+ if self.HandlesFile(bundle.name) and bundle.template is None:
+ for path in bundle.xdata.xpath(
+ "//*[substring(name(), 1, 5) = 'Bound']"):
self.check_entry(path, bundle.name)
+ # ensure that abstract Path tags have either name
+ # or glob specified
+ for path in bundle.xdata.xpath("//Path"):
+ if ('name' not in path.attrib and
+ 'glob' not in path.attrib):
+ self.LintError(
+ "required-attrs-missing",
+ "Path tags require either a 'name' or 'glob' "
+ "attribute: \n%s" % self.RenderXML(path))
# ensure that abstract Package tags have either name
# or group specified
- for package in xdata.xpath("//Package"):
+ for package in bundle.xdata.xpath("//Package"):
if ('name' not in package.attrib and
'group' not in package.attrib):
self.LintError(
@@ -272,7 +268,7 @@ class RequiredAttrs(Bcfg2.Server.Lint.ServerPlugin):
fmt = required_attrs['__text__']
del required_attrs['__text__']
if (not entry.text and
- not entry.get('empty', 'false').lower() == 'true'):
+ not entry.get('empty', 'false').lower() == 'true'):
self.LintError("required-attrs-missing",
"Text missing for %s %s in %s: %s" %
(tag, name, filename,
diff --git a/src/lib/Bcfg2/Server/Lint/TemplateAbuse.py b/src/lib/Bcfg2/Server/Lint/TemplateAbuse.py
index fca9d14a9..a437c1318 100644
--- a/src/lib/Bcfg2/Server/Lint/TemplateAbuse.py
+++ b/src/lib/Bcfg2/Server/Lint/TemplateAbuse.py
@@ -4,20 +4,24 @@ import os
import stat
import Bcfg2.Server.Lint
from Bcfg2.Compat import any # pylint: disable=W0622
-from Bcfg2.Server.Plugin import DEFAULT_FILE_METADATA
+from Bcfg2.Server.Plugin import default_path_metadata
from Bcfg2.Server.Plugins.Cfg.CfgInfoXML import CfgInfoXML
from Bcfg2.Server.Plugins.Cfg.CfgGenshiGenerator import CfgGenshiGenerator
from Bcfg2.Server.Plugins.Cfg.CfgCheetahGenerator import CfgCheetahGenerator
+from Bcfg2.Server.Plugins.Cfg.CfgJinja2Generator import CfgJinja2Generator
from Bcfg2.Server.Plugins.Cfg.CfgEncryptedGenshiGenerator import \
CfgEncryptedGenshiGenerator
from Bcfg2.Server.Plugins.Cfg.CfgEncryptedCheetahGenerator import \
CfgEncryptedCheetahGenerator
+from Bcfg2.Server.Plugins.Cfg.CfgEncryptedJinja2Generator import \
+ CfgEncryptedJinja2Generator
class TemplateAbuse(Bcfg2.Server.Lint.ServerPlugin):
""" Check for templated scripts or executables. """
- templates = [CfgGenshiGenerator, CfgCheetahGenerator,
- CfgEncryptedGenshiGenerator, CfgEncryptedCheetahGenerator]
+ templates = [CfgGenshiGenerator, CfgCheetahGenerator, CfgJinja2Generator,
+ CfgEncryptedGenshiGenerator, CfgEncryptedCheetahGenerator,
+ CfgEncryptedJinja2Generator]
extensions = [".pl", ".py", ".sh", ".rb"]
def Run(self):
@@ -58,10 +62,11 @@ class TemplateAbuse(Bcfg2.Server.Lint.ServerPlugin):
# finally, check for executable permissions in info.xml
for entry in entryset.entries.values():
if isinstance(entry, CfgInfoXML):
- for pinfo in entry.infoxml.pnode.data.xpath("//FileInfo"):
+ for pinfo in entry.infoxml.xdata.xpath("//FileInfo/Info"):
try:
- mode = int(pinfo.get("mode",
- DEFAULT_FILE_METADATA['mode']), 8)
+ mode = int(
+ pinfo.get("mode",
+ default_path_metadata()['mode']), 8)
except ValueError:
# LintError will be produced by RequiredAttrs plugin
self.logger.warning("Non-octal mode: %s" % mode)
diff --git a/src/lib/Bcfg2/Server/Lint/TemplateHelper.py b/src/lib/Bcfg2/Server/Lint/TemplateHelper.py
new file mode 100644
index 000000000..9d05516f1
--- /dev/null
+++ b/src/lib/Bcfg2/Server/Lint/TemplateHelper.py
@@ -0,0 +1,97 @@
+""" ``bcfg2-lint`` plugin for :ref:`TemplateHelper
+<server-plugins-connectors-templatehelper>` """
+
+import sys
+import imp
+from Bcfg2.Server.Lint import ServerPlugin
+from Bcfg2.Server.Plugins.TemplateHelper import HelperModule, MODULE_RE, \
+ safe_module_name
+
+
+class TemplateHelper(ServerPlugin):
+ """ ``bcfg2-lint`` plugin to ensure that all :ref:`TemplateHelper
+ <server-plugins-connectors-templatehelper>` modules are valid.
+ This can check for:
+
+ * A TemplateHelper module that cannot be imported due to syntax or
+ other compile-time errors;
+ * A TemplateHelper module that does not have an ``__export__``
+ attribute, or whose ``__export__`` is not a list;
+ * Bogus symbols listed in ``__export__``, including symbols that
+ don't exist, that are reserved, or that start with underscores.
+ """
+ __serverplugin__ = 'TemplateHelper'
+
+ def __init__(self, *args, **kwargs):
+ ServerPlugin.__init__(self, *args, **kwargs)
+ # we instantiate a dummy helper to discover which keywords and
+ # defaults are reserved
+ dummy = HelperModule("foo.py")
+ self.reserved_keywords = dir(dummy)
+ self.reserved_defaults = dummy.reserved_defaults
+
+ def Run(self):
+ for helper in self.core.plugins['TemplateHelper'].entries.values():
+ if self.HandlesFile(helper.name):
+ self.check_helper(helper.name)
+
+ def check_helper(self, helper):
+ """ Check a single helper module.
+
+ :param helper: The filename of the helper module
+ :type helper: string
+ """
+ module_name = MODULE_RE.search(helper).group(1)
+
+ try:
+ module = imp.load_source(safe_module_name(module_name), helper)
+ except: # pylint: disable=W0702
+ err = sys.exc_info()[1]
+ self.LintError("templatehelper-import-error",
+ "Failed to import %s: %s" %
+ (helper, err))
+ return
+
+ if not hasattr(module, "__export__"):
+ self.LintError("templatehelper-no-export",
+ "%s has no __export__ list" % helper)
+ return
+ elif not isinstance(module.__export__, list):
+ self.LintError("templatehelper-nonlist-export",
+ "__export__ is not a list in %s" % helper)
+ return
+
+ for sym in module.__export__:
+ if not hasattr(module, sym):
+ self.LintError("templatehelper-nonexistent-export",
+ "%s: exported symbol %s does not exist" %
+ (helper, sym))
+ elif sym in self.reserved_keywords:
+ self.LintError("templatehelper-reserved-export",
+ "%s: exported symbol %s is reserved" %
+ (helper, sym))
+ elif sym.startswith("_"):
+ self.LintError("templatehelper-underscore-export",
+ "%s: exported symbol %s starts with underscore"
+ % (helper, sym))
+ if sym in getattr(module, "__default__", []):
+ self.LintError("templatehelper-export-and-default",
+ "%s: %s is listed in both __default__ and "
+ "__export__" % (helper, sym))
+
+ for sym in getattr(module, "__default__", []):
+ if sym in self.reserved_defaults:
+ self.LintError("templatehelper-reserved-default",
+ "%s: default symbol %s is reserved" %
+ (helper, sym))
+
+ @classmethod
+ def Errors(cls):
+ return {"templatehelper-import-error": "error",
+ "templatehelper-no-export": "error",
+ "templatehelper-nonlist-export": "error",
+ "templatehelper-nonexistent-export": "error",
+ "templatehelper-reserved-export": "error",
+ "templatehelper-reserved-default": "error",
+ "templatehelper-underscore-export": "warning",
+ "templatehelper-export-and-default": "warning"}
diff --git a/src/lib/Bcfg2/Server/Lint/Validate.py b/src/lib/Bcfg2/Server/Lint/Validate.py
index 1e33ec398..d6f18afbb 100644
--- a/src/lib/Bcfg2/Server/Lint/Validate.py
+++ b/src/lib/Bcfg2/Server/Lint/Validate.py
@@ -9,15 +9,22 @@ import os
import sys
import lxml.etree
-from subprocess import Popen, PIPE, STDOUT
+import Bcfg2.Options
import Bcfg2.Server.Lint
+from Bcfg2.Utils import Executor
class Validate(Bcfg2.Server.Lint.ServerlessPlugin):
""" Ensure that all XML files in the Bcfg2 repository validate
according to their respective schemas. """
+ options = Bcfg2.Server.Lint.ServerlessPlugin.options + [
+ Bcfg2.Options.PathOption(
+ "--schema", cf=("Validate", "schema"),
+ default="/usr/share/bcfg2/schemas",
+ help="The full path to the XML schema files")]
+
def __init__(self, *args, **kwargs):
Bcfg2.Server.Lint.ServerlessPlugin.__init__(self, *args, **kwargs)
@@ -36,14 +43,14 @@ class Validate(Bcfg2.Server.Lint.ServerlessPlugin):
"Cfg/**/pubkey.xml": "pubkey.xsd",
"Cfg/**/authorizedkeys.xml": "authorizedkeys.xsd",
"Cfg/**/authorized_keys.xml": "authorizedkeys.xsd",
+ "Cfg/**/sslcert.xml": "sslca-cert.xsd",
+ "Cfg/**/sslkey.xml": "sslca-key.xsd",
"SSHbase/**/info.xml": "info.xsd",
- "SSLCA/**/info.xml": "info.xsd",
"TGenshi/**/info.xml": "info.xsd",
"TCheetah/**/info.xml": "info.xsd",
"Bundler/*.xml": "bundle.xsd",
"Bundler/*.genshi": "bundle.xsd",
"Pkgmgr/*.xml": "pkglist.xsd",
- "Base/*.xml": "base.xsd",
"Rules/*.xml": "rules.xsd",
"Defaults/*.xml": "defaults.xsd",
"etc/report-configuration.xml": "report-configuration.xsd",
@@ -54,16 +61,14 @@ class Validate(Bcfg2.Server.Lint.ServerlessPlugin):
"AWSTags/config.xml": "awstags.xsd",
"NagiosGen/config.xml": "nagiosgen.xsd",
"FileProbes/config.xml": "fileprobes.xsd",
- "SSLCA/**/cert.xml": "sslca-cert.xsd",
- "SSLCA/**/key.xml": "sslca-key.xsd",
"GroupLogic/groups.xml": "grouplogic.xsd"
}
self.filelists = {}
self.get_filelists()
+ self.cmd = Executor()
def Run(self):
- schemadir = self.config['schema']
for path, schemaname in self.filesets.items():
try:
@@ -73,7 +78,8 @@ class Validate(Bcfg2.Server.Lint.ServerlessPlugin):
if filelist:
# avoid loading schemas for empty file lists
- schemafile = os.path.join(schemadir, schemaname)
+ schemafile = os.path.join(Bcfg2.Options.setup.schema,
+ schemaname)
schema = self._load_schema(schemafile)
if schema:
for filename in filelist:
@@ -122,11 +128,10 @@ class Validate(Bcfg2.Server.Lint.ServerlessPlugin):
if self.files is None:
cmd.append("--xinclude")
cmd.append(filename)
- lint = Popen(cmd, stdout=PIPE, stderr=STDOUT)
+ result = self.cmd.run(cmd)
self.LintError("xml-failed-to-parse",
- "%s fails to parse:\n%s" % (filename,
- lint.communicate()[0]))
- lint.wait()
+ "%s fails to parse:\n%s" %
+ (filename, result.stdout + result.stderr))
return False
except IOError:
self.LintError("xml-failed-to-read",
@@ -188,14 +193,11 @@ class Validate(Bcfg2.Server.Lint.ServerlessPlugin):
if self.files is None:
cmd.append("--xinclude")
cmd.extend(["--noout", "--schema", schemafile, filename])
- lint = Popen(cmd, stdout=PIPE, stderr=STDOUT)
- output = lint.communicate()[0]
- # py3k fix
- if not isinstance(output, str):
- output = output.decode('utf-8')
- if lint.wait():
+ result = self.cmd.run(cmd)
+ if not result.success:
self.LintError("xml-failed-to-verify",
- "%s fails to verify:\n%s" % (filename, output))
+ "%s fails to verify:\n%s" %
+ (filename, result.stdout + result.stderr))
return False
return True
@@ -215,9 +217,9 @@ class Validate(Bcfg2.Server.Lint.ServerlessPlugin):
else: # self.files is None
fpath, fname = path.split('/**/')
self.filelists[path] = []
- for root, _, files in \
- os.walk(os.path.join(self.config['repo'],
- fpath)):
+ for root, _, files in os.walk(
+ os.path.join(Bcfg2.Options.setup.repository,
+ fpath)):
self.filelists[path].extend([os.path.join(root, f)
for f in files
if f == fname])
diff --git a/src/lib/Bcfg2/Server/Lint/ValidateJSON.py b/src/lib/Bcfg2/Server/Lint/ValidateJSON.py
index bdbe6a271..f7cf5d549 100644
--- a/src/lib/Bcfg2/Server/Lint/ValidateJSON.py
+++ b/src/lib/Bcfg2/Server/Lint/ValidateJSON.py
@@ -57,9 +57,9 @@ class ValidateJSON(Bcfg2.Server.Lint.ServerlessPlugin):
rv.extend(self.list_matching_files(path))
else: # self.files is None
fpath, fname = path.split('/**/')
- for root, _, files in \
- os.walk(os.path.join(self.config['repo'],
- fpath)):
+ for root, _, files in os.walk(
+ os.path.join(Bcfg2.Options.setup.repository,
+ fpath)):
rv.extend([os.path.join(root, f)
for f in files if f == fname])
else:
diff --git a/src/lib/Bcfg2/Server/Lint/__init__.py b/src/lib/Bcfg2/Server/Lint/__init__.py
index ae2b81a61..873e5f149 100644
--- a/src/lib/Bcfg2/Server/Lint/__init__.py
+++ b/src/lib/Bcfg2/Server/Lint/__init__.py
@@ -1,22 +1,25 @@
""" Base classes for Lint plugins and error handling """
-import os
+import copy
+import fcntl
import fnmatch
import glob
-import sys
import logging
-from copy import copy
+import os
+import struct
+import sys
+import termios
import textwrap
import time
import lxml.etree
-import fcntl
-import termios
-import struct
-from Bcfg2.Compat import walk_packages
-plugins = [m[1] for m in walk_packages(path=__path__)] # pylint: disable=C0103
+import Bcfg2.Options
+import Bcfg2.Server.Core
+import Bcfg2.Server.Plugins
+from Bcfg2.Compat import walk_packages
+from Bcfg2.Options import _debug
def _ioctl_GWINSZ(fd): # pylint: disable=C0103
@@ -24,7 +27,7 @@ def _ioctl_GWINSZ(fd): # pylint: disable=C0103
from the given file descriptor """
try:
return struct.unpack('hh', fcntl.ioctl(fd, termios.TIOCGWINSZ, '1234'))
- except: # pylint: disable=W0702
+ except (IOError, struct.error):
return None
@@ -38,7 +41,7 @@ def get_termsize():
fd = os.open(os.ctermid(), os.O_RDONLY)
dims = _ioctl_GWINSZ(fd)
os.close(fd)
- except: # pylint: disable=W0702
+ except IOError:
pass
if not dims:
try:
@@ -51,10 +54,15 @@ def get_termsize():
class Plugin(object):
""" Base class for all bcfg2-lint plugins """
- def __init__(self, config, errorhandler=None, files=None):
+ #: Name of the matching server plugin or None if there is no
+ #: matching one. If this is None the lint plugin will only loaded
+ #: by default if the matching server plugin is enabled, too.
+ __serverplugin__ = None
+
+ options = [Bcfg2.Options.Common.repository]
+
+ def __init__(self, errorhandler=None, files=None):
"""
- :param config: A :mod:`Bcfg2.Options` setup dict
- :type config: dict
:param errorhandler: A :class:`Bcfg2.Server.Lint.ErrorHandler`
that will be used to handle lint errors.
If one is not provided, a new one will be
@@ -68,9 +76,6 @@ class Plugin(object):
#: The list of files that bcfg2-lint should be run against
self.files = files
- #: The Bcfg2.Options setup dict
- self.config = config
-
self.logger = logging.getLogger('bcfg2-lint')
if errorhandler is None:
#: The error handler
@@ -101,9 +106,10 @@ class Plugin(object):
False otherwise. """
return (self.files is None or
fname in self.files or
- os.path.join(self.config['repo'], fname) in self.files or
+ os.path.join(Bcfg2.Options.setup.repository,
+ fname) in self.files or
os.path.abspath(fname) in self.files or
- os.path.abspath(os.path.join(self.config['repo'],
+ os.path.abspath(os.path.join(Bcfg2.Options.setup.repository,
fname)) in self.files)
def LintError(self, err, msg):
@@ -130,7 +136,7 @@ class Plugin(object):
"""
xml = None
if len(element) or element.text:
- el = copy(element)
+ el = copy.copy(element)
if el.text and not keep_text:
el.text = '...'
for child in el.iterchildren():
@@ -149,7 +155,8 @@ class Plugin(object):
if self.files is not None:
return fnmatch.filter(self.files, os.path.join('*', path))
else:
- return glob.glob(os.path.join(self.config['repo'], path))
+ return glob.glob(os.path.join(Bcfg2.Options.setup.repository,
+ path))
class ErrorHandler(object):
@@ -157,8 +164,8 @@ class ErrorHandler(object):
def __init__(self, errors=None):
"""
- :param config: An initial dict of errors to register
- :type config: dict
+ :param errors: An initial dict of errors to register
+ :type errors: dict
"""
#: The number of errors passed to this error handler
self.errors = 0
@@ -279,12 +286,10 @@ class ServerPlugin(Plugin): # pylint: disable=W0223
""" Base class for bcfg2-lint plugins that check things that
require the running Bcfg2 server. """
- def __init__(self, core, config, errorhandler=None, files=None):
+ def __init__(self, core, errorhandler=None, files=None):
"""
:param core: The Bcfg2 server core
:type core: Bcfg2.Server.Core.BaseCore
- :param config: A :mod:`Bcfg2.Options` setup dict
- :type config: dict
:param errorhandler: A :class:`Bcfg2.Server.Lint.ErrorHandler`
that will be used to handle lint errors.
If one is not provided, a new one will be
@@ -294,7 +299,7 @@ class ServerPlugin(Plugin): # pylint: disable=W0223
the bcfg2-lint ``--stdin`` option.)
:type files: list of strings
"""
- Plugin.__init__(self, config, errorhandler=errorhandler, files=files)
+ Plugin.__init__(self, errorhandler=errorhandler, files=files)
#: The server core
self.core = core
@@ -302,3 +307,171 @@ class ServerPlugin(Plugin): # pylint: disable=W0223
#: The metadata plugin
self.metadata = self.core.metadata
+
+
+class LintPluginAction(Bcfg2.Options.ComponentAction):
+ """ Option parser action to load lint plugins """
+ bases = ['Bcfg2.Server.Lint']
+
+
+class LintPluginOption(Bcfg2.Options.Option):
+ """ Option class for the lint_plugins """
+
+ def early_parsing_hook(self, namespace):
+ """
+        We want a useful default for the enabled lint plugins.
+        Therefore we use all importable plugins that either pertain
+        to enabled server plugins or that have no matching plugin.
+ """
+
+ plugins = [p.__name__ for p in namespace.plugins]
+ for loader, name, _is_pkg in walk_packages(path=__path__):
+ try:
+ module = loader.find_module(name).load_module(name)
+ plugin = getattr(module, name)
+ if plugin.__serverplugin__ is None or \
+ plugin.__serverplugin__ in plugins:
+ _debug("Automatically adding lint plugin %s" %
+ plugin.__name__)
+ self.default.append(plugin.__name__)
+ except ImportError:
+ pass
+
+
+class _EarlyOptions(object):
+ """ We need the server.plugins options in an early parsing hook
+ for determining the default value for the lint_plugins. So we
+ create a component that is parsed before the other options. """
+
+ parse_first = True
+ options = [Bcfg2.Options.Common.plugins]
+
+
+class CLI(object):
+ """ The bcfg2-lint CLI """
+ options = Bcfg2.Server.Core.Core.options + [
+ Bcfg2.Options.PathOption(
+ '--lint-config', default='/etc/bcfg2-lint.conf',
+ action=Bcfg2.Options.ConfigFileAction,
+ help='Specify bcfg2-lint configuration file'),
+ LintPluginOption(
+ "--lint-plugins", cf=('lint', 'plugins'), default=[],
+ type=Bcfg2.Options.Types.comma_list, action=LintPluginAction,
+ help='bcfg2-lint plugin list'),
+ Bcfg2.Options.BooleanOption(
+ '--list-errors', help='Show error handling'),
+ Bcfg2.Options.BooleanOption(
+ '--stdin', help='Operate on a list of files supplied on stdin'),
+ Bcfg2.Options.Option(
+ cf=("errors", '*'), dest="lint_errors",
+ help="How to handle bcfg2-lint errors")]
+
+ def __init__(self):
+ parser = Bcfg2.Options.get_parser(
+ description="Manage a running Bcfg2 server",
+ components=[self, _EarlyOptions])
+ parser.parse()
+
+ self.logger = logging.getLogger(parser.prog)
+
+ self.logger.debug("Running lint with plugins: %s" %
+ [p.__name__
+ for p in Bcfg2.Options.setup.lint_plugins])
+
+ if Bcfg2.Options.setup.stdin:
+ self.files = [s.strip() for s in sys.stdin.readlines()]
+ else:
+ self.files = None
+ self.errorhandler = self.get_errorhandler()
+ self.serverlessplugins = []
+ self.serverplugins = []
+ for plugin in Bcfg2.Options.setup.lint_plugins:
+ if issubclass(plugin, ServerPlugin):
+ self.serverplugins.append(plugin)
+ else:
+ self.serverlessplugins.append(plugin)
+
+ def run(self):
+ """ Run bcfg2-lint """
+ if Bcfg2.Options.setup.list_errors:
+ for plugin in self.serverplugins + self.serverlessplugins:
+ self.errorhandler.RegisterErrors(getattr(plugin, 'Errors')())
+
+ print("%-35s %-35s" % ("Error name", "Handler"))
+ for err, handler in self.errorhandler.errortypes.items():
+ print("%-35s %-35s" % (err, handler.__name__))
+ return 0
+
+ if not self.serverplugins and not self.serverlessplugins:
+ self.logger.error("No lint plugins loaded!")
+ return 1
+
+ self.run_serverless_plugins()
+
+ if self.serverplugins:
+ if self.errorhandler.errors:
+ # it would be swell if we could try to start the server
+ # even if there were errors with the serverless plugins,
+ # but since XML parsing errors occur in the FAM thread
+ # (not in the core server thread), there's no way we can
+ # start the server and try to catch exceptions --
+ # bcfg2-lint isn't in the same stack as the exceptions.
+ # so we're forced to assume that a serverless plugin error
+ # will prevent the server from starting
+ print("Serverless plugins encountered errors, skipping server "
+ "plugins")
+ else:
+ self.run_server_plugins()
+
+ if (self.errorhandler.errors or
+ self.errorhandler.warnings or
+ Bcfg2.Options.setup.verbose):
+ print("%d errors" % self.errorhandler.errors)
+ print("%d warnings" % self.errorhandler.warnings)
+
+ if self.errorhandler.errors:
+ return 2
+ elif self.errorhandler.warnings:
+ return 3
+ else:
+ return 0
+
+ def get_errorhandler(self):
+ """ get a Bcfg2.Server.Lint.ErrorHandler object """
+ return Bcfg2.Server.Lint.ErrorHandler(
+ errors=Bcfg2.Options.setup.lint_errors)
+
+ def run_serverless_plugins(self):
+ """ Run serverless plugins """
+ self.logger.debug("Running serverless plugins: %s" %
+ [p.__name__ for p in self.serverlessplugins])
+ for plugin in self.serverlessplugins:
+ self.logger.debug(" Running %s" % plugin.__name__)
+ plugin(files=self.files, errorhandler=self.errorhandler).Run()
+
+ def run_server_plugins(self):
+ """ run plugins that require a running server to run """
+ core = Bcfg2.Server.Core.Core()
+ try:
+ core.load_plugins()
+ core.block_for_fam_events(handle_events=True)
+ self.logger.debug("Running server plugins: %s" %
+ [p.__name__ for p in self.serverplugins])
+ for plugin in self.serverplugins:
+ self.logger.debug(" Running %s" % plugin.__name__)
+ plugin(core,
+ files=self.files, errorhandler=self.errorhandler).Run()
+ finally:
+ core.shutdown()
+
+ def _run_plugin(self, plugin, args=None):
+ """ Run a single bcfg2-lint plugin """
+ if args is None:
+ args = []
+ start = time.time()
+ # python 2.5 doesn't support mixing *magic and keyword arguments
+ kwargs = dict(files=self.files, errorhandler=self.errorhandler)
+ rv = plugin(*args, **kwargs).Run()
+ self.logger.debug(" Ran %s in %0.2f seconds" % (plugin.__name__,
+ time.time() - start))
+ return rv
diff --git a/src/lib/Bcfg2/Server/MultiprocessingCore.py b/src/lib/Bcfg2/Server/MultiprocessingCore.py
index 4986aac60..724b34d8d 100644
--- a/src/lib/Bcfg2/Server/MultiprocessingCore.py
+++ b/src/lib/Bcfg2/Server/MultiprocessingCore.py
@@ -15,32 +15,16 @@ import time
import threading
import lxml.etree
import multiprocessing
+import Bcfg2.Options
+import Bcfg2.Server.Cache
import Bcfg2.Server.Plugin
from itertools import cycle
-from Bcfg2.Cache import Cache
-from Bcfg2.Compat import Empty, wraps
-from Bcfg2.Server.Core import BaseCore, exposed
-from Bcfg2.Server.BuiltinCore import Core as BuiltinCore
+from Bcfg2.Compat import Queue, Empty, wraps
+from Bcfg2.Server.Core import Core, exposed
+from Bcfg2.Server.BuiltinCore import BuiltinCore
from multiprocessing.connection import Listener, Client
-class DispatchingCache(Cache, Bcfg2.Server.Plugin.Debuggable):
- """ Implementation of :class:`Bcfg2.Cache.Cache` that propagates
- cache expiration events to child nodes. """
-
- #: The method to send over the pipe to expire the cache
- method = "expire_metadata_cache"
-
- def __init__(self, *args, **kwargs):
- self.rpc_q = kwargs.pop("queue")
- Bcfg2.Server.Plugin.Debuggable.__init__(self)
- Cache.__init__(self, *args, **kwargs)
-
- def expire(self, key=None):
- self.rpc_q.publish(self.method, args=[key])
- Cache.expire(self, key=key)
-
-
class RPCQueue(Bcfg2.Server.Plugin.Debuggable):
""" An implementation of a :class:`multiprocessing.Queue` designed
for several additional use patterns:
@@ -148,7 +132,7 @@ class DualEvent(object):
return self._threading_event.wait(timeout=timeout)
-class ChildCore(BaseCore):
+class ChildCore(Core):
""" A child process for :class:`Bcfg2.MultiprocessingCore.Core`.
This core builds configurations from a given
:class:`multiprocessing.Pipe`. Note that this is a full-fledged
@@ -167,12 +151,10 @@ class ChildCore(BaseCore):
#: every ``poll_wait`` seconds.
poll_wait = 3.0
- def __init__(self, name, setup, rpc_q, terminate):
+ def __init__(self, name, rpc_q, terminate):
"""
:param name: The name of this child
:type name: string
- :param setup: A Bcfg2 options dict
- :type setup: Bcfg2.Options.OptionParser
:param read_q: The queue the child will read from for RPC
communications from the parent process.
:type read_q: multiprocessing.Queue
@@ -183,7 +165,7 @@ class ChildCore(BaseCore):
themselves down.
:type terminate: multiprocessing.Event
"""
- BaseCore.__init__(self, setup)
+ Core.__init__(self)
#: The name of this child
self.name = name
@@ -197,7 +179,7 @@ class ChildCore(BaseCore):
# override this setting so that the child doesn't try to write
# the pidfile
- self.setup['daemon'] = False
+ Bcfg2.Options.setup.daemon = False
# ensure that the child doesn't start a perflog thread
self.perflog_thread = None
@@ -207,12 +189,6 @@ class ChildCore(BaseCore):
def _run(self):
return True
- def _daemonize(self):
- return True
-
- def _drop_privileges(self):
- pass
-
def _dispatch(self, address, data):
""" Method dispatcher used for commands received from
the RPC queue. """
@@ -267,7 +243,7 @@ class ChildCore(BaseCore):
self.shutdown()
def shutdown(self):
- BaseCore.shutdown(self)
+ Core.shutdown(self)
self.logger.info("%s: Closing RPC command queue" % self.name)
self.rpc_q.close()
@@ -292,16 +268,9 @@ class ChildCore(BaseCore):
return rmi
@exposed
- def expire_metadata_cache(self, client=None):
- """ Expire the metadata cache for a client """
- self.metadata_cache.expire(client)
-
- @exposed
- def RecvProbeData(self, address, _):
- """ Expire the probe cache for a client """
- self.expire_caches_by_type(Bcfg2.Server.Plugin.Probing,
- key=self.resolve_client(address,
- metadata=False)[0])
+ def expire_cache(self, *tags, **kwargs):
+ """ Expire cached data """
+ Bcfg2.Server.Cache.expire(*tags, exact=kwargs.pop("exact", False))
@exposed
def GetConfig(self, client):
@@ -312,7 +281,7 @@ class ChildCore(BaseCore):
return lxml.etree.tostring(self.BuildConfiguration(client))
-class Core(BuiltinCore):
+class MultiprocessingCore(BuiltinCore):
""" A multiprocessing core that delegates building the actual
client configurations to
:class:`Bcfg2.Server.MultiprocessingCore.ChildCore` objects. The
@@ -320,14 +289,34 @@ class Core(BuiltinCore):
:func:`GetConfig` are delegated to children. All other calls are
handled by the parent process. """
+ options = BuiltinCore.options + [
+ Bcfg2.Options.Option(
+ '--children', dest="core_children",
+ cf=('server', 'children'), type=int,
+ default=multiprocessing.cpu_count(),
+ help='Spawn this number of children for the multiprocessing core')]
+
#: How long to wait for a child process to shut down cleanly
#: before it is terminated.
shutdown_timeout = 10.0
- def __init__(self, setup):
- BuiltinCore.__init__(self, setup)
- if setup['children'] is None:
- setup['children'] = multiprocessing.cpu_count()
+ def __init__(self):
+ BuiltinCore.__init__(self)
+
+ #: A dict of child name -> one end of the
+ #: :class:`multiprocessing.Pipe` object used to communicate
+ #: with that child. (The child is given the other end of the
+ #: Pipe.)
+ self.pipes = dict()
+
+ #: A queue that keeps track of which children are available to
+ #: render a configuration. A child is popped from the queue
+ #: when it starts to render a config, then it's pushed back on
+ #: when it's done. This lets us use a blocking call to
+ #: :func:`Queue.Queue.get` when waiting for an available
+ #: child.
+ self.available_children = \
+ Queue(maxsize=Bcfg2.Options.setup.core_children)
#: The flag that indicates when to stop child threads and
#: processes
@@ -337,8 +326,6 @@ class Core(BuiltinCore):
#: used to send or publish commands to children.
self.rpc_q = RPCQueue()
- self.metadata_cache = DispatchingCache(queue=self.rpc_q)
-
#: A list of children that will be cycled through
self._all_children = []
@@ -346,13 +333,22 @@ class Core(BuiltinCore):
#: to provide a round-robin distribution of render requests
self.children = None
+ def __str__(self):
+ if hasattr(Bcfg2.Options.setup, "location"):
+ return "%s(%s; %s children)" % (self.__class__.__name__,
+ Bcfg2.Options.setup.location,
+ len(self._all_children))
+ else:
+ return "%s(%s children)" % (self.__class__.__name__,
+ len(self._all_children))
+
def _run(self):
- for cnum in range(self.setup['children']):
+ for cnum in range(Bcfg2.Options.setup.core_children):
name = "Child-%s" % cnum
self.logger.debug("Starting child %s" % name)
child_q = self.rpc_q.add_subscriber(name)
- childcore = ChildCore(name, self.setup, child_q, self.terminate)
+ childcore = ChildCore(name, child_q, self.terminate)
child = multiprocessing.Process(target=childcore.run, name=name)
child.start()
self.logger.debug("Child %s started with PID %s" % (name,
@@ -361,6 +357,7 @@ class Core(BuiltinCore):
self.logger.debug("Started %s children: %s" % (len(self._all_children),
self._all_children))
self.children = cycle(self._all_children)
+ Bcfg2.Server.Cache.add_expire_hook(self.cache_dispatch)
return BuiltinCore._run(self)
def shutdown(self):
@@ -433,16 +430,11 @@ class Core(BuiltinCore):
def set_debug(self, address, debug):
self.rpc_q.set_debug(debug)
self.rpc_q.publish("set_debug", args=[address, debug])
- self.metadata_cache.set_debug(debug)
return BuiltinCore.set_debug(self, address, debug)
- @exposed
- def RecvProbeData(self, address, probedata):
- rv = BuiltinCore.RecvProbeData(self, address, probedata)
- # we don't want the children to actually process probe data,
- # so we don't send the data, just the fact that we got some.
- self.rpc_q.publish("RecvProbeData", args=[address, None])
- return rv
+ def cache_dispatch(self, tags, exact, _):
+ """ Publish cache expiration events to child nodes. """
+ self.rpc_q.publish("expire_cache", args=tags, kwargs=dict(exact=exact))
@exposed
def GetConfig(self, address):
diff --git a/src/lib/Bcfg2/Server/Plugin/__init__.py b/src/lib/Bcfg2/Server/Plugin/__init__.py
index 05bb92223..69fc90b2f 100644
--- a/src/lib/Bcfg2/Server/Plugin/__init__.py
+++ b/src/lib/Bcfg2/Server/Plugin/__init__.py
@@ -11,8 +11,40 @@ documentation it's not necessary to use the submodules. E.g., you can
from Bcfg2.Server.Plugin.base import Plugin
"""
+import Bcfg2.Options
+
# pylint: disable=W0401
from Bcfg2.Server.Plugin.base import *
from Bcfg2.Server.Plugin.interfaces import *
from Bcfg2.Server.Plugin.helpers import *
from Bcfg2.Server.Plugin.exceptions import *
+
+
+class _OptionContainer(object):
+ """ Container for plugin options that are loaded at import time
+ """
+ options = [
+ Bcfg2.Options.Common.default_paranoid,
+ Bcfg2.Options.Option(
+ cf=('mdata', 'owner'), dest="default_owner", default='root',
+ help='Default Path owner'),
+ Bcfg2.Options.Option(
+ cf=('mdata', 'group'), dest="default_group", default='root',
+ help='Default Path group'),
+ Bcfg2.Options.Option(
+ cf=('mdata', 'important'), dest="default_important",
+ default='false', choices=['true', 'false'],
+ help='Default Path priority (importance)'),
+ Bcfg2.Options.Option(
+ cf=('mdata', 'mode'), dest="default_mode", default='644',
+ help='Default mode for Path'),
+ Bcfg2.Options.Option(
+ cf=('mdata', 'secontext'), dest="default_secontext",
+ default='__default__', help='Default SELinux context'),
+ Bcfg2.Options.Option(
+ cf=('mdata', 'sensitive'), dest="default_sensitive",
+ default='false',
+ help='Default Path sensitivity setting')]
+
+
+Bcfg2.Options.get_parser().add_component(_OptionContainer)
diff --git a/src/lib/Bcfg2/Server/Plugin/base.py b/src/lib/Bcfg2/Server/Plugin/base.py
index 03feceb6f..549f7b543 100644
--- a/src/lib/Bcfg2/Server/Plugin/base.py
+++ b/src/lib/Bcfg2/Server/Plugin/base.py
@@ -1,66 +1,11 @@
"""This module provides the base class for Bcfg2 server plugins."""
import os
-import logging
+import Bcfg2.Options
+from Bcfg2.Logger import Debuggable
from Bcfg2.Utils import ClassName
-class Debuggable(object):
- """ Mixin to add a debugging interface to an object and expose it
- via XML-RPC on :class:`Bcfg2.Server.Plugin.base.Plugin` objects """
-
- #: List of names of methods to be exposed as XML-RPC functions
- __rmi__ = ['toggle_debug', 'set_debug']
-
- #: How exposed XML-RPC functions should be dispatched to child
- #: processes.
- __child_rmi__ = __rmi__[:]
-
- def __init__(self, name=None):
- """
- :param name: The name of the logger object to get. If none is
- supplied, the full name of the class (including
- module) will be used.
- :type name: string
-
- .. autoattribute:: __rmi__
- """
- if name is None:
- name = "%s.%s" % (self.__class__.__module__,
- self.__class__.__name__)
- self.debug_flag = False
- self.logger = logging.getLogger(name)
-
- def set_debug(self, debug):
- """ Explicitly enable or disable debugging. This method is exposed
- via XML-RPC.
-
- :returns: bool - The new value of the debug flag
- """
- self.debug_flag = debug
- return debug
-
- def toggle_debug(self):
- """ Turn debugging output on or off. This method is exposed
- via XML-RPC.
-
- :returns: bool - The new value of the debug flag
- """
- return self.set_debug(not self.debug_flag)
-
- def debug_log(self, message, flag=None):
- """ Log a message at the debug level.
-
- :param message: The message to log
- :type message: string
- :param flag: Override the current debug flag with this value
- :type flag: bool
- :returns: None
- """
- if (flag is None and self.debug_flag) or flag:
- self.logger.error(message)
-
-
class Plugin(Debuggable):
""" The base class for all Bcfg2 Server plugins. """
@@ -109,13 +54,10 @@ class Plugin(Debuggable):
#: but not ``__rmi__`` will be ignored.
__child_rmi__ = Debuggable.__child_rmi__
- def __init__(self, core, datastore):
+ def __init__(self, core):
"""
:param core: The Bcfg2.Server.Core initializing the plugin
:type core: Bcfg2.Server.Core
- :param datastore: The path to the Bcfg2 repository on the
- filesystem
- :type datastore: string
:raises: :exc:`OSError` if adding a file monitor failed;
:class:`Bcfg2.Server.Plugin.exceptions.PluginInitError`
on other errors
@@ -125,7 +67,7 @@ class Plugin(Debuggable):
Debuggable.__init__(self, name=self.name)
self.Entries = {}
self.core = core
- self.data = os.path.join(datastore, self.name)
+ self.data = os.path.join(Bcfg2.Options.setup.repository, self.name)
if self.create and not os.path.exists(self.data):
self.logger.warning("%s: %s does not exist, creating" %
(self.name, self.data))
diff --git a/src/lib/Bcfg2/Server/Plugin/helpers.py b/src/lib/Bcfg2/Server/Plugin/helpers.py
index 55dd255cd..5cfc8998c 100644
--- a/src/lib/Bcfg2/Server/Plugin/helpers.py
+++ b/src/lib/Bcfg2/Server/Plugin/helpers.py
@@ -4,55 +4,52 @@ import os
import re
import sys
import copy
-import time
import glob
import logging
+import genshi
import operator
import lxml.etree
import Bcfg2.Server
import Bcfg2.Options
-import Bcfg2.Statistics
+import Bcfg2.Server.FileMonitor
+from Bcfg2.Logger import Debuggable
from Bcfg2.Compat import CmpMixin, wraps
-from Bcfg2.Server.Plugin.base import Debuggable, Plugin
-from Bcfg2.Server.Plugin.interfaces import Generator
+from Bcfg2.Server.Plugin.base import Plugin
+from Bcfg2.Server.Plugin.interfaces import Generator, TemplateDataProvider
from Bcfg2.Server.Plugin.exceptions import SpecificityError, \
PluginExecutionError, PluginInitError
try:
+ import Bcfg2.Server.Encryption
+ HAS_CRYPTO = True
+except ImportError:
+ HAS_CRYPTO = False
+
+try:
import django # pylint: disable=W0611
HAS_DJANGO = True
except ImportError:
HAS_DJANGO = False
-#: A dict containing default metadata for Path entries from bcfg2.conf
-DEFAULT_FILE_METADATA = Bcfg2.Options.OptionParser(
- dict(configfile=Bcfg2.Options.CFILE,
- owner=Bcfg2.Options.MDATA_OWNER,
- group=Bcfg2.Options.MDATA_GROUP,
- mode=Bcfg2.Options.MDATA_MODE,
- secontext=Bcfg2.Options.MDATA_SECONTEXT,
- important=Bcfg2.Options.MDATA_IMPORTANT,
- paranoid=Bcfg2.Options.MDATA_PARANOID,
- sensitive=Bcfg2.Options.MDATA_SENSITIVE))
-DEFAULT_FILE_METADATA.parse([Bcfg2.Options.CFILE.cmd, Bcfg2.Options.CFILE])
-del DEFAULT_FILE_METADATA['args']
-del DEFAULT_FILE_METADATA['configfile']
-
LOGGER = logging.getLogger(__name__)
-#: a compiled regular expression for parsing info and :info files
-INFO_REGEX = re.compile(r'owner:\s*(?P<owner>\S+)|' +
- r'group:\s*(?P<group>\S+)|' +
- r'mode:\s*(?P<mode>\w+)|' +
- r'secontext:\s*(?P<secontext>\S+)|' +
- r'paranoid:\s*(?P<paranoid>\S+)|' +
- r'sensitive:\s*(?P<sensitive>\S+)|' +
- r'encoding:\s*(?P<encoding>\S+)|' +
- r'important:\s*(?P<important>\S+)|' +
- r'mtime:\s*(?P<mtime>\w+)')
+def removecomment(stream):
+ """ A Genshi filter that removes comments from the stream. This
+ function is a generator.
-def bind_info(entry, metadata, infoxml=None, default=DEFAULT_FILE_METADATA):
+ :param stream: The Genshi stream to remove comments from
+ :type stream: genshi.core.Stream
+ :returns: tuple of ``(kind, data, pos)``, as when iterating
+ through a Genshi stream
+ """
+ for kind, data, pos in stream:
+ if kind is genshi.core.COMMENT:
+ continue
+ yield kind, data, pos
+
+
+def bind_info(entry, metadata, infoxml=None, default=None):
""" Bind the file metadata in the given
:class:`Bcfg2.Server.Plugin.helpers.InfoXML` object to the given
entry.
@@ -69,6 +66,8 @@ def bind_info(entry, metadata, infoxml=None, default=DEFAULT_FILE_METADATA):
:returns: None
:raises: :class:`Bcfg2.Server.Plugin.exceptions.PluginExecutionError`
"""
+ if default is None:
+ default = default_path_metadata()
for attr, val in list(default.items()):
entry.set(attr, val)
if infoxml:
@@ -82,81 +81,126 @@ def bind_info(entry, metadata, infoxml=None, default=DEFAULT_FILE_METADATA):
entry.set(attr, val)
-class track_statistics(object): # pylint: disable=C0103
- """ Decorator that tracks execution time for the given
- :class:`Plugin` method with :mod:`Bcfg2.Statistics` for reporting
- via ``bcfg2-admin perf`` """
+def default_path_metadata():
+ """ Get the default Path entry metadata from the config.
- def __init__(self, name=None):
- """
- :param name: The name under which statistics for this function
- will be tracked. By default, the name will be
- the name of the function concatenated with the
- name of the class the function is a member of.
- :type name: string
- """
- # if this is None, it will be set later during __call_
- self.name = name
+ :returns: dict of metadata attributes and their default values
+ """
+ return dict([(k, getattr(Bcfg2.Options.setup, "default_%s" % k))
+ for k in ['owner', 'group', 'mode', 'secontext', 'important',
+ 'paranoid', 'sensitive']])
+
+
+class DefaultTemplateDataProvider(TemplateDataProvider):
+ """ A base
+ :class:`Bcfg2.Server.Plugin.interfaces.TemplateDataProvider` that
+ provides default data for text and XML templates.
+
+ Note that, since Cheetah and Genshi text templates treat the
+ ``path`` variable differently, this is overridden, by
+ :class:`Bcfg2.Server.Plugins.Cfg.CfgCheetahGenerator.DefaultCheetahDataProvider`
+ and
+ :class:`Bcfg2.Server.Plugins.Cfg.CfgGenshiGenerator.DefaultGenshiDataProvider`,
+ respectively. """
+
+ def get_template_data(self, entry, metadata, template):
+ return dict(name=entry.get('realname', entry.get('name')),
+ metadata=metadata,
+ source_path=template,
+ repo=Bcfg2.Options.setup.repository)
+
+ def get_xml_template_data(self, _, metadata):
+ return dict(metadata=metadata,
+ repo=Bcfg2.Options.setup.repository)
+
+_sentinel = object() # pylint: disable=C0103
+
+
+def _get_template_data(func_name, args, default=_sentinel):
+ """ Generic template data getter for both text and XML templates.
+
+ :param func_name: The name of the function to call on
+ :class:`Bcfg2.Server.Plugin.interfaces.TemplateDataProvider`
+ objects to get data for this template type.
+ Should be one of either ``get_template_data``
+ for text templates, or ``get_xml_template_data``
+ for XML templates.
+ :type func_name: string
+ :param args: The arguments to pass to the data retrieval function
+ :type args: list
+ :param default: An object that provides a set of base values. If
+ this is not provided, an instance of
+ :class:`Bcfg2.Server.Plugin.helpers.DefaultTemplateDataProvider`
+ is used. This can be set to None to avoid setting
+ any base values at all.
+ :type default: Bcfg2.Server.Plugin.interfaces.TemplateDataProvider
+ """
+ if default is _sentinel:
+ default = DefaultTemplateDataProvider()
+ providers = Bcfg2.Server.core.plugins_by_type(TemplateDataProvider)
+ if default is not None:
+ providers.insert(0, default)
+
+ rv = dict()
+ source = dict()
+ for prov in providers:
+ pdata = getattr(prov, func_name)(*args)
+ for key, val in pdata.items():
+ if key not in rv:
+ rv[key] = val
+ source[key] = prov
+ else:
+ LOGGER.warning("Duplicate template variable %s provided by "
+ "both %s and %s" % (key, prov, source[key]))
+ return rv
- def __call__(self, func):
- if self.name is None:
- self.name = func.__name__
- @wraps(func)
- def inner(obj, *args, **kwargs):
- """ The decorated function """
- name = "%s:%s" % (obj.__class__.__name__, self.name)
+def get_template_data(entry, metadata, template, default=_sentinel):
+ """ Get all template variables for a text (i.e., Cfg) template """
+ return _get_template_data("get_template_data", [entry, metadata, template],
+ default=default)
- start = time.time()
- try:
- return func(obj, *args, **kwargs)
- finally:
- Bcfg2.Statistics.stats.add_value(name, time.time() - start)
- return inner
+def get_xml_template_data(structfile, metadata, default=_sentinel):
+ """ Get all template variables for an XML template """
+ return _get_template_data("get_xml_template_data", [structfile, metadata],
+ default=default)
class DatabaseBacked(Plugin):
""" Provides capabilities for a plugin to read and write to a
- database.
+ database. The plugin must add an option to flag database use with
+ something like:
+
+ options = Bcfg2.Server.Plugin.Plugin.options + [
+ Bcfg2.Options.BooleanOption(
+ cf=('metadata', 'use_database'), dest="metadata_db",
+ help="Use database capabilities of the Metadata plugin")]
+
+ This must be done manually due to various limitations in Python.
.. private-include: _use_db
.. private-include: _must_lock
"""
- #: The option to look up in :attr:`section` to determine whether or
- #: not to use the database capabilities of this plugin. The option
- #: is retrieved with
- #: :py:func:`ConfigParser.SafeConfigParser.getboolean`, and so must
- #: conform to the possible values that function can handle.
- option = "use_database"
-
- def __init__(self, core, datastore):
- Plugin.__init__(self, core, datastore)
- use_db = self.core.setup.cfp.getboolean(self.section,
- self.option,
- default=False)
+ def __init__(self, core):
+ Plugin.__init__(self, core)
+ use_db = getattr(Bcfg2.Options.setup, "%s_db" % self.name.lower(),
+ False)
if use_db and not HAS_DJANGO:
- raise PluginInitError("%s.%s is True but Django not found" %
- (self.section, self.option))
+ raise PluginInitError("%s is configured to use the database but "
+ "Django libraries are not found" % self.name)
elif use_db and not self.core.database_available:
- raise PluginInitError("%s.%s is True but the database is "
- "unavailable due to prior errors" %
- (self.section, self.option))
-
- def _section(self):
- """ The section to look in for :attr:`DatabaseBacked.option`
- """
- return self.name.lower()
- section = property(_section)
+ raise PluginInitError("%s is configured to use the database but "
+ "the database is unavailable due to prior "
+ "errors" % self.name)
@property
def _use_db(self):
""" Whether or not this plugin is configured to use the
database. """
- use_db = self.core.setup.cfp.getboolean(self.section,
- self.option,
- default=False)
+ use_db = getattr(Bcfg2.Options.setup, "%s_db" % self.name.lower(),
+ False)
if use_db and HAS_DJANGO and self.core.database_available:
return True
else:
@@ -167,11 +211,7 @@ class DatabaseBacked(Plugin):
""" Whether or not the backend database must acquire a thread
lock before writing, because it does not allow multiple
threads to write."""
- engine = \
- self.core.setup.cfp.get(Bcfg2.Options.DB_ENGINE.cf[0],
- Bcfg2.Options.DB_ENGINE.cf[1],
- default=Bcfg2.Options.DB_ENGINE.default)
- return engine == 'sqlite3'
+ return self._use_db and Bcfg2.Options.setup.db_engine == 'sqlite3'
@staticmethod
def get_db_lock(func):
@@ -209,13 +249,10 @@ class FileBacked(Debuggable):
principally meant to be used as a part of
:class:`Bcfg2.Server.Plugin.helpers.DirectoryBacked`. """
- def __init__(self, name, fam=None):
+ def __init__(self, name):
"""
:param name: The full path to the file to cache and monitor
:type name: string
- :param fam: The FAM object used to receive notifications of
- changes
- :type fam: Bcfg2.Server.FileMonitor.FileMonitor
"""
Debuggable.__init__(self)
@@ -226,10 +263,10 @@ class FileBacked(Debuggable):
self.name = name
#: The FAM object used to receive notifications of changes
- self.fam = fam
+ self.fam = Bcfg2.Server.FileMonitor.get_fam()
def HandleEvent(self, event=None):
- """ HandleEvent is called whenever the FAM registers an event.
+ """HandleEvent is called whenever the FAM registers an event.
:param event: The event object
:type event: Bcfg2.Server.FileMonitor.Event
@@ -239,13 +276,11 @@ class FileBacked(Debuggable):
return
try:
self.data = open(self.name).read()
- self.Index()
except IOError:
err = sys.exc_info()[1]
self.logger.error("Failed to read file %s: %s" % (self.name, err))
- except:
- err = sys.exc_info()[1]
- self.logger.error("Failed to parse file %s: %s" % (self.name, err))
+
+ self.Index()
def Index(self):
""" Index() is called by :func:`HandleEvent` every time the
@@ -279,14 +314,11 @@ class DirectoryBacked(Debuggable):
#: :attr:`patterns` or ``ignore``, then a warning will be produced.
ignore = None
- def __init__(self, data, fam):
+ def __init__(self, data):
"""
:param data: The path to the data directory that will be
monitored
:type data: string
- :param fam: The FAM object used to receive notifications of
- changes
- :type fam: Bcfg2.Server.FileMonitor.FileMonitor
.. -----
.. autoattribute:: __child__
@@ -294,7 +326,7 @@ class DirectoryBacked(Debuggable):
Debuggable.__init__(self)
self.data = os.path.normpath(data)
- self.fam = fam
+ self.fam = Bcfg2.Server.FileMonitor.get_fam()
#: self.entries contains information about the files monitored
#: by this object. The keys of the dict are the relative
@@ -366,8 +398,7 @@ class DirectoryBacked(Debuggable):
:returns: None
"""
self.entries[relative] = self.__child__(os.path.join(self.data,
- relative),
- self.fam)
+ relative))
self.entries[relative].HandleEvent(event)
def HandleEvent(self, event): # pylint: disable=R0912
@@ -492,13 +523,10 @@ class XMLFileBacked(FileBacked):
#: to the constructor.
create = None
- def __init__(self, filename, fam=None, should_monitor=False, create=None):
+ def __init__(self, filename, should_monitor=False, create=None):
"""
:param filename: The full path to the file to cache and monitor
:type filename: string
- :param fam: The FAM object used to receive notifications of
- changes
- :type fam: Bcfg2.Server.FileMonitor.FileMonitor
:param should_monitor: Whether or not to monitor this file for
changes. It may be useful to disable
monitoring when, for instance, the file
@@ -518,7 +546,7 @@ class XMLFileBacked(FileBacked):
.. -----
.. autoattribute:: __identifier__
"""
- FileBacked.__init__(self, filename, fam=fam)
+ FileBacked.__init__(self, filename)
#: The raw XML data contained in the file as an
#: :class:`lxml.etree.ElementTree` object, with XIncludes
@@ -542,7 +570,7 @@ class XMLFileBacked(FileBacked):
self.extra_monitors = []
if ((create is not None or self.create not in [None, False]) and
- not os.path.exists(self.name)):
+ not os.path.exists(self.name)):
toptag = create or self.create
self.logger.warning("%s does not exist, creating" % self.name)
if hasattr(toptag, "getroottree"):
@@ -554,7 +582,7 @@ class XMLFileBacked(FileBacked):
#: Whether or not to monitor this file for changes.
self.should_monitor = should_monitor
- if fam and should_monitor:
+ if should_monitor:
self.fam.AddMonitor(filename, self)
def _follow_xincludes(self, fname=None, xdata=None):
@@ -619,18 +647,14 @@ class XMLFileBacked(FileBacked):
Index.__doc__ = FileBacked.Index.__doc__
def add_monitor(self, fpath):
- """ Add a FAM monitor to a file that has been XIncluded. This
- is only done if the constructor got a ``fam`` object,
- regardless of whether ``should_monitor`` is set to True (i.e.,
- whether or not the base file is monitored).
+ """ Add a FAM monitor to a file that has been XIncluded.
:param fpath: The full path to the file to monitor
:type fpath: string
:returns: None
"""
self.extra_monitors.append(fpath)
- if self.fam:
- self.fam.AddMonitor(fpath, self)
+ self.fam.AddMonitor(fpath, self)
def __iter__(self):
return iter(self.entries)
@@ -642,44 +666,171 @@ class XMLFileBacked(FileBacked):
class StructFile(XMLFileBacked):
""" StructFiles are XML files that contain a set of structure file
formatting logic for handling ``<Group>`` and ``<Client>``
- tags. """
+ tags.
+
+ .. -----
+ .. autoattribute:: __identifier__
+ .. automethod:: _include_element
+ """
#: If ``__identifier__`` is not None, then it must be the name of
#: an XML attribute that will be required on the top-level tag of
#: the file being cached
__identifier__ = None
- def _include_element(self, item, metadata):
- """ determine if an XML element matches the metadata """
+ #: Whether or not to enable encryption
+ encryption = True
+
+ #: Callbacks used to determine if children of items with the given
+ #: tags should be included in the return value of
+ #: :func:`Bcfg2.Server.Plugin.helpers.StructFile.Match` and
+ #: :func:`Bcfg2.Server.Plugin.helpers.StructFile.XMLMatch`. Each
+ #: callback is passed the same arguments as
+ #: :func:`Bcfg2.Server.Plugin.helpers.StructFile._include_element`.
+ #: It should return True if children of the element should be
+ #: included in the match, False otherwise. The callback does
+ #: *not* need to consider negation; that will be handled in
+ #: :func:`Bcfg2.Server.Plugin.helpers.StructFile._include_element`
+ _include_tests = \
+ dict(Group=lambda el, md, *args: el.get('name') in md.groups,
+ Client=lambda el, md, *args: el.get('name') == md.hostname)
+
+ def __init__(self, filename, should_monitor=False, create=None):
+ XMLFileBacked.__init__(self, filename, should_monitor=should_monitor,
+ create=create)
+ self.template = None
+
+ def Index(self):
+ XMLFileBacked.Index(self)
+ if (self.name.endswith('.genshi') or
+ ('py' in self.xdata.nsmap and
+ self.xdata.nsmap['py'] == 'http://genshi.edgewall.org/')):
+ try:
+ loader = genshi.template.TemplateLoader()
+ self.template = \
+ loader.load(self.name,
+ cls=genshi.template.MarkupTemplate,
+ encoding=Bcfg2.Options.setup.encoding)
+ except LookupError:
+ err = sys.exc_info()[1]
+ self.logger.error('Genshi lookup error in %s: %s' % (self.name,
+ err))
+ except genshi.template.TemplateError:
+ err = sys.exc_info()[1]
+ self.logger.error('Genshi template error in %s: %s' %
+ (self.name, err))
+ except genshi.input.ParseError:
+ err = sys.exc_info()[1]
+ self.logger.error('Genshi parse error in %s: %s' % (self.name,
+ err))
+
+ if HAS_CRYPTO and self.encryption:
+ for el in self.xdata.xpath("//*[@encrypted]"):
+ try:
+ el.text = self._decrypt(el).encode('ascii',
+ 'xmlcharrefreplace')
+ except UnicodeDecodeError:
+ self.logger.info("%s: Decrypted %s to gibberish, skipping"
+ % (self.name, el.tag))
+ except Bcfg2.Server.Encryption.EVPError:
+ lax_decrypt = self.xdata.get(
+ "lax_decryption",
+ str(Bcfg2.Options.setup.lax_decryption)).lower() == \
+ "true"
+ msg = "Failed to decrypt %s element in %s" % (el.tag,
+ self.name)
+ if lax_decrypt:
+ self.logger.debug(msg)
+ else:
+ raise PluginExecutionError(msg)
+ Index.__doc__ = XMLFileBacked.Index.__doc__
+
+ def _decrypt(self, element):
+ """ Decrypt a single encrypted properties file element """
+ if not element.text or not element.text.strip():
+ return
+ passes = Bcfg2.Options.setup.passphrases
+ try:
+ passphrase = passes[element.get("encrypted")]
+ return Bcfg2.Server.Encryption.ssl_decrypt(element.text,
+ passphrase)
+ except KeyError:
+ raise Bcfg2.Server.Encryption.EVPError("No passphrase named '%s'" %
+ element.get("encrypted"))
+ raise Bcfg2.Server.Encryption.EVPError("Failed to decrypt")
+
+ def _include_element(self, item, metadata, *args):
+ """ Determine if an XML element matches the other arguments.
+
+ The first argument is always the XML element to match, and the
+ second will always be a single
+ :class:`Bcfg2.Server.Plugins.Metadata.ClientMetadata` object
+ representing the metadata to match against. Subsequent
+ arguments are as given to
+ :func:`Bcfg2.Server.Plugin.helpers.StructFile.Match` or
+ :func:`Bcfg2.Server.Plugin.helpers.StructFile.XMLMatch`. In
+ the base StructFile implementation, there are no additional
+ arguments; in classes that inherit from StructFile, see the
+ :func:`Match` and :func:`XMLMatch` method signatures."""
if isinstance(item, lxml.etree._Comment): # pylint: disable=W0212
return False
- negate = item.get('negate', 'false').lower() == 'true'
- if item.tag == 'Group':
- return negate == (item.get('name') not in metadata.groups)
- elif item.tag == 'Client':
- return negate == (item.get('name') != metadata.hostname)
+ if item.tag in self._include_tests:
+ negate = item.get('negate', 'false').lower() == 'true'
+ return negate != self._include_tests[item.tag](item, metadata,
+ *args)
else:
return True
- def _match(self, item, metadata):
- """ recursive helper for Match() """
- if self._include_element(item, metadata):
- if item.tag == 'Group' or item.tag == 'Client':
+ def _render(self, metadata):
+ """ Render the template for the given client metadata
+
+ :param metadata: Client metadata to match against.
+ :type metadata: Bcfg2.Server.Plugins.Metadata.ClientMetadata
+ :returns: lxml.etree._Element object representing the rendered
+ XML data
+ """
+ stream = self.template.generate(
+ **get_xml_template_data(self, metadata)).filter(removecomment)
+ return lxml.etree.XML(stream.render('xml',
+ strip_whitespace=False).encode(),
+ parser=Bcfg2.Server.XMLParser)
+
+ def _match(self, item, metadata, *args):
+ """ recursive helper for
+ :func:`Bcfg2.Server.Plugin.helpers.StructFile.Match` """
+ if self._include_element(item, metadata, *args):
+ if item.tag in self._include_tests.keys():
rv = []
- if self._include_element(item, metadata):
+ if self._include_element(item, metadata, *args):
for child in item.iterchildren():
- rv.extend(self._match(child, metadata))
+ rv.extend(self._match(child, metadata, *args))
return rv
else:
rv = copy.deepcopy(item)
for child in rv.iterchildren():
rv.remove(child)
for child in item.iterchildren():
- rv.extend(self._match(child, metadata))
+ rv.extend(self._match(child, metadata, *args))
return [rv]
else:
return []
+ def _do_match(self, metadata, *args):
+ """ Helper for
+ :func:`Bcfg2.Server.Plugin.helpers.StructFile.Match` that lets
+ a subclass of StructFile easily redefine the public Match()
+ interface to accept a different number of arguments. This
+ provides a sane prototype for the Match() function while
+ keeping the internals consistent. """
+ rv = []
+ if self.template is None:
+ entries = self.entries
+ else:
+ entries = self._render(metadata).getchildren()
+ for child in entries:
+ rv.extend(self._match(child, metadata, *args))
+ return rv
+
def Match(self, metadata):
""" Return matching fragments of the data in this file. A tag
is considered to match if all ``<Group>`` and ``<Client>``
@@ -690,22 +841,22 @@ class StructFile(XMLFileBacked):
Match() (and *not* their descendents) should be considered to
match the metadata.
+ Match() returns matching fragments in document order.
+
:param metadata: Client metadata to match against.
:type metadata: Bcfg2.Server.Plugins.Metadata.ClientMetadata
:returns: list of lxml.etree._Element objects """
- rv = []
- for child in self.entries:
- rv.extend(self._match(child, metadata))
- return rv
+ return self._do_match(metadata)
- def _xml_match(self, item, metadata):
- """ recursive helper for XMLMatch """
- if self._include_element(item, metadata):
- if item.tag == 'Group' or item.tag == 'Client':
+ def _xml_match(self, item, metadata, *args):
+ """ recursive helper for
+ :func:`Bcfg2.Server.Plugin.helpers.StructFile.XMLMatch` """
+ if self._include_element(item, metadata, *args):
+ if item.tag in self._include_tests.keys():
for child in item.iterchildren():
item.remove(child)
item.getparent().append(child)
- self._xml_match(child, metadata)
+ self._xml_match(child, metadata, *args)
if item.text:
if item.getparent().text is None:
item.getparent().text = item.text
@@ -714,10 +865,25 @@ class StructFile(XMLFileBacked):
item.getparent().remove(item)
else:
for child in item.iterchildren():
- self._xml_match(child, metadata)
+ self._xml_match(child, metadata, *args)
else:
item.getparent().remove(item)
+ def _do_xmlmatch(self, metadata, *args):
+ """ Helper for
+ :func:`Bcfg2.Server.Plugin.helpers.StructFile.XMLMatch` that lets
+        a subclass of StructFile easily redefine the public XMLMatch()
+        interface to accept a different number of arguments.  This
+        provides a sane prototype for the XMLMatch() function while
+ keeping the internals consistent. """
+ if self.template is None:
+ rv = copy.deepcopy(self.xdata)
+ else:
+ rv = self._render(metadata)
+ for child in rv.iterchildren():
+ self._xml_match(child, metadata, *args)
+ return rv
+
def XMLMatch(self, metadata):
""" Return a rebuilt XML document that only contains the
matching portions of the original file. A tag is considered
@@ -727,169 +893,58 @@ class StructFile(XMLFileBacked):
All ``<Group>`` and ``<Client>`` tags will have been stripped
out.
+ The new document produced by XMLMatch() is not necessarily in
+ the same order as the original document.
+
:param metadata: Client metadata to match against.
:type metadata: Bcfg2.Server.Plugins.Metadata.ClientMetadata
:returns: lxml.etree._Element """
- rv = copy.deepcopy(self.xdata)
- for child in rv.iterchildren():
- self._xml_match(child, metadata)
- return rv
-
+ return self._do_xmlmatch(metadata)
-class INode(object):
- """ INodes provide lists of things available at a particular group
- intersection. INodes are deprecated; new plugins should use
- :class:`Bcfg2.Server.Plugin.helpers.StructFile` instead. """
-
- raw = dict(
- Client="lambda m, e:'%(name)s' == m.hostname and predicate(m, e)",
- Group="lambda m, e:'%(name)s' in m.groups and predicate(m, e)")
- nraw = dict(
- Client="lambda m, e:'%(name)s' != m.hostname and predicate(m, e)",
- Group="lambda m, e:'%(name)s' not in m.groups and predicate(m, e)")
- containers = ['Group', 'Client']
- ignore = []
-
- def __init__(self, data, idict, parent=None):
- self.data = data
- self.contents = {}
- if parent is None:
- self.predicate = lambda m, e: True
- else:
- predicate = parent.predicate
- if data.get('negate', 'false').lower() == 'true':
- psrc = self.nraw
- else:
- psrc = self.raw
- if data.tag in list(psrc.keys()):
- self.predicate = eval(psrc[data.tag] %
- {'name': data.get('name')},
- {'predicate': predicate})
- else:
- raise PluginExecutionError("Unknown tag: %s" % data.tag)
- self.children = []
- self._load_children(data, idict)
- def _load_children(self, data, idict):
- """ load children """
- for item in data.getchildren():
- if item.tag in self.ignore:
- continue
- elif item.tag in self.containers:
- self.children.append(self.__class__(item, idict, self))
- else:
- try:
- self.contents[item.tag][item.get('name')] = \
- dict(item.attrib)
- except KeyError:
- self.contents[item.tag] = \
- {item.get('name'): dict(item.attrib)}
- if item.text:
- self.contents[item.tag][item.get('name')]['__text__'] = \
- item.text
- if item.getchildren():
- self.contents[item.tag][item.get('name')]['__children__'] \
- = item.getchildren()
- try:
- idict[item.tag].append(item.get('name'))
- except KeyError:
- idict[item.tag] = [item.get('name')]
-
- def Match(self, metadata, data, entry=lxml.etree.Element("None")):
- """Return a dictionary of package mappings."""
- if self.predicate(metadata, entry):
- for key in self.contents:
- try:
- data[key].update(self.contents[key])
- except: # pylint: disable=W0702
- data[key] = {}
- data[key].update(self.contents[key])
- for child in self.children:
- child.Match(metadata, data, entry=entry)
-
-
-class InfoNode (INode):
- """ :class:`Bcfg2.Server.Plugin.helpers.INode` implementation that
- includes ``<Path>`` tags, suitable for use with :file:`info.xml`
- files."""
-
- raw = dict(
- Client="lambda m, e: '%(name)s' == m.hostname and predicate(m, e)",
- Group="lambda m, e: '%(name)s' in m.groups and predicate(m, e)",
- Path="lambda m, e: ('%(name)s' == e.get('name') or " +
- "'%(name)s' == e.get('realname')) and " +
- "predicate(m, e)")
- nraw = dict(
- Client="lambda m, e: '%(name)s' != m.hostname and predicate(m, e)",
- Group="lambda m, e: '%(name)s' not in m.groups and predicate(m, e)",
- Path="lambda m, e: '%(name)s' != e.get('name') and " +
- "'%(name)s' != e.get('realname') and " +
- "predicate(m, e)")
- containers = ['Group', 'Client', 'Path']
-
-
-class XMLSrc(XMLFileBacked):
- """ XMLSrc files contain a
- :class:`Bcfg2.Server.Plugin.helpers.INode` hierarchy that returns
- matching entries. XMLSrc objects are deprecated and
- :class:`Bcfg2.Server.Plugin.helpers.StructFile` should be
- preferred where possible."""
- __node__ = INode
- __cacheobj__ = dict
- __priority_required__ = True
-
- def __init__(self, filename, fam=None, should_monitor=False, create=None):
- XMLFileBacked.__init__(self, filename, fam, should_monitor, create)
- self.items = {}
- self.cache = None
- self.pnode = None
- self.priority = -1
+class InfoXML(StructFile):
+ """ InfoXML files contain Group, Client, and Path tags to set the
+ metadata (permissions, owner, etc.) of files. """
+ encryption = False
- def HandleEvent(self, _=None):
- """Read file upon update."""
- self.items = {}
- try:
- xdata = lxml.etree.parse(self.name,
- parser=Bcfg2.Server.XMLParser).getroot()
- except lxml.etree.XMLSyntaxError:
- msg = "Failed to parse file %s: %s" % (self.name,
- sys.exc_info()[1])
- self.logger.error(msg)
- raise PluginExecutionError(msg)
- self.pnode = self.__node__(xdata, self.items)
- self.cache = None
- try:
- self.priority = int(xdata.get('priority'))
- except (ValueError, TypeError):
- if self.__priority_required__:
- msg = "Got bogus priority %s for file %s" % \
- (xdata.get('priority'), self.name)
- self.logger.error(msg)
- raise PluginExecutionError(msg)
+ _include_tests = copy.copy(StructFile._include_tests)
+ _include_tests['Path'] = lambda el, md, entry, *args: \
+ entry.get('realname', entry.get('name')) == el.get("name")
- def Cache(self, metadata):
- """Build a package dict for a given host."""
- if self.cache is None or self.cache[0] != metadata:
- cache = (metadata, self.__cacheobj__())
- if self.pnode is None:
- self.logger.error("Cache method called early for %s; "
- "forcing data load" % self.name)
- self.HandleEvent()
- return
- self.pnode.Match(metadata, cache[1])
- self.cache = cache
+ def Match(self, metadata, entry): # pylint: disable=W0221
+ """ Implementation of
+ :func:`Bcfg2.Server.Plugin.helpers.StructFile.Match` that
+ considers Path tags to allow ``info.xml`` files to set
+ different file metadata for different file paths. """
+ return self._do_match(metadata, entry)
- def __str__(self):
- return str(self.items)
+ def XMLMatch(self, metadata, entry): # pylint: disable=W0221
+ """ Implementation of
+ :func:`Bcfg2.Server.Plugin.helpers.StructFile.XMLMatch` that
+ considers Path tags to allow ``info.xml`` files to set
+ different file metadata for different file paths. """
+ return self._do_xmlmatch(metadata, entry)
+ def BindEntry(self, entry, metadata):
+ """ Bind the matching file metadata for this client and entry
+ to the entry.
-class InfoXML(XMLSrc):
- """ InfoXML files contain a
- :class:`Bcfg2.Server.Plugin.helpers.InfoNode` hierarchy that
- returns matching entries, suitable for use with :file:`info.xml`
- files."""
- __node__ = InfoNode
- __priority_required__ = False
+ :param entry: The abstract entry to bind the info to. This
+ will be modified in place
+ :type entry: lxml.etree._Element
+ :param metadata: The client metadata to get info for
+ :type metadata: Bcfg2.Server.Plugins.Metadata.ClientMetadata
+ :returns: None
+ """
+ fileinfo = self.Match(metadata, entry)
+ if len(fileinfo) == 0:
+ raise PluginExecutionError("No metadata found in %s for %s" %
+ (self.name, entry.get('name')))
+ elif len(fileinfo) > 1:
+ self.logger.warning("Multiple file metadata found in %s for %s" %
+ (self.name, entry.get('name')))
+ for attr, val in fileinfo[0].attrib.items():
+ entry.set(attr, val)
class XMLDirectoryBacked(DirectoryBacked):
@@ -905,6 +960,25 @@ class XMLDirectoryBacked(DirectoryBacked):
__child__ = XMLFileBacked
+class PriorityStructFile(StructFile):
+ """ A StructFile where each file has a priority, given as a
+ top-level XML attribute. """
+
+ def __init__(self, filename, should_monitor=False):
+ StructFile.__init__(self, filename, should_monitor=should_monitor)
+ self.priority = -1
+ __init__.__doc__ = StructFile.__init__.__doc__
+
+ def Index(self):
+ StructFile.Index(self)
+ try:
+ self.priority = int(self.xdata.get('priority'))
+ except (ValueError, TypeError):
+ raise PluginExecutionError("Got bogus priority %s for file %s" %
+ (self.xdata.get('priority'), self.name))
+ Index.__doc__ = StructFile.Index.__doc__
+
+
class PrioDir(Plugin, Generator, XMLDirectoryBacked):
""" PrioDir handles a directory of XML files where each file has a
set priority.
@@ -915,42 +989,43 @@ class PrioDir(Plugin, Generator, XMLDirectoryBacked):
#: The type of child objects to create for files contained within
#: the directory that is tracked. Default is
- #: :class:`Bcfg2.Server.Plugin.helpers.XMLSrc`
- __child__ = XMLSrc
+ #: :class:`Bcfg2.Server.Plugin.helpers.PriorityStructFile`
+ __child__ = PriorityStructFile
- def __init__(self, core, datastore):
- Plugin.__init__(self, core, datastore)
+ def __init__(self, core):
+ Plugin.__init__(self, core)
Generator.__init__(self)
- XMLDirectoryBacked.__init__(self, self.data, self.core.fam)
+ XMLDirectoryBacked.__init__(self, self.data)
__init__.__doc__ = Plugin.__init__.__doc__
def HandleEvent(self, event):
XMLDirectoryBacked.HandleEvent(self, event)
self.Entries = {}
- for src in list(self.entries.values()):
- for itype, children in list(src.items.items()):
- for child in children:
- try:
- self.Entries[itype][child] = self.BindEntry
- except KeyError:
- self.Entries[itype] = {child: self.BindEntry}
+ for src in self.entries.values():
+ for child in src.xdata.iterchildren():
+ if child.tag in ['Group', 'Client']:
+ continue
+ if child.tag not in self.Entries:
+ self.Entries[child.tag] = dict()
+ self.Entries[child.tag][child.get("name")] = self.BindEntry
HandleEvent.__doc__ = XMLDirectoryBacked.HandleEvent.__doc__
- def _matches(self, entry, metadata, rules): # pylint: disable=W0613
- """ Whether or not a given entry has a matching entry in this
- PrioDir. By default this does strict matching (i.e., the
- entry name is in ``rules.keys()``), but this can be overridden
- to provide regex matching, etc.
+ def _matches(self, entry, metadata, candidate): # pylint: disable=W0613
+ """ Whether or not a given candidate matches the abstract
+ entry given. By default this does strict matching (i.e., the
+ entry name matches the candidate name), but this can be
+ overridden to provide regex matching, etc.
:param entry: The entry to find a match for
:type entry: lxml.etree._Element
:param metadata: The metadata to get attributes for
:type metadata: Bcfg2.Server.Plugins.Metadata.ClientMetadata
- :rules: A dict of rules to look in for a matching rule
- :type rules: dict
+ :candidate: A candidate concrete entry to match with
+ :type candidate: lxml.etree._Element
:returns: bool
"""
- return entry.get('name') in rules
+ return (entry.tag == candidate.tag and
+ entry.get('name') == candidate.get('name'))
def BindEntry(self, entry, metadata):
""" Bind the attributes that apply to an entry to it. The
@@ -962,71 +1037,40 @@ class PrioDir(Plugin, Generator, XMLDirectoryBacked):
:type metadata: Bcfg2.Server.Plugins.Metadata.ClientMetadata
:returns: None
"""
- attrs = self.get_attrs(entry, metadata)
- for key, val in list(attrs.items()):
- entry.attrib[key] = val
-
- def get_attrs(self, entry, metadata):
- """ Get a list of attributes to add to the entry during the
- bind. This is a complex method, in that it both modifies the
- entry, and returns attributes that need to be added to the
- entry. That seems sub-optimal, and should probably be changed
- at some point. Namely:
-
- * The return value includes all XML attributes that need to be
- added to the entry, but it does not add them.
- * If text contents or child tags need to be added to the
- entry, they are added to the entry in place.
-
- :param entry: The entry to add attributes to.
- :type entry: lxml.etree._Element
- :param metadata: The metadata to get attributes for
- :type metadata: Bcfg2.Server.Plugins.Metadata.ClientMetadata
- :returns: dict of <attr name>:<attr value>
- :raises: :class:`Bcfg2.Server.Plugin.exceptions.PluginExecutionError`
- """
+ matching = []
for src in self.entries.values():
- src.Cache(metadata)
-
- matching = [src for src in list(self.entries.values())
- if (src.cache and
- entry.tag in src.cache[1] and
- self._matches(entry, metadata,
- src.cache[1][entry.tag]))]
+ for candidate in src.XMLMatch(metadata).xpath("//%s" % entry.tag):
+ if self._matches(entry, metadata, candidate):
+ matching.append((src, candidate))
if len(matching) == 0:
raise PluginExecutionError("No matching source for entry when "
- "retrieving attributes for %s(%s)" %
- (entry.tag, entry.attrib.get('name')))
+ "retrieving attributes for %s:%s" %
+ (entry.tag, entry.get('name')))
elif len(matching) == 1:
- index = 0
+ data = matching[0][1]
else:
- prio = [int(src.priority) for src in matching]
- if prio.count(max(prio)) > 1:
- msg = "Found conflicting sources with same priority for " + \
- "%s:%s for %s" % (entry.tag, entry.get("name"),
- metadata.hostname)
+ prio = [int(m[0].priority) for m in matching]
+ priority = max(prio)
+ if prio.count(priority) > 1:
+ msg = "Found conflicting sources with same priority (%s) " \
+ "for %s:%s for %s" % (priority, entry.tag,
+ entry.get("name"), metadata.hostname)
self.logger.error(msg)
- self.logger.error([item.name for item in matching])
- self.logger.error("Priority was %s" % max(prio))
+ self.logger.error([m[0].name for m in matching])
raise PluginExecutionError(msg)
- index = prio.index(max(prio))
- for rname in list(matching[index].cache[1][entry.tag].keys()):
- if self._matches(entry, metadata, [rname]):
- data = matching[index].cache[1][entry.tag][rname]
- break
- else:
- # Fall back on __getitem__. Required if override used
- data = matching[index].cache[1][entry.tag][entry.get('name')]
- if '__text__' in data:
- entry.text = data['__text__']
- if '__children__' in data:
- for item in data['__children__']:
- entry.append(copy.copy(item))
+ for src, candidate in matching:
+ if int(src.priority) == priority:
+ data = candidate
+ break
+
+ entry.text = data.text
+ for item in data.getchildren():
+ entry.append(copy.copy(item))
- return dict([(key, data[key])
- for key in list(data.keys())
- if not key.startswith('__')])
+ for key, val in list(data.attrib.items()):
+ if key not in entry.attrib:
+ entry.attrib[key] = val
class Specificity(CmpMixin):
@@ -1115,11 +1159,11 @@ class Specificity(CmpMixin):
return "".join(rv)
-class SpecificData(object):
+class SpecificData(Debuggable):
""" A file that is specific to certain clients, groups, or all
clients. """
- def __init__(self, name, specific, encoding): # pylint: disable=W0613
+ def __init__(self, name, specific): # pylint: disable=W0613
"""
:param name: The full path to the file
:type name: string
@@ -1128,9 +1172,8 @@ class SpecificData(object):
object describing what clients this file
applies to.
:type specific: Bcfg2.Server.Plugin.helpers.Specificity
- :param encoding: The encoding to use for data in this file
- :type encoding: string
"""
+ Debuggable.__init__(self)
self.name = name
self.specific = specific
self.data = None
@@ -1151,9 +1194,8 @@ class SpecificData(object):
self.data = open(self.name).read()
except UnicodeDecodeError:
self.data = open(self.name, mode='rb').read()
- except: # pylint: disable=W0201
- LOGGER.error("Failed to read file %s: %s" % (self.name,
- sys.exc_info()[1]))
+ except IOError:
+ self.logger.error("Failed to read file %s" % self.name)
class EntrySet(Debuggable):
@@ -1177,7 +1219,7 @@ class EntrySet(Debuggable):
#: considered a plain string and filenames must match exactly.
basename_is_regex = False
- def __init__(self, basename, path, entry_type, encoding):
+ def __init__(self, basename, path, entry_type):
"""
:param basename: The filename or regular expression that files
in this EntrySet must match. See
@@ -1192,12 +1234,10 @@ class EntrySet(Debuggable):
be an object factory or similar callable.
See below for the expected signature.
:type entry_type: callable
- :param encoding: The encoding of all files in this entry set.
- :type encoding: string
The ``entry_type`` callable must have the following signature::
- entry_type(filepath, specificity, encoding)
+ entry_type(filepath, specificity)
Where the parameters are:
@@ -1208,8 +1248,6 @@ class EntrySet(Debuggable):
object describing what clients this file
applies to.
:type specific: Bcfg2.Server.Plugin.helpers.Specificity
- :param encoding: The encoding to use for data in this file
- :type encoding: string
Additionally, the object returned by ``entry_type`` must have
a ``specific`` attribute that is sortable (e.g., a
@@ -1222,9 +1260,8 @@ class EntrySet(Debuggable):
self.path = path
self.entry_type = entry_type
self.entries = {}
- self.metadata = DEFAULT_FILE_METADATA.copy()
+ self.metadata = default_path_metadata()
self.infoxml = None
- self.encoding = encoding
if self.basename_is_regex:
base_pat = basename
@@ -1241,6 +1278,12 @@ class EntrySet(Debuggable):
#: be overridden on a per-entry basis in :func:`entry_init`.
self.specific = re.compile(pattern)
+ def set_debug(self, debug):
+ rv = Debuggable.set_debug(self, debug)
+ for entry in self.entries.values():
+ entry.set_debug(debug)
+ return rv
+
def get_matching(self, metadata):
""" Get a list of all entries that apply to the given client.
This gets all matching entries; for example, there could be an
@@ -1299,7 +1342,7 @@ class EntrySet(Debuggable):
"""
action = event.code2str()
- if event.filename in ['info', 'info.xml', ':info']:
+ if event.filename == 'info.xml':
if action in ['exists', 'created', 'changed']:
self.update_metadata(event)
elif action == 'deleted':
@@ -1358,8 +1401,7 @@ class EntrySet(Debuggable):
self.logger.error("Could not process filename %s; ignoring"
% fpath)
return
- self.entries[event.filename] = entry_type(fpath, spec,
- self.encoding)
+ self.entries[event.filename] = entry_type(fpath, spec)
self.entries[event.filename].handle_event(event)
def specificity_from_filename(self, fname, specific=None):
@@ -1404,8 +1446,8 @@ class EntrySet(Debuggable):
return Specificity(**kwargs)
def update_metadata(self, event):
- """ Process changes to or creation of info, :info, and
- info.xml files for the EntrySet.
+ """ Process changes to or creation of info.xml files for the
+ EntrySet.
:param event: An event that applies to an info handled by this
EntrySet
@@ -1417,24 +1459,9 @@ class EntrySet(Debuggable):
if not self.infoxml:
self.infoxml = InfoXML(fpath)
self.infoxml.HandleEvent(event)
- elif event.filename in [':info', 'info']:
- for line in open(fpath).readlines():
- match = INFO_REGEX.match(line)
- if not match:
- self.logger.warning("Failed to match line in %s: %s" %
- (fpath, line))
- continue
- else:
- mgd = match.groupdict()
- for key, value in list(mgd.items()):
- if value:
- self.metadata[key] = value
- if len(self.metadata['mode']) == 3:
- self.metadata['mode'] = "0%s" % self.metadata['mode']
def reset_metadata(self, event):
- """ Reset metadata to defaults if info. :info, or info.xml are
- removed.
+ """ Reset metadata to defaults if info.xml is removed.
:param event: An event that applies to an info handled by this
EntrySet
@@ -1443,12 +1470,10 @@ class EntrySet(Debuggable):
"""
if event.filename == 'info.xml':
self.infoxml = None
- elif event.filename in [':info', 'info']:
- self.metadata = DEFAULT_FILE_METADATA.copy()
def bind_info_to_entry(self, entry, metadata):
- """ Shortcut to call :func:`bind_info` with the base
- info/info.xml for this EntrySet.
+ """ Bind the metadata for the given client in the base
+ info.xml for this EntrySet to the entry.
:param entry: The abstract entry to bind the info to. This
will be modified in place
@@ -1457,7 +1482,10 @@ class EntrySet(Debuggable):
:type metadata: Bcfg2.Server.Plugins.Metadata.ClientMetadata
:returns: None
"""
- bind_info(entry, metadata, infoxml=self.infoxml, default=self.metadata)
+ for attr, val in list(self.metadata.items()):
+ entry.set(attr, val)
+ if self.infoxml is not None:
+ self.infoxml.BindEntry(entry, metadata)
def bind_entry(self, entry, metadata):
""" Return the single best fully-bound entry from the set of
@@ -1503,10 +1531,12 @@ class GroupSpool(Plugin, Generator):
#: object.
entry_type = 'Path'
- def __init__(self, core, datastore):
- Plugin.__init__(self, core, datastore)
+ def __init__(self, core):
+ Plugin.__init__(self, core)
Generator.__init__(self)
+ self.fam = Bcfg2.Server.FileMonitor.get_fam()
+
#: See :class:`Bcfg2.Server.Plugins.interfaces.Generator` for
#: details on the Entries attribute.
self.Entries[self.entry_type] = {}
@@ -1518,7 +1548,6 @@ class GroupSpool(Plugin, Generator):
self.entries = {}
self.handles = {}
self.AddDirectoryMonitor('')
- self.encoding = core.setup['encoding']
__init__.__doc__ = Plugin.__init__.__doc__
def add_entry(self, event):
@@ -1542,8 +1571,7 @@ class GroupSpool(Plugin, Generator):
dirpath = self.data + ident
self.entries[ident] = self.es_cls(self.filename_pattern,
dirpath,
- self.es_child_cls,
- self.encoding)
+ self.es_child_cls)
self.Entries[self.entry_type][ident] = \
self.entries[ident].bind_entry
if not os.path.isdir(epath):
@@ -1653,5 +1681,5 @@ class GroupSpool(Plugin, Generator):
if not os.path.isdir(name):
self.logger.error("Failed to open directory %s" % name)
return
- reqid = self.core.fam.AddMonitor(name, self)
+ reqid = self.fam.AddMonitor(name, self)
self.handles[reqid] = relative
diff --git a/src/lib/Bcfg2/Server/Plugin/interfaces.py b/src/lib/Bcfg2/Server/Plugin/interfaces.py
index 07717a710..da39ac77a 100644
--- a/src/lib/Bcfg2/Server/Plugin/interfaces.py
+++ b/src/lib/Bcfg2/Server/Plugin/interfaces.py
@@ -6,11 +6,17 @@ import copy
import threading
import lxml.etree
import Bcfg2.Server
+import Bcfg2.Options
from Bcfg2.Compat import Queue, Empty, Full, cPickle
from Bcfg2.Server.Plugin.base import Plugin
from Bcfg2.Server.Plugin.exceptions import PluginInitError, \
MetadataRuntimeError, MetadataConsistencyError
+# Since this file basically just contains abstract interface
+# descriptions, just about every function declaration has unused
+# arguments. Disable this pylint warning for the whole file.
+# pylint: disable=W0613
+
class Generator(object):
""" Generator plugins contribute to literal client configurations.
@@ -26,13 +32,12 @@ class Generator(object):
generate the content. The callable will receive two arguments:
the abstract entry (as an lxml.etree._Element object), and the
client metadata object the entry is being generated for.
-
#. If the entry is not listed in ``Entries``, the Bcfg2 core calls
:func:`HandlesEntry`; if that returns True, then it calls
:func:`HandleEntry`.
"""
- def HandlesEntry(self, entry, metadata): # pylint: disable=W0613
+ def HandlesEntry(self, entry, metadata):
""" HandlesEntry is the slow path method for routing
configuration binding requests. It is called if the
``Entries`` dict does not contain a method for binding the
@@ -47,7 +52,7 @@ class Generator(object):
"""
return False
- def HandleEntry(self, entry, metadata): # pylint: disable=W0613
+ def HandleEntry(self, entry, metadata):
""" HandleEntry is the slow path method for binding
configuration binding requests. It is called if the
``Entries`` dict does not contain a method for binding the
@@ -137,7 +142,6 @@ class Metadata(object):
"""
pass
- # pylint: disable=W0613
def resolve_client(self, address, cleanup_cache=False):
""" Resolve the canonical name of this client. If this method
is not implemented, the hostname claimed by the client is
@@ -155,7 +159,6 @@ class Metadata(object):
:class:`Bcfg2.Server.Plugin.exceptions.MetadataConsistencyError`
"""
return address[1]
- # pylint: enable=W0613
def AuthenticateConnection(self, cert, user, password, address):
""" Authenticate the given client.
@@ -222,7 +225,7 @@ class Connector(object):
""" Connector plugins augment client metadata instances with
additional data, additional groups, or both. """
- def get_additional_groups(self, metadata): # pylint: disable=W0613
+ def get_additional_groups(self, metadata):
""" Return a list of additional groups for the given client.
Each group can be either the name of a group (a string), or a
:class:`Bcfg2.Server.Plugins.Metadata.MetadataGroup` object
@@ -253,7 +256,7 @@ class Connector(object):
"""
return list()
- def get_additional_data(self, metadata): # pylint: disable=W0613
+ def get_additional_data(self, metadata):
""" Return arbitrary additional data for the given
ClientMetadata object. By convention this is usually a dict
object, but doesn't need to be.
@@ -346,14 +349,14 @@ class ThreadedStatistics(Statistics, Threaded, threading.Thread):
""" ThreadedStatistics plugins process client statistics in a
separate thread. """
- def __init__(self, core, datastore):
- Statistics.__init__(self, core, datastore)
+ def __init__(self, core):
+ Statistics.__init__(self, core)
Threaded.__init__(self)
threading.Thread.__init__(self)
# Event from the core signaling an exit
self.terminate = core.terminate
self.work_queue = Queue(100000)
- self.pending_file = os.path.join(datastore, "etc",
+ self.pending_file = os.path.join(Bcfg2.Options.setup.repository, "etc",
"%s.pending" % self.name)
self.daemon = False
@@ -442,8 +445,14 @@ class ThreadedStatistics(Statistics, Threaded, threading.Thread):
except Empty:
continue
except:
- err = sys.exc_info()[1]
- self.logger.error("ThreadedStatistics: %s" % err)
+ # we want to catch all exceptions here so that a stray
+ # error doesn't kill the entire statistics thread. For
+ # instance, if a bad value gets pushed onto the queue
+ # and the assignment above raises TypeError, we want
+ # to report the error, ignore the bad value, and
+ # continue processing statistics.
+ self.logger.error("Unknown error processing statistics: %s" %
+ sys.exc_info()[1])
continue
self.handle_statistic(client, xdata)
if self.work_queue is not None and not self.work_queue.empty():
@@ -476,7 +485,7 @@ class ThreadedStatistics(Statistics, Threaded, threading.Thread):
# Someone who understands these interfaces better needs to write docs
# for PullSource and PullTarget
class PullSource(object):
- def GetExtra(self, client): # pylint: disable=W0613
+ def GetExtra(self, client):
return []
def GetCurrentEntry(self, client, e_type, e_name):
@@ -556,20 +565,23 @@ class Version(Plugin):
create = False
+ options = Plugin.options + [
+ Bcfg2.Options.PathOption(cf=('server', 'vcs_root'),
+ default='<repository>',
+ help='Server VCS repository root')]
+
#: The path to the VCS metadata file or directory, relative to the
#: base of the Bcfg2 repository. E.g., for Subversion this would
#: be ".svn"
__vcs_metadata_path__ = None
- def __init__(self, core, datastore):
- Plugin.__init__(self, core, datastore)
+ __rmi__ = Plugin.__rmi__ + ['get_revision']
+
+ def __init__(self, core):
+ Plugin.__init__(self, core)
- if core.setup['vcs_root']:
- self.vcs_root = core.setup['vcs_root']
- else:
- self.vcs_root = datastore
if self.__vcs_metadata_path__:
- self.vcs_path = os.path.join(self.vcs_root,
+ self.vcs_path = os.path.join(Bcfg2.Options.setup.vcs_root,
self.__vcs_metadata_path__)
if not os.path.exists(self.vcs_path):
@@ -626,20 +638,46 @@ class ClientRunHooks(object):
pass
-class Caching(object):
- """ A plugin that caches more than just the data received from the
- FAM. This presents a unified interface to clear the cache. """
+class ClientACLs(object):
+ """ ClientACLs are used to grant or deny access to different
+ XML-RPC calls based on client IP or metadata. """
- def expire_cache(self, key=None):
- """ Expire the cache associated with the given key.
+ def check_acl_ip(self, address, rmi):
+ """ Check if the given IP address is authorized to make the
+ named XML-RPC call.
- :param key: The key to expire the cache for. Because cache
- implementations vary tremendously between plugins,
- this could be any number of things, but generally
- a hostname. It also may or may not be possible to
- expire the cache for a single host; this interface
- does not require any guarantee about that.
- :type key: varies
- :returns: None
+ :param address: The address pair of the client to check ACLs for
+ :type address: tuple of (<ip address>, <port>)
+ :param rmi: The fully-qualified name of the RPC call
+        :type rmi: string
+ :returns: bool or None - True to allow, False to deny, None to
+ defer to metadata ACLs
"""
- raise NotImplementedError
+ return True
+
+ def check_acl_metadata(self, metadata, rmi):
+ """ Check if the given client is authorized to make the named
+ XML-RPC call.
+
+ :param metadata: The client metadata
+ :type metadata: Bcfg2.Server.Plugins.Metadata.ClientMetadata
+ :param rmi: The fully-qualified name of the RPC call
+        :type rmi: string
+ :returns: bool
+ """
+ return True
+
+
+class TemplateDataProvider(object):
+ """ TemplateDataProvider plugins provide variables to templates
+ for use in rendering. """
+
+ def get_template_data(self, entry, metadata, template):
+ """ Get a dict of variables that will be supplied to a Cfg
+ template for rendering """
+ return dict()
+
+ def get_xml_template_data(self, structfile, metadata):
+ """ Get a dict of variables that will be supplied to an XML
+ template (e.g., a bundle) for rendering """
+ return dict()
diff --git a/src/lib/Bcfg2/Server/Plugins/ACL.py b/src/lib/Bcfg2/Server/Plugins/ACL.py
new file mode 100644
index 000000000..37f51a2a1
--- /dev/null
+++ b/src/lib/Bcfg2/Server/Plugins/ACL.py
@@ -0,0 +1,146 @@
+""" Support for client ACLs based on IP address and client metadata """
+
+import os
+import struct
+import socket
+import Bcfg2.Server.Plugin
+
+
+def rmi_names_equal(first, second):
+ """ Compare two XML-RPC method names and see if they match.
+ Resolves some limited wildcards; see
+ :ref:`server-plugins-misc-acl-wildcards` for details.
+
+ :param first: One of the ACLs to compare
+ :type first: string
+ :param second: The other ACL to compare
+ :type second: string
+ :returns: bool """
+ if first == second:
+ # exact match (also covers both names being the bare '*' wildcard)
+ return True
+ if first is None or second is None:
+ return False
+ if '*' not in first + second:
+ # no wildcards, and not exactly equal
+ return False
+ first_parts = first.split('.')
+ second_parts = second.split('.')
+ if len(first_parts) != len(second_parts):
+ return False
+ for i in range(len(first_parts)):
+ if (first_parts[i] != second_parts[i] and first_parts[i] != '*' and
+ second_parts[i] != '*'):
+ return False
+ return True
+
+
+def ip2int(ip):
+ """ convert a dotted-quad IP address into an integer
+ representation of the same """
+ return struct.unpack('>L', socket.inet_pton(socket.AF_INET, ip))[0]
+
+
+def ip_matches(ip, entry):
+ """ Return True if the given IP matches the IP or IP and netmask
+ in the given ACL entry; False otherwise """
+ if entry.get("netmask"):
+ try:
+ mask = int("1" * int(entry.get("netmask")) +
+ "0" * (32 - int(entry.get("netmask"))), 2)
+ except ValueError:
+ mask = ip2int(entry.get("netmask"))
+ return ip2int(ip) & mask == ip2int(entry.get("address")) & mask
+ elif entry.get("address") is None:
+ # no address, no netmask -- match all
+ return True
+ elif ip == entry.get("address"):
+ # just a plain ip address
+ return True
+ return False
+
+
+class IPACLFile(Bcfg2.Server.Plugin.XMLFileBacked):
+ """ representation of ACL ip.xml, for IP-based ACLs """
+ __identifier__ = None
+ actions = dict(Allow=True,
+ Deny=False,
+ Defer=None)
+
+ def check_acl(self, address, rmi):
+ """ Check a client address against the ACL list """
+ if not len(self.entries):
+ # default defer if no ACLs are defined.
+ self.debug_log("ACL: %s requests %s: No IP ACLs, defer" %
+ (address, rmi))
+ return self.actions["Defer"]
+ for entry in self.entries:
+ if (ip_matches(address, entry) and
+ rmi_names_equal(entry.get("method"), rmi)):
+ self.debug_log("ACL: %s requests %s: Found matching IP ACL, "
+ "%s" % (address, rmi, entry.tag.lower()))
+ return self.actions[entry.tag]
+ if address == "127.0.0.1":
+ self.debug_log("ACL: %s requests %s: No matching IP ACLs, "
+ "localhost allowed" % (address, rmi))
+ return self.actions['Allow'] # default allow for localhost
+
+ self.debug_log("ACL: %s requests %s: No matching IP ACLs, defer" %
+ (address, rmi))
+ return self.actions["Defer"] # default defer for other machines
+
+
+class MetadataACLFile(Bcfg2.Server.Plugin.StructFile):
+ """ representation of ACL metadata.xml, for metadata-based ACLs """
+ def check_acl(self, metadata, rmi):
+ """ check client metadata against the ACL list """
+ if not len(self.entries):
+ # default allow if no ACLs are defined.
+ self.debug_log("ACL: %s requests %s: No metadata ACLs, allow" %
+ (metadata.hostname, rmi))
+ return True
+ for el in self.Match(metadata):
+ if rmi_names_equal(el.get("method"), rmi):
+ self.debug_log("ACL: %s requests %s: Found matching metadata "
+ "ACL, %s" % (metadata.hostname, rmi,
+ el.tag.lower()))
+ return el.tag == "Allow"
+ if metadata.hostname in ['localhost', 'localhost.localdomain']:
+ # default allow for localhost
+ self.debug_log("ACL: %s requests %s: No matching metadata ACLs, "
+ "localhost allowed" % (metadata.hostname, rmi))
+ return True
+ self.debug_log("ACL: %s requests %s: No matching metadata ACLs, deny" %
+ (metadata.hostname, rmi))
+ return False # default deny for other machines
+
+
+class ACL(Bcfg2.Server.Plugin.Plugin,
+ Bcfg2.Server.Plugin.ClientACLs):
+ """ allow connections to bcfg-server based on IP address """
+
+ def __init__(self, core):
+ Bcfg2.Server.Plugin.Plugin.__init__(self, core)
+ Bcfg2.Server.Plugin.ClientACLs.__init__(self)
+ self.ip_acls = IPACLFile(os.path.join(self.data, 'ip.xml'),
+ should_monitor=True)
+ self.metadata_acls = MetadataACLFile(os.path.join(self.data,
+ 'metadata.xml'),
+ should_monitor=True)
+
+ def check_acl_ip(self, address, rmi):
+ self.debug_log("ACL: %s requests %s: Checking IP ACLs" %
+ (address[0], rmi))
+ return self.ip_acls.check_acl(address[0], rmi)
+
+ def check_acl_metadata(self, metadata, rmi):
+ self.debug_log("ACL: %s requests %s: Checking metadata ACLs" %
+ (metadata.hostname, rmi))
+ return self.metadata_acls.check_acl(metadata, rmi)
+
+ def set_debug(self, debug):
+ rv = Bcfg2.Server.Plugin.Plugin.set_debug(self, debug)
+ self.ip_acls.set_debug(debug)
+ self.metadata_acls.set_debug(debug)
+ return rv
+ set_debug.__doc__ = Bcfg2.Server.Plugin.Plugin.set_debug.__doc__
diff --git a/src/lib/Bcfg2/Server/Plugins/AWSTags.py b/src/lib/Bcfg2/Server/Plugins/AWSTags.py
index 147f37fbf..0d6eefaaa 100644
--- a/src/lib/Bcfg2/Server/Plugins/AWSTags.py
+++ b/src/lib/Bcfg2/Server/Plugins/AWSTags.py
@@ -1,12 +1,11 @@
-""" Query tags from AWS via boto, optionally setting group membership """
+"""Query tags from AWS via boto, optionally setting group membership."""
import os
import re
import sys
-import Bcfg2.Server.Lint
import Bcfg2.Server.Plugin
from boto import connect_ec2
-from Bcfg2.Cache import Cache
+from Bcfg2.Server.Cache import Cache
from Bcfg2.Compat import ConfigParser
@@ -66,11 +65,7 @@ class PatternFile(Bcfg2.Server.Plugin.XMLFileBacked):
create = 'AWSTags'
def __init__(self, filename, core=None):
- try:
- fam = core.fam
- except AttributeError:
- fam = None
- Bcfg2.Server.Plugin.XMLFileBacked.__init__(self, filename, fam=fam,
+ Bcfg2.Server.Plugin.XMLFileBacked.__init__(self, filename,
should_monitor=True)
self.core = core
self.tags = []
@@ -87,35 +82,28 @@ class PatternFile(Bcfg2.Server.Plugin.XMLFileBacked):
self.tags.append(AWSTagPattern(entry.get("name"),
entry.get("value"),
groups))
- except: # pylint: disable=W0702
+ except re.error:
self.logger.error("AWSTags: Failed to initialize pattern %s: "
"%s" % (entry.get("name"),
sys.exc_info()[1]))
- def get_groups(self, hostname, tags):
+ def get_groups(self, tags):
""" return a list of groups that should be added to the given
- client based on patterns that match the hostname """
+ client based on patterns that match the tags """
ret = []
for pattern in self.tags:
- try:
- ret.extend(pattern.get_groups(tags))
- except: # pylint: disable=W0702
- self.logger.error("AWSTags: Failed to process pattern %s for "
- "%s" % (pattern, hostname),
- exc_info=1)
+ ret.extend(pattern.get_groups(tags))
return ret
class AWSTags(Bcfg2.Server.Plugin.Plugin,
- Bcfg2.Server.Plugin.Caching,
Bcfg2.Server.Plugin.ClientRunHooks,
Bcfg2.Server.Plugin.Connector):
""" Query tags from AWS via boto, optionally setting group membership """
__rmi__ = Bcfg2.Server.Plugin.Plugin.__rmi__ + ['expire_cache']
- def __init__(self, core, datastore):
- Bcfg2.Server.Plugin.Plugin.__init__(self, core, datastore)
- Bcfg2.Server.Plugin.Caching.__init__(self)
+ def __init__(self, core):
+ Bcfg2.Server.Plugin.Plugin.__init__(self, core)
Bcfg2.Server.Plugin.ClientRunHooks.__init__(self)
Bcfg2.Server.Plugin.Connector.__init__(self)
try:
@@ -178,6 +166,8 @@ class AWSTags(Bcfg2.Server.Plugin.Plugin,
return self._tagcache[metadata.hostname]
def expire_cache(self, key=None):
+ """ Expire the cache for one host, or for all hosts. This is
+ exposed as an XML-RPC RMI. """
self._tagcache.expire(key=key)
def start_client_run(self, metadata):
@@ -187,31 +177,4 @@ class AWSTags(Bcfg2.Server.Plugin.Plugin,
return self.get_tags(metadata)
def get_additional_groups(self, metadata):
- return self.config.get_groups(metadata.hostname,
- self.get_tags(metadata))
-
-
-class AWSTagsLint(Bcfg2.Server.Lint.ServerPlugin):
- """ ``bcfg2-lint`` plugin to check all given :ref:`AWSTags
- <server-plugins-connectors-awstags>` patterns for validity. """
-
- def Run(self):
- cfg = self.core.plugins['AWSTags'].config
- for entry in cfg.xdata.xpath('//Tag'):
- self.check(entry, "name")
- if entry.get("value"):
- self.check(entry, "value")
-
- @classmethod
- def Errors(cls):
- return {"pattern-fails-to-initialize": "error"}
-
- def check(self, entry, attr):
- """ Check a single attribute (``name`` or ``value``) of a
- single entry for validity. """
- try:
- re.compile(entry.get(attr))
- except re.error:
- self.LintError("pattern-fails-to-initialize",
- "'%s' regex could not be compiled: %s\n %s" %
- (attr, sys.exc_info()[1], entry.get("name")))
+ return self.config.get_groups(self.get_tags(metadata))
diff --git a/src/lib/Bcfg2/Server/Plugins/Account.py b/src/lib/Bcfg2/Server/Plugins/Account.py
deleted file mode 100644
index fd49d3655..000000000
--- a/src/lib/Bcfg2/Server/Plugins/Account.py
+++ /dev/null
@@ -1,102 +0,0 @@
-"""This handles authentication setup."""
-
-import Bcfg2.Server.Plugin
-
-
-class Account(Bcfg2.Server.Plugin.Plugin,
- Bcfg2.Server.Plugin.Generator):
- """This module generates account config files,
- based on an internal data repo:
- static.(passwd|group|limits.conf) -> static entries
- dyn.(passwd|group) -> dynamic entries (usually acquired from yp or somesuch)
- useraccess -> users to be granted login access on some hosts
- superusers -> users to be granted root privs on all hosts
- rootlike -> users to be granted root privs on some hosts
-
- """
- name = 'Account'
- __author__ = 'bcfg-dev@mcs.anl.gov'
- deprecated = True
-
- def __init__(self, core, datastore):
- Bcfg2.Server.Plugin.Plugin.__init__(self, core, datastore)
- Bcfg2.Server.Plugin.Generator.__init__(self)
- self.Entries = {'ConfigFile': {'/etc/passwd': self.from_yp_cb,
- '/etc/group': self.from_yp_cb,
- '/etc/security/limits.conf': self.gen_limits_cb,
- '/root/.ssh/authorized_keys': self.gen_root_keys_cb,
- '/etc/sudoers': self.gen_sudoers}}
- try:
- self.repository = Bcfg2.Server.Plugin.DirectoryBacked(self.data,
- self.core.fam)
- except:
- self.logger.error("Failed to load repos: %s, %s" % \
- (self.data, "%s/ssh" % (self.data)))
- raise Bcfg2.Server.Plugin.PluginInitError
-
- def from_yp_cb(self, entry, metadata):
- """Build password file from cached yp data."""
- fname = entry.attrib['name'].split('/')[-1]
- entry.text = self.repository.entries["static.%s" % (fname)].data
- entry.text += self.repository.entries["dyn.%s" % (fname)].data
- perms = {'owner': 'root',
- 'group': 'root',
- 'mode': '0644'}
- [entry.attrib.__setitem__(key, value) for (key, value) in \
- list(perms.items())]
-
- def gen_limits_cb(self, entry, metadata):
- """Build limits entries based on current ACLs."""
- entry.text = self.repository.entries["static.limits.conf"].data
- superusers = self.repository.entries["superusers"].data.split()
- useraccess = [line.split(':') for line in \
- self.repository.entries["useraccess"].data.split()]
- users = [user for (user, host) in \
- useraccess if host == metadata.hostname.split('.')[0]]
- perms = {'owner': 'root',
- 'group': 'root',
- 'mode': '0600'}
- [entry.attrib.__setitem__(key, value) for (key, value) in \
- list(perms.items())]
- entry.text += "".join(["%s hard maxlogins 1024\n" % uname for uname in superusers + users])
- if "*" not in users:
- entry.text += "* hard maxlogins 0\n"
-
- def gen_root_keys_cb(self, entry, metadata):
- """Build root authorized keys file based on current ACLs."""
- superusers = self.repository.entries['superusers'].data.split()
- try:
- rootlike = [line.split(':', 1) for line in \
- self.repository.entries['rootlike'].data.split()]
- superusers += [user for (user, host) in rootlike \
- if host == metadata.hostname.split('.')[0]]
- except:
- pass
- rdata = self.repository.entries
- entry.text = "".join([rdata["%s.key" % user].data for user \
- in superusers if \
- ("%s.key" % user) in rdata])
- perms = {'owner': 'root',
- 'group': 'root',
- 'mode': '0600'}
- [entry.attrib.__setitem__(key, value) for (key, value) \
- in list(perms.items())]
-
- def gen_sudoers(self, entry, metadata):
- """Build root authorized keys file based on current ACLs."""
- superusers = self.repository.entries['superusers'].data.split()
- try:
- rootlike = [line.split(':', 1) for line in \
- self.repository.entries['rootlike'].data.split()]
- superusers += [user for (user, host) in rootlike \
- if host == metadata.hostname.split('.')[0]]
- except:
- pass
- entry.text = self.repository.entries['static.sudoers'].data
- entry.text += "".join(["%s ALL=(ALL) ALL\n" % uname \
- for uname in superusers])
- perms = {'owner': 'root',
- 'group': 'root',
- 'mode': '0440'}
- [entry.attrib.__setitem__(key, value) for (key, value) \
- in list(perms.items())]
diff --git a/src/lib/Bcfg2/Server/Plugins/Base.py b/src/lib/Bcfg2/Server/Plugins/Base.py
deleted file mode 100644
index a18204d60..000000000
--- a/src/lib/Bcfg2/Server/Plugins/Base.py
+++ /dev/null
@@ -1,33 +0,0 @@
-"""This module sets up a base list of configuration entries."""
-
-import copy
-import lxml.etree
-import Bcfg2.Server.Plugin
-from itertools import chain
-
-
-class Base(Bcfg2.Server.Plugin.Plugin,
- Bcfg2.Server.Plugin.Structure,
- Bcfg2.Server.Plugin.XMLDirectoryBacked):
- """This Structure is good for the pile of independent configs
- needed for most actual systems.
- """
- name = 'Base'
- __author__ = 'bcfg-dev@mcs.anl.gov'
- __child__ = Bcfg2.Server.Plugin.StructFile
- deprecated = True
-
- def __init__(self, core, datastore):
- Bcfg2.Server.Plugin.Plugin.__init__(self, core, datastore)
- Bcfg2.Server.Plugin.Structure.__init__(self)
- Bcfg2.Server.Plugin.XMLDirectoryBacked.__init__(self, self.data,
- self.core.fam)
-
- def BuildStructures(self, metadata):
- """Build structures for client described by metadata."""
- ret = lxml.etree.Element("Independent", version='2.0')
- fragments = list(chain(*[base.Match(metadata)
- for base in list(self.entries.values())]))
- for frag in fragments:
- ret.append(copy.copy(frag))
- return [ret]
diff --git a/src/lib/Bcfg2/Server/Plugins/Bundler.py b/src/lib/Bcfg2/Server/Plugins/Bundler.py
index 58f8f4430..6c35ada59 100644
--- a/src/lib/Bcfg2/Server/Plugins/Bundler.py
+++ b/src/lib/Bcfg2/Server/Plugins/Bundler.py
@@ -1,201 +1,152 @@
"""This provides bundle clauses with translation functionality."""
-import copy
-import logging
-import lxml.etree
import os
-import os.path
import re
import sys
-import Bcfg2.Server
-import Bcfg2.Server.Plugin
-import Bcfg2.Server.Lint
-
-try:
- import genshi.template.base
- from Bcfg2.Server.Plugins.TGenshi import removecomment, TemplateFile
- HAS_GENSHI = True
-except ImportError:
- HAS_GENSHI = False
-
-
-SETUP = None
+import copy
+import fnmatch
+import lxml.etree
+from Bcfg2.Server.Plugin import StructFile, Plugin, Structure, \
+ StructureValidator, XMLDirectoryBacked, Generator
+from Bcfg2.version import Bcfg2VersionInfo
+from genshi.template import TemplateError
-class BundleFile(Bcfg2.Server.Plugin.StructFile):
+class BundleFile(StructFile):
""" Representation of a bundle XML file """
- def get_xml_value(self, metadata):
- """ get the XML data that applies to the given client """
- bundlename = os.path.splitext(os.path.basename(self.name))[0]
- bundle = lxml.etree.Element('Bundle', name=bundlename)
- for item in self.Match(metadata):
- bundle.append(copy.copy(item))
- return bundle
-
-
-if HAS_GENSHI:
- class BundleTemplateFile(TemplateFile,
- Bcfg2.Server.Plugin.StructFile):
- """ Representation of a Genshi-templated bundle XML file """
-
- def __init__(self, name, specific, encoding, fam=None):
- TemplateFile.__init__(self, name, specific, encoding)
- Bcfg2.Server.Plugin.StructFile.__init__(self, name, fam=fam)
- self.logger = logging.getLogger(name)
-
- def get_xml_value(self, metadata):
- """ get the rendered XML data that applies to the given
- client """
- if not hasattr(self, 'template'):
- msg = "No parsed template information for %s" % self.name
- self.logger.error(msg)
- raise Bcfg2.Server.Plugin.PluginExecutionError(msg)
- stream = self.template.generate(
- metadata=metadata,
- repo=SETUP['repo']).filter(removecomment)
- data = lxml.etree.XML(
- stream.render('xml', strip_whitespace=False).encode(),
- parser=Bcfg2.Server.XMLParser)
- bundlename = os.path.splitext(os.path.basename(self.name))[0]
- bundle = lxml.etree.Element('Bundle', name=bundlename)
- for item in self.Match(metadata, data):
- bundle.append(copy.deepcopy(item))
- return bundle
-
- def Match(self, metadata, xdata): # pylint: disable=W0221
- """Return matching fragments of parsed template."""
- rv = []
- for child in xdata.getchildren():
- rv.extend(self._match(child, metadata))
- self.logger.debug("File %s got %d match(es)" % (self.name,
- len(rv)))
- return rv
-
- class SGenshiTemplateFile(BundleTemplateFile):
- """ provided for backwards compat with the deprecated SGenshi
- plugin """
- pass
-
-
-class Bundler(Bcfg2.Server.Plugin.Plugin,
- Bcfg2.Server.Plugin.Structure,
- Bcfg2.Server.Plugin.XMLDirectoryBacked):
+ bundle_name_re = re.compile(r'^(?P<name>.*)\.(xml|genshi)$')
+
+ def __init__(self, filename, should_monitor=False):
+ StructFile.__init__(self, filename, should_monitor=should_monitor)
+ if self.name.endswith(".genshi"):
+ self.logger.warning("Bundler: %s: Bundle filenames ending with "
+ ".genshi are deprecated; add the Genshi XML "
+ "namespace to a .xml bundle instead" %
+ self.name)
+
+ def Index(self):
+ StructFile.Index(self)
+ if self.xdata.get("name"):
+ self.logger.warning("Bundler: %s: Explicitly specifying bundle "
+ "names is deprecated" % self.name)
+
+ @property
+ def bundle_name(self):
+ """ The name of the bundle, as determined from the filename """
+ return self.bundle_name_re.match(
+ os.path.basename(self.name)).group("name")
+
+
+class Bundler(Plugin,
+ Structure,
+ StructureValidator,
+ XMLDirectoryBacked):
""" The bundler creates dependent clauses based on the
bundle/translation scheme from Bcfg1. """
__author__ = 'bcfg-dev@mcs.anl.gov'
- patterns = re.compile(r'^(?P<name>.*)\.(xml|genshi)$')
-
- def __init__(self, core, datastore):
- Bcfg2.Server.Plugin.Plugin.__init__(self, core, datastore)
- Bcfg2.Server.Plugin.Structure.__init__(self)
- self.encoding = core.setup['encoding']
- self.__child__ = self.template_dispatch
- Bcfg2.Server.Plugin.XMLDirectoryBacked.__init__(self, self.data,
- self.core.fam)
- global SETUP
- SETUP = core.setup
-
- def template_dispatch(self, name, _):
- """ Add the correct child entry type to Bundler depending on
- whether the XML file in question is a plain XML file or a
- templated bundle """
- bundle = lxml.etree.parse(name, parser=Bcfg2.Server.XMLParser)
- nsmap = bundle.getroot().nsmap
- if (name.endswith('.genshi') or
- ('py' in nsmap and
- nsmap['py'] == 'http://genshi.edgewall.org/')):
- if HAS_GENSHI:
- spec = Bcfg2.Server.Plugin.Specificity()
- return BundleTemplateFile(name, spec, self.encoding,
- fam=self.core.fam)
- else:
- raise Bcfg2.Server.Plugin.PluginExecutionError("Genshi not "
- "available: %s"
- % name)
- else:
- return BundleFile(name, fam=self.fam)
+ __child__ = BundleFile
+ patterns = re.compile(r'^.*\.(?:xml|genshi)$')
+
+ def __init__(self, core):
+ Plugin.__init__(self, core)
+ Structure.__init__(self)
+ StructureValidator.__init__(self)
+ XMLDirectoryBacked.__init__(self, self.data)
+ #: Bundles by bundle name, rather than filename
+ self.bundles = dict()
+
+ def HandleEvent(self, event):
+ XMLDirectoryBacked.HandleEvent(self, event)
+ self.bundles = dict([(b.bundle_name, b)
+ for b in self.entries.values()])
+
+ def validate_structures(self, metadata, structures):
+ """ Translate <Path glob='...'/> entries into <Path name='...'/>
+ entries """
+ for struct in structures:
+ for pathglob in struct.xpath("//Path[@glob]"):
+ for plugin in self.core.plugins_by_type(Generator):
+ for match in fnmatch.filter(plugin.Entries['Path'].keys(),
+ pathglob.get("glob")):
+ lxml.etree.SubElement(pathglob.getparent(),
+ "Path", name=match)
+ pathglob.getparent().remove(pathglob)
def BuildStructures(self, metadata):
- """Build all structures for client (metadata)."""
bundleset = []
-
- bundle_entries = {}
- for key, item in self.entries.items():
- bundle_entries.setdefault(
- self.patterns.match(os.path.basename(key)).group('name'),
- []).append(item)
-
- for bundlename in metadata.bundles:
+ bundles = copy.copy(metadata.bundles)
+ bundles_added = set(bundles)
+ while bundles:
+ bundlename = bundles.pop()
try:
- entries = bundle_entries[bundlename]
+ bundle = self.bundles[bundlename]
except KeyError:
self.logger.error("Bundler: Bundle %s does not exist" %
bundlename)
continue
+
try:
- bundleset.append(entries[0].get_xml_value(metadata))
- except genshi.template.base.TemplateError:
+ data = bundle.XMLMatch(metadata)
+ except TemplateError:
err = sys.exc_info()[1]
self.logger.error("Bundler: Failed to render templated bundle "
"%s: %s" % (bundlename, err))
+ continue
except:
self.logger.error("Bundler: Unexpected bundler error for %s" %
bundlename, exc_info=1)
- return bundleset
-
+ continue
-class BundlerLint(Bcfg2.Server.Lint.ServerPlugin):
- """ Perform various :ref:`Bundler
- <server-plugins-structures-bundler-index>` checks. """
-
- def Run(self):
- self.missing_bundles()
- for bundle in self.core.plugins['Bundler'].entries.values():
- if (self.HandlesFile(bundle.name) and
- (not HAS_GENSHI or
- not isinstance(bundle, BundleTemplateFile))):
- self.bundle_names(bundle)
-
- @classmethod
- def Errors(cls):
- return {"bundle-not-found": "error",
- "inconsistent-bundle-name": "warning"}
-
- def missing_bundles(self):
- """ Find bundles listed in Metadata but not implemented in
- Bundler. """
- if self.files is None:
- # when given a list of files on stdin, this check is
- # useless, so skip it
- groupdata = self.metadata.groups_xml.xdata
- ref_bundles = set([b.get("name")
- for b in groupdata.findall("//Bundle")])
-
- allbundles = self.core.plugins['Bundler'].entries.keys()
- for bundle in ref_bundles:
- xmlbundle = "%s.xml" % bundle
- genshibundle = "%s.genshi" % bundle
- if (xmlbundle not in allbundles and
- genshibundle not in allbundles):
- self.LintError("bundle-not-found",
- "Bundle %s referenced, but does not exist" %
- bundle)
-
- def bundle_names(self, bundle):
- """ Verify bundle name attribute matches filename.
-
- :param bundle: The bundle to verify
- :type bundle: Bcfg2.Server.Plugins.Bundler.BundleFile
- """
- try:
- xdata = lxml.etree.XML(bundle.data)
- except AttributeError:
- # genshi template
- xdata = lxml.etree.parse(bundle.template.filepath).getroot()
-
- fname = os.path.splitext(os.path.basename(bundle.name))[0]
- bname = xdata.get('name')
- if fname != bname:
- self.LintError("inconsistent-bundle-name",
- "Inconsistent bundle name: filename is %s, "
- "bundle name is %s" % (fname, bname))
+ if data.get("independent", "false").lower() == "true":
+ data.tag = "Independent"
+ del data.attrib['independent']
+
+ data.set("name", bundlename)
+
+ for child in data.findall("Bundle"):
+ if child.getchildren():
+ # XInclude'd bundle -- "flatten" it so there
+ # aren't extra Bundle tags, since other bits in
+ # Bcfg2 only handle the direct children of the
+ # top-level Bundle tag
+ if data.get("name"):
+ self.logger.warning("Bundler: In file XIncluded from "
+ "%s: Explicitly specifying "
+ "bundle names is deprecated" %
+ self.name)
+ for el in child.getchildren():
+ data.append(el)
+ data.remove(child)
+ else:
+ # no children -- wat
+ self.logger.warning("Bundler: Useless empty Bundle tag "
+ "in %s" % self.name)
+ data.remove(child)
+
+ for child in data.findall('RequiredBundle'):
+ if child.get("name"):
+ # dependent bundle -- add it to the list of
+ # bundles for this client
+ if child.get("name") not in bundles_added:
+ bundles.append(child.get("name"))
+ bundles_added.add(child.get("name"))
+ if child.get('inherit_modification', 'false') == 'true':
+ if metadata.version_info >= \
+ Bcfg2VersionInfo('1.4.0pre2'):
+ lxml.etree.SubElement(data, 'Bundle',
+ name=child.get('name'))
+ else:
+ self.logger.warning(
+ 'Bundler: inherit_modification="true" is '
+ 'only supported for clients starting '
+ '1.4.0pre2')
+ data.remove(child)
+ else:
+ # no name -- wat
+ self.logger.warning('Bundler: Missing required name in '
+ 'RequiredBundle tag in %s' %
+ self.name)
+ data.remove(child)
+
+ bundleset.append(data)
+ return bundleset
diff --git a/src/lib/Bcfg2/Server/Plugins/Bzr.py b/src/lib/Bcfg2/Server/Plugins/Bzr.py
index e0cbdf72a..01b51ace4 100644
--- a/src/lib/Bcfg2/Server/Plugins/Bzr.py
+++ b/src/lib/Bcfg2/Server/Plugins/Bzr.py
@@ -11,19 +11,19 @@ class Bzr(Bcfg2.Server.Plugin.Version):
using bazaar. """
__author__ = 'bcfg-dev@mcs.anl.gov'
- def __init__(self, core, datastore):
- Bcfg2.Server.Plugin.Version.__init__(self, core, datastore)
+ def __init__(self, core):
+ Bcfg2.Server.Plugin.Version.__init__(self, core)
self.logger.debug("Initialized Bazaar plugin with directory %s at "
- "revision = %s" % (self.vcs_root,
+ "revision = %s" % (Bcfg2.Options.setup.vcs_root,
self.get_revision()))
def get_revision(self):
"""Read Bazaar revision information for the Bcfg2 repository."""
try:
- working_tree = WorkingTree.open(self.vcs_root)
+ working_tree = WorkingTree.open(Bcfg2.Options.setup.vcs_root)
revision = str(working_tree.branch.revno())
if (working_tree.has_changes(working_tree.basis_tree()) or
- working_tree.unknowns()):
+ working_tree.unknowns()):
revision += "+"
except errors.NotBranchError:
msg = "Failed to read Bazaar branch"
diff --git a/src/lib/Bcfg2/Server/Plugins/Cfg/CfgAuthorizedKeysGenerator.py b/src/lib/Bcfg2/Server/Plugins/Cfg/CfgAuthorizedKeysGenerator.py
index 41d5588e4..7792d7e5c 100644
--- a/src/lib/Bcfg2/Server/Plugins/Cfg/CfgAuthorizedKeysGenerator.py
+++ b/src/lib/Bcfg2/Server/Plugins/Cfg/CfgAuthorizedKeysGenerator.py
@@ -3,8 +3,9 @@ based on an XML specification of which SSH keypairs should granted
access. """
import lxml.etree
+import Bcfg2.Options
from Bcfg2.Server.Plugin import StructFile, PluginExecutionError
-from Bcfg2.Server.Plugins.Cfg import CfgGenerator, SETUP, CFG
+from Bcfg2.Server.Plugins.Cfg import CfgGenerator, get_cfg
from Bcfg2.Server.Plugins.Metadata import ClientMetadata
@@ -20,26 +21,13 @@ class CfgAuthorizedKeysGenerator(CfgGenerator, StructFile):
#: Handle authorized keys XML files
__basenames__ = ['authorizedkeys.xml', 'authorized_keys.xml']
- #: This handler is experimental, in part because it depends upon
- #: the (experimental) CfgPrivateKeyCreator handler
- experimental = True
-
def __init__(self, fname):
- CfgGenerator.__init__(self, fname, None, None)
+ CfgGenerator.__init__(self, fname, None)
StructFile.__init__(self, fname)
self.cache = dict()
- self.core = CFG.core
+ self.core = get_cfg().core
__init__.__doc__ = CfgGenerator.__init__.__doc__
- @property
- def category(self):
- """ The name of the metadata category that generated keys are
- specific to """
- if (SETUP.cfp.has_section("sshkeys") and
- SETUP.cfp.has_option("sshkeys", "category")):
- return SETUP.cfp.get("sshkeys", "category")
- return None
-
def handle_event(self, event):
CfgGenerator.handle_event(self, event)
StructFile.HandleEvent(self, event)
@@ -51,12 +39,6 @@ class CfgAuthorizedKeysGenerator(CfgGenerator, StructFile):
rv = []
for allow in spec.findall("Allow"):
options = []
- if allow.find("Params") is not None:
- self.logger.warning("Use of <Params> in authorized_keys.xml "
- "is deprecated; use <Option> instead")
- options.extend("=".join(p)
- for p in allow.find("Params").attrib.items())
-
for opt in allow.findall("Option"):
if opt.get("value"):
options.append("%s=%s" % (opt.get("name"),
@@ -68,7 +50,8 @@ class CfgAuthorizedKeysGenerator(CfgGenerator, StructFile):
if pubkey_name:
host = allow.get("host")
group = allow.get("group")
- category = allow.get("category", self.category)
+ category = allow.get("category",
+ Bcfg2.Options.setup.sshkeys_category)
if host:
key_md = self.core.build_metadata(host)
elif group:
diff --git a/src/lib/Bcfg2/Server/Plugins/Cfg/CfgCatFilter.py b/src/lib/Bcfg2/Server/Plugins/Cfg/CfgCatFilter.py
deleted file mode 100644
index 49a5a85b3..000000000
--- a/src/lib/Bcfg2/Server/Plugins/Cfg/CfgCatFilter.py
+++ /dev/null
@@ -1,28 +0,0 @@
-""" Handle .cat files, which append lines to and remove lines from
-plaintext files """
-
-from Bcfg2.Server.Plugins.Cfg import CfgFilter
-
-
-class CfgCatFilter(CfgFilter):
- """ CfgCatFilter appends lines to and remove lines from plaintext
- :ref:`server-plugins-generators-Cfg` files"""
-
- #: Handle .cat files
- __extensions__ = ['cat']
-
- #: .cat files are deprecated
- deprecated = True
-
- def modify_data(self, entry, metadata, data):
- datalines = data.strip().split('\n')
- for line in self.data.split('\n'):
- if not line:
- continue
- if line.startswith('+'):
- datalines.append(line[1:])
- elif line.startswith('-'):
- if line[1:] in datalines:
- datalines.remove(line[1:])
- return "\n".join(datalines) + "\n"
- modify_data.__doc__ = CfgFilter.modify_data.__doc__
diff --git a/src/lib/Bcfg2/Server/Plugins/Cfg/CfgCheetahGenerator.py b/src/lib/Bcfg2/Server/Plugins/Cfg/CfgCheetahGenerator.py
index 724164cf5..84309b5dd 100644
--- a/src/lib/Bcfg2/Server/Plugins/Cfg/CfgCheetahGenerator.py
+++ b/src/lib/Bcfg2/Server/Plugins/Cfg/CfgCheetahGenerator.py
@@ -2,8 +2,10 @@
<http://www.cheetahtemplate.org/>`_ templating system to generate
:ref:`server-plugins-generators-cfg` files. """
-from Bcfg2.Server.Plugin import PluginExecutionError
-from Bcfg2.Server.Plugins.Cfg import CfgGenerator, SETUP
+import Bcfg2.Options
+from Bcfg2.Server.Plugin import PluginExecutionError, \
+ DefaultTemplateDataProvider, get_template_data
+from Bcfg2.Server.Plugins.Cfg import CfgGenerator
try:
from Cheetah.Template import Template
@@ -12,6 +14,18 @@ except ImportError:
HAS_CHEETAH = False
+class DefaultCheetahDataProvider(DefaultTemplateDataProvider):
+ """ Template data provider for Cheetah templates. Cheetah and
+ Genshi currently differ over the value of the ``path`` variable,
+ which is why this is necessary. """
+
+ def get_template_data(self, entry, metadata, template):
+ rv = DefaultTemplateDataProvider.get_template_data(self, entry,
+ metadata, template)
+ rv['path'] = rv['name']
+ return rv
+
+
class CfgCheetahGenerator(CfgGenerator):
""" The CfgCheetahGenerator allows you to use the `Cheetah
<http://www.cheetahtemplate.org/>`_ templating system to generate
@@ -27,19 +41,18 @@ class CfgCheetahGenerator(CfgGenerator):
#: :class:`Cheetah.Template.Template` compiler settings
settings = dict(useStackFrames=False)
- def __init__(self, fname, spec, encoding):
- CfgGenerator.__init__(self, fname, spec, encoding)
+ def __init__(self, fname, spec):
+ CfgGenerator.__init__(self, fname, spec)
if not HAS_CHEETAH:
raise PluginExecutionError("Cheetah is not available")
__init__.__doc__ = CfgGenerator.__init__.__doc__
def get_data(self, entry, metadata):
- template = Template(self.data.decode(self.encoding),
+ template = Template(self.data.decode(Bcfg2.Options.setup.encoding),
compilerSettings=self.settings)
- template.metadata = metadata
- template.name = entry.get('realname', entry.get('name'))
- template.path = entry.get('realname', entry.get('name'))
- template.source_path = self.name
- template.repo = SETUP['repo']
+ for key, val in get_template_data(
+ entry, metadata, self.name,
+ default=DefaultCheetahDataProvider()).items():
+ setattr(template, key, val)
return template.respond()
get_data.__doc__ = CfgGenerator.get_data.__doc__
diff --git a/src/lib/Bcfg2/Server/Plugins/Cfg/CfgDiffFilter.py b/src/lib/Bcfg2/Server/Plugins/Cfg/CfgDiffFilter.py
deleted file mode 100644
index da506a195..000000000
--- a/src/lib/Bcfg2/Server/Plugins/Cfg/CfgDiffFilter.py
+++ /dev/null
@@ -1,35 +0,0 @@
-""" Handle .diff files, which apply diffs to plaintext files """
-
-import os
-import tempfile
-from Bcfg2.Server.Plugin import PluginExecutionError
-from subprocess import Popen, PIPE
-from Bcfg2.Server.Plugins.Cfg import CfgFilter
-
-
-class CfgDiffFilter(CfgFilter):
- """ CfgDiffFilter applies diffs to plaintext
- :ref:`server-plugins-generators-Cfg` files """
-
- #: Handle .diff files
- __extensions__ = ['diff']
-
- #: .diff files are deprecated
- deprecated = True
-
- def modify_data(self, entry, metadata, data):
- basehandle, basename = tempfile.mkstemp()
- open(basename, 'w').write(data)
- os.close(basehandle)
-
- cmd = ["patch", "-u", "-f", basename]
- patch = Popen(cmd, stdin=PIPE, stdout=PIPE, stderr=PIPE)
- stderr = patch.communicate(input=self.data)[1]
- ret = patch.wait()
- output = open(basename, 'r').read()
- os.unlink(basename)
- if ret != 0:
- raise PluginExecutionError("Error applying diff %s: %s" %
- (self.name, stderr))
- return output
- modify_data.__doc__ = CfgFilter.modify_data.__doc__
diff --git a/src/lib/Bcfg2/Server/Plugins/Cfg/CfgEncryptedGenerator.py b/src/lib/Bcfg2/Server/Plugins/Cfg/CfgEncryptedGenerator.py
index 0a30a070a..849c75f70 100644
--- a/src/lib/Bcfg2/Server/Plugins/Cfg/CfgEncryptedGenerator.py
+++ b/src/lib/Bcfg2/Server/Plugins/Cfg/CfgEncryptedGenerator.py
@@ -1,11 +1,11 @@
""" CfgEncryptedGenerator lets you encrypt your plaintext
:ref:`server-plugins-generators-cfg` files on the server. """
+import Bcfg2.Options
from Bcfg2.Server.Plugin import PluginExecutionError
-from Bcfg2.Server.Plugins.Cfg import CfgGenerator, SETUP
+from Bcfg2.Server.Plugins.Cfg import CfgGenerator
try:
- from Bcfg2.Encryption import bruteforce_decrypt, EVPError, \
- get_algorithm, CFG_SECTION
+ from Bcfg2.Server.Encryption import bruteforce_decrypt, EVPError
HAS_CRYPTO = True
except ImportError:
HAS_CRYPTO = False
@@ -22,8 +22,8 @@ class CfgEncryptedGenerator(CfgGenerator):
#: .genshi.crypt and .cheetah.crypt files
__priority__ = 50
- def __init__(self, fname, spec, encoding):
- CfgGenerator.__init__(self, fname, spec, encoding)
+ def __init__(self, fname, spec):
+ CfgGenerator.__init__(self, fname, spec)
if not HAS_CRYPTO:
raise PluginExecutionError("M2Crypto is not available")
@@ -33,17 +33,13 @@ class CfgEncryptedGenerator(CfgGenerator):
return
# todo: let the user specify a passphrase by name
try:
- self.data = bruteforce_decrypt(
- self.data, setup=SETUP,
- algorithm=get_algorithm(SETUP))
+ self.data = bruteforce_decrypt(self.data)
except EVPError:
- strict = SETUP.cfp.get(CFG_SECTION, "decrypt",
- default="strict")
msg = "Cfg: Failed to decrypt %s" % self.name
- if strict:
- raise PluginExecutionError(msg)
- else:
+ if Bcfg2.Options.setup.lax_decryption:
self.logger.debug(msg)
+ else:
+ raise PluginExecutionError(msg)
def get_data(self, entry, metadata):
if self.data is None:
diff --git a/src/lib/Bcfg2/Server/Plugins/Cfg/CfgEncryptedGenshiGenerator.py b/src/lib/Bcfg2/Server/Plugins/Cfg/CfgEncryptedGenshiGenerator.py
index 130652aef..f69ab8e5f 100644
--- a/src/lib/Bcfg2/Server/Plugins/Cfg/CfgEncryptedGenshiGenerator.py
+++ b/src/lib/Bcfg2/Server/Plugins/Cfg/CfgEncryptedGenshiGenerator.py
@@ -1,32 +1,24 @@
""" Handle encrypted Genshi templates (.crypt.genshi or .genshi.crypt
files) """
+from genshi.template import TemplateLoader
from Bcfg2.Compat import StringIO
from Bcfg2.Server.Plugin import PluginExecutionError
-from Bcfg2.Server.Plugins.Cfg import SETUP
from Bcfg2.Server.Plugins.Cfg.CfgGenshiGenerator import CfgGenshiGenerator
try:
- from Bcfg2.Encryption import bruteforce_decrypt, get_algorithm
+ from Bcfg2.Server.Encryption import bruteforce_decrypt
HAS_CRYPTO = True
except ImportError:
HAS_CRYPTO = False
-try:
- from genshi.template import TemplateLoader
-except ImportError:
- # CfgGenshiGenerator will raise errors if genshi doesn't exist
- TemplateLoader = object # pylint: disable=C0103
-
class EncryptedTemplateLoader(TemplateLoader):
""" Subclass :class:`genshi.template.TemplateLoader` to decrypt
the data on the fly as it's read in using
- :func:`Bcfg2.Encryption.bruteforce_decrypt` """
+ :func:`Bcfg2.Server.Encryption.bruteforce_decrypt` """
def _instantiate(self, cls, fileobj, filepath, filename, encoding=None):
- plaintext = \
- StringIO(bruteforce_decrypt(fileobj.read(),
- algorithm=get_algorithm(SETUP)))
+ plaintext = StringIO(bruteforce_decrypt(fileobj.read()))
return TemplateLoader._instantiate(self, cls, plaintext, filepath,
filename, encoding=encoding)
@@ -45,7 +37,7 @@ class CfgEncryptedGenshiGenerator(CfgGenshiGenerator):
#: when it's read in
__loader_cls__ = EncryptedTemplateLoader
- def __init__(self, fname, spec, encoding):
- CfgGenshiGenerator.__init__(self, fname, spec, encoding)
+ def __init__(self, fname, spec):
+ CfgGenshiGenerator.__init__(self, fname, spec)
if not HAS_CRYPTO:
raise PluginExecutionError("M2Crypto is not available")
diff --git a/src/lib/Bcfg2/Server/Plugins/Cfg/CfgEncryptedJinja2Generator.py b/src/lib/Bcfg2/Server/Plugins/Cfg/CfgEncryptedJinja2Generator.py
new file mode 100644
index 000000000..c8da84ae0
--- /dev/null
+++ b/src/lib/Bcfg2/Server/Plugins/Cfg/CfgEncryptedJinja2Generator.py
@@ -0,0 +1,25 @@
+""" Handle encrypted Jinja2 templates (.crypt.jinja2 or
+.jinja2.crypt files)"""
+
+from Bcfg2.Server.Plugins.Cfg.CfgJinja2Generator import CfgJinja2Generator
+from Bcfg2.Server.Plugins.Cfg.CfgEncryptedGenerator \
+ import CfgEncryptedGenerator
+
+
+class CfgEncryptedJinja2Generator(CfgJinja2Generator, CfgEncryptedGenerator):
+ """ CfgEncryptedJinja2Generator lets you encrypt your Jinja2
+ :ref:`server-plugins-generators-cfg` files on the server """
+
+ #: handle .crypt.jinja2 or .jinja2.crypt files
+ __extensions__ = ['jinja2.crypt', 'crypt.jinja2']
+
+ #: Override low priority from parent class
+ __priority__ = 0
+
+ def handle_event(self, event):
+ CfgEncryptedGenerator.handle_event(self, event)
+ handle_event.__doc__ = CfgEncryptedGenerator.handle_event.__doc__
+
+ def get_data(self, entry, metadata):
+ return CfgJinja2Generator.get_data(self, entry, metadata)
+ get_data.__doc__ = CfgJinja2Generator.get_data.__doc__
diff --git a/src/lib/Bcfg2/Server/Plugins/Cfg/CfgExternalCommandVerifier.py b/src/lib/Bcfg2/Server/Plugins/Cfg/CfgExternalCommandVerifier.py
index 313e53ee9..953473a12 100644
--- a/src/lib/Bcfg2/Server/Plugins/Cfg/CfgExternalCommandVerifier.py
+++ b/src/lib/Bcfg2/Server/Plugins/Cfg/CfgExternalCommandVerifier.py
@@ -3,8 +3,8 @@
import os
import sys
import shlex
+from Bcfg2.Utils import Executor
from Bcfg2.Server.Plugin import PluginExecutionError
-from subprocess import Popen, PIPE
from Bcfg2.Server.Plugins.Cfg import CfgVerifier, CfgVerificationError
@@ -15,27 +15,19 @@ class CfgExternalCommandVerifier(CfgVerifier):
#: Handle :file:`:test` files
__basenames__ = [':test']
- def __init__(self, name, specific, encoding):
- CfgVerifier.__init__(self, name, specific, encoding)
+ def __init__(self, name, specific):
+ CfgVerifier.__init__(self, name, specific)
self.cmd = []
+ self.exc = Executor(timeout=30)
__init__.__doc__ = CfgVerifier.__init__.__doc__
def verify_entry(self, entry, metadata, data):
try:
- proc = Popen(self.cmd, stdin=PIPE, stdout=PIPE, stderr=PIPE)
- out, err = proc.communicate(input=data)
- rv = proc.wait()
- if rv != 0:
- # pylint: disable=E1103
- raise CfgVerificationError(err.strip() or out.strip() or
- "Non-zero return value %s" % rv)
- # pylint: enable=E1103
- except CfgVerificationError:
- raise
- except:
- err = sys.exc_info()[1]
- raise CfgVerificationError("Error running external command "
- "verifier: %s" % err)
+ result = self.exc.run(self.cmd, inputdata=data)
+ if not result.success:
+ raise CfgVerificationError(result.error)
+ except OSError:
+ raise CfgVerificationError(sys.exc_info()[1])
verify_entry.__doc__ = CfgVerifier.verify_entry.__doc__
def handle_event(self, event):
diff --git a/src/lib/Bcfg2/Server/Plugins/Cfg/CfgGenshiGenerator.py b/src/lib/Bcfg2/Server/Plugins/Cfg/CfgGenshiGenerator.py
index 83a5c1165..c3bf9569b 100644
--- a/src/lib/Bcfg2/Server/Plugins/Cfg/CfgGenshiGenerator.py
+++ b/src/lib/Bcfg2/Server/Plugins/Cfg/CfgGenshiGenerator.py
@@ -5,63 +5,55 @@
import re
import sys
import traceback
-from Bcfg2.Server.Plugin import PluginExecutionError
-from Bcfg2.Server.Plugins.Cfg import CfgGenerator, SETUP
-
-try:
- import genshi.core
- from genshi.template import TemplateLoader, NewTextTemplate
- from genshi.template.eval import UndefinedError, Suite
- #: True if Genshi libraries are available
- HAS_GENSHI = True
-
- def _genshi_removes_blank_lines():
- """ Genshi 0.5 uses the Python :mod:`compiler` package to
- compile genshi snippets to AST. Genshi 0.6 uses some bespoke
- magic, because compiler has been deprecated.
- :func:`compiler.parse` produces an AST that removes all excess
- whitespace (e.g., blank lines), while
- :func:`genshi.template.astutil.parse` does not. In order to
- determine which actual line of code an error occurs on, we
- need to know which is in use and how it treats blank lines.
- I've beat my head against this for hours and the best/only way
- I can find is to compile some genshi code with an error and
- see which line it's on."""
- code = """d = dict()
-
+import Bcfg2.Options
+from Bcfg2.Server.Plugin import PluginExecutionError, removecomment, \
+ DefaultTemplateDataProvider, get_template_data
+from Bcfg2.Server.Plugins.Cfg import CfgGenerator
+from genshi.template import TemplateLoader, NewTextTemplate
+from genshi.template.base import TemplateError
+from genshi.template.eval import UndefinedError, Suite
+
+
+def _genshi_removes_blank_lines():
+ """ Genshi 0.5 uses the Python :mod:`compiler` package to
+ compile genshi snippets to AST. Genshi 0.6 uses some bespoke
+ magic, because compiler has been deprecated.
+ :func:`compiler.parse` produces an AST that removes all excess
+ whitespace (e.g., blank lines), while
+ :func:`genshi.template.astutil.parse` does not. In order to
+ determine which actual line of code an error occurs on, we
+ need to know which is in use and how it treats blank lines.
+ I've beat my head against this for hours and the best/only way
+ I can find is to compile some genshi code with an error and
+ see which line it's on."""
+ code = """d = dict()
d['a']"""
- try:
- Suite(code).execute(dict())
- except KeyError:
- line = traceback.extract_tb(sys.exc_info()[2])[-1][1]
- if line == 2:
- return True
- else:
- return False
-
- #: True if Genshi removes all blank lines from a code block before
- #: executing it; False indicates that Genshi only removes leading
- #: and trailing blank lines. See
- #: :func:`_genshi_removes_blank_lines` for an explanation of this.
- GENSHI_REMOVES_BLANK_LINES = _genshi_removes_blank_lines()
-except ImportError:
- TemplateLoader = None # pylint: disable=C0103
- HAS_GENSHI = False
-
-
-def removecomment(stream):
- """ A Genshi filter that removes comments from the stream. This
- function is a generator.
-
- :param stream: The Genshi stream to remove comments from
- :type stream: genshi.core.Stream
- :returns: tuple of ``(kind, data, pos)``, as when iterating
- through a Genshi stream
- """
- for kind, data, pos in stream:
- if kind is genshi.core.COMMENT:
- continue
- yield kind, data, pos
+ try:
+ Suite(code).execute(dict())
+ except KeyError:
+ line = traceback.extract_tb(sys.exc_info()[2])[-1][1]
+ if line == 2:
+ return True
+ else:
+ return False
+
+#: True if Genshi removes all blank lines from a code block before
+#: executing it; False indicates that Genshi only removes leading
+#: and trailing blank lines. See
+#: :func:`_genshi_removes_blank_lines` for an explanation of this.
+GENSHI_REMOVES_BLANK_LINES = _genshi_removes_blank_lines()
+
+
+class DefaultGenshiDataProvider(DefaultTemplateDataProvider):
+ """ Template data provider for Genshi templates. Cheetah and
+ Genshi currently differ over the value of the ``path`` variable,
+ which is why this is necessary. """
+
+ def get_template_data(self, entry, metadata, template):
+ rv = DefaultTemplateDataProvider.get_template_data(self, entry,
+ metadata, template)
+ rv['path'] = template
+ return rv
class CfgGenshiGenerator(CfgGenerator):
@@ -92,10 +84,8 @@ class CfgGenshiGenerator(CfgGenerator):
#: occurred.
pyerror_re = re.compile(r'<\w+ u?[\'"](.*?)\s*\.\.\.[\'"]>')
- def __init__(self, fname, spec, encoding):
- CfgGenerator.__init__(self, fname, spec, encoding)
- if not HAS_GENSHI:
- raise PluginExecutionError("Genshi is not available")
+ def __init__(self, fname, spec):
+ CfgGenerator.__init__(self, fname, spec)
self.template = None
self.loader = self.__loader_cls__(max_cache_size=0)
__init__.__doc__ = CfgGenerator.__init__.__doc__
@@ -105,19 +95,18 @@ class CfgGenshiGenerator(CfgGenerator):
raise PluginExecutionError("Failed to load template %s" %
self.name)
- fname = entry.get('realname', entry.get('name'))
- stream = \
- self.template.generate(name=fname,
- metadata=metadata,
- path=self.name,
- source_path=self.name,
- repo=SETUP['repo']).filter(removecomment)
+ stream = self.template.generate(
+ **get_template_data(
+ entry, metadata, self.name,
+ default=DefaultGenshiDataProvider())).filter(removecomment)
try:
try:
- return stream.render('text', encoding=self.encoding,
+ return stream.render('text',
+ encoding=Bcfg2.Options.setup.encoding,
strip_whitespace=False)
except TypeError:
- return stream.render('text', encoding=self.encoding)
+ return stream.render('text',
+ encoding=Bcfg2.Options.setup.encoding)
except UndefinedError:
# a failure in a genshi expression _other_ than %{ python ... %}
err = sys.exc_info()[1]
@@ -129,6 +118,8 @@ class CfgGenshiGenerator(CfgGenerator):
quad[2]))
raise
except:
+ # this needs to be a blanket except, since it can catch
+ # any error raised by the genshi template.
self._handle_genshi_exception(sys.exc_info())
get_data.__doc__ = CfgGenerator.get_data.__doc__
@@ -196,9 +187,10 @@ class CfgGenshiGenerator(CfgGenerator):
def handle_event(self, event):
CfgGenerator.handle_event(self, event)
try:
- self.template = self.loader.load(self.name, cls=NewTextTemplate,
- encoding=self.encoding)
- except:
+ self.template = self.loader.load(
+ self.name, cls=NewTextTemplate,
+ encoding=Bcfg2.Options.setup.encoding)
+ except TemplateError:
raise PluginExecutionError("Failed to load template: %s" %
sys.exc_info()[1])
handle_event.__doc__ = CfgGenerator.handle_event.__doc__
diff --git a/src/lib/Bcfg2/Server/Plugins/Cfg/CfgInfoXML.py b/src/lib/Bcfg2/Server/Plugins/Cfg/CfgInfoXML.py
index 3b6fc8fa0..886b3993b 100644
--- a/src/lib/Bcfg2/Server/Plugins/Cfg/CfgInfoXML.py
+++ b/src/lib/Bcfg2/Server/Plugins/Cfg/CfgInfoXML.py
@@ -1,6 +1,6 @@
""" Handle info.xml files """
-from Bcfg2.Server.Plugin import PluginExecutionError, InfoXML
+from Bcfg2.Server.Plugin import InfoXML
from Bcfg2.Server.Plugins.Cfg import CfgInfo
@@ -17,21 +17,9 @@ class CfgInfoXML(CfgInfo):
__init__.__doc__ = CfgInfo.__init__.__doc__
def bind_info_to_entry(self, entry, metadata):
- mdata = dict()
- self.infoxml.pnode.Match(metadata, mdata, entry=entry)
- if 'Info' not in mdata:
- raise PluginExecutionError("Failed to set metadata for file %s" %
- entry.get('name'))
- self._set_info(entry, mdata['Info'][None])
+ self.infoxml.BindEntry(entry, metadata)
bind_info_to_entry.__doc__ = CfgInfo.bind_info_to_entry.__doc__
def handle_event(self, event):
self.infoxml.HandleEvent()
handle_event.__doc__ = CfgInfo.handle_event.__doc__
-
- def _set_info(self, entry, info):
- CfgInfo._set_info(self, entry, info)
- if '__children__' in info:
- for child in info['__children__']:
- entry.append(child)
- _set_info.__doc__ = CfgInfo._set_info.__doc__
diff --git a/src/lib/Bcfg2/Server/Plugins/Cfg/CfgJinja2Generator.py b/src/lib/Bcfg2/Server/Plugins/Cfg/CfgJinja2Generator.py
new file mode 100644
index 000000000..cff9ff61e
--- /dev/null
+++ b/src/lib/Bcfg2/Server/Plugins/Cfg/CfgJinja2Generator.py
@@ -0,0 +1,91 @@
+""" The CfgJinja2Generator allows you to use the `Jinja2
+<http://jinja.pocoo.org/>`_ templating system to generate
+:ref:`server-plugins-generators-cfg` files. """
+
+import os
+import sys
+import Bcfg2.Options
+from Bcfg2.Server.Plugin import PluginExecutionError, \
+ DefaultTemplateDataProvider, get_template_data
+from Bcfg2.Server.Plugins.Cfg import CfgGenerator
+
+try:
+ from jinja2 import Environment, FileSystemLoader
+ HAS_JINJA2 = True
+except ImportError:
+ HAS_JINJA2 = False
+
+
+class RelEnvironment(Environment):
+ """Override join_path() to enable relative template paths."""
+ def join_path(self, template, parent):
+ return os.path.join(os.path.dirname(parent), template)
+
+
+class DefaultJinja2DataProvider(DefaultTemplateDataProvider):
+ """ Template data provider for Jinja2 templates. Jinja2 and
+ Genshi currently differ over the value of the ``path`` variable,
+ which is why this is necessary. """
+
+ def get_template_data(self, entry, metadata, template):
+ rv = DefaultTemplateDataProvider.get_template_data(self, entry,
+ metadata, template)
+ rv['path'] = rv['name']
+ return rv
+
+
+class CfgJinja2Generator(CfgGenerator):
+ """ The CfgJinja2Generator allows you to use the `Jinja2
+ <http://jinja.pocoo.org/>`_ templating system to generate
+ :ref:`server-plugins-generators-cfg` files. """
+
+ #: Handle .jinja2 files
+ __extensions__ = ['jinja2']
+
+ #: ``__loader_cls__`` is the class that will be instantiated to
+ #: load the template files. It must implement one public function,
+ #: ``load()``, as :class:`genshi.template.TemplateLoader`.
+ __loader_cls__ = FileSystemLoader
+
+ #: ``__environment_cls__`` is the class that will be instantiated to
+ #: store the jinja2 environment. It must implement one public function,
+ #: ``get_template()``, as :class:`jinja2.Environment`.
+ __environment_cls__ = RelEnvironment
+
+ #: Ignore ``.jinja2_include`` files so they can be used with the
+ #: Jinja2 ``{% include ... %}`` directive without raising warnings.
+ __ignore__ = ["jinja2_include"]
+
+ #: Low priority to avoid matching host- or group-specific
+ #: .crypt.jinja2 files
+ __priority__ = 50
+
+ def __init__(self, fname, spec):
+ CfgGenerator.__init__(self, fname, spec)
+ if not HAS_JINJA2:
+ raise PluginExecutionError("Jinja2 is not available")
+ self.template = None
+ encoding = Bcfg2.Options.setup.encoding
+ self.loader = self.__loader_cls__('/',
+ encoding=encoding)
+ self.environment = self.__environment_cls__(loader=self.loader)
+ __init__.__doc__ = CfgGenerator.__init__.__doc__
+
+ def get_data(self, entry, metadata):
+ if self.template is None:
+ raise PluginExecutionError("Failed to load template %s" %
+ self.name)
+ return self.template.render(
+ get_template_data(entry, metadata, self.name,
+ default=DefaultJinja2DataProvider()))
+ get_data.__doc__ = CfgGenerator.get_data.__doc__
+
+ def handle_event(self, event):
+ CfgGenerator.handle_event(self, event)
+ try:
+ self.template = \
+ self.environment.get_template(self.name)
+ except:
+ raise PluginExecutionError("Failed to load template: %s" %
+ sys.exc_info()[1])
+ handle_event.__doc__ = CfgGenerator.handle_event.__doc__
diff --git a/src/lib/Bcfg2/Server/Plugins/Cfg/CfgLegacyInfo.py b/src/lib/Bcfg2/Server/Plugins/Cfg/CfgLegacyInfo.py
deleted file mode 100644
index 5122d9aa1..000000000
--- a/src/lib/Bcfg2/Server/Plugins/Cfg/CfgLegacyInfo.py
+++ /dev/null
@@ -1,46 +0,0 @@
-""" Handle info and :info files """
-
-import Bcfg2.Server.Plugin
-from Bcfg2.Server.Plugins.Cfg import CfgInfo
-
-
-class CfgLegacyInfo(CfgInfo):
- """ CfgLegacyInfo handles :file:`info` and :file:`:info` files for
- :ref:`server-plugins-generators-cfg` """
-
- #: Handle :file:`info` and :file:`:info`
- __basenames__ = ['info', ':info']
-
- #: CfgLegacyInfo is deprecated. Use
- #: :class:`Bcfg2.Server.Plugins.Cfg.CfgInfoXML.CfgInfoXML` instead.
- deprecated = True
-
- def __init__(self, path):
- CfgInfo.__init__(self, path)
- self.path = path
-
- #: The set of info metadata stored in the file
- self.metadata = None
- __init__.__doc__ = CfgInfo.__init__.__doc__
-
- def bind_info_to_entry(self, entry, metadata):
- self._set_info(entry, self.metadata)
- bind_info_to_entry.__doc__ = CfgInfo.bind_info_to_entry.__doc__
-
- def handle_event(self, event):
- if event.code2str() == 'deleted':
- return
- self.metadata = dict()
- for line in open(self.path).readlines():
- match = Bcfg2.Server.Plugin.INFO_REGEX.match(line)
- if not match:
- self.logger.warning("Failed to parse line in %s: %s" %
- (event.filename, line))
- continue
- else:
- for key, value in list(match.groupdict().items()):
- if value:
- self.metadata[key] = value
- if ('mode' in self.metadata and len(self.metadata['mode']) == 3):
- self.metadata['mode'] = "0%s" % self.metadata['mode']
- handle_event.__doc__ = CfgInfo.handle_event.__doc__
diff --git a/src/lib/Bcfg2/Server/Plugins/Cfg/CfgPrivateKeyCreator.py b/src/lib/Bcfg2/Server/Plugins/Cfg/CfgPrivateKeyCreator.py
index ac031461a..43035b410 100644
--- a/src/lib/Bcfg2/Server/Plugins/Cfg/CfgPrivateKeyCreator.py
+++ b/src/lib/Bcfg2/Server/Plugins/Cfg/CfgPrivateKeyCreator.py
@@ -3,18 +3,13 @@
import os
import shutil
import tempfile
-import subprocess
-from Bcfg2.Server.Plugin import PluginExecutionError, StructFile
-from Bcfg2.Server.Plugins.Cfg import CfgCreator, CfgCreationError, SETUP
+import Bcfg2.Options
+from Bcfg2.Utils import Executor
+from Bcfg2.Server.Plugins.Cfg import XMLCfgCreator, CfgCreationError
from Bcfg2.Server.Plugins.Cfg.CfgPublicKeyCreator import CfgPublicKeyCreator
-try:
- import Bcfg2.Encryption
- HAS_CRYPTO = True
-except ImportError:
- HAS_CRYPTO = False
-class CfgPrivateKeyCreator(CfgCreator, StructFile):
+class CfgPrivateKeyCreator(XMLCfgCreator):
"""The CfgPrivateKeyCreator creates SSH keys on the fly. """
#: Different configurations for different clients/groups can be
@@ -24,36 +19,21 @@ class CfgPrivateKeyCreator(CfgCreator, StructFile):
#: Handle XML specifications of private keys
__basenames__ = ['privkey.xml']
- def __init__(self, fname):
- CfgCreator.__init__(self, fname)
- StructFile.__init__(self, fname)
+ cfg_section = "sshkeys"
+ options = [
+ Bcfg2.Options.Option(
+ cf=("sshkeys", "category"), dest="sshkeys_category",
+ help="Metadata category that generated SSH keys are specific to"),
+ Bcfg2.Options.Option(
+ cf=("sshkeys", "passphrase"), dest="sshkeys_passphrase",
+ help="Passphrase used to encrypt generated SSH private keys")]
+ def __init__(self, fname):
+ XMLCfgCreator.__init__(self, fname)
pubkey_path = os.path.dirname(self.name) + ".pub"
pubkey_name = os.path.join(pubkey_path, os.path.basename(pubkey_path))
self.pubkey_creator = CfgPublicKeyCreator(pubkey_name)
-
- @property
- def category(self):
- """ The name of the metadata category that generated keys are
- specific to """
- if (SETUP.cfp.has_section("sshkeys") and
- SETUP.cfp.has_option("sshkeys", "category")):
- return SETUP.cfp.get("sshkeys", "category")
- return None
-
- @property
- def passphrase(self):
- """ The passphrase used to encrypt private keys """
- if (HAS_CRYPTO and
- SETUP.cfp.has_section("sshkeys") and
- SETUP.cfp.has_option("sshkeys", "passphrase")):
- return Bcfg2.Encryption.get_passphrases(SETUP)[
- SETUP.cfp.get("sshkeys", "passphrase")]
- return None
-
- def handle_event(self, event):
- CfgCreator.handle_event(self, event)
- StructFile.HandleEvent(self, event)
+ self.cmd = Executor()
def _gen_keypair(self, metadata, spec=None):
        """ Generate a keypair according to the given client metadata
@@ -67,7 +47,7 @@ class CfgPrivateKeyCreator(CfgCreator, StructFile):
the given client metadata, and may be obtained by
doing ``self.XMLMatch(metadata)``
:type spec: lxml.etree._Element
- :returns: string - The filename of the private key
+ :returns: tuple - (private key data, public key data)
"""
if spec is None:
spec = self.XMLMatch(metadata)
@@ -100,61 +80,20 @@ class CfgPrivateKeyCreator(CfgCreator, StructFile):
log_cmd.append("''")
self.debug_log("Cfg: Generating new SSH key pair: %s" %
" ".join(log_cmd))
- proc = subprocess.Popen(cmd, stdout=subprocess.PIPE,
- stderr=subprocess.PIPE)
- err = proc.communicate()[1]
- if proc.wait():
+ result = self.cmd.run(cmd)
+ if not result.success:
raise CfgCreationError("Cfg: Failed to generate SSH key pair "
"at %s for %s: %s" %
- (filename, metadata.hostname, err))
- elif err:
+ (filename, metadata.hostname,
+ result.error))
+ elif result.stderr:
self.logger.warning("Cfg: Generated SSH key pair at %s for %s "
"with errors: %s" % (filename,
metadata.hostname,
- err))
- return filename
- except:
+ result.stderr))
+ return (open(filename).read(), open(filename + ".pub").read())
+ finally:
shutil.rmtree(tempdir)
- raise
-
- def get_specificity(self, metadata, spec=None):
- """ Get config settings for key generation specificity
- (per-host or per-group).
-
- :param metadata: The client metadata to create data for
- :type metadata: Bcfg2.Server.Plugins.Metadata.ClientMetadata
- :param spec: The key specification to follow when creating the
- keys. This should be an XML document that only
- contains key specification data that applies to
- the given client metadata, and may be obtained by
- doing ``self.XMLMatch(metadata)``
- :type spec: lxml.etree._Element
- :returns: dict - A dict of specificity arguments suitable for
- passing to
- :func:`Bcfg2.Server.Plugins.Cfg.CfgCreator.write_data`
- or
- :func:`Bcfg2.Server.Plugins.Cfg.CfgCreator.get_filename`
- """
- if spec is None:
- spec = self.XMLMatch(metadata)
- category = spec.get("category", self.category)
- if category is None:
- per_host_default = "true"
- else:
- per_host_default = "false"
- per_host = spec.get("perhost", per_host_default).lower() == "true"
-
- specificity = dict(host=metadata.hostname)
- if category and not per_host:
- group = metadata.group_in_category(category)
- if group:
- specificity = dict(group=group,
- prio=int(spec.get("priority", 50)))
- else:
- self.logger.info("Cfg: %s has no group in category %s, "
- "creating host-specific key" %
- (metadata.hostname, category))
- return specificity
# pylint: disable=W0221
def create_data(self, entry, metadata):
@@ -168,77 +107,18 @@ class CfgPrivateKeyCreator(CfgCreator, StructFile):
:returns: string - The private key data
"""
spec = self.XMLMatch(metadata)
- specificity = self.get_specificity(metadata, spec)
- filename = self._gen_keypair(metadata, spec)
-
- try:
- # write the public key, stripping the comment and
- # replacing it with a comment that specifies the filename.
- kdata = open(filename + ".pub").read().split()[:2]
- kdata.append(self.pubkey_creator.get_filename(**specificity))
- pubkey = " ".join(kdata) + "\n"
- self.pubkey_creator.write_data(pubkey, **specificity)
-
- # encrypt the private key, write to the proper place, and
- # return it
- privkey = open(filename).read()
- if HAS_CRYPTO and self.passphrase:
- self.debug_log("Cfg: Encrypting key data at %s" % filename)
- privkey = Bcfg2.Encryption.ssl_encrypt(
- privkey,
- self.passphrase,
- algorithm=Bcfg2.Encryption.get_algorithm(SETUP))
- specificity['ext'] = '.crypt'
-
- self.write_data(privkey, **specificity)
- return privkey
- finally:
- shutil.rmtree(os.path.dirname(filename))
+ specificity = self.get_specificity(metadata)
+ privkey, pubkey = self._gen_keypair(metadata, spec)
+
+ # write the public key, stripping the comment and
+ # replacing it with a comment that specifies the filename.
+ kdata = pubkey.split()[:2]
+ kdata.append(self.pubkey_creator.get_filename(**specificity))
+ pubkey = " ".join(kdata) + "\n"
+ self.pubkey_creator.write_data(pubkey, **specificity)
+
+ # encrypt the private key, write to the proper place, and
+ # return it
+ self.write_data(privkey, **specificity)
+ return privkey
# pylint: enable=W0221
-
- def Index(self):
- StructFile.Index(self)
- if HAS_CRYPTO:
- for el in self.xdata.xpath("//*[@encrypted]"):
- try:
- el.text = self._decrypt(el).encode('ascii',
- 'xmlcharrefreplace')
- except UnicodeDecodeError:
- self.logger.info("Cfg: Decrypted %s to gibberish, skipping"
- % el.tag)
- except Bcfg2.Encryption.EVPError:
- default_strict = SETUP.cfp.get(
- Bcfg2.Encryption.CFG_SECTION, "decrypt",
- default="strict")
- strict = self.xdata.get("decrypt",
- default_strict) == "strict"
- msg = "Cfg: Failed to decrypt %s element in %s" % \
- (el.tag, self.name)
- if strict:
- raise PluginExecutionError(msg)
- else:
- self.logger.debug(msg)
-
- def _decrypt(self, element):
- """ Decrypt a single encrypted element """
- if not element.text or not element.text.strip():
- return
- passes = Bcfg2.Encryption.get_passphrases(SETUP)
- try:
- passphrase = passes[element.get("encrypted")]
- try:
- return Bcfg2.Encryption.ssl_decrypt(
- element.text,
- passphrase,
- algorithm=Bcfg2.Encryption.get_algorithm(SETUP))
- except Bcfg2.Encryption.EVPError:
- # error is raised below
- pass
- except KeyError:
- # bruteforce_decrypt raises an EVPError with a sensible
- # error message, so we just let it propagate up the stack
- return Bcfg2.Encryption.bruteforce_decrypt(
- element.text,
- passphrases=passes.values(),
- algorithm=Bcfg2.Encryption.get_algorithm(SETUP))
- raise Bcfg2.Encryption.EVPError("Failed to decrypt")
diff --git a/src/lib/Bcfg2/Server/Plugins/Cfg/CfgPublicKeyCreator.py b/src/lib/Bcfg2/Server/Plugins/Cfg/CfgPublicKeyCreator.py
index 4bd8690ed..3f2d1030b 100644
--- a/src/lib/Bcfg2/Server/Plugins/Cfg/CfgPublicKeyCreator.py
+++ b/src/lib/Bcfg2/Server/Plugins/Cfg/CfgPublicKeyCreator.py
@@ -8,7 +8,7 @@ import tempfile
import lxml.etree
from Bcfg2.Utils import Executor
from Bcfg2.Server.Plugin import StructFile, PluginExecutionError
-from Bcfg2.Server.Plugins.Cfg import CfgCreator, CfgCreationError, CFG
+from Bcfg2.Server.Plugins.Cfg import CfgCreator, CfgCreationError, get_cfg
class CfgPublicKeyCreator(CfgCreator, StructFile):
@@ -21,17 +21,20 @@ class CfgPublicKeyCreator(CfgCreator, StructFile):
creation of a keypair when a public key is created. """
#: Different configurations for different clients/groups can be
- #: handled with Client and Group tags within privkey.xml
+ #: handled with Client and Group tags within pubkey.xml
__specific__ = False
     #: Handle XML specifications of public keys
__basenames__ = ['pubkey.xml']
+ #: No text content on any tags, so encryption support disabled
+ encryption = False
+
def __init__(self, fname):
CfgCreator.__init__(self, fname)
StructFile.__init__(self, fname)
- self.cfg = CFG
- self.core = CFG.core
+ self.cfg = get_cfg()
+ self.core = self.cfg.core
self.cmd = Executor()
def create_data(self, entry, metadata):
diff --git a/src/lib/Bcfg2/Server/Plugins/Cfg/CfgSSLCACertCreator.py b/src/lib/Bcfg2/Server/Plugins/Cfg/CfgSSLCACertCreator.py
new file mode 100644
index 000000000..92fcc4cd8
--- /dev/null
+++ b/src/lib/Bcfg2/Server/Plugins/Cfg/CfgSSLCACertCreator.py
@@ -0,0 +1,255 @@
+""" Cfg creator that creates SSL certs """
+
+import os
+import sys
+import tempfile
+import lxml.etree
+import Bcfg2.Options
+from Bcfg2.Utils import Executor
+from Bcfg2.Compat import ConfigParser
+from Bcfg2.Server.FileMonitor import get_fam
+from Bcfg2.Server.Plugin import PluginExecutionError
+from Bcfg2.Server.Plugins.Cfg import CfgCreationError, XMLCfgCreator, \
+ CfgCreator, CfgVerifier, CfgVerificationError, get_cfg
+
+
+class CfgSSLCACertCreator(XMLCfgCreator, CfgVerifier):
+ """ This class acts as both a Cfg creator that creates SSL certs,
+ and as a Cfg verifier that verifies SSL certs. """
+
+ #: Different configurations for different clients/groups can be
+    #: handled with Client and Group tags within sslcert.xml
+ __specific__ = False
+
+    #: Handle XML specifications of SSL certificates
+ __basenames__ = ['sslcert.xml']
+
+ cfg_section = "sslca"
+ options = [
+ Bcfg2.Options.Option(
+ cf=("sslca", "category"), dest="sslca_category",
+ help="Metadata category that generated SSL keys are specific to"),
+ Bcfg2.Options.Option(
+ cf=("sslca", "passphrase"), dest="sslca_passphrase",
+ help="Passphrase used to encrypt generated SSL keys"),
+ Bcfg2.Options.WildcardSectionGroup(
+ Bcfg2.Options.PathOption(
+ cf=("sslca_*", "config"),
+ help="Path to the openssl config for the CA"),
+ Bcfg2.Options.Option(
+ cf=("sslca_*", "passphrase"),
+ help="Passphrase for the CA private key"),
+ Bcfg2.Options.PathOption(
+ cf=("sslca_*", "chaincert"),
+ help="Path to the SSL chaining certificate for verification"),
+ Bcfg2.Options.BooleanOption(
+ cf=("sslca_*", "root_ca"),
+ help="Whether or not <chaincert> is a root CA (as opposed to "
+ "an intermediate cert"),
+ prefix="")]
+
+ def __init__(self, fname):
+ XMLCfgCreator.__init__(self, fname)
+ CfgVerifier.__init__(self, fname, None)
+ self.cmd = Executor()
+ self.cfg = get_cfg()
+
+ def build_req_config(self, metadata):
+ """ Generates a temporary openssl configuration file that is
+ used to generate the required certificate request. """
+ fd, fname = tempfile.mkstemp()
+ cfp = ConfigParser.ConfigParser({})
+ cfp.optionxform = str
+ defaults = dict(
+ req=dict(
+ default_md='sha1',
+ distinguished_name='req_distinguished_name',
+ req_extensions='v3_req',
+ x509_extensions='v3_req',
+ prompt='no'),
+ req_distinguished_name=dict(),
+ v3_req=dict(subjectAltName='@alt_names'),
+ alt_names=dict())
+ for section in list(defaults.keys()):
+ cfp.add_section(section)
+ for key in defaults[section]:
+ cfp.set(section, key, defaults[section][key])
+ spec = self.XMLMatch(metadata)
+ cert = spec.find("Cert")
+ altnamenum = 1
+ altnames = spec.findall('subjectAltName')
+ altnames.extend(list(metadata.aliases))
+ altnames.append(metadata.hostname)
+ for altname in altnames:
+ cfp.set('alt_names', 'DNS.' + str(altnamenum), altname)
+ altnamenum += 1
+ for item in ['C', 'L', 'ST', 'O', 'OU', 'emailAddress']:
+ if cert.get(item):
+ cfp.set('req_distinguished_name', item, cert.get(item))
+ cfp.set('req_distinguished_name', 'CN', metadata.hostname)
+ self.debug_log("Cfg: Writing temporary CSR config to %s" % fname)
+ try:
+ cfp.write(os.fdopen(fd, 'w'))
+ except IOError:
+ raise CfgCreationError("Cfg: Failed to write temporary CSR config "
+ "file: %s" % sys.exc_info()[1])
+ return fname
+
+ def build_request(self, keyfile, metadata):
+ """ Create the certificate request """
+ req_config = self.build_req_config(metadata)
+ try:
+ fd, req = tempfile.mkstemp()
+ os.close(fd)
+ cert = self.XMLMatch(metadata).find("Cert")
+ days = cert.get("days", "365")
+ cmd = ["openssl", "req", "-new", "-config", req_config,
+ "-days", days, "-key", keyfile, "-text", "-out", req]
+ result = self.cmd.run(cmd)
+ if not result.success:
+ raise CfgCreationError("Failed to generate CSR: %s" %
+ result.error)
+ return req
+ finally:
+ try:
+ os.unlink(req_config)
+ except OSError:
+ self.logger.error("Cfg: Failed to unlink temporary CSR "
+ "config: %s" % sys.exc_info()[1])
+
+ def get_ca(self, name):
+ """ get a dict describing a CA from the config file """
+ rv = dict()
+ prefix = "sslca_%s_" % name
+ for attr in dir(Bcfg2.Options.setup):
+ if attr.startswith(prefix):
+ rv[attr[len(prefix):]] = getattr(Bcfg2.Options.setup, attr)
+ return rv
+
+ def create_data(self, entry, metadata):
+ """ generate a new cert """
+ self.logger.info("Cfg: Generating new SSL cert for %s" % self.name)
+ cert = self.XMLMatch(metadata).find("Cert")
+ ca = self.get_ca(cert.get('ca', 'default'))
+ req = self.build_request(self._get_keyfile(cert, metadata), metadata)
+ try:
+ days = cert.get('days', '365')
+ cmd = ["openssl", "ca", "-config", ca['config'], "-in", req,
+ "-days", days, "-batch"]
+ passphrase = ca.get('passphrase')
+ if passphrase:
+ cmd.extend(["-passin", "pass:%s" % passphrase])
+ result = self.cmd.run(cmd)
+ if not result.success:
+ raise CfgCreationError("Failed to generate cert: %s" %
+ result.error)
+ except KeyError:
+ raise CfgCreationError("Cfg: [sslca_%s] section has no 'config' "
+ "option" % cert.get('ca', 'default'))
+ finally:
+ try:
+ os.unlink(req)
+ except OSError:
+ self.logger.error("Cfg: Failed to unlink temporary CSR: %s " %
+ sys.exc_info()[1])
+ data = result.stdout
+ if cert.get('append_chain') and 'chaincert' in ca:
+ data += open(ca['chaincert']).read()
+
+ self.write_data(data, **self.get_specificity(metadata))
+ return data
+
+ def verify_entry(self, entry, metadata, data):
+ fd, fname = tempfile.mkstemp()
+ self.debug_log("Cfg: Writing SSL cert %s to temporary file %s for "
+ "verification" % (entry.get("name"), fname))
+ os.fdopen(fd, 'w').write(data)
+ cert = self.XMLMatch(metadata).find("Cert")
+ ca = self.get_ca(cert.get('ca', 'default'))
+ try:
+ if ca.get('chaincert'):
+ self.verify_cert_against_ca(fname, entry, metadata)
+ self.verify_cert_against_key(fname,
+ self._get_keyfile(cert, metadata))
+ finally:
+ os.unlink(fname)
+
+ def _get_keyfile(self, cert, metadata):
+ """ Given a <Cert/> element and client metadata, return the
+ full path to the file on the filesystem that the key lives in."""
+ keypath = cert.get("key")
+ eset = self.cfg.entries[keypath]
+ try:
+ return eset.best_matching(metadata).name
+ except PluginExecutionError:
+ # SSL key needs to be created
+ try:
+ creator = eset.best_matching(metadata,
+ eset.get_handlers(metadata,
+ CfgCreator))
+ except PluginExecutionError:
+ raise CfgCreationError("Cfg: No SSL key or key creator "
+ "defined for %s" % keypath)
+
+ keyentry = lxml.etree.Element("Path", name=keypath)
+ creator.create_data(keyentry, metadata)
+
+ tries = 0
+ while True:
+ if tries >= 10:
+ raise CfgCreationError("Cfg: Timed out waiting for event "
+ "on SSL key at %s" % keypath)
+ get_fam().handle_events_in_interval(1)
+ try:
+ return eset.best_matching(metadata).name
+ except PluginExecutionError:
+ tries += 1
+ continue
+
+ def verify_cert_against_ca(self, filename, entry, metadata):
+ """
+ check that a certificate validates against the ca cert,
+ and that it has not expired.
+ """
+ cert = self.XMLMatch(metadata).find("Cert")
+ ca = self.get_ca(cert.get("ca", "default"))
+ chaincert = ca.get('chaincert')
+ cmd = ["openssl", "verify"]
+ is_root = ca.get('root_ca', "false").lower() == 'true'
+ if is_root:
+ cmd.append("-CAfile")
+ else:
+ # verifying based on an intermediate cert
+ cmd.extend(["-purpose", "sslserver", "-untrusted"])
+ cmd.extend([chaincert, filename])
+ self.debug_log("Cfg: Verifying %s against CA" % entry.get("name"))
+ result = self.cmd.run(cmd)
+ if result.stdout == cert + ": OK\n":
+ self.debug_log("Cfg: %s verified successfully against CA" %
+ entry.get("name"))
+ else:
+ raise CfgVerificationError("%s failed verification against CA: %s"
+ % (entry.get("name"), result.error))
+
+ def _get_modulus(self, fname, ftype="x509"):
+ """ get the modulus from the given file """
+ cmd = ["openssl", ftype, "-noout", "-modulus", "-in", fname]
+ self.debug_log("Cfg: Getting modulus of %s for verification: %s" %
+ (fname, " ".join(cmd)))
+ result = self.cmd.run(cmd)
+ if not result.success:
+ raise CfgVerificationError("Failed to get modulus of %s: %s" %
+ (fname, result.error))
+ return result.stdout.strip()
+
+ def verify_cert_against_key(self, filename, keyfile):
+ """ check that a certificate validates against its private
+ key. """
+ cert = self._get_modulus(filename)
+ key = self._get_modulus(keyfile, ftype="rsa")
+ if cert == key:
+ self.debug_log("Cfg: %s verified successfully against key %s" %
+ (filename, keyfile))
+ else:
+ raise CfgVerificationError("%s failed verification against key %s"
+ % (filename, keyfile))
diff --git a/src/lib/Bcfg2/Server/Plugins/Cfg/CfgSSLCAKeyCreator.py b/src/lib/Bcfg2/Server/Plugins/Cfg/CfgSSLCAKeyCreator.py
new file mode 100644
index 000000000..a158302be
--- /dev/null
+++ b/src/lib/Bcfg2/Server/Plugins/Cfg/CfgSSLCAKeyCreator.py
@@ -0,0 +1,36 @@
+""" Cfg creator that creates SSL keys """
+
+from Bcfg2.Utils import Executor
+from Bcfg2.Server.Plugins.Cfg import CfgCreationError, XMLCfgCreator
+
+
+class CfgSSLCAKeyCreator(XMLCfgCreator):
+ """ Cfg creator that creates SSL keys """
+
+ #: Different configurations for different clients/groups can be
+ #: handled with Client and Group tags within sslkey.xml
+ __specific__ = False
+
+ __basenames__ = ["sslkey.xml"]
+
+ cfg_section = "sslca"
+
+ def create_data(self, entry, metadata):
+ self.logger.info("Cfg: Generating new SSL key for %s" % self.name)
+ spec = self.XMLMatch(metadata)
+ key = spec.find("Key")
+        if key is None:
+ key = dict()
+ ktype = key.get('type', 'rsa')
+ bits = key.get('bits', '2048')
+ if ktype == 'rsa':
+ cmd = ["openssl", "genrsa", bits]
+ elif ktype == 'dsa':
+ cmd = ["openssl", "dsaparam", "-noout", "-genkey", bits]
+ result = Executor().run(cmd)
+ if not result.success:
+ raise CfgCreationError("Failed to generate key %s for %s: %s" %
+ (self.name, metadata.hostname,
+ result.error))
+ self.write_data(result.stdout, **self.get_specificity(metadata))
+ return result.stdout
diff --git a/src/lib/Bcfg2/Server/Plugins/Cfg/__init__.py b/src/lib/Bcfg2/Server/Plugins/Cfg/__init__.py
index c6e2d0acb..355e53588 100644
--- a/src/lib/Bcfg2/Server/Plugins/Cfg/__init__.py
+++ b/src/lib/Bcfg2/Server/Plugins/Cfg/__init__.py
@@ -3,60 +3,39 @@
import re
import os
import sys
-import stat
import errno
import operator
import lxml.etree
import Bcfg2.Options
import Bcfg2.Server.Plugin
-import Bcfg2.Server.Lint
-from fnmatch import fnmatch
from Bcfg2.Server.Plugin import PluginExecutionError
# pylint: disable=W0622
-from Bcfg2.Compat import u_str, unicode, b64encode, walk_packages, \
- any, oct_mode
+from Bcfg2.Compat import u_str, unicode, b64encode, any, walk_packages
# pylint: enable=W0622
-#: SETUP contains a reference to the
-#: :class:`Bcfg2.Options.OptionParser` created by the Bcfg2 core for
-#: parsing command-line and config file options.
-#: :class:`Bcfg2.Server.Plugins.Cfg.Cfg` stores it in a module global
-#: so that the handler objects can access it, because there is no other
-#: facility for passing a setup object from a
-#: :class:`Bcfg2.Server.Plugin.helpers.GroupSpool` to its
-#: :class:`Bcfg2.Server.Plugin.helpers.EntrySet` objects and thence to
-#: the EntrySet children.
-SETUP = None
-
-#: CFG is a reference to the :class:`Bcfg2.Server.Plugins.Cfg.Cfg`
-#: plugin object created by the Bcfg2 core. This is provided so that
-#: the handler objects can access it as necessary, since the existing
-#: :class:`Bcfg2.Server.Plugin.helpers.GroupSpool` and
-#: :class:`Bcfg2.Server.Plugin.helpers.EntrySet` classes have no
-#: facility for passing it otherwise.
-CFG = None
-
-_HANDLERS = []
-
-
-def handlers():
- """ A list of Cfg handler classes. Loading the handlers must
- be done at run-time, not at compile-time, or it causes a
- circular import and Bad Things Happen."""
- if not _HANDLERS:
- for submodule in walk_packages(path=__path__, prefix=__name__ + "."):
- mname = submodule[1].rsplit('.', 1)[-1]
- module = getattr(__import__(submodule[1]).Server.Plugins.Cfg,
- mname)
- hdlr = getattr(module, mname)
- if issubclass(hdlr, CfgBaseFileMatcher):
- _HANDLERS.append(hdlr)
- _HANDLERS.sort(key=operator.attrgetter("__priority__"))
- return _HANDLERS
-
-
-class CfgBaseFileMatcher(Bcfg2.Server.Plugin.SpecificData,
- Bcfg2.Server.Plugin.Debuggable):
+try:
+ import Bcfg2.Server.Encryption
+ HAS_CRYPTO = True
+except ImportError:
+ HAS_CRYPTO = False
+
+_handlers = [m[1] # pylint: disable=C0103
+ for m in walk_packages(path=__path__)]
+
+_CFG = None
+
+
+def get_cfg():
+ """ Get the :class:`Bcfg2.Server.Plugins.Cfg.Cfg` plugin object
+ created by the Bcfg2 core. This is provided so that the handler
+ objects can access it as necessary, since the existing
+ :class:`Bcfg2.Server.Plugin.helpers.GroupSpool` and
+ :class:`Bcfg2.Server.Plugin.helpers.EntrySet` classes have no
+ facility for passing it otherwise."""
+ return _CFG
+
+
+class CfgBaseFileMatcher(Bcfg2.Server.Plugin.SpecificData):
""" .. currentmodule:: Bcfg2.Server.Plugins.Cfg
CfgBaseFileMatcher is the parent class for all Cfg handler
@@ -100,13 +79,10 @@ class CfgBaseFileMatcher(Bcfg2.Server.Plugin.SpecificData,
#: Flag to indicate an experimental handler.
experimental = False
- def __init__(self, name, specific, encoding):
+ def __init__(self, name, specific):
if not self.__specific__ and not specific:
specific = Bcfg2.Server.Plugin.Specificity(all=True)
- Bcfg2.Server.Plugin.SpecificData.__init__(self, name, specific,
- encoding)
- Bcfg2.Server.Plugin.Debuggable.__init__(self)
- self.encoding = encoding
+ Bcfg2.Server.Plugin.SpecificData.__init__(self, name, specific)
__init__.__doc__ = Bcfg2.Server.Plugin.SpecificData.__init__.__doc__ + \
"""
.. -----
@@ -197,7 +173,7 @@ class CfgGenerator(CfgBaseFileMatcher):
client. See :class:`Bcfg2.Server.Plugin.helpers.EntrySet` for more
details on how the best handler is chosen."""
- def __init__(self, name, specific, encoding):
+ def __init__(self, name, specific):
# we define an __init__ that just calls the parent __init__,
# so that we can set the docstring on __init__ to something
# different from the parent __init__ -- namely, the parent
@@ -205,7 +181,7 @@ class CfgGenerator(CfgBaseFileMatcher):
# which we use to delineate the actual docs from the
# .. autoattribute hacks we have to do to get private
# attributes included in sphinx 1.0 """
- CfgBaseFileMatcher.__init__(self, name, specific, encoding)
+ CfgBaseFileMatcher.__init__(self, name, specific)
__init__.__doc__ = CfgBaseFileMatcher.__init__.__doc__.split(".. -----")[0]
def get_data(self, entry, metadata): # pylint: disable=W0613
@@ -225,9 +201,9 @@ class CfgFilter(CfgBaseFileMatcher):
""" CfgFilters modify the initial content of a file after it has
been generated by a :class:`Bcfg2.Server.Plugins.Cfg.CfgGenerator`. """
- def __init__(self, name, specific, encoding):
+ def __init__(self, name, specific):
# see comment on CfgGenerator.__init__ above
- CfgBaseFileMatcher.__init__(self, name, specific, encoding)
+ CfgBaseFileMatcher.__init__(self, name, specific)
__init__.__doc__ = CfgBaseFileMatcher.__init__.__doc__.split(".. -----")[0]
def modify_data(self, entry, metadata, data):
@@ -249,10 +225,7 @@ class CfgFilter(CfgBaseFileMatcher):
class CfgInfo(CfgBaseFileMatcher):
""" CfgInfo handlers provide metadata (owner, group, paranoid,
- etc.) for a file entry.
-
- .. private-include: _set_info
- """
+ etc.) for a file entry. """
#: Whether or not the files handled by this handler are permitted
#: to have specificity indicators in their filenames -- e.g.,
@@ -268,7 +241,7 @@ class CfgInfo(CfgBaseFileMatcher):
.. -----
.. autoattribute:: Bcfg2.Server.Plugins.Cfg.CfgInfo.__specific__
"""
- CfgBaseFileMatcher.__init__(self, fname, None, None)
+ CfgBaseFileMatcher.__init__(self, fname, None)
def bind_info_to_entry(self, entry, metadata):
""" Assign the appropriate attributes to the entry, modifying
@@ -282,20 +255,6 @@ class CfgInfo(CfgBaseFileMatcher):
"""
raise NotImplementedError
- def _set_info(self, entry, info):
- """ Helper function to assign a dict of info attributes to an
- entry object. ``entry`` is modified in-place.
-
- :param entry: The abstract entry to bind the info to
- :type entry: lxml.etree._Element
- :param info: A dict of attribute: value pairs
- :type info: dict
- :returns: None
- """
- for key, value in list(info.items()):
- if not key.startswith("__"):
- entry.attrib[key] = value
-
class CfgVerifier(CfgBaseFileMatcher):
""" CfgVerifier handlers validate entry data once it has been
@@ -305,9 +264,9 @@ class CfgVerifier(CfgBaseFileMatcher):
etc.), or both.
"""
- def __init__(self, name, specific, encoding):
+ def __init__(self, name, specific):
# see comment on CfgGenerator.__init__ above
- CfgBaseFileMatcher.__init__(self, name, specific, encoding)
+ CfgBaseFileMatcher.__init__(self, name, specific)
__init__.__doc__ = CfgBaseFileMatcher.__init__.__doc__.split(".. -----")[0]
def verify_entry(self, entry, metadata, data):
@@ -338,18 +297,15 @@ class CfgCreator(CfgBaseFileMatcher):
#: file, and are thus not specific
__specific__ = False
- #: The CfgCreator interface is experimental at this time
- experimental = True
-
def __init__(self, fname):
"""
:param name: The full path to the file
:type name: string
.. -----
- .. autoattribute:: Bcfg2.Server.Plugins.Cfg.CfgCreator.__specific__
+        .. autoattribute:: Bcfg2.Server.Plugins.Cfg.CfgCreator.__specific__
"""
- CfgBaseFileMatcher.__init__(self, fname, None, None)
+ CfgBaseFileMatcher.__init__(self, fname, None)
def create_data(self, entry, metadata):
""" Create new data for the given entry and write it to disk
@@ -369,7 +325,9 @@ class CfgCreator(CfgBaseFileMatcher):
``host`` is given, it will be host-specific. It will be
group-specific if ``group`` and ``prio`` are given. If
neither ``host`` nor ``group`` is given, the filename will be
- non-specific.
+ non-specific. In general, this will be called as::
+
+ self.get_filename(**self.get_specificity(metadata))
:param host: The file applies to the given host
:type host: bool
@@ -400,6 +358,9 @@ class CfgCreator(CfgBaseFileMatcher):
written as a host-specific file, or as a group-specific file
if ``group`` and ``prio`` are given. If neither ``host`` nor
``group`` is given, it will be written as a non-specific file.
+ In general, this will be called as::
+
+ self.write_data(data, **self.get_specificity(metadata))
:param data: The data to write
:type data: string
@@ -419,7 +380,7 @@ class CfgCreator(CfgBaseFileMatcher):
:raises: :exc:`Bcfg2.Server.Plugins.Cfg.CfgCreationError`
"""
fileloc = self.get_filename(host=host, group=group, prio=prio, ext=ext)
- self.debug_log("%s: Writing new file %s" % (self.name, fileloc))
+ self.debug_log("Cfg: Writing new file %s" % fileloc)
try:
os.makedirs(os.path.dirname(fileloc))
except OSError:
@@ -435,6 +396,95 @@ class CfgCreator(CfgBaseFileMatcher):
raise CfgCreationError("Could not write %s: %s" % (fileloc, err))
+class XMLCfgCreator(CfgCreator, # pylint: disable=W0223
+ Bcfg2.Server.Plugin.StructFile):
+ """ A CfgCreator that uses XML to describe how data should be
+ generated. """
+
+ #: Whether or not the created data from this class can be
+ #: encrypted
+ encryptable = True
+
+ #: Encryption and creation settings can be stored in bcfg2.conf,
+ #: either under the [cfg] section, or under the named section.
+ cfg_section = None
+
+ def __init__(self, name):
+ CfgCreator.__init__(self, name)
+ Bcfg2.Server.Plugin.StructFile.__init__(self, name)
+
+ def handle_event(self, event):
+ CfgCreator.handle_event(self, event)
+ Bcfg2.Server.Plugin.StructFile.HandleEvent(self, event)
+
+ @property
+ def passphrase(self):
+ """ The passphrase used to encrypt created data """
+ if self.cfg_section:
+ localopt = "%s_passphrase" % self.cfg_section
+ passphrase = getattr(Bcfg2.Options.setup, localopt,
+ Bcfg2.Options.setup.cfg_passphrase)
+ else:
+ passphrase = Bcfg2.Options.setup.cfg_passphrase
+ if passphrase is None:
+ return None
+ try:
+ return Bcfg2.Options.setup.passphrases[passphrase]
+ except KeyError:
+ raise CfgCreationError("%s: No such passphrase: %s" %
+ (self.__class__.__name__, passphrase))
+
+ @property
+ def category(self):
+ """ The category to which created data is specific """
+ if self.cfg_section:
+ localopt = "%s_category" % self.cfg_section
+ return getattr(Bcfg2.Options.setup, localopt,
+ Bcfg2.Options.setup.cfg_category)
+ else:
+ return Bcfg2.Options.setup.cfg_category
+
+ def write_data(self, data, host=None, group=None, prio=0, ext=''):
+ if HAS_CRYPTO and self.encryptable and self.passphrase:
+ self.debug_log("Cfg: Encrypting created data")
+ data = Bcfg2.Server.Encryption.ssl_encrypt(data, self.passphrase)
+ ext = '.crypt'
+ CfgCreator.write_data(self, data, host=host, group=group, prio=prio,
+ ext=ext)
+
+ def get_specificity(self, metadata):
+ """ Get config settings for key generation specificity
+ (per-host or per-group).
+
+ :param metadata: The client metadata to create data for
+ :type metadata: Bcfg2.Server.Plugins.Metadata.ClientMetadata
+ :returns: dict - A dict of specificity arguments suitable for
+ passing to
+ :func:`Bcfg2.Server.Plugins.Cfg.CfgCreator.write_data`
+ or
+ :func:`Bcfg2.Server.Plugins.Cfg.CfgCreator.get_filename`
+ """
+ category = self.xdata.get("category", self.category)
+ if category is None:
+ per_host_default = "true"
+ else:
+ per_host_default = "false"
+ per_host = self.xdata.get("perhost",
+ per_host_default).lower() == "true"
+
+ specificity = dict(host=metadata.hostname)
+ if category and not per_host:
+ group = metadata.group_in_category(category)
+ if group:
+ specificity = dict(group=group,
+ prio=int(self.xdata.get("priority", 50)))
+ else:
+ self.logger.info("Cfg: %s has no group in category %s, "
+ "creating host-specific data" %
+ (metadata.hostname, category))
+ return specificity
+
+
class CfgVerificationError(Exception):
""" Raised by
:func:`Bcfg2.Server.Plugins.Cfg.CfgVerifier.verify_entry` when an
@@ -453,37 +503,27 @@ class CfgDefaultInfo(CfgInfo):
""" :class:`Bcfg2.Server.Plugins.Cfg.Cfg` handler that supplies a
default set of file metadata """
- def __init__(self, defaults):
+ def __init__(self):
CfgInfo.__init__(self, '')
- self.defaults = defaults
__init__.__doc__ = CfgInfo.__init__.__doc__.split(".. -----")[0]
- def bind_info_to_entry(self, entry, metadata):
- self._set_info(entry, self.defaults)
+ def bind_info_to_entry(self, entry, _):
+ for key, value in Bcfg2.Server.Plugin.default_path_metadata().items():
+ entry.attrib[key] = value
bind_info_to_entry.__doc__ = CfgInfo.bind_info_to_entry.__doc__
-#: A :class:`CfgDefaultInfo` object instantiated with
-#: :attr:`Bcfg2.Server.Plugin.helper.DEFAULT_FILE_METADATA` as its
-#: default metadata. This is used to set a default file metadata set
-#: on an entry before a "real" :class:`CfgInfo` handler applies its
-#: metadata to the entry.
-DEFAULT_INFO = CfgDefaultInfo(Bcfg2.Server.Plugin.DEFAULT_FILE_METADATA)
-
-class CfgEntrySet(Bcfg2.Server.Plugin.EntrySet,
- Bcfg2.Server.Plugin.Debuggable):
+class CfgEntrySet(Bcfg2.Server.Plugin.EntrySet):
""" Handle a collection of host- and group-specific Cfg files with
multiple different Cfg handlers in a single directory. """
- def __init__(self, basename, path, entry_type, encoding):
- Bcfg2.Server.Plugin.EntrySet.__init__(self, basename, path,
- entry_type, encoding)
- Bcfg2.Server.Plugin.Debuggable.__init__(self)
+ def __init__(self, basename, path, entry_type):
+ Bcfg2.Server.Plugin.EntrySet.__init__(self, basename, path, entry_type)
self.specific = None
__init__.__doc__ = Bcfg2.Server.Plugin.EntrySet.__doc__
def set_debug(self, debug):
- rv = Bcfg2.Server.Plugin.Debuggable.set_debug(self, debug)
+ rv = Bcfg2.Server.Plugin.EntrySet.set_debug(self, debug)
for entry in self.entries.values():
entry.set_debug(debug)
return rv
@@ -504,7 +544,7 @@ class CfgEntrySet(Bcfg2.Server.Plugin.EntrySet,
# process a bogus changed event like a created
return
- for hdlr in handlers():
+ for hdlr in Bcfg2.Options.setup.cfg_handlers:
if hdlr.handles(event, basename=self.path):
if action == 'changed':
# warn about a bogus 'changed' event, but
@@ -513,12 +553,7 @@ class CfgEntrySet(Bcfg2.Server.Plugin.EntrySet,
% (action, event.filename))
self.debug_log("%s handling %s event on %s" %
(hdlr.__name__, action, event.filename))
- try:
- self.entry_init(event, hdlr)
- except: # pylint: disable=W0702
- err = sys.exc_info()[1]
- self.logger.error("Cfg: Failed to parse %s: %s" %
- (event.filename, err))
+ self.entry_init(event, hdlr)
return
elif hdlr.ignore(event, basename=self.path):
return
@@ -597,7 +632,7 @@ class CfgEntrySet(Bcfg2.Server.Plugin.EntrySet,
# most specific to least specific.
data = fltr.modify_data(entry, metadata, data)
- if SETUP['validate']:
+ if Bcfg2.Options.setup.cfg_validation:
try:
self._validate_data(entry, metadata, data)
except CfgVerificationError:
@@ -613,7 +648,7 @@ class CfgEntrySet(Bcfg2.Server.Plugin.EntrySet,
if not isinstance(data, unicode):
if not isinstance(data, str):
data = data.decode('utf-8')
- data = u_str(data, self.encoding)
+ data = u_str(data, Bcfg2.Options.setup.encoding)
except UnicodeDecodeError:
msg = "Failed to decode %s: %s" % (entry.get('name'),
sys.exc_info()[1])
@@ -652,7 +687,7 @@ class CfgEntrySet(Bcfg2.Server.Plugin.EntrySet,
rv = []
for ent in self.entries.values():
if (isinstance(ent, handler_type) and
- (not ent.__specific__ or ent.specific.matches(metadata))):
+ (not ent.__specific__ or ent.specific.matches(metadata))):
rv.append(ent)
return rv
@@ -668,7 +703,7 @@ class CfgEntrySet(Bcfg2.Server.Plugin.EntrySet,
:returns: None
"""
info_handlers = self.get_handlers(metadata, CfgInfo)
- DEFAULT_INFO.bind_info_to_entry(entry, metadata)
+ CfgDefaultInfo().bind_info_to_entry(entry, metadata)
if len(info_handlers) > 1:
self.logger.error("More than one info supplier found for %s: %s" %
(entry.get("name"), info_handlers))
@@ -692,7 +727,7 @@ class CfgEntrySet(Bcfg2.Server.Plugin.EntrySet,
try:
return creator.create_data(entry, metadata)
- except:
+ except CfgCreationError:
raise PluginExecutionError("Cfg: Error creating data for %s: %s" %
(entry.get("name"), sys.exc_info()[1]))
@@ -717,16 +752,12 @@ class CfgEntrySet(Bcfg2.Server.Plugin.EntrySet,
# raises an appropriate exception
return (self._create_data(entry, metadata), None)
- if entry.get('mode').lower() == 'inherit':
- # use on-disk permissions
- self.logger.warning("Cfg: %s: Use of mode='inherit' is deprecated"
- % entry.get("name"))
- fname = os.path.join(self.path, generator.name)
- entry.set('mode',
- oct_mode(stat.S_IMODE(os.stat(fname).st_mode)))
try:
return (generator.get_data(entry, metadata), generator)
except:
+ # TODO: the exceptions raised by ``get_data`` are not
+ # constrained in any way, so for now this needs to be a
+ # blanket except.
msg = "Cfg: Error rendering %s: %s" % (entry.get("name"),
sys.exc_info()[1])
self.logger.error(msg)
@@ -770,7 +801,7 @@ class CfgEntrySet(Bcfg2.Server.Plugin.EntrySet,
try:
best = self.best_matching(metadata, generators)
rv.append(best.specific)
- except: # pylint: disable=W0702
+ except PluginExecutionError:
pass
if not rv or not rv[0].hostname:
@@ -801,10 +832,10 @@ class CfgEntrySet(Bcfg2.Server.Plugin.EntrySet,
self.logger.error(msg)
raise PluginExecutionError(msg)
try:
- etext = new_entry['text'].encode(self.encoding)
- except:
+ etext = new_entry['text'].encode(Bcfg2.Options.setup.encoding)
+ except UnicodeDecodeError:
msg = "Cfg: Cannot encode content of %s as %s" % \
- (name, self.encoding)
+ (name, Bcfg2.Options.setup.encoding)
self.logger.error(msg)
raise PluginExecutionError(msg)
open(name, 'w').write(etext)
@@ -812,13 +843,6 @@ class CfgEntrySet(Bcfg2.Server.Plugin.EntrySet,
badattr = [attr for attr in ['owner', 'group', 'mode']
if attr in new_entry]
if badattr:
- # check for info files and inform user of their removal
- for ifile in ['info', ':info']:
- info = os.path.join(self.path, ifile)
- if os.path.exists(info):
- self.logger.info("Removing %s and replacing with info.xml"
- % info)
- os.remove(info)
metadata_updates = {}
metadata_updates.update(self.metadata)
for attr in badattr:
@@ -836,28 +860,45 @@ class CfgEntrySet(Bcfg2.Server.Plugin.EntrySet,
flag=log)
+class CfgHandlerAction(Bcfg2.Options.ComponentAction):
+ """ Option parser action to load Cfg handlers """
+ bases = ['Bcfg2.Server.Plugins.Cfg']
+
+
class Cfg(Bcfg2.Server.Plugin.GroupSpool,
Bcfg2.Server.Plugin.PullTarget):
""" The Cfg plugin provides a repository to describe configuration
file contents for clients. In its simplest form, the Cfg repository is
just a directory tree modeled off of the directory tree on your client
- machines.
- """
+ machines. """
__author__ = 'bcfg-dev@mcs.anl.gov'
es_cls = CfgEntrySet
es_child_cls = Bcfg2.Server.Plugin.SpecificData
- def __init__(self, core, datastore):
- global SETUP, CFG # pylint: disable=W0603
- Bcfg2.Server.Plugin.GroupSpool.__init__(self, core, datastore)
+ options = Bcfg2.Server.Plugin.GroupSpool.options + [
+ Bcfg2.Options.BooleanOption(
+ '--cfg-validation', cf=('cfg', 'validation'), default=True,
+ help='Run validation on Cfg files'),
+ Bcfg2.Options.Option(
+ cf=('cfg', 'category'), dest="cfg_category",
+ help='The default name of the metadata category that created data '
+ 'is specific to'),
+ Bcfg2.Options.Option(
+ cf=('cfg', 'passphrase'), dest="cfg_passphrase",
+ help='The default passphrase name used to encrypt created data'),
+ Bcfg2.Options.Option(
+ cf=("cfg", "handlers"), dest="cfg_handlers",
+ help="Cfg handlers to load",
+ type=Bcfg2.Options.Types.comma_list, action=CfgHandlerAction,
+ default=_handlers)]
+
+ def __init__(self, core):
+ global _CFG # pylint: disable=W0603
+ Bcfg2.Server.Plugin.GroupSpool.__init__(self, core)
Bcfg2.Server.Plugin.PullTarget.__init__(self)
-
- CFG = self
-
- SETUP = core.setup
- if 'validate' not in SETUP:
- SETUP.add_option('validate', Bcfg2.Options.CFG_VALIDATION)
- SETUP.reparse()
+ Bcfg2.Options.setup.cfg_handlers.sort(
+ key=operator.attrgetter("__priority__"))
+ _CFG = self
__init__.__doc__ = Bcfg2.Server.Plugin.GroupSpool.__init__.__doc__
def has_generator(self, entry, metadata):
@@ -891,127 +932,3 @@ class Cfg(Bcfg2.Server.Plugin.GroupSpool,
log)
AcceptPullData.__doc__ = \
Bcfg2.Server.Plugin.PullTarget.AcceptPullData.__doc__
-
-
-class CfgLint(Bcfg2.Server.Lint.ServerPlugin):
- """ warn about usage of .cat and .diff files """
-
- def Run(self):
- for basename, entry in list(self.core.plugins['Cfg'].entries.items()):
- self.check_delta(basename, entry)
- self.check_pubkey(basename, entry)
- self.check_missing_files()
- self.check_conflicting_handlers()
-
- @classmethod
- def Errors(cls):
- return {"cat-file-used": "warning",
- "diff-file-used": "warning",
- "no-pubkey-xml": "warning",
- "unknown-cfg-files": "error",
- "extra-cfg-files": "error",
- "multiple-global-handlers": "error"}
-
- def check_delta(self, basename, entry):
- """ check that no .cat or .diff files are in use """
- for fname, handler in entry.entries.items():
- path = handler.name
- if self.HandlesFile(path) and isinstance(handler, CfgFilter):
- extension = fname.split(".")[-1]
- if extension in ["cat", "diff"]:
- self.LintError("%s-file-used" % extension,
- "%s file used on %s: %s" % (extension,
- basename,
- fname))
-
- def check_pubkey(self, basename, entry):
- """ check that privkey.xml files have corresponding pubkey.xml
- files """
- if "privkey.xml" not in entry.entries:
- return
- privkey = entry.entries["privkey.xml"]
- if not self.HandlesFile(privkey.name):
- return
-
- pubkey = basename + ".pub"
- if pubkey not in self.core.plugins['Cfg'].entries:
- self.LintError("no-pubkey-xml",
- "%s has no corresponding pubkey.xml at %s" %
- (basename, pubkey))
- else:
- pubset = self.core.plugins['Cfg'].entries[pubkey]
- if "pubkey.xml" not in pubset.entries:
- self.LintError("no-pubkey-xml",
- "%s has no corresponding pubkey.xml at %s" %
- (basename, pubkey))
-
- def _list_path_components(self, path):
- """ Get a list of all components of a path. E.g.,
- ``self._list_path_components("/foo/bar/foobaz")`` would return
- ``["foo", "bar", "foo", "baz"]``. The list is not guaranteed
- to be in order."""
- rv = []
- remaining, component = os.path.split(path)
- while component != '':
- rv.append(component)
- remaining, component = os.path.split(remaining)
- return rv
-
- def check_conflicting_handlers(self):
- """ Check that a single entryset doesn't have multiple
- non-specific (i.e., 'all') handlers. """
- cfg = self.core.plugins['Cfg']
- for eset in cfg.entries.values():
- alls = [e for e in eset.entries.values()
- if (e.specific.all and
- issubclass(e.__class__, CfgGenerator))]
- if len(alls) > 1:
- self.LintError("multiple-global-handlers",
- "%s has multiple global handlers: %s" %
- (eset.path, ", ".join(os.path.basename(e.name)
- for e in alls)))
-
- def check_missing_files(self):
- """ check that all files on the filesystem are known to Cfg """
- cfg = self.core.plugins['Cfg']
-
- # first, collect ignore patterns from handlers
- ignore = set()
- for hdlr in handlers():
- ignore.update(hdlr.__ignore__)
-
- # next, get a list of all non-ignored files on the filesystem
- all_files = set()
- for root, _, files in os.walk(cfg.data):
- for fname in files:
- fpath = os.path.join(root, fname)
- # check against the handler ignore patterns and the
- # global FAM ignore list
- if (not any(fname.endswith("." + i) for i in ignore) and
- not any(fnmatch(fpath, p)
- for p in self.config['ignore']) and
- not any(fnmatch(c, p)
- for p in self.config['ignore']
- for c in self._list_path_components(fpath))):
- all_files.add(fpath)
-
- # next, get a list of all files known to Cfg
- cfg_files = set()
- for root, eset in cfg.entries.items():
- cfg_files.update(os.path.join(cfg.data, root.lstrip("/"), fname)
- for fname in eset.entries.keys())
-
- # finally, compare the two
- unknown_files = all_files - cfg_files
- extra_files = cfg_files - all_files
- if unknown_files:
- self.LintError(
- "unknown-cfg-files",
- "Files on the filesystem could not be understood by Cfg: %s" %
- "; ".join(unknown_files))
- if extra_files:
- self.LintError(
- "extra-cfg-files",
- "Cfg has entries for files that do not exist on the "
- "filesystem: %s\nThis is probably a bug." %
- "; ".join(extra_files))
diff --git a/src/lib/Bcfg2/Server/Plugins/Cvs.py b/src/lib/Bcfg2/Server/Plugins/Cvs.py
index 22cacaa76..35bff0835 100644
--- a/src/lib/Bcfg2/Server/Plugins/Cvs.py
+++ b/src/lib/Bcfg2/Server/Plugins/Cvs.py
@@ -1,7 +1,7 @@
""" The Cvs plugin provides a revision interface for Bcfg2 repos using
cvs. """
-from subprocess import Popen, PIPE
+from Bcfg2.Utils import Executor
import Bcfg2.Server.Plugin
@@ -11,22 +11,19 @@ class Cvs(Bcfg2.Server.Plugin.Version):
__author__ = 'bcfg-dev@mcs.anl.gov'
__vcs_metadata_path__ = "CVSROOT"
- def __init__(self, core, datastore):
- Bcfg2.Server.Plugin.Version.__init__(self, core, datastore)
+ def __init__(self, core):
+ Bcfg2.Server.Plugin.Version.__init__(self, core)
+ self.cmd = Executor()
self.logger.debug("Initialized cvs plugin with CVS directory %s" %
self.vcs_path)
def get_revision(self):
"""Read cvs revision information for the Bcfg2 repository."""
+ result = self.cmd.run(["env LC_ALL=C", "cvs", "log"],
+ shell=True, cwd=Bcfg2.Options.setup.vcs_root)
try:
- data = Popen("env LC_ALL=C cvs log",
- shell=True,
- cwd=self.vcs_root,
- stdout=PIPE).stdout.readlines()
- return data[3].strip('\n')
- except IndexError:
- msg = "Failed to read CVS log"
+ return result.stdout.splitlines()[0].strip()
+ except (IndexError, AttributeError):
+ msg = "Failed to read revision from CVS: %s" % result.error
self.logger.error(msg)
- self.logger.error('Ran command "cvs log" from directory %s' %
- self.vcs_root)
raise Bcfg2.Server.Plugin.PluginExecutionError(msg)
diff --git a/src/lib/Bcfg2/Server/Plugins/DBStats.py b/src/lib/Bcfg2/Server/Plugins/DBStats.py
index e6ef50fa1..88cb6d17c 100644
--- a/src/lib/Bcfg2/Server/Plugins/DBStats.py
+++ b/src/lib/Bcfg2/Server/Plugins/DBStats.py
@@ -6,8 +6,8 @@ import Bcfg2.Server.Plugin
class DBStats(Bcfg2.Server.Plugin.Plugin):
""" DBstats provides a database-backed statistics handler """
- def __init__(self, core, datastore):
- Bcfg2.Server.Plugin.Plugin.__init__(self, core, datastore)
+ def __init__(self, core):
+ Bcfg2.Server.Plugin.Plugin.__init__(self, core)
self.logger.error("DBStats has been replaced with Reporting")
self.logger.error("DBStats: Be sure to migrate your data "
"before running the report collector")
diff --git a/src/lib/Bcfg2/Server/Plugins/Darcs.py b/src/lib/Bcfg2/Server/Plugins/Darcs.py
index b4abafb0e..01e42b2d0 100644
--- a/src/lib/Bcfg2/Server/Plugins/Darcs.py
+++ b/src/lib/Bcfg2/Server/Plugins/Darcs.py
@@ -1,7 +1,7 @@
""" Darcs is a version plugin for dealing with Bcfg2 repos stored in the
Darcs VCS. """
-from subprocess import Popen, PIPE
+from Bcfg2.Utils import Executor
import Bcfg2.Server.Plugin
@@ -11,23 +11,19 @@ class Darcs(Bcfg2.Server.Plugin.Version):
__author__ = 'bcfg-dev@mcs.anl.gov'
__vcs_metadata_path__ = "_darcs"
- def __init__(self, core, datastore):
- Bcfg2.Server.Plugin.Version.__init__(self, core, datastore)
+ def __init__(self, core):
+ Bcfg2.Server.Plugin.Version.__init__(self, core)
+ self.cmd = Executor()
self.logger.debug("Initialized Darcs plugin with darcs directory %s" %
self.vcs_path)
def get_revision(self):
"""Read Darcs changeset information for the Bcfg2 repository."""
- try:
- data = Popen("env LC_ALL=C darcs changes",
- shell=True,
- cwd=self.vcs_root,
- stdout=PIPE).stdout.readlines()
- revision = data[0].strip('\n')
- except:
- msg = "Failed to read darcs repository"
+ result = self.cmd.run(["env LC_ALL=C", "darcs", "changes"],
+ shell=True, cwd=Bcfg2.Options.setup.vcs_root)
+ if result.success:
+ return result.stdout.splitlines()[0].strip()
+ else:
+ msg = "Failed to read revision from darcs: %s" % result.error
self.logger.error(msg)
- self.logger.error('Ran command "darcs changes" from directory %s' %
- self.vcs_root)
raise Bcfg2.Server.Plugin.PluginExecutionError(msg)
- return revision
diff --git a/src/lib/Bcfg2/Server/Plugins/Decisions.py b/src/lib/Bcfg2/Server/Plugins/Decisions.py
index 66f299bc9..957426ec8 100644
--- a/src/lib/Bcfg2/Server/Plugins/Decisions.py
+++ b/src/lib/Bcfg2/Server/Plugins/Decisions.py
@@ -2,57 +2,35 @@
blacklist certain entries. """
import os
-import lxml.etree
import Bcfg2.Server.Plugin
+import Bcfg2.Server.FileMonitor
-class DecisionFile(Bcfg2.Server.Plugin.SpecificData):
+class DecisionFile(Bcfg2.Server.Plugin.StructFile):
""" Representation of a Decisions XML file """
- def __init__(self, name, specific, encoding):
- Bcfg2.Server.Plugin.SpecificData.__init__(self, name, specific,
- encoding)
- self.contents = None
-
- def handle_event(self, event):
- Bcfg2.Server.Plugin.SpecificData.handle_event(self, event)
- self.contents = lxml.etree.XML(self.data)
-
- def get_decisions(self):
+ def get_decisions(self, metadata):
""" Get a list of whitelist or blacklist tuples """
+ if self.xdata is None:
+ # no white/blacklist has been read yet, probably because
+ # it doesn't exist
+ return []
return [(x.get('type'), x.get('name'))
- for x in self.contents.xpath('.//Decision')]
+ for x in self.XMLMatch(metadata).xpath('.//Decision')]
-class Decisions(Bcfg2.Server.Plugin.EntrySet,
- Bcfg2.Server.Plugin.Plugin,
+class Decisions(Bcfg2.Server.Plugin.Plugin,
Bcfg2.Server.Plugin.Decision):
- """ Decisions plugin
-
- Arguments:
- - `core`: Bcfg2.Core instance
- - `datastore`: File repository location
- """
- basename_is_regex = True
+ """ Decisions plugin """
__author__ = 'bcfg-dev@mcs.anl.gov'
- def __init__(self, core, datastore):
- Bcfg2.Server.Plugin.Plugin.__init__(self, core, datastore)
+ def __init__(self, core):
+ Bcfg2.Server.Plugin.Plugin.__init__(self, core)
Bcfg2.Server.Plugin.Decision.__init__(self)
- Bcfg2.Server.Plugin.EntrySet.__init__(self, '(white|black)list',
- self.data, DecisionFile,
- core.setup['encoding'])
- core.fam.AddMonitor(self.data, self)
-
- def HandleEvent(self, event):
- """ Handle events on Decision files by passing them off to
- EntrySet.handle_event """
- if event.filename != self.path:
- return self.handle_event(event)
+ self.whitelist = DecisionFile(os.path.join(self.data, "whitelist.xml"),
+ should_monitor=True)
+ self.blacklist = DecisionFile(os.path.join(self.data, "blacklist.xml"),
+ should_monitor=True)
def GetDecisions(self, metadata, mode):
- ret = []
- for cdt in self.get_matching(metadata):
- if os.path.basename(cdt.name).startswith(mode):
- ret.extend(cdt.get_decisions())
- return ret
+ return getattr(self, mode).get_decisions(metadata)
diff --git a/src/lib/Bcfg2/Server/Plugins/Defaults.py b/src/lib/Bcfg2/Server/Plugins/Defaults.py
index 04c14aa96..79e2ca0e2 100644
--- a/src/lib/Bcfg2/Server/Plugins/Defaults.py
+++ b/src/lib/Bcfg2/Server/Plugins/Defaults.py
@@ -9,6 +9,8 @@ class Defaults(Bcfg2.Server.Plugins.Rules.Rules,
"""Set default attributes on bound entries"""
__author__ = 'bcfg-dev@mcs.anl.gov'
+ options = Bcfg2.Server.Plugin.PrioDir.options
+
# Rules is a Generator that happens to implement all of the
# functionality we want, so we overload it, but Defaults should
# _not_ handle any entries; it does its stuff in the structure
diff --git a/src/lib/Bcfg2/Server/Plugins/Deps.py b/src/lib/Bcfg2/Server/Plugins/Deps.py
index d3a1ee871..1872e68af 100644
--- a/src/lib/Bcfg2/Server/Plugins/Deps.py
+++ b/src/lib/Bcfg2/Server/Plugins/Deps.py
@@ -1,43 +1,20 @@
"""This plugin provides automatic dependency handling."""
import lxml.etree
-
import Bcfg2.Server.Plugin
-
-
-class DNode(Bcfg2.Server.Plugin.INode):
- """DNode provides supports for single predicate types for dependencies."""
- def _load_children(self, data, idict):
- for item in data.getchildren():
- if item.tag in self.containers:
- self.children.append(self.__class__(item, idict, self))
- else:
- data = [(child.tag, child.get('name'))
- for child in item.getchildren()]
- try:
- self.contents[item.tag][item.get('name')] = data
- except KeyError:
- self.contents[item.tag] = {item.get('name'): data}
-
-
-class DepXMLSrc(Bcfg2.Server.Plugin.XMLSrc):
- __node__ = DNode
+from Bcfg2.Server.Plugin import PluginExecutionError
class Deps(Bcfg2.Server.Plugin.PrioDir,
Bcfg2.Server.Plugin.StructureValidator):
- name = 'Deps'
- __author__ = 'bcfg-dev@mcs.anl.gov'
- __child__ = DepXMLSrc
-
# Override the default sort_order (of 500) so that this plugin
# gets handled after others running at the default. In particular,
# we want to run after Packages, so we can see the final set of
# packages that will be installed on the client.
sort_order = 750
- def __init__(self, core, datastore):
- Bcfg2.Server.Plugin.PrioDir.__init__(self, core, datastore)
+ def __init__(self, core):
+ Bcfg2.Server.Plugin.PrioDir.__init__(self, core)
Bcfg2.Server.Plugin.StructureValidator.__init__(self)
self.cache = {}
@@ -55,63 +32,59 @@ class Deps(Bcfg2.Server.Plugin.PrioDir,
tag = entry.tag
if tag.startswith('Bound'):
tag = tag[5:]
- if (tag, entry.get('name')) not in entries \
- and not isinstance(entry, lxml.etree._Comment):
+ if ((tag, entry.get('name')) not in entries
+ and not isinstance(entry, lxml.etree._Comment)):
entries.append((tag, entry.get('name')))
entries.sort()
entries = tuple(entries)
- gdata = list(metadata.groups)
- gdata.sort()
- gdata = tuple(gdata)
+ groups = list(metadata.groups)
+ groups.sort()
+ groups = tuple(groups)
# Check to see if we have cached the prereqs already
- if (entries, gdata) in self.cache:
- prereqs = self.cache[(entries, gdata)]
+ if (entries, groups) in self.cache:
+ prereqs = self.cache[(entries, groups)]
else:
prereqs = self.calculate_prereqs(metadata, entries)
- self.cache[(entries, gdata)] = prereqs
+ self.cache[(entries, groups)] = prereqs
- newstruct = lxml.etree.Element("Independent")
+ newstruct = lxml.etree.Element("Independent",
+ name=self.__class__.__name__)
for tag, name in prereqs:
- try:
- lxml.etree.SubElement(newstruct, tag, name=name)
- except:
- self.logger.error("Failed to add dep entry for %s:%s" % (tag, name))
+ lxml.etree.SubElement(newstruct, tag, name=name)
structures.append(newstruct)
-
def calculate_prereqs(self, metadata, entries):
"""Calculate the prerequisites defined in Deps for the passed
set of entries.
"""
prereqs = []
- [src.Cache(metadata) for src in self.entries.values()]
-
toexamine = list(entries[:])
while toexamine:
entry = toexamine.pop()
- matching = [src for src in list(self.entries.values())
- if src.cache and entry[0] in src.cache[1]
- and entry[1] in src.cache[1][entry[0]]]
+ # tuples of (PriorityStructFile, element) for each
+ # matching element and the structfile that contains it
+ matching = []
+ for deps in self.entries.values():
+ el = deps.find("/%s[name='%s']" % (entry.tag,
+ entry.get("name")))
+ if el:
+ matching.append((deps, el))
if len(matching) > 1:
- prio = [int(src.priority) for src in matching]
+ prio = [int(m[0].priority) for m in matching]
if prio.count(max(prio)) > 1:
- self.logger.error("Found conflicting %s sources with same priority for %s, pkg %s" %
- (entry[0].lower(), metadata.hostname, entry[1]))
- raise Bcfg2.Server.Plugin.PluginExecutionError
+ raise PluginExecutionError(
+ "Deps: Found conflicting dependencies with same "
+ "priority for %s:%s for %s: %s" %
+ (entry.tag, entry.get("name"),
+ metadata.hostname, [m[0].name for m in matching]))
index = prio.index(max(prio))
matching = [matching[index]]
- elif len(matching) == 1:
- for prq in matching[0].cache[1][entry[0]][entry[1]]:
- # XML comments seem to show up in the cache as a
- # tuple with item 0 being callable. The logic
- # below filters them out. Would be better to
- # exclude them when we load the cache in the first
- # place.
- if prq not in prereqs and prq not in entries and not callable(prq[0]):
- toexamine.append(prq)
- prereqs.append(prq)
- else:
+ if not matching:
continue
+ for prq in matching[0][1].getchildren():
+ if prq not in prereqs and prq not in entries:
+ toexamine.append(prq)
+ prereqs.append(prq)
return prereqs
diff --git a/src/lib/Bcfg2/Server/Plugins/Editor.py b/src/lib/Bcfg2/Server/Plugins/Editor.py
deleted file mode 100644
index f82e0f1dd..000000000
--- a/src/lib/Bcfg2/Server/Plugins/Editor.py
+++ /dev/null
@@ -1,80 +0,0 @@
-import Bcfg2.Server.Plugin
-import re
-import lxml.etree
-
-
-def linesub(pattern, repl, filestring):
- """Substitutes instances of pattern with repl in filestring."""
- if filestring == None:
- filestring = ''
- output = list()
- fileread = filestring.split('\n')
- for line in fileread:
- output.append(re.sub(pattern, repl, filestring))
- return '\n'.join(output)
-
-
-class EditDirectives(Bcfg2.Server.Plugin.SpecificData):
- """This object handles the editing directives."""
- def ProcessDirectives(self, input):
- """Processes a list of edit directives on input."""
- temp = input
- for directive in self.data.split('\n'):
- directive = directive.split(',')
- temp = linesub(directive[0], directive[1], temp)
- return temp
-
-
-class EditEntrySet(Bcfg2.Server.Plugin.EntrySet):
- def __init__(self, basename, path, entry_type, encoding):
- self.ignore = re.compile("^(\.#.*|.*~|\\..*\\.(tmp|sw[px])|%s\.H_.*)$" % path.split('/')[-1])
- Bcfg2.Server.Plugin.EntrySet.__init__(self,
- basename,
- path,
- entry_type,
- encoding)
- self.inputs = dict()
-
- def bind_entry(self, entry, metadata):
- client = metadata.hostname
- filename = entry.get('name')
- permdata = {'owner': 'root',
- 'group': 'root',
- 'mode': '0644'}
- [entry.attrib.__setitem__(key, permdata[key]) for key in permdata]
- entry.text = self.entries['edits'].ProcessDirectives(self.get_client_data(client))
- if not entry.text:
- entry.set('empty', 'true')
- try:
- f = open('%s/%s.H_%s' % (self.path, filename.split('/')[-1], client), 'w')
- f.write(entry.text)
- f.close()
- except:
- pass
-
- def get_client_data(self, client):
- return self.inputs[client]
-
-
-class Editor(Bcfg2.Server.Plugin.GroupSpool,
- Bcfg2.Server.Plugin.Probing):
- name = 'Editor'
- __author__ = 'bcfg2-dev@mcs.anl.gov'
- filename_pattern = 'edits'
- es_child_cls = EditDirectives
- es_cls = EditEntrySet
-
- def GetProbes(self, _):
- '''Return a set of probes for execution on client'''
- probelist = list()
- for name in list(self.entries.keys()):
- probe = lxml.etree.Element('probe')
- probe.set('name', name)
- probe.set('source', "Editor")
- probe.text = "cat %s" % name
- probelist.append(probe)
- return probelist
-
- def ReceiveData(self, client, datalist):
- for data in datalist:
- self.entries[data.get('name')].inputs[client.hostname] = data.text
diff --git a/src/lib/Bcfg2/Server/Plugins/FileProbes.py b/src/lib/Bcfg2/Server/Plugins/FileProbes.py
index 8e074118f..b7d78bf75 100644
--- a/src/lib/Bcfg2/Server/Plugins/FileProbes.py
+++ b/src/lib/Bcfg2/Server/Plugins/FileProbes.py
@@ -8,9 +8,9 @@ import os
import sys
import errno
import lxml.etree
-import Bcfg2.Options
import Bcfg2.Server
import Bcfg2.Server.Plugin
+import Bcfg2.Server.FileMonitor
from Bcfg2.Compat import b64decode
#: The probe we send to clients to get the file data. Returns an XML
@@ -38,7 +38,7 @@ if not os.path.exists(path):
try:
stat = os.stat(path)
-except:
+except OSError:
sys.stderr.write("Could not stat %%s: %%s" %% (path, sys.exc_info()[1]))
raise SystemExit(0)
data = Bcfg2.Client.XML.Element("ProbedFileData",
@@ -48,7 +48,7 @@ data = Bcfg2.Client.XML.Element("ProbedFileData",
mode=oct_mode(stat[0] & 4095))
try:
data.text = b64encode(open(path).read())
-except:
+except IOError:
sys.stderr.write("Could not read %%s: %%s" %% (path, sys.exc_info()[1]))
raise SystemExit(0)
print(Bcfg2.Client.XML.tostring(data, xml_declaration=False).decode('UTF-8'))
@@ -64,13 +64,12 @@ class FileProbes(Bcfg2.Server.Plugin.Plugin,
the client """
__author__ = 'chris.a.st.pierre@gmail.com'
- def __init__(self, core, datastore):
- Bcfg2.Server.Plugin.Plugin.__init__(self, core, datastore)
+ def __init__(self, core):
+ Bcfg2.Server.Plugin.Plugin.__init__(self, core)
Bcfg2.Server.Plugin.Probing.__init__(self)
self.config = \
Bcfg2.Server.Plugin.StructFile(os.path.join(self.data,
'config.xml'),
- fam=core.fam,
should_monitor=True,
create=self.name)
self.entries = dict()
@@ -88,7 +87,7 @@ class FileProbes(Bcfg2.Server.Plugin.Plugin,
# for which update is false; we can't possibly do
# anything with the data we get from such a probe
if (entry.get('update', 'false').lower() == "false" and
- not cfg.has_generator(entry, metadata)):
+ not cfg.has_generator(entry, metadata)):
continue
self.entries[metadata.hostname][path] = entry
probe = lxml.etree.Element('probe', name=path,
@@ -148,7 +147,7 @@ class FileProbes(Bcfg2.Server.Plugin.Plugin,
self.write_file(fileloc, contents)
self.verify_file(filename, contents, metadata)
infoxml = os.path.join(cfg.data, filename.lstrip("/"), "info.xml")
- self.write_infoxml(infoxml, entry, data)
+ self.write_infoxml(infoxml, data)
elif entrydata == contents:
self.debug_log("Existing %s contents match probed contents" %
filename)
@@ -198,7 +197,7 @@ class FileProbes(Bcfg2.Server.Plugin.Plugin,
if tries >= 10:
self.logger.error("%s still not registered" % filename)
return
- self.core.fam.handle_events_in_interval(1)
+ Bcfg2.Server.FileMonitor.get_fam().handle_events_in_interval(1)
try:
cfg.entries[filename].bind_entry(entry, metadata)
except Bcfg2.Server.Plugin.PluginExecutionError:
@@ -214,18 +213,18 @@ class FileProbes(Bcfg2.Server.Plugin.Plugin,
updated = True
tries += 1
- def write_infoxml(self, infoxml, entry, data):
+ def write_infoxml(self, infoxml, data):
""" write an info.xml for the file """
if os.path.exists(infoxml):
return
self.logger.info("Writing %s for %s" % (infoxml, data.get("name")))
+ default_mdata = Bcfg2.Server.Plugin.default_path_metadata()
info = lxml.etree.Element(
"Info",
- owner=data.get("owner", Bcfg2.Options.MDATA_OWNER.value),
- group=data.get("group", Bcfg2.Options.MDATA_GROUP.value),
- mode=data.get("mode", Bcfg2.Options.MDATA_MODE.value),
- encoding=entry.get("encoding", Bcfg2.Options.ENCODING.value))
+ owner=data.get("owner", default_mdata['owner']),
+ group=data.get("group", default_mdata['group']),
+ mode=data.get("mode", default_mdata['mode']))
root = lxml.etree.Element("FileInfo")
root.append(info)
diff --git a/src/lib/Bcfg2/Server/Plugins/Fossil.py b/src/lib/Bcfg2/Server/Plugins/Fossil.py
index 6165ac651..c4d9af4a4 100644
--- a/src/lib/Bcfg2/Server/Plugins/Fossil.py
+++ b/src/lib/Bcfg2/Server/Plugins/Fossil.py
@@ -1,7 +1,7 @@
""" The Fossil plugin provides a revision interface for Bcfg2 repos
using fossil."""
-from subprocess import Popen, PIPE
+from Bcfg2.Utils import Executor
import Bcfg2.Server.Plugin
@@ -11,24 +11,24 @@ class Fossil(Bcfg2.Server.Plugin.Version):
__author__ = 'bcfg-dev@mcs.anl.gov'
__vcs_metadata_path__ = "_FOSSIL_"
- def __init__(self, core, datastore):
- Bcfg2.Server.Plugin.Version.__init__(self, core, datastore)
+ def __init__(self, core):
+ Bcfg2.Server.Plugin.Version.__init__(self, core)
+ self.cmd = Executor()
self.logger.debug("Initialized Fossil plugin with fossil directory %s"
% self.vcs_path)
def get_revision(self):
"""Read fossil revision information for the Bcfg2 repository."""
+ result = self.cmd.run(["env LC_ALL=C", "fossil", "info"],
+ shell=True, cwd=Bcfg2.Options.setup.vcs_root)
try:
- data = Popen("env LC_ALL=C fossil info",
- shell=True,
- cwd=self.vcs_root,
- stdout=PIPE).stdout.readlines()
- revline = [line.split(': ')[1].strip() for line in data
- if line.split(': ')[0].strip() == 'checkout'][-1]
- return revline.split(' ')[0]
- except IndexError:
- msg = "Failed to read fossil info"
+ revision = None
+ for line in result.stdout.splitlines():
+ ldata = line.split(': ')
+ if ldata[0].strip() == 'checkout':
+ revision = line[1].strip().split(' ')[0]
+ return revision
+ except (IndexError, AttributeError):
+ msg = "Failed to read revision from Fossil: %s" % result.error
self.logger.error(msg)
- self.logger.error('Ran command "fossil info" from directory "%s"' %
- self.vcs_root)
raise Bcfg2.Server.Plugin.PluginExecutionError(msg)
diff --git a/src/lib/Bcfg2/Server/Plugins/Git.py b/src/lib/Bcfg2/Server/Plugins/Git.py
index 44971aba7..d0ae010fa 100644
--- a/src/lib/Bcfg2/Server/Plugins/Git.py
+++ b/src/lib/Bcfg2/Server/Plugins/Git.py
@@ -2,13 +2,14 @@
git. """
import sys
+import Bcfg2.Options
from Bcfg2.Server.Plugin import Version, PluginExecutionError
-from subprocess import Popen, PIPE
try:
import git
HAS_GITPYTHON = True
except ImportError:
+ from Bcfg2.Utils import Executor
HAS_GITPYTHON = False
@@ -20,14 +21,16 @@ class Git(Version):
if HAS_GITPYTHON:
__rmi__ = Version.__rmi__ + ['Update']
- def __init__(self, core, datastore):
- Version.__init__(self, core, datastore)
+ def __init__(self, core):
+ Version.__init__(self, core)
if HAS_GITPYTHON:
- self.repo = git.Repo(self.vcs_root)
+ self.repo = git.Repo(Bcfg2.Options.setup.vcs_root)
+ self.cmd = None
else:
self.logger.debug("Git: GitPython not found, using CLI interface "
"to Git")
self.repo = None
+ self.cmd = Executor()
self.logger.debug("Initialized git plugin with git directory %s" %
self.vcs_path)
@@ -38,22 +41,17 @@ class Git(Version):
def get_revision(self):
"""Read git revision information for the Bcfg2 repository."""
- try:
- if HAS_GITPYTHON:
- return self.repo.head.commit.hexsha
- else:
- cmd = ["git", "--git-dir", self.vcs_path,
- "--work-tree", self.vcs_root, "rev-parse", "HEAD"]
- self.debug_log("Git: Running %s" % cmd)
- proc = Popen(cmd, stdout=PIPE, stderr=PIPE)
- rv, err = proc.communicate()
- if proc.wait():
- raise Exception(err)
- return rv
- except:
- raise PluginExecutionError("Git: Error getting revision from %s: "
- "%s" % (self.vcs_root,
- sys.exc_info()[1]))
+ if HAS_GITPYTHON:
+ return self.repo.head.commit.hexsha
+ else:
+ cmd = ["git", "--git-dir", self.vcs_path,
+ "--work-tree", Bcfg2.Options.setup.vcs_root,
+ "rev-parse", "HEAD"]
+ self.debug_log("Git: Running %s" % cmd)
+ result = self.cmd.run(cmd)
+ if not result.success:
+ raise PluginExecutionError(result.stderr)
+ return result.stdout
def Update(self, ref=None):
""" Git.Update() => True|False
@@ -61,14 +59,15 @@ class Git(Version):
"""
self.logger.info("Git: Git.Update(ref='%s')" % ref)
self.debug_log("Git: Performing garbage collection on repo at %s" %
- self.vcs_root)
+ Bcfg2.Options.setup.vcs_root)
try:
self._log_git_cmd(self.repo.git.gc('--auto'))
except git.GitCommandError:
self.logger.warning("Git: Failed to perform garbage collection: %s"
% sys.exc_info()[1])
- self.debug_log("Git: Fetching all refs for repo at %s" % self.vcs_root)
+ self.debug_log("Git: Fetching all refs for repo at %s" %
+ Bcfg2.Options.setup.vcs_root)
try:
self._log_git_cmd(self.repo.git.fetch('--all'))
except git.GitCommandError:
@@ -101,5 +100,5 @@ class Git(Version):
"upstream: %s" % sys.exc_info()[1])
self.logger.info("Git: Repo at %s updated to %s" %
- (self.vcs_root, self.get_revision()))
+ (Bcfg2.Options.setup.vcs_root, self.get_revision()))
return True
diff --git a/src/lib/Bcfg2/Server/Plugins/GroupLogic.py b/src/lib/Bcfg2/Server/Plugins/GroupLogic.py
index 24547949b..b60f60e65 100644
--- a/src/lib/Bcfg2/Server/Plugins/GroupLogic.py
+++ b/src/lib/Bcfg2/Server/Plugins/GroupLogic.py
@@ -6,30 +6,24 @@ import lxml.etree
from threading import local
import Bcfg2.Server.Plugin
from Bcfg2.Server.Plugins.Metadata import MetadataGroup
-try:
- from Bcfg2.Server.Plugins.Bundler import BundleTemplateFile
-except ImportError:
- # BundleTemplateFile missing means that genshi is missing. we
- # import genshi to get the _real_ error
- import genshi # pylint: disable=W0611
-class GroupLogicConfig(BundleTemplateFile):
+class GroupLogicConfig(Bcfg2.Server.Plugin.StructFile):
""" Representation of the GroupLogic groups.xml file """
create = lxml.etree.Element("GroupLogic",
nsmap=dict(py="http://genshi.edgewall.org/"))
- def __init__(self, name, fam):
- BundleTemplateFile.__init__(self, name,
- Bcfg2.Server.Plugin.Specificity(), None)
- self.fam = fam
- self.should_monitor = True
- self.fam.AddMonitor(self.name, self)
+ def _match(self, item, metadata, *args):
+ if item.tag == 'Group' and not len(item.getchildren()):
+ return [item]
+ return Bcfg2.Server.Plugin.StructFile._match(self, item, metadata,
+ *args)
- def _match(self, item, metadata):
+ def _xml_match(self, item, metadata, *args):
if item.tag == 'Group' and not len(item.getchildren()):
return [item]
- return BundleTemplateFile._match(self, item, metadata)
+ return Bcfg2.Server.Plugin.StructFile._xml_match(self, item, metadata,
+ *args)
class GroupLogic(Bcfg2.Server.Plugin.Plugin,
@@ -41,11 +35,11 @@ class GroupLogic(Bcfg2.Server.Plugin.Plugin,
# use groups set by them
sort_order = 1000
- def __init__(self, core, datastore):
- Bcfg2.Server.Plugin.Plugin.__init__(self, core, datastore)
+ def __init__(self, core):
+ Bcfg2.Server.Plugin.Plugin.__init__(self, core)
Bcfg2.Server.Plugin.Connector.__init__(self)
self.config = GroupLogicConfig(os.path.join(self.data, "groups.xml"),
- core.fam)
+ should_monitor=True)
self._local = local()
def get_additional_groups(self, metadata):
@@ -66,7 +60,7 @@ class GroupLogic(Bcfg2.Server.Plugin.Plugin,
return []
self._local.building.add(metadata.hostname)
rv = []
- for el in self.config.get_xml_value(metadata).xpath("//Group"):
+ for el in self.config.XMLMatch(metadata).findall("Group"):
if el.get("category"):
rv.append(MetadataGroup(el.get("name"),
category=el.get("category")))
diff --git a/src/lib/Bcfg2/Server/Plugins/GroupPatterns.py b/src/lib/Bcfg2/Server/Plugins/GroupPatterns.py
index eadd918b7..9874977df 100644
--- a/src/lib/Bcfg2/Server/Plugins/GroupPatterns.py
+++ b/src/lib/Bcfg2/Server/Plugins/GroupPatterns.py
@@ -8,34 +8,43 @@ import Bcfg2.Server.Plugin
from Bcfg2.Utils import PackedDigitRange
+class PatternInitializationError(Exception):
+ """Raised when creating a PatternMap object fails."""
+
+
class PatternMap(object):
- """ Handler for a single pattern or range """
+ """Handler for a single pattern or range."""
def __init__(self, pattern, rangestr, groups):
self.pattern = pattern
self.rangestr = rangestr
self.groups = groups
- if pattern is not None:
- self.re = re.compile(pattern)
- self.process = self.process_re
- elif rangestr is not None:
- if '\\' in rangestr:
- raise Exception("Backslashes are not allowed in NameRanges")
- range_finder = r'\[\[[\d\-,]+\]\]'
- self.process = self.process_range
- self.re = re.compile(r'^' + re.sub(range_finder, r'(\d+)',
- rangestr))
- dmatcher = re.compile(re.sub(range_finder,
- r'\[\[([\d\-,]+)\]\]',
- rangestr))
- self.dranges = [PackedDigitRange(x)
- for x in dmatcher.match(rangestr).groups()]
- else:
- raise Exception("No pattern or range given")
+ try:
+ if pattern is not None:
+ self._re = re.compile(pattern)
+ self.process = self.process_re
+ elif rangestr is not None:
+ if '\\' in rangestr:
+ raise PatternInitializationError(
+ "Backslashes are not allowed in NameRanges")
+ range_finder = r'\[\[[\d\-,]+\]\]'
+ self.process = self.process_range
+ self._re = re.compile(r'^' + re.sub(range_finder, r'(\d+)',
+ rangestr))
+ dmatcher = re.compile(re.sub(range_finder,
+ r'\[\[([\d\-,]+)\]\]',
+ rangestr))
+ self.dranges = [PackedDigitRange(x)
+ for x in dmatcher.match(rangestr).groups()]
+ else:
+ raise PatternInitializationError("No pattern or range given")
+ except re.error:
+ raise PatternInitializationError(
+ "Could not compile pattern regex: %s" % sys.exc_info()[1])
def process_range(self, name):
- """ match the given hostname against a range-based NameRange """
- match = self.re.match(name)
+ """match the given hostname against a range-based NameRange."""
+ match = self._re.match(name)
if not match:
return None
digits = match.groups()
@@ -45,11 +54,11 @@ class PatternMap(object):
return self.groups
def process_re(self, name):
- """ match the given hostname against a regex-based NamePattern """
- match = self.re.search(name)
+ """match the given hostname against a regex-based NamePattern."""
+ match = self._re.search(name)
if not match:
return None
- ret = list()
+ ret = []
sub = match.groups()
for group in self.groups:
newg = group
@@ -64,16 +73,12 @@ class PatternMap(object):
class PatternFile(Bcfg2.Server.Plugin.XMLFileBacked):
- """ representation of GroupPatterns config.xml """
+ """representation of GroupPatterns config.xml."""
__identifier__ = None
create = 'GroupPatterns'
def __init__(self, filename, core=None):
- try:
- fam = core.fam
- except AttributeError:
- fam = None
- Bcfg2.Server.Plugin.XMLFileBacked.__init__(self, filename, fam=fam,
+ Bcfg2.Server.Plugin.XMLFileBacked.__init__(self, filename,
should_monitor=True)
self.core = core
self.patterns = []
@@ -81,7 +86,7 @@ class PatternFile(Bcfg2.Server.Plugin.XMLFileBacked):
def Index(self):
Bcfg2.Server.Plugin.XMLFileBacked.Index(self)
if (self.core and
- self.core.metadata_cache_mode in ['cautious', 'aggressive']):
+ self.core.metadata_cache_mode in ['cautious', 'aggressive']):
self.core.metadata_cache.expire()
self.patterns = []
for entry in self.xdata.xpath('//GroupPattern'):
@@ -93,7 +98,7 @@ class PatternFile(Bcfg2.Server.Plugin.XMLFileBacked):
for range_ent in entry.findall('NameRange'):
rng = range_ent.text
self.patterns.append(PatternMap(None, rng, groups))
- except: # pylint: disable=W0702
+ except PatternInitializationError:
self.logger.error("GroupPatterns: Failed to initialize "
"pattern %s: %s" % (entry.text,
sys.exc_info()[1]))
@@ -103,61 +108,21 @@ class PatternFile(Bcfg2.Server.Plugin.XMLFileBacked):
client based on patterns that match the hostname """
ret = []
for pattern in self.patterns:
- try:
- grps = pattern.process(hostname)
- if grps is not None:
- ret.extend(grps)
- except: # pylint: disable=W0702
- self.logger.error("GroupPatterns: Failed to process pattern "
- "%s for %s" % (pattern.pattern, hostname),
- exc_info=1)
+ grps = pattern.process(hostname)
+ if grps is not None:
+ ret.extend(grps)
return ret
class GroupPatterns(Bcfg2.Server.Plugin.Plugin,
Bcfg2.Server.Plugin.Connector):
- """ set group membership based on client hostnames """
+ """set group membership based on client hostnames."""
- def __init__(self, core, datastore):
- Bcfg2.Server.Plugin.Plugin.__init__(self, core, datastore)
+ def __init__(self, core):
+ Bcfg2.Server.Plugin.Plugin.__init__(self, core)
Bcfg2.Server.Plugin.Connector.__init__(self)
self.config = PatternFile(os.path.join(self.data, 'config.xml'),
core=core)
def get_additional_groups(self, metadata):
return self.config.process_patterns(metadata.hostname)
-
-
-class GroupPatternsLint(Bcfg2.Server.Lint.ServerPlugin):
- """ ``bcfg2-lint`` plugin to check all given :ref:`GroupPatterns
- <server-plugins-grouping-grouppatterns>` patterns for validity.
- This is simply done by trying to create a
- :class:`Bcfg2.Server.Plugins.GroupPatterns.PatternMap` object for
- each pattern, and catching exceptions and presenting them as
- ``bcfg2-lint`` errors."""
-
- def Run(self):
- cfg = self.core.plugins['GroupPatterns'].config
- for entry in cfg.xdata.xpath('//GroupPattern'):
- groups = [g.text for g in entry.findall('Group')]
- self.check(entry, groups, ptype='NamePattern')
- self.check(entry, groups, ptype='NameRange')
-
- @classmethod
- def Errors(cls):
- return {"pattern-fails-to-initialize": "error"}
-
- def check(self, entry, groups, ptype="NamePattern"):
- """ Check a single pattern for validity """
- for el in entry.findall(ptype):
- pat = el.text
- try:
- if ptype == "NamePattern":
- PatternMap(pat, None, groups)
- else:
- PatternMap(None, pat, groups)
- except: # pylint: disable=W0702
- err = sys.exc_info()[1]
- self.LintError("pattern-fails-to-initialize",
- "Failed to initialize %s %s for %s: %s" %
- (ptype, pat, entry.get('pattern'), err))
diff --git a/src/lib/Bcfg2/Server/Plugins/Guppy.py b/src/lib/Bcfg2/Server/Plugins/Guppy.py
index 3c9b8a459..de994a2db 100644
--- a/src/lib/Bcfg2/Server/Plugins/Guppy.py
+++ b/src/lib/Bcfg2/Server/Plugins/Guppy.py
@@ -1,9 +1,9 @@
-"""
-This plugin is used to trace memory leaks within the bcfg2-server
-process using Guppy. By default the remote debugger is started
-when this plugin is enabled. The debugger can be shutoff in a running
-process using "bcfg2-admin xcmd Guppy.Disable" and reenabled using
-"bcfg2-admin xcmd Guppy.Enable".
+"""Debugging plugin to trace memory leaks within the bcfg2-server process.
+
+By default the remote debugger is started when this plugin is enabled.
+The debugger can be shut off in a running process using "bcfg2-admin
+xcmd Guppy.Disable" and reenabled using "bcfg2-admin xcmd
+Guppy.Enable".
To attach the console run:
@@ -23,39 +23,27 @@ Remote connection 1. To return to Monitor, type <Ctrl-C> or .<RETURN>
Remote interactive console. To return to Annex, type '-'.
>>> hp.heap()
...
-
-
"""
import Bcfg2.Server.Plugin
from guppy.heapy import Remote
class Guppy(Bcfg2.Server.Plugin.Plugin):
- """Guppy is a debugging plugin to help trace memory leaks"""
- name = 'Guppy'
+ """Guppy is a debugging plugin to help trace memory leaks."""
__author__ = 'bcfg-dev@mcs.anl.gov'
-
- experimental = True
__rmi__ = Bcfg2.Server.Plugin.Plugin.__rmi__ + ['Enable', 'Disable']
__child_rmi__ = __rmi__[:]
- def __init__(self, core, datastore):
- Bcfg2.Server.Plugin.Plugin.__init__(self, core, datastore)
-
+ def __init__(self, core):
+ Bcfg2.Server.Plugin.Plugin.__init__(self, core)
self.Enable()
- def Enable(self):
- """Enable remote debugging"""
- try:
- Remote.on()
- except:
- self.logger.error("Failed to create Heapy context")
- raise Bcfg2.Server.Plugin.PluginInitError
-
- def Disable(self):
- """Disable remote debugging"""
- try:
- Remote.off()
- except:
- self.logger.error("Failed to disable Heapy")
- raise Bcfg2.Server.Plugin.PluginInitError
+ @staticmethod
+ def Enable():
+ """Enable remote debugging."""
+ Remote.on()
+
+ @staticmethod
+ def Disable():
+ """Disable remote debugging."""
+ Remote.off()
diff --git a/src/lib/Bcfg2/Server/Plugins/Hg.py b/src/lib/Bcfg2/Server/Plugins/Hg.py
index 3fd3918bd..3260908d8 100644
--- a/src/lib/Bcfg2/Server/Plugins/Hg.py
+++ b/src/lib/Bcfg2/Server/Plugins/Hg.py
@@ -1,5 +1,5 @@
-""" The Hg plugin provides a revision interface for Bcfg2 repos using
-mercurial. """
+"""Revision interface for Bcfg2 repos using mercurial.
+"""
import sys
from mercurial import ui, hg
@@ -7,24 +7,25 @@ import Bcfg2.Server.Plugin
class Hg(Bcfg2.Server.Plugin.Version):
- """ The Hg plugin provides a revision interface for Bcfg2 repos
- using mercurial. """
+ """Revision interface for Bcfg2 repos using mercurial.
+ """
+
__author__ = 'bcfg-dev@mcs.anl.gov'
__vcs_metadata_path__ = ".hg"
- def __init__(self, core, datastore):
- Bcfg2.Server.Plugin.Version.__init__(self, core, datastore)
+ def __init__(self, core):
+ Bcfg2.Server.Plugin.Version.__init__(self, core)
self.logger.debug("Initialized hg plugin with hg directory %s" %
self.vcs_path)
def get_revision(self):
"""Read hg revision information for the Bcfg2 repository."""
try:
- repo_path = self.vcs_root + "/"
+ repo_path = Bcfg2.Options.setup.vcs_root + "/"
repo = hg.repository(ui.ui(), repo_path)
tip = repo.changelog.tip()
return repo.changelog.rev(tip)
- except:
+ except hg.error.RepoError:
err = sys.exc_info()[1]
msg = "Failed to read hg repository: %s" % err
self.logger.error(msg)
diff --git a/src/lib/Bcfg2/Server/Plugins/Hostbase.py b/src/lib/Bcfg2/Server/Plugins/Hostbase.py
deleted file mode 100644
index 55757e0b4..000000000
--- a/src/lib/Bcfg2/Server/Plugins/Hostbase.py
+++ /dev/null
@@ -1,599 +0,0 @@
-"""
-This file provides the Hostbase plugin.
-It manages dns/dhcp/nis host information
-"""
-
-from lxml.etree import Element, SubElement
-import os
-import re
-from time import strftime
-os.environ['DJANGO_SETTINGS_MODULE'] = 'Bcfg2.Server.Hostbase.settings'
-import Bcfg2.Server.Plugin
-from Bcfg2.Server.Plugin import PluginExecutionError, PluginInitError
-from django.template import Context, loader
-from django.db import connection
-# Compatibility imports
-from Bcfg2.Compat import StringIO
-
-try:
- set
-except NameError:
- # deprecated since python 2.6
- from sets import Set as set
-
-
-class Hostbase(Bcfg2.Server.Plugin.Plugin,
- Bcfg2.Server.Plugin.Structure,
- Bcfg2.Server.Plugin.Generator):
- """The Hostbase plugin handles host/network info."""
- name = 'Hostbase'
- __author__ = 'bcfg-dev@mcs.anl.gov'
- filepath = '/my/adm/hostbase/files/bind'
- deprecated = True
-
- def __init__(self, core, datastore):
-
- self.ready = False
- Bcfg2.Server.Plugin.Plugin.__init__(self, core, datastore)
- Bcfg2.Server.Plugin.Structure.__init__(self)
- Bcfg2.Server.Plugin.Generator.__init__(self)
- files = ['zone.tmpl',
- 'reversesoa.tmpl',
- 'named.tmpl',
- 'reverseappend.tmpl',
- 'dhcpd.tmpl',
- 'hosts.tmpl',
- 'hostsappend.tmpl']
- self.filedata = {}
- self.dnsservers = []
- self.dhcpservers = []
- self.templates = {'zone': loader.get_template('zone.tmpl'),
- 'reversesoa': loader.get_template('reversesoa.tmpl'),
- 'named': loader.get_template('named.tmpl'),
- 'namedviews': loader.get_template('namedviews.tmpl'),
- 'reverseapp': loader.get_template('reverseappend.tmpl'),
- 'dhcp': loader.get_template('dhcpd.tmpl'),
- 'hosts': loader.get_template('hosts.tmpl'),
- 'hostsapp': loader.get_template('hostsappend.tmpl'),
- }
- self.Entries['ConfigFile'] = {}
- self.__rmi__ = ['rebuildState']
- try:
- self.rebuildState(None)
- except:
- raise PluginInitError
-
- def FetchFile(self, entry, metadata):
- """Return prebuilt file data."""
- fname = entry.get('name').split('/')[-1]
- if not fname in self.filedata:
- raise PluginExecutionError
- perms = {'owner': 'root',
- 'group': 'root',
- 'mode': '644'}
- [entry.attrib.__setitem__(key, value)
- for (key, value) in list(perms.items())]
- entry.text = self.filedata[fname]
-
- def BuildStructures(self, metadata):
- """Build hostbase bundle."""
- if metadata.hostname not in self.dnsservers or metadata.hostname not in self.dhcpservers:
- return []
- output = Element("Bundle", name='hostbase')
- if metadata.hostname in self.dnsservers:
- for configfile in self.Entries['ConfigFile']:
- if re.search('/etc/bind/', configfile):
- SubElement(output, "ConfigFile", name=configfile)
- if metadata.hostname in self.dhcpservers:
- SubElement(output, "ConfigFile", name="/etc/dhcp3/dhcpd.conf")
- return [output]
-
- def rebuildState(self, _):
- """Pre-cache all state information for hostbase config files
- callable as an XMLRPC function.
-
- """
- self.buildZones()
- self.buildDHCP()
- self.buildHosts()
- self.buildHostsLPD()
- self.buildPrinters()
- self.buildNetgroups()
- return True
-
- def buildZones(self):
- """Pre-build and stash zone files."""
- cursor = connection.cursor()
-
- cursor.execute("SELECT id, serial FROM hostbase_zone")
- zones = cursor.fetchall()
-
- for zone in zones:
- # update the serial number for all zone files
- todaydate = (strftime('%Y%m%d'))
- try:
- if todaydate == str(zone[1])[:8]:
- serial = zone[1] + 1
- else:
- serial = int(todaydate) * 100
- except (KeyError):
- serial = int(todaydate) * 100
- cursor.execute("""UPDATE hostbase_zone SET serial = \'%s\' WHERE id = \'%s\'""" % (str(serial), zone[0]))
-
- cursor.execute("SELECT * FROM hostbase_zone WHERE zone NOT LIKE \'%%.rev\'")
- zones = cursor.fetchall()
-
- iplist = []
- hosts = {}
-
- for zone in zones:
- zonefile = StringIO()
- externalzonefile = StringIO()
- cursor.execute("""SELECT n.name FROM hostbase_zone_nameservers z
- INNER JOIN hostbase_nameserver n ON z.nameserver_id = n.id
- WHERE z.zone_id = \'%s\'""" % zone[0])
- nameservers = cursor.fetchall()
- cursor.execute("""SELECT i.ip_addr FROM hostbase_zone_addresses z
- INNER JOIN hostbase_zoneaddress i ON z.zoneaddress_id = i.id
- WHERE z.zone_id = \'%s\'""" % zone[0])
- addresses = cursor.fetchall()
- cursor.execute("""SELECT m.priority, m.mx FROM hostbase_zone_mxs z
- INNER JOIN hostbase_mx m ON z.mx_id = m.id
- WHERE z.zone_id = \'%s\'""" % zone[0])
- mxs = cursor.fetchall()
- context = Context({
- 'zone': zone,
- 'nameservers': nameservers,
- 'addresses': addresses,
- 'mxs': mxs
- })
- zonefile.write(self.templates['zone'].render(context))
- externalzonefile.write(self.templates['zone'].render(context))
-
- querystring = """SELECT h.hostname, p.ip_addr,
- n.name, c.cname, m.priority, m.mx, n.dns_view
- FROM (((((hostbase_host h INNER JOIN hostbase_interface i ON h.id = i.host_id)
- INNER JOIN hostbase_ip p ON i.id = p.interface_id)
- INNER JOIN hostbase_name n ON p.id = n.ip_id)
- INNER JOIN hostbase_name_mxs x ON n.id = x.name_id)
- INNER JOIN hostbase_mx m ON m.id = x.mx_id)
- LEFT JOIN hostbase_cname c ON n.id = c.name_id
- WHERE n.name LIKE '%%%%%s'
- AND h.status = 'active'
- ORDER BY h.hostname, n.name, p.ip_addr
- """ % zone[1]
- cursor.execute(querystring)
- zonehosts = cursor.fetchall()
- prevhost = (None, None, None, None)
- cnames = StringIO()
- cnamesexternal = StringIO()
- for host in zonehosts:
- if not host[2].split(".", 1)[1] == zone[1]:
- zonefile.write(cnames.getvalue())
- externalzonefile.write(cnamesexternal.getvalue())
- cnames = StringIO()
- cnamesexternal = StringIO()
- continue
- if not prevhost[1] == host[1] or not prevhost[2] == host[2]:
- zonefile.write(cnames.getvalue())
- externalzonefile.write(cnamesexternal.getvalue())
- cnames = StringIO()
- cnamesexternal = StringIO()
- zonefile.write("%-32s%-10s%-32s\n" %
- (host[2].split(".", 1)[0], 'A', host[1]))
- zonefile.write("%-32s%-10s%-3s%s.\n" %
- ('', 'MX', host[4], host[5]))
- if host[6] == 'global':
- externalzonefile.write("%-32s%-10s%-32s\n" %
- (host[2].split(".", 1)[0], 'A', host[1]))
- externalzonefile.write("%-32s%-10s%-3s%s.\n" %
- ('', 'MX', host[4], host[5]))
- elif not prevhost[5] == host[5]:
- zonefile.write("%-32s%-10s%-3s%s.\n" %
- ('', 'MX', host[4], host[5]))
- if host[6] == 'global':
- externalzonefile.write("%-32s%-10s%-3s%s.\n" %
- ('', 'MX', host[4], host[5]))
-
- if host[3]:
- try:
- if host[3].split(".", 1)[1] == zone[1]:
- cnames.write("%-32s%-10s%-32s\n" %
- (host[3].split(".", 1)[0],
- 'CNAME', host[2].split(".", 1)[0]))
- if host[6] == 'global':
- cnamesexternal.write("%-32s%-10s%-32s\n" %
- (host[3].split(".", 1)[0],
- 'CNAME', host[2].split(".", 1)[0]))
- else:
- cnames.write("%-32s%-10s%-32s\n" %
- (host[3] + ".",
- 'CNAME',
- host[2].split(".", 1)[0]))
- if host[6] == 'global':
- cnamesexternal.write("%-32s%-10s%-32s\n" %
- (host[3] + ".",
- 'CNAME',
- host[2].split(".", 1)[0]))
-
- except:
- pass
- prevhost = host
- zonefile.write(cnames.getvalue())
- externalzonefile.write(cnamesexternal.getvalue())
- zonefile.write("\n\n%s" % zone[9])
- externalzonefile.write("\n\n%s" % zone[9])
- self.filedata[zone[1]] = zonefile.getvalue()
- self.filedata[zone[1] + ".external"] = externalzonefile.getvalue()
- zonefile.close()
- externalzonefile.close()
- self.Entries['ConfigFile']["%s/%s" % (self.filepath, zone[1])] = self.FetchFile
- self.Entries['ConfigFile']["%s/%s.external" % (self.filepath, zone[1])] = self.FetchFile
-
- cursor.execute("SELECT * FROM hostbase_zone WHERE zone LIKE \'%%.rev\' AND zone <> \'.rev\'")
- reversezones = cursor.fetchall()
-
- reversenames = []
- for reversezone in reversezones:
- cursor.execute("""SELECT n.name FROM hostbase_zone_nameservers z
- INNER JOIN hostbase_nameserver n ON z.nameserver_id = n.id
- WHERE z.zone_id = \'%s\'""" % reversezone[0])
- reverse_nameservers = cursor.fetchall()
-
- context = Context({
- 'inaddr': reversezone[1].rstrip('.rev'),
- 'zone': reversezone,
- 'nameservers': reverse_nameservers,
- })
-
- self.filedata[reversezone[1]] = self.templates['reversesoa'].render(context)
- self.filedata[reversezone[1] + '.external'] = self.templates['reversesoa'].render(context)
- self.filedata[reversezone[1]] += reversezone[9]
- self.filedata[reversezone[1] + '.external'] += reversezone[9]
-
- subnet = reversezone[1].split(".")
- subnet.reverse()
- reversenames.append((reversezone[1].rstrip('.rev'), ".".join(subnet[1:])))
-
- for filename in reversenames:
- cursor.execute("""
- SELECT DISTINCT h.hostname, p.ip_addr, n.dns_view FROM ((hostbase_host h
- INNER JOIN hostbase_interface i ON h.id = i.host_id)
- INNER JOIN hostbase_ip p ON i.id = p.interface_id)
- INNER JOIN hostbase_name n ON n.ip_id = p.id
- WHERE p.ip_addr LIKE '%s%%%%' AND h.status = 'active' ORDER BY p.ip_addr
- """ % filename[1])
- reversehosts = cursor.fetchall()
- zonefile = StringIO()
- externalzonefile = StringIO()
- if len(filename[0].split(".")) == 2:
- originlist = []
- [originlist.append((".".join([ip[1].split(".")[2], filename[0]]),
- ".".join([filename[1], ip[1].split(".")[2]])))
- for ip in reversehosts
- if (".".join([ip[1].split(".")[2], filename[0]]),
- ".".join([filename[1], ip[1].split(".")[2]])) not in originlist]
- for origin in originlist:
- hosts = [(host[1].split("."), host[0])
- for host in reversehosts
- if host[1].rstrip('0123456789').rstrip('.') == origin[1]]
- hosts_external = [(host[1].split("."), host[0])
- for host in reversehosts
- if (host[1].rstrip('0123456789').rstrip('.') == origin[1]
- and host[2] == 'global')]
- context = Context({
- 'hosts': hosts,
- 'inaddr': origin[0],
- 'fileorigin': filename[0],
- })
- zonefile.write(self.templates['reverseapp'].render(context))
- context = Context({
- 'hosts': hosts_external,
- 'inaddr': origin[0],
- 'fileorigin': filename[0],
- })
- externalzonefile.write(self.templates['reverseapp'].render(context))
- else:
- originlist = [filename[0]]
- hosts = [(host[1].split("."), host[0])
- for host in reversehosts
- if (host[1].split("."), host[0]) not in hosts]
- hosts_external = [(host[1].split("."), host[0])
- for host in reversehosts
- if ((host[1].split("."), host[0]) not in hosts_external
- and host[2] == 'global')]
- context = Context({
- 'hosts': hosts,
- 'inaddr': filename[0],
- 'fileorigin': None,
- })
- zonefile.write(self.templates['reverseapp'].render(context))
- context = Context({
- 'hosts': hosts_external,
- 'inaddr': filename[0],
- 'fileorigin': None,
- })
- externalzonefile.write(self.templates['reverseapp'].render(context))
- self.filedata['%s.rev' % filename[0]] += zonefile.getvalue()
- self.filedata['%s.rev.external' % filename[0]] += externalzonefile.getvalue()
- zonefile.close()
- externalzonefile.close()
- self.Entries['ConfigFile']['%s/%s.rev' % (self.filepath, filename[0])] = self.FetchFile
- self.Entries['ConfigFile']['%s/%s.rev.external' % (self.filepath, filename[0])] = self.FetchFile
-
- ## here's where the named.conf file gets written
- context = Context({
- 'zones': zones,
- 'reverses': reversenames,
- })
- self.filedata['named.conf'] = self.templates['named'].render(context)
- self.Entries['ConfigFile']['/my/adm/hostbase/files/named.conf'] = self.FetchFile
- self.filedata['named.conf.views'] = self.templates['namedviews'].render(context)
- self.Entries['ConfigFile']['/my/adm/hostbase/files/named.conf.views'] = self.FetchFile
-
- def buildDHCP(self):
- """Pre-build dhcpd.conf and stash in the filedata table."""
-
- # fetches all the hosts with DHCP == True
- cursor = connection.cursor()
- cursor.execute("""
- SELECT hostname, mac_addr, ip_addr
- FROM (hostbase_host h INNER JOIN hostbase_interface i ON h.id = i.host_id)
- INNER JOIN hostbase_ip ip ON i.id = ip.interface_id
- WHERE i.dhcp=1 AND h.status='active' AND i.mac_addr <> ''
- AND i.mac_addr <> 'float' AND i.mac_addr <> 'unknown'
- ORDER BY h.hostname, i.mac_addr
- """)
-
- dhcphosts = cursor.fetchall()
- count = 0
- hosts = []
- hostdata = [dhcphosts[0][0], dhcphosts[0][1], dhcphosts[0][2]]
- if len(dhcphosts) > 1:
- for x in range(1, len(dhcphosts)):
- # if an interface has 2 or more ip addresses
- # adds the ip to the current interface
- if hostdata[0].split(".")[0] == dhcphosts[x][0].split(".")[0] and hostdata[1] == dhcphosts[x][1]:
- hostdata[2] = ", ".join([hostdata[2], dhcphosts[x][2]])
- # if a host has 2 or more interfaces
- # writes the current one and grabs the next
- elif hostdata[0].split(".")[0] == dhcphosts[x][0].split(".")[0]:
- hosts.append(hostdata)
- count += 1
- hostdata = ["-".join([dhcphosts[x][0], str(count)]), dhcphosts[x][1], dhcphosts[x][2]]
- # new host found, writes current data to the template
- else:
- hosts.append(hostdata)
- count = 0
- hostdata = [dhcphosts[x][0], dhcphosts[x][1], dhcphosts[x][2]]
- #makes sure the last of the data gets written out
- if hostdata not in hosts:
- hosts.append(hostdata)
-
- context = Context({
- 'hosts': hosts,
- 'numips': len(hosts),
- })
-
- self.filedata['dhcpd.conf'] = self.templates['dhcp'].render(context)
- self.Entries['ConfigFile']['/my/adm/hostbase/files/dhcpd.conf'] = self.FetchFile
-
- def buildHosts(self):
- """Pre-build and stash /etc/hosts file."""
-
- append_data = []
-
- cursor = connection.cursor()
- cursor.execute("""
- SELECT hostname FROM hostbase_host ORDER BY hostname
- """)
- hostbase = cursor.fetchall()
- domains = [host[0].split(".", 1)[1] for host in hostbase]
- domains_set = set(domains)
- domain_data = [(domain, domains.count(domain)) for domain in domains_set]
- domain_data.sort()
-
- cursor.execute("""
- SELECT ip_addr FROM hostbase_ip ORDER BY ip_addr
- """)
- ips = cursor.fetchall()
- three_octets = [ip[0].rstrip('0123456789').rstrip('.') \
- for ip in ips]
- three_octets_set = set(three_octets)
- three_octets_data = [(octet, three_octets.count(octet)) \
- for octet in three_octets_set]
- three_octets_data.sort()
-
- for three_octet in three_octets_data:
- querystring = """SELECT h.hostname, h.primary_user,
- p.ip_addr, n.name, c.cname
- FROM (((hostbase_host h INNER JOIN hostbase_interface i ON h.id = i.host_id)
- INNER JOIN hostbase_ip p ON i.id = p.interface_id)
- INNER JOIN hostbase_name n ON p.id = n.ip_id)
- LEFT JOIN hostbase_cname c ON n.id = c.name_id
- WHERE p.ip_addr LIKE \'%s.%%%%\' AND h.status = 'active'""" % three_octet[0]
- cursor.execute(querystring)
- tosort = list(cursor.fetchall())
- tosort.sort(lambda x, y: cmp(int(x[2].split(".")[-1]), int(y[2].split(".")[-1])))
- append_data.append((three_octet, tuple(tosort)))
-
- two_octets = [ip.rstrip('0123456789').rstrip('.') for ip in three_octets]
- two_octets_set = set(two_octets)
- two_octets_data = [(octet, two_octets.count(octet))
- for octet in two_octets_set]
- two_octets_data.sort()
-
- context = Context({
- 'domain_data': domain_data,
- 'three_octets_data': three_octets_data,
- 'two_octets_data': two_octets_data,
- 'three_octets': three_octets,
- 'num_ips': len(three_octets),
- })
-
- self.filedata['hosts'] = self.templates['hosts'].render(context)
-
- for subnet in append_data:
- ips = []
- simple = True
- namelist = [name.split('.', 1)[0] for name in [subnet[1][0][3]]]
- cnamelist = []
- if subnet[1][0][4]:
- cnamelist.append(subnet[1][0][4].split('.', 1)[0])
- simple = False
- appenddata = subnet[1][0]
- for ip in subnet[1][1:]:
- if appenddata[2] == ip[2]:
- namelist.append(ip[3].split('.', 1)[0])
- if ip[4]:
- cnamelist.append(ip[4].split('.', 1)[0])
- simple = False
- appenddata = ip
- else:
- if appenddata[0] == ip[0]:
- simple = False
- ips.append((appenddata[2], appenddata[0], set(namelist),
- cnamelist, simple, appenddata[1]))
- appenddata = ip
- simple = True
- namelist = [ip[3].split('.', 1)[0]]
- cnamelist = []
- if ip[4]:
- cnamelist.append(ip[4].split('.', 1)[0])
- simple = False
- ips.append((appenddata[2], appenddata[0], set(namelist),
- cnamelist, simple, appenddata[1]))
- context = Context({
- 'subnet': subnet[0],
- 'ips': ips,
- })
- self.filedata['hosts'] += self.templates['hostsapp'].render(context)
- self.Entries['ConfigFile']['/mcs/etc/hosts'] = self.FetchFile
-
- def buildPrinters(self):
- """The /mcs/etc/printers.data file"""
- header = """# This file is automatically generated. DO NOT EDIT IT!
-#
-Name Room User Type Notes
-============== ========== ============================== ======================== ====================
-"""
-
- cursor = connection.cursor()
- # fetches all the printers from the database
- cursor.execute("""
- SELECT printq, location, primary_user, comments
- FROM hostbase_host
- WHERE whatami='printer' AND printq <> '' AND status = 'active'
- ORDER BY printq
- """)
- printers = cursor.fetchall()
-
- printersfile = header
- for printer in printers:
- # splits up the printq line and gets the
- # correct description out of the comments section
- temp = printer[3].split('\n')
- for printq in re.split(',[ ]*', printer[0]):
- if len(temp) > 1:
- printersfile += ("%-16s%-12s%-32s%-26s%s\n" %
- (printq, printer[1], printer[2], temp[1], temp[0]))
- else:
- printersfile += ("%-16s%-12s%-32s%-26s%s\n" %
- (printq, printer[1], printer[2], '', printer[3]))
- self.filedata['printers.data'] = printersfile
- self.Entries['ConfigFile']['/mcs/etc/printers.data'] = self.FetchFile
-
- def buildHostsLPD(self):
- """Creates the /mcs/etc/hosts.lpd file"""
-
- # this header needs to be changed to be more generic
- header = """+@machines
-+@all-machines
-achilles.ctd.anl.gov
-raven.ops.anl.gov
-seagull.hr.anl.gov
-parrot.ops.anl.gov
-condor.ops.anl.gov
-delphi.esh.anl.gov
-anlcv1.ctd.anl.gov
-anlvms.ctd.anl.gov
-olivia.ctd.anl.gov\n\n"""
-
- cursor = connection.cursor()
- cursor.execute("""
- SELECT hostname FROM hostbase_host WHERE netgroup=\"red\" AND status = 'active'
- ORDER BY hostname""")
- redmachines = list(cursor.fetchall())
- cursor.execute("""
- SELECT n.name FROM ((hostbase_host h INNER JOIN hostbase_interface i ON h.id = i.host_id)
- INNER JOIN hostbase_ip p ON i.id = p.interface_id) INNER JOIN hostbase_name n ON p.id = n.ip_id
- WHERE netgroup=\"red\" AND n.only=1 AND h.status = 'active'
- """)
- redmachines.extend(list(cursor.fetchall()))
- cursor.execute("""
- SELECT hostname FROM hostbase_host WHERE netgroup=\"win\" AND status = 'active'
- ORDER BY hostname""")
- winmachines = list(cursor.fetchall())
- cursor.execute("""
- SELECT n.name FROM ((hostbase_host h INNER JOIN hostbase_interface i ON h.id = i.host_id)
- INNER JOIN hostbase_ip p ON i.id = p.interface_id) INNER JOIN hostbase_name n ON p.id = n.ip_id
- WHERE netgroup=\"win\" AND n.only=1 AND h.status = 'active'
- """)
- winmachines.__add__(list(cursor.fetchall()))
- hostslpdfile = header
- for machine in redmachines:
- hostslpdfile += machine[0] + "\n"
- hostslpdfile += "\n"
- for machine in winmachines:
- hostslpdfile += machine[0] + "\n"
- self.filedata['hosts.lpd'] = hostslpdfile
- self.Entries['ConfigFile']['/mcs/etc/hosts.lpd'] = self.FetchFile
-
- def buildNetgroups(self):
- """Makes the *-machine files"""
- header = """###################################################################
-# This file lists hosts in the '%s' machine netgroup, it is
-# automatically generated. DO NOT EDIT THIS FILE!
-#
-# Number of hosts in '%s' machine netgroup: %i
-#\n\n"""
-
- cursor = connection.cursor()
- # fetches all the hosts that with valid netgroup entries
- cursor.execute("""
- SELECT h.hostname, n.name, h.netgroup, n.only FROM ((hostbase_host h
- INNER JOIN hostbase_interface i ON h.id = i.host_id)
- INNER JOIN hostbase_ip p ON i.id = p.interface_id)
- INNER JOIN hostbase_name n ON p.id = n.ip_id
- WHERE h.netgroup <> '' AND h.netgroup <> 'none' AND h.status = 'active'
- ORDER BY h.netgroup, h.hostname
- """)
- nameslist = cursor.fetchall()
- # gets the first host and initializes the hash
- hostdata = nameslist[0]
- netgroups = {hostdata[2]: [hostdata[0]]}
- for row in nameslist:
- # if new netgroup, create it
- if row[2] not in netgroups:
- netgroups.update({row[2]: []})
- # if it belongs in the netgroup and has multiple interfaces, put them in
- if hostdata[0] == row[0] and row[3]:
- netgroups[row[2]].append(row[1])
- hostdata = row
- # if its a new host, write the old one to the hash
- elif hostdata[0] != row[0]:
- netgroups[row[2]].append(row[0])
- hostdata = row
-
- for netgroup in netgroups:
- fileoutput = StringIO()
- fileoutput.write(header % (netgroup, netgroup, len(netgroups[netgroup])))
- for each in netgroups[netgroup]:
- fileoutput.write(each + "\n")
- self.filedata['%s-machines' % netgroup] = fileoutput.getvalue()
- fileoutput.close()
- self.Entries['ConfigFile']['/my/adm/hostbase/makenets/machines/%s-machines' % netgroup] = self.FetchFile
-
- cursor.execute("""
- UPDATE hostbase_host SET dirty=0
- """)
diff --git a/src/lib/Bcfg2/Server/Plugins/Ldap.py b/src/lib/Bcfg2/Server/Plugins/Ldap.py
index f724402d0..553ddbc47 100644
--- a/src/lib/Bcfg2/Server/Plugins/Ldap.py
+++ b/src/lib/Bcfg2/Server/Plugins/Ldap.py
@@ -3,7 +3,6 @@ import logging
import sys
import time
import traceback
-import Bcfg2.Options
import Bcfg2.Server.Plugin
logger = logging.getLogger('Bcfg2.Plugins.Ldap')
@@ -44,10 +43,10 @@ class ConfigFile(Bcfg2.Server.Plugin.FileBacked):
The approach implemented here is having the user call a registering
decorator that updates a global variable in this module.
"""
- def __init__(self, filename, fam):
+ def __init__(self, filename):
self.filename = filename
Bcfg2.Server.Plugin.FileBacked.__init__(self, self.filename)
- fam.AddMonitor(self.filename, self)
+ self.fam.AddMonitor(self.filename, self)
def Index(self):
"""
@@ -69,12 +68,12 @@ class Ldap(Bcfg2.Server.Plugin.Plugin, Bcfg2.Server.Plugin.Connector):
experimental = True
debug_flag = False
- def __init__(self, core, datastore):
- Bcfg2.Server.Plugin.Plugin.__init__(self, core, datastore)
+ def __init__(self, core):
+ Bcfg2.Server.Plugin.Plugin.__init__(self, core)
Bcfg2.Server.Plugin.Connector.__init__(self)
- self.config = ConfigFile(self.data + "/config.py", core.fam)
+ self.config = ConfigFile(self.data + "/config.py")
- def debug_log(self, message, flag = None):
+ def debug_log(self, message, flag=None):
if (flag is None) and self.debug_flag or flag:
self.logger.error(message)
@@ -83,37 +82,39 @@ class Ldap(Bcfg2.Server.Plugin.Plugin, Bcfg2.Server.Plugin.Connector):
try:
data = {}
self.debug_log("LdapPlugin debug: found queries " +
- str(LDAP_QUERIES))
+ str(LDAP_QUERIES))
for QueryClass in LDAP_QUERIES:
query = QueryClass()
if query.is_applicable(metadata):
self.debug_log("LdapPlugin debug: processing query '" +
- query.name + "'")
+ query.name + "'")
data[query.name] = query.get_result(metadata)
else:
self.debug_log("LdapPlugin debug: query '" + query.name +
- "' not applicable to host '" + metadata.hostname + "'")
+ "' not applicable to host '" +
+ metadata.hostname + "'")
return data
except Exception:
if hasattr(query, "name"):
logger.error("LdapPlugin error: " +
- "Exception during processing of query named '" +
- str(query.name) +
- "', query results will be empty" +
- " and may cause bind failures")
+ "Exception during processing of query named '" +
+ str(query.name) +
+ "', query results will be empty" +
+ " and may cause bind failures")
for line in traceback.format_exception(sys.exc_info()[0],
sys.exc_info()[1],
sys.exc_info()[2]):
logger.error("LdapPlugin error: " +
- line.replace("\n", ""))
+ line.replace("\n", ""))
return {}
+
class LdapConnection(object):
"""
Connection to an LDAP server.
"""
- def __init__(self, host = "localhost", port = 389,
- binddn = None, bindpw = None):
+ def __init__(self, host="localhost", port=389,
+ binddn=None, bindpw=None):
self.host = host
self.port = port
self.binddn = binddn
@@ -134,8 +135,8 @@ class LdapConnection(object):
for attempt in range(RETRY_COUNT + 1):
if attempt >= 1:
logger.error("LdapPlugin error: " +
- "LDAP server down (retry " + str(attempt) + "/" +
- str(RETRY_COUNT) + ")")
+ "LDAP server down (retry " + str(attempt) + "/" +
+ str(RETRY_COUNT) + ")")
try:
if not self.conn:
self.init_conn()
@@ -155,6 +156,7 @@ class LdapConnection(object):
def url(self):
return "ldap://" + self.host + ":" + str(self.port)
+
class LdapQuery(object):
"""
Query referencing an LdapConnection and providing several
@@ -211,9 +213,10 @@ class LdapQuery(object):
return self.result
else:
logger.error("LdapPlugin error: " +
- "No valid connection defined for query " + str(self))
+ "No valid connection defined for query " + str(self))
return None
+
class LdapSubQuery(LdapQuery):
"""
SubQueries are meant for internal use only and are not added
@@ -244,5 +247,5 @@ class LdapSubQuery(LdapQuery):
return self.process_result(metadata, **kwargs)
else:
logger.error("LdapPlugin error: " +
- "No valid connection defined for query " + str(self))
+ "No valid connection defined for query " + str(self))
return None
diff --git a/src/lib/Bcfg2/Server/Plugins/Metadata.py b/src/lib/Bcfg2/Server/Plugins/Metadata.py
index f805772a7..657e4df31 100644
--- a/src/lib/Bcfg2/Server/Plugins/Metadata.py
+++ b/src/lib/Bcfg2/Server/Plugins/Metadata.py
@@ -7,46 +7,56 @@ import sys
import time
import copy
import errno
-import fcntl
import socket
import logging
import lxml.etree
import Bcfg2.Server
-import Bcfg2.Server.Lint
+import Bcfg2.Options
import Bcfg2.Server.Plugin
import Bcfg2.Server.FileMonitor
from Bcfg2.Utils import locked
+from Bcfg2.Server.Cache import Cache
# pylint: disable=W0622
from Bcfg2.Compat import MutableMapping, all, any, wraps
# pylint: enable=W0622
from Bcfg2.version import Bcfg2VersionInfo
-try:
- from django.db import models
- HAS_DJANGO = True
-except ImportError:
- HAS_DJANGO = False
+# pylint: disable=C0103
+ClientVersions = None
+MetadataClientModel = None
+# pylint: enable=C0103
+HAS_DJANGO = False
-LOGGER = logging.getLogger(__name__)
+def load_django_models():
+ """ Load models for Django after option parsing has completed """
+ # pylint: disable=W0602
+ global MetadataClientModel, ClientVersions, HAS_DJANGO
+ # pylint: enable=W0602
-if HAS_DJANGO:
- class MetadataClientModel(models.Model,
+ try:
+ from django.db import models
+ HAS_DJANGO = True
+ except ImportError:
+ HAS_DJANGO = False
+ return
+
+ class MetadataClientModel(models.Model, # pylint: disable=W0621
Bcfg2.Server.Plugin.PluginDatabaseModel):
""" django model for storing clients in the database """
hostname = models.CharField(max_length=255, primary_key=True)
version = models.CharField(max_length=31, null=True)
- class ClientVersions(MutableMapping,
+ class ClientVersions(MutableMapping, # pylint: disable=W0621,W0612
Bcfg2.Server.Plugin.DatabaseBacked):
""" dict-like object to make it easier to access client bcfg2
versions from the database """
-
create = False
def __getitem__(self, key):
try:
- return MetadataClientModel.objects.get(hostname=key).version
+ return MetadataClientModel.objects.get(
+ hostname=key).version
except MetadataClientModel.DoesNotExist:
raise KeyError(key)
@@ -80,7 +90,7 @@ if HAS_DJANGO:
def keys(self):
""" Get keys for the mapping """
- return [c.hostname for c in MetadataClientModel.objects.all()]
+ return list(iter(self))
def __contains__(self, key):
try:
@@ -93,25 +103,19 @@ if HAS_DJANGO:
class XMLMetadataConfig(Bcfg2.Server.Plugin.XMLFileBacked):
"""Handles xml config files and all XInclude statements"""
- def __init__(self, metadata, watch_clients, basefile):
- # we tell XMLFileBacked _not_ to add a monitor for this file,
- # because the main Metadata plugin has already added one.
- # then we immediately set should_monitor to the proper value,
- # so that XInclude'd files get properly watched
+ def __init__(self, metadata, basefile):
fpath = os.path.join(metadata.data, basefile)
toptag = os.path.splitext(basefile)[0].title()
Bcfg2.Server.Plugin.XMLFileBacked.__init__(self, fpath,
- fam=metadata.core.fam,
should_monitor=False,
create=toptag)
- self.should_monitor = watch_clients
self.metadata = metadata
self.basefile = basefile
self.data = None
self.basedata = None
self.basedir = metadata.data
self.logger = metadata.logger
- self.pseudo_monitor = isinstance(metadata.core.fam,
+ self.pseudo_monitor = isinstance(Bcfg2.Server.FileMonitor.get_fam(),
Bcfg2.Server.FileMonitor.Pseudo)
def _get_xdata(self):
@@ -196,15 +200,7 @@ class XMLMetadataConfig(Bcfg2.Server.Plugin.XMLFileBacked):
while locked(fd):
pass
- try:
- datafile.write(newcontents)
- except:
- fcntl.lockf(fd, fcntl.LOCK_UN)
- msg = "Metadata: Failed to write new xml data to %s: %s" % \
- (tmpfile, sys.exc_info()[1])
- self.logger.error(msg, exc_info=1)
- os.unlink(tmpfile)
- raise Bcfg2.Server.Plugin.MetadataRuntimeError(msg)
+ datafile.write(newcontents)
datafile.close()
# check if clients.xml is a symlink
if os.path.islink(fname):
@@ -212,10 +208,10 @@ class XMLMetadataConfig(Bcfg2.Server.Plugin.XMLFileBacked):
try:
os.rename(tmpfile, fname)
- except: # pylint: disable=W0702
+ except OSError:
try:
os.unlink(tmpfile)
- except: # pylint: disable=W0702
+ except OSError:
pass
msg = "Metadata: Failed to rename %s: %s" % (tmpfile,
sys.exc_info()[1])
@@ -250,8 +246,7 @@ class XMLMetadataConfig(Bcfg2.Server.Plugin.XMLFileBacked):
def add_monitor(self, fpath):
self.extras.append(fpath)
- if self.fam and self.should_monitor:
- self.fam.AddMonitor(fpath, self.metadata)
+ self.fam.AddMonitor(fpath, self.metadata)
def HandleEvent(self, event=None):
"""Handle fam events"""
@@ -354,6 +349,8 @@ class MetadataQuery(object):
def __init__(self, by_name, get_clients, by_groups, by_profiles,
all_groups, all_groups_in_category):
+ self.logger = logging.getLogger(self.__class__.__name__)
+
#: Get :class:`Bcfg2.Server.Plugins.Metadata.ClientMetadata`
#: object for the given hostname.
#:
@@ -406,8 +403,9 @@ class MetadataQuery(object):
@wraps(func)
def inner(arg):
if isinstance(arg, str):
- LOGGER.warning("%s: %s takes a list as argument, not a string"
- % (self.__class__.__name__, func.__name__))
+ self.logger.warning("%s: %s takes a list as argument, not a "
+ "string" % (self.__class__.__name__,
+ func.__name__))
return func(arg)
# pylint: enable=C0111
@@ -490,25 +488,35 @@ class MetadataGroup(tuple): # pylint: disable=E0012,R0924
class Metadata(Bcfg2.Server.Plugin.Metadata,
- Bcfg2.Server.Plugin.Caching,
Bcfg2.Server.Plugin.ClientRunHooks,
Bcfg2.Server.Plugin.DatabaseBacked):
"""This class contains data for bcfg2 server metadata."""
__author__ = 'bcfg-dev@mcs.anl.gov'
sort_order = 500
-
- def __init__(self, core, datastore, watch_clients=True):
+ __rmi__ = Bcfg2.Server.Plugin.DatabaseBacked.__rmi__ + ['list_clients',
+ 'remove_client']
+
+ options = Bcfg2.Server.Plugin.DatabaseBacked.options + [
+ Bcfg2.Options.Common.password,
+ Bcfg2.Options.BooleanOption(
+ cf=('metadata', 'use_database'), dest="metadata_db",
+ help="Use database capabilities of the Metadata plugin"),
+ Bcfg2.Options.Option(
+ cf=('communication', 'authentication'), default='cert+password',
+ choices=['cert', 'bootstrap', 'cert+password'],
+ help='Default client authentication method')]
+ options_parsed_hook = staticmethod(load_django_models)
+
+ def __init__(self, core):
Bcfg2.Server.Plugin.Metadata.__init__(self)
- Bcfg2.Server.Plugin.Caching.__init__(self)
Bcfg2.Server.Plugin.ClientRunHooks.__init__(self)
- Bcfg2.Server.Plugin.DatabaseBacked.__init__(self, core, datastore)
- self.watch_clients = watch_clients
+ Bcfg2.Server.Plugin.DatabaseBacked.__init__(self, core)
self.states = dict()
self.extra = dict()
self.handlers = dict()
self.groups_xml = self._handle_file("groups.xml")
if (self._use_db and
- os.path.exists(os.path.join(self.data, "clients.xml"))):
+ os.path.exists(os.path.join(self.data, "clients.xml"))):
self.logger.warning("Metadata: database enabled but clients.xml "
"found, parsing in compatibility mode")
self.clients_xml = self._handle_file("clients.xml")
@@ -540,15 +548,16 @@ class Metadata(Bcfg2.Server.Plugin.Metadata,
self.ordered_groups = []
# mapping of hostname -> version string
if self._use_db:
- self.versions = ClientVersions(core, datastore)
+ self.versions = ClientVersions(core) # pylint: disable=E1102
else:
self.versions = dict()
self.uuid = {}
self.session_cache = {}
+ self.cache = Cache("Metadata")
self.default = None
self.pdirty = False
- self.password = core.setup['password']
+ self.password = Bcfg2.Options.setup.password
self.query = MetadataQuery(core.build_metadata,
self.list_clients,
self.get_client_names_by_groups,
@@ -576,16 +585,10 @@ class Metadata(Bcfg2.Server.Plugin.Metadata,
def _handle_file(self, fname):
""" set up the necessary magic for handling a metadata file
(clients.xml or groups.xml, e.g.) """
- if self.watch_clients:
- try:
- self.core.fam.AddMonitor(os.path.join(self.data, fname), self)
- except:
- err = sys.exc_info()[1]
- msg = "Unable to add file monitor for %s: %s" % (fname, err)
- self.logger.error(msg)
- raise Bcfg2.Server.Plugin.PluginInitError(msg)
- self.states[fname] = False
- xmlcfg = XMLMetadataConfig(self, self.watch_clients, fname)
+ Bcfg2.Server.FileMonitor.get_fam().AddMonitor(
+ os.path.join(self.data, fname), self)
+ self.states[fname] = False
+ xmlcfg = XMLMetadataConfig(self, fname)
aname = re.sub(r'[^A-z0-9_]', '_', os.path.basename(fname))
self.handlers[xmlcfg.HandleEvent] = getattr(self,
"_handle_%s_event" % aname)
@@ -600,7 +603,7 @@ class Metadata(Bcfg2.Server.Plugin.Metadata,
elif alias:
for child in node:
if (child.tag == "Alias" and
- child.attrib["name"] == name):
+ child.attrib["name"] == name):
return node
return None
@@ -666,7 +669,9 @@ class Metadata(Bcfg2.Server.Plugin.Metadata,
try:
client = MetadataClientModel.objects.get(hostname=client_name)
except MetadataClientModel.DoesNotExist:
+ # pylint: disable=E1102
client = MetadataClientModel(hostname=client_name)
+ # pylint: enable=E1102
client.save()
self.update_client_list()
return client
@@ -819,7 +824,7 @@ class Metadata(Bcfg2.Server.Plugin.Metadata,
if client.get('secure', 'false').lower() == 'true':
self.secure.append(clname)
if (client.get('location', 'fixed') == 'floating' or
- client.get('floating', 'false').lower() == 'true'):
+ client.get('floating', 'false').lower() == 'true'):
self.floating.append(clname)
if 'password' in client.attrib:
self.passwords[clname] = client.get('password')
@@ -854,7 +859,7 @@ class Metadata(Bcfg2.Server.Plugin.Metadata,
except KeyError:
self.clientgroups[clname] = [profile]
self.update_client_list()
- self.expire_cache()
+ self.cache.expire()
self.states['clients.xml'] = True
def _get_condition(self, element):
@@ -948,19 +953,16 @@ class Metadata(Bcfg2.Server.Plugin.Metadata,
self.group_membership.setdefault(gname, [])
self.group_membership[gname].append(
self._aggregate_conditions(conditions))
- self.expire_cache()
+ self.cache.expire()
self.states['groups.xml'] = True
- def expire_cache(self, key=None):
- self.core.metadata_cache.expire(key)
-
def HandleEvent(self, event):
"""Handle update events for data files."""
for handles, event_handler in self.handlers.items():
if handles(event):
# clear the entire cache when we get an event for any
# metadata file
- self.expire_cache()
+ self.cache.expire()
# clear out the list of category suppressions that
# have been warned about, since this may change when
@@ -1114,7 +1116,7 @@ class Metadata(Bcfg2.Server.Plugin.Metadata,
for p in self.group_membership[grpname]):
newgroups.add(grpname)
if (grpname in self.groups and
- self.groups[grpname].category):
+ self.groups[grpname].category):
categories[self.groups[grpname].category] = grpname
groups.update(newgroups)
for grpname, predicates in self.negated_groups.items():
@@ -1123,7 +1125,7 @@ class Metadata(Bcfg2.Server.Plugin.Metadata,
if any(p(client, groups, categories) for p in predicates):
removegroups.add(grpname)
if (grpname in self.groups and
- self.groups[grpname].category):
+ self.groups[grpname].category):
del categories[self.groups[grpname].category]
groups.difference_update(removegroups)
return (groups, categories)
@@ -1182,8 +1184,8 @@ class Metadata(Bcfg2.Server.Plugin.Metadata,
raise Bcfg2.Server.Plugin.MetadataRuntimeError("Metadata has not "
"been read yet")
client = client.lower()
- if client in self.core.metadata_cache:
- return self.core.metadata_cache[client]
+ if client in self.cache:
+ return self.cache[client]
if client in self.aliases:
client = self.aliases[client]
@@ -1280,7 +1282,7 @@ class Metadata(Bcfg2.Server.Plugin.Metadata,
addresses, categories, uuid, password, version,
self.query)
if self.core.metadata_cache_mode == 'initial':
- self.core.metadata_cache[client] = rv
+ self.cache[client] = rv
return rv
def get_all_group_names(self):
@@ -1373,6 +1375,7 @@ class Metadata(Bcfg2.Server.Plugin.Metadata,
return False
resolved = self.resolve_client(addresspair)
if resolved.lower() == client.lower():
+ self.logger.debug("Client %s address validates" % client)
return True
else:
self.logger.error("Got request for %s from incorrect address %s" %
@@ -1413,7 +1416,7 @@ class Metadata(Bcfg2.Server.Plugin.Metadata,
self.debug_log("Authenticating client %s" % client)
# validate id_method
- auth_type = self.auth.get(client, self.core.setup['authentication'])
+ auth_type = self.auth.get(client, Bcfg2.Options.setup.authentication)
if auth_type == 'cert' and id_method != 'cert':
self.logger.error("Client %s does not provide a cert, but only "
"cert auth is allowed" % client)
@@ -1421,13 +1424,14 @@ class Metadata(Bcfg2.Server.Plugin.Metadata,
# next we validate the address
if (id_method != 'uuid' and
- not self.validate_client_address(client, address)):
+ not self.validate_client_address(client, address)):
return False
if id_method == 'cert' and auth_type != 'cert+password':
# remember the cert-derived client name for this connection
if client in self.floating:
self.session_cache[address] = (time.time(), client)
+ self.logger.debug("Client %s certificate validates" % client)
# we are done if cert+password not required
return True
@@ -1454,6 +1458,7 @@ class Metadata(Bcfg2.Server.Plugin.Metadata,
# populate the session cache
if user != 'root':
self.session_cache[address] = (time.time(), client)
+ self.logger.debug("Client %s authenticated successfully" % client)
return True
# pylint: enable=R0911,R0912
@@ -1477,7 +1482,7 @@ class Metadata(Bcfg2.Server.Plugin.Metadata,
(len(removed), removed))
for client in added.union(removed):
- self.expire_cache(client)
+ self.cache.expire(client)
def start_client_run(self, metadata):
""" Hook to reread client list if the database is in use """
@@ -1486,7 +1491,7 @@ class Metadata(Bcfg2.Server.Plugin.Metadata,
def end_statistics(self, metadata):
""" Hook to toggle clients in bootstrap mode """
if self.auth.get(metadata.hostname,
- self.core.setup['authentication']) == 'bootstrap':
+ Bcfg2.Options.setup.authentication) == 'bootstrap':
self.update_client(metadata.hostname, dict(auth='cert'))
def viz(self, hosts, bundles, key, only_client, colors):
@@ -1594,7 +1599,7 @@ class Metadata(Bcfg2.Server.Plugin.Metadata,
for group in egroups:
for parent in group.findall('Group'):
if (parent.get('name') not in gseen and
- include_group(parent.get('name'))):
+ include_group(parent.get('name'))):
rv.append(gfmt % (parent.get('name'),
parent.get('name')))
gseen.append(parent.get("name"))
@@ -1602,171 +1607,3 @@ class Metadata(Bcfg2.Server.Plugin.Metadata,
rv.append('"group-%s" -> "group-%s";' %
(group.get('name'), parent.get('name')))
return rv
-
-
-class MetadataLint(Bcfg2.Server.Lint.ServerPlugin):
- """ ``bcfg2-lint`` plugin for :ref:`Metadata
- <server-plugins-grouping-metadata>`. This checks for several things:
-
- * ``<Client>`` tags nested inside other ``<Client>`` tags;
- * Deprecated options (like ``location="floating"``);
- * Profiles that don't exist, or that aren't profile groups;
- * Groups or clients that are defined multiple times;
- * Multiple default groups or a default group that isn't a profile
- group.
- """
-
- def Run(self):
- self.nested_clients()
- self.deprecated_options()
- self.bogus_profiles()
- self.duplicate_groups()
- self.duplicate_default_groups()
- self.duplicate_clients()
- self.default_is_profile()
-
- @classmethod
- def Errors(cls):
- return {"nested-client-tags": "warning",
- "deprecated-clients-options": "warning",
- "nonexistent-profile-group": "error",
- "non-profile-set-as-profile": "error",
- "duplicate-group": "error",
- "duplicate-client": "error",
- "multiple-default-groups": "error",
- "default-is-not-profile": "error"}
-
- def deprecated_options(self):
- """ Check for the ``location='floating'`` option, which has
- been deprecated in favor of ``floating='true'``. """
- if not hasattr(self.metadata, "clients_xml"):
- # using metadata database
- return
- clientdata = self.metadata.clients_xml.xdata
- for el in clientdata.xpath("//Client"):
- loc = el.get("location")
- if loc:
- if loc == "floating":
- floating = True
- else:
- floating = False
- self.LintError("deprecated-clients-options",
- "The location='%s' option is deprecated. "
- "Please use floating='%s' instead:\n%s" %
- (loc, floating, self.RenderXML(el)))
-
- def nested_clients(self):
- """ Check for a ``<Client/>`` tag inside a ``<Client/>`` tag,
- which is either redundant or will never match. """
- groupdata = self.metadata.groups_xml.xdata
- for el in groupdata.xpath("//Client//Client"):
- self.LintError("nested-client-tags",
- "Client %s nested within Client tag: %s" %
- (el.get("name"), self.RenderXML(el)))
-
- def bogus_profiles(self):
- """ Check for clients that have profiles that are either not
- flagged as profile groups in ``groups.xml``, or don't exist. """
- if not hasattr(self.metadata, "clients_xml"):
- # using metadata database
- return
- for client in self.metadata.clients_xml.xdata.findall('.//Client'):
- profile = client.get("profile")
- if profile not in self.metadata.groups:
- self.LintError("nonexistent-profile-group",
- "%s has nonexistent profile group %s:\n%s" %
- (client.get("name"), profile,
- self.RenderXML(client)))
- elif not self.metadata.groups[profile].is_profile:
- self.LintError("non-profile-set-as-profile",
- "%s is set as profile for %s, but %s is not a "
- "profile group:\n%s" %
- (profile, client.get("name"), profile,
- self.RenderXML(client)))
-
- def duplicate_default_groups(self):
- """ Check for multiple default groups. """
- defaults = []
- for grp in self.metadata.groups_xml.xdata.xpath("//Groups/Group") + \
- self.metadata.groups_xml.xdata.xpath("//Groups/Group//Group"):
- if grp.get("default", "false").lower() == "true":
- defaults.append(self.RenderXML(grp))
- if len(defaults) > 1:
- self.LintError("multiple-default-groups",
- "Multiple default groups defined:\n%s" %
- "\n".join(defaults))
-
- def duplicate_clients(self):
- """ Check for clients that are defined more than once. """
- if not hasattr(self.metadata, "clients_xml"):
- # using metadata database
- return
- self.duplicate_entries(
- self.metadata.clients_xml.xdata.xpath("//Client"),
- "client")
-
- def duplicate_groups(self):
- """ Check for groups that are defined more than once. There are two
- ways this can happen:
-
- 1. The group is listed twice with contradictory options.
- 2. The group is listed with no options *first*, and then with
- options later.
-
- In this context, 'first' refers to the order in which groups
- are parsed; see the loop condition below and
- _handle_groups_xml_event above for details. """
- groups = dict()
- duplicates = dict()
- for grp in self.metadata.groups_xml.xdata.xpath("//Groups/Group") + \
- self.metadata.groups_xml.xdata.xpath("//Groups/Group//Group"):
- grpname = grp.get("name")
- if grpname in duplicates:
- duplicates[grpname].append(grp)
- elif set(grp.attrib.keys()).difference(['negate', 'name']):
- # group has options
- if grpname in groups:
- duplicates[grpname] = [grp, groups[grpname]]
- else:
- groups[grpname] = grp
- else: # group has no options
- groups[grpname] = grp
- for grpname, grps in duplicates.items():
- self.LintError("duplicate-group",
- "Group %s is defined multiple times:\n%s" %
- (grpname,
- "\n".join(self.RenderXML(g) for g in grps)))
-
- def duplicate_entries(self, allentries, etype):
- """ Generic duplicate entry finder.
-
- :param allentries: A list of all entries to check for
- duplicates.
- :type allentries: list of lxml.etree._Element
- :param etype: The entry type. This will be used to determine
- the error name (``duplicate-<etype>``) and for
- display to the end user.
- :type etype: string
- """
- entries = dict()
- for el in allentries:
- if el.get("name") in entries:
- entries[el.get("name")].append(self.RenderXML(el))
- else:
- entries[el.get("name")] = [self.RenderXML(el)]
- for ename, els in entries.items():
- if len(els) > 1:
- self.LintError("duplicate-%s" % etype,
- "%s %s is defined multiple times:\n%s" %
- (etype.title(), ename, "\n".join(els)))
-
- def default_is_profile(self):
- """ Ensure that the default group is a profile group. """
- if (self.metadata.default and
- not self.metadata.groups[self.metadata.default].is_profile):
- xdata = \
- self.metadata.groups_xml.xdata.xpath("//Group[@name='%s']" %
- self.metadata.default)[0]
- self.LintError("default-is-not-profile",
- "Default group is not a profile group:\n%s" %
- self.RenderXML(xdata))
diff --git a/src/lib/Bcfg2/Server/Plugins/NagiosGen.py b/src/lib/Bcfg2/Server/Plugins/NagiosGen.py
index 8f1d03586..d3c38ef19 100644
--- a/src/lib/Bcfg2/Server/Plugins/NagiosGen.py
+++ b/src/lib/Bcfg2/Server/Plugins/NagiosGen.py
@@ -5,28 +5,25 @@ import re
import sys
import glob
import socket
-import Bcfg2.Server
-import Bcfg2.Server.Plugin
+from Bcfg2.Server.Plugin import Plugin, Generator, StructFile, \
+ PluginExecutionError
-class NagiosGen(Bcfg2.Server.Plugin.Plugin,
- Bcfg2.Server.Plugin.Generator):
+class NagiosGen(Plugin, Generator):
""" NagiosGen is a Bcfg2 plugin that dynamically generates Nagios
configuration file based on Bcfg2 data. """
__author__ = 'bcfg-dev@mcs.anl.gov'
line_fmt = '\t%-32s %s'
- def __init__(self, core, datastore):
- Bcfg2.Server.Plugin.Plugin.__init__(self, core, datastore)
- Bcfg2.Server.Plugin.Generator.__init__(self)
+ def __init__(self, core):
+ Plugin.__init__(self, core)
+ Generator.__init__(self)
self.config = \
- Bcfg2.Server.Plugin.StructFile(os.path.join(self.data,
- 'config.xml'),
- core.fam, should_monitor=True,
- create=self.name)
+ StructFile(os.path.join(self.data, 'config.xml'),
+ should_monitor=True, create=self.name)
self.Entries = {
'Path': {'/etc/nagiosgen.status': self.createhostconfig,
- '/etc/nagios/nagiosgen.cfg': self.createserverconfig}}
+ '/etc/nagios/conf.d/bcfg2.cfg': self.createserverconfig}}
self.client_attrib = {'encoding': 'ascii',
'owner': 'root',
@@ -42,13 +39,17 @@ class NagiosGen(Bcfg2.Server.Plugin.Plugin,
def createhostconfig(self, entry, metadata):
"""Build host specific configuration file."""
try:
- host_address = socket.gethostbyname(metadata.hostname)
- except socket.gaierror:
- self.logger.error("Failed to find IP address for %s" %
- metadata.hostname)
- raise Bcfg2.Server.Plugin.PluginExecutionError
+ host_address = socket.getaddrinfo(metadata.hostname, None)[0][4][0]
+ except socket.error:
+ self.logger.error()
+ raise PluginExecutionError("Failed to find IP address for %s" %
+ metadata.hostname)
host_groups = [grp for grp in metadata.groups
- if os.path.isfile('%s/%s-group.cfg' % (self.data, grp))]
+ if os.path.isfile('%s/%s-group.cfg' %
+ (self.data, grp))] + \
+ [bundle for bundle in metadata.bundles
+ if os.path.isfile('%s/%s-bundle.cfg' %
+ (self.data, bundle))]
host_config = ['define host {',
self.line_fmt % ('host_name', metadata.hostname),
self.line_fmt % ('alias', metadata.hostname),
@@ -56,7 +57,7 @@ class NagiosGen(Bcfg2.Server.Plugin.Plugin,
if host_groups:
host_config.append(self.line_fmt % ("hostgroups",
- ",".join(host_groups)))
+ ",".join(sorted(host_groups))))
# read the config
xtra = dict()
@@ -84,7 +85,8 @@ class NagiosGen(Bcfg2.Server.Plugin.Plugin,
def createserverconfig(self, entry, _):
"""Build monolithic server configuration file."""
host_configs = glob.glob(os.path.join(self.data, '*-host.cfg'))
- group_configs = glob.glob(os.path.join(self.data, '*-group.cfg'))
+ group_configs = glob.glob(os.path.join(self.data, '*-group.cfg')) + \
+ glob.glob(os.path.join(self.data, '*-bundle.cfg'))
host_data = []
group_data = []
for host in host_configs:
diff --git a/src/lib/Bcfg2/Server/Plugins/Ohai.py b/src/lib/Bcfg2/Server/Plugins/Ohai.py
index 0853ea993..461be9ba8 100644
--- a/src/lib/Bcfg2/Server/Plugins/Ohai.py
+++ b/src/lib/Bcfg2/Server/Plugins/Ohai.py
@@ -51,7 +51,7 @@ class OhaiCache(object):
if item not in self.cache:
try:
data = open(self.hostpath(item)).read()
- except:
+ except IOError:
raise KeyError(item)
self.cache[item] = json.loads(data)
return self.cache[item]
@@ -61,7 +61,7 @@ class OhaiCache(object):
del self.cache[item]
try:
os.unlink(self.hostpath(item))
- except:
+ except OSError:
raise IndexError("Could not unlink %s: %s" % (self.hostpath(item),
sys.exc_info()[1]))
@@ -80,11 +80,9 @@ class Ohai(Bcfg2.Server.Plugin.Plugin,
"""The Ohai plugin is used to detect information
about the client operating system.
"""
- name = 'Ohai'
- experimental = True
- def __init__(self, core, datastore):
- Bcfg2.Server.Plugin.Plugin.__init__(self, core, datastore)
+ def __init__(self, core):
+ Bcfg2.Server.Plugin.Plugin.__init__(self, core)
Bcfg2.Server.Plugin.Probing.__init__(self)
Bcfg2.Server.Plugin.Connector.__init__(self)
self.probe = lxml.etree.Element('probe', name='Ohai', source='Ohai',
diff --git a/src/lib/Bcfg2/Server/Plugins/POSIXCompat.py b/src/lib/Bcfg2/Server/Plugins/POSIXCompat.py
index 71128d64c..cf53c5866 100644
--- a/src/lib/Bcfg2/Server/Plugins/POSIXCompat.py
+++ b/src/lib/Bcfg2/Server/Plugins/POSIXCompat.py
@@ -11,8 +11,8 @@ class POSIXCompat(Bcfg2.Server.Plugin.Plugin,
create = False
- def __init__(self, core, datastore):
- Bcfg2.Server.Plugin.Plugin.__init__(self, core, datastore)
+ def __init__(self, core):
+ Bcfg2.Server.Plugin.Plugin.__init__(self, core)
Bcfg2.Server.Plugin.GoalValidator.__init__(self)
def validate_goals(self, metadata, goals):
diff --git a/src/lib/Bcfg2/Server/Plugins/Packages/Apt.py b/src/lib/Bcfg2/Server/Plugins/Packages/Apt.py
index 4a78f846f..7de79e2f3 100644
--- a/src/lib/Bcfg2/Server/Plugins/Packages/Apt.py
+++ b/src/lib/Bcfg2/Server/Plugins/Packages/Apt.py
@@ -13,8 +13,7 @@ class AptCollection(Collection):
overrides nothing, and defers all operations to :class:`PacSource`
"""
- def __init__(self, metadata, sources, cachepath, basepath, fam,
- debug=False):
+ def __init__(self, metadata, sources, cachepath, basepath, debug=False):
# we define an __init__ that just calls the parent __init__,
# so that we can set the docstring on __init__ to something
# different from the parent __init__ -- namely, the parent
@@ -22,7 +21,7 @@ class AptCollection(Collection):
# which we use to delineate the actual docs from the
# .. autoattribute hacks we have to do to get private
# attributes included in sphinx 1.0 """
- Collection.__init__(self, metadata, sources, cachepath, basepath, fam,
+ Collection.__init__(self, metadata, sources, cachepath, basepath,
debug=debug)
__init__.__doc__ = Collection.__init__.__doc__.split(".. -----")[0]
@@ -53,10 +52,6 @@ class AptCollection(Collection):
class AptSource(Source):
""" Handle APT sources """
- #: :ref:`server-plugins-generators-packages-magic-groups` for
- #: ``AptSource`` are "apt", "debian", "ubuntu", and "nexenta"
- basegroups = ['apt', 'debian', 'ubuntu', 'nexenta']
-
#: AptSource sets the ``type`` on Package entries to "deb"
ptype = 'deb'
@@ -74,13 +69,11 @@ class AptSource(Source):
else:
return ["%sPackages.gz" % self.rawurl]
- def read_files(self):
+ def read_files(self): # pylint: disable=R0912
bdeps = dict()
+ brecs = dict()
bprov = dict()
self.essentialpkgs = set()
- depfnames = ['Depends', 'Pre-Depends']
- if self.recommended:
- depfnames.append('Recommends')
for fname in self.files:
if not self.rawurl:
barch = [x
@@ -92,10 +85,11 @@ class AptSource(Source):
barch = self.arches[0]
if barch not in bdeps:
bdeps[barch] = dict()
+ brecs[barch] = dict()
bprov[barch] = dict()
try:
reader = gzip.GzipFile(fname)
- except:
+ except IOError:
self.logger.error("Packages: Failed to read file %s" % fname)
raise
for line in reader.readlines():
@@ -106,10 +100,11 @@ class AptSource(Source):
pkgname = words[1].strip().rstrip()
self.pkgnames.add(pkgname)
bdeps[barch][pkgname] = []
+ brecs[barch][pkgname] = []
elif words[0] == 'Essential' and self.essential:
if words[1].strip() == 'yes':
self.essentialpkgs.add(pkgname)
- elif words[0] in depfnames:
+ elif words[0] in ['Depends', 'Pre-Depends', 'Recommends']:
vindex = 0
for dep in words[1].split(','):
if '|' in dep:
@@ -120,17 +115,24 @@ class AptSource(Source):
barch,
vindex)
vindex += 1
- bdeps[barch][pkgname].append(dyn_dname)
+
+ if words[0] == 'Recommends':
+ brecs[barch][pkgname].append(dyn_dname)
+ else:
+ bdeps[barch][pkgname].append(dyn_dname)
bprov[barch][dyn_dname] = set(cdeps)
else:
raw_dep = re.sub(r'\(.*\)', '', dep)
raw_dep = raw_dep.rstrip().strip()
- bdeps[barch][pkgname].append(raw_dep)
+ if words[0] == 'Recommends':
+ brecs[barch][pkgname].append(raw_dep)
+ else:
+ bdeps[barch][pkgname].append(raw_dep)
elif words[0] == 'Provides':
for pkg in words[1].split(','):
dname = pkg.rstrip().strip()
if dname not in bprov[barch]:
bprov[barch][dname] = set()
bprov[barch][dname].add(pkgname)
- self.process_files(bdeps, bprov)
+ self.process_files(bdeps, bprov, brecs)
read_files.__doc__ = Source.read_files.__doc__
diff --git a/src/lib/Bcfg2/Server/Plugins/Packages/Collection.py b/src/lib/Bcfg2/Server/Plugins/Packages/Collection.py
index 39c51f351..004e27874 100644
--- a/src/lib/Bcfg2/Server/Plugins/Packages/Collection.py
+++ b/src/lib/Bcfg2/Server/Plugins/Packages/Collection.py
@@ -73,17 +73,17 @@ The Collection Module
---------------------
"""
-import sys
import copy
-import logging
import lxml.etree
+import Bcfg2.Options
import Bcfg2.Server.Plugin
+from Bcfg2.Logger import Debuggable
from Bcfg2.Compat import any, md5 # pylint: disable=W0622
+from Bcfg2.Server.FileMonitor import get_fam
+from Bcfg2.Server.Statistics import track_statistics
-LOGGER = logging.getLogger(__name__)
-
-class Collection(list, Bcfg2.Server.Plugin.Debuggable):
+class Collection(list, Debuggable):
""" ``Collection`` objects represent the set of
:class:`Bcfg2.Server.Plugins.Packages.Source` objects that apply
to a given client, and can be used to query all software
@@ -93,8 +93,7 @@ class Collection(list, Bcfg2.Server.Plugin.Debuggable):
#: Whether or not this Packages backend supports package groups
__package_groups__ = False
- def __init__(self, metadata, sources, cachepath, basepath, fam,
- debug=False):
+ def __init__(self, metadata, sources, cachepath, basepath, debug=False):
"""
:param metadata: The client metadata for this collection
:type metadata: Bcfg2.Server.Plugins.Metadata.ClientMetadata
@@ -111,29 +110,24 @@ class Collection(list, Bcfg2.Server.Plugin.Debuggable):
directory, where more permanent data can be
stored
:type basepath: string
- :param fam: A file monitor object to use if this Collection
- needs to monitor for file activity
- :type fam: Bcfg2.Server.FileMonitor.FileMonitor
:param debug: Enable debugging output
:type debug: bool
.. -----
.. autoattribute:: __package_groups__
"""
- Bcfg2.Server.Plugin.Debuggable.__init__(self)
+ Debuggable.__init__(self)
list.__init__(self, sources)
- self.debug_flag = debug
+ self.debug_flag = self.debug_flag or debug
self.metadata = metadata
self.basepath = basepath
self.cachepath = cachepath
self.virt_pkgs = dict()
- self.fam = fam
+ self.fam = get_fam()
try:
- self.setup = sources[0].setup
self.ptype = sources[0].ptype
except IndexError:
- self.setup = None
self.ptype = "unknown"
@property
@@ -204,19 +198,6 @@ class Collection(list, Bcfg2.Server.Plugin.Debuggable):
return sorted(list(set(groups)))
@property
- def basegroups(self):
- """ Get a list of group names used by this Collection type in
- resolution of
- :ref:`server-plugins-generators-packages-magic-groups`.
-
- The base implementation simply aggregates the results of
- :attr:`Bcfg2.Server.Plugins.Packages.Source.Source.basegroups`."""
- groups = set()
- for source in self:
- groups.update(source.basegroups)
- return list(groups)
-
- @property
def cachefiles(self):
""" A list of the full path to all cachefiles used by this
collection.
@@ -229,7 +210,7 @@ class Collection(list, Bcfg2.Server.Plugin.Debuggable):
cachefiles.add(source.cachefile)
return list(cachefiles)
- @Bcfg2.Server.Plugin.track_statistics()
+ @track_statistics()
def get_groups(self, grouplist):
""" Given a list of package group names, return a dict of
``<group name>: <list of packages>``. This method is provided
@@ -250,7 +231,7 @@ class Collection(list, Bcfg2.Server.Plugin.Debuggable):
rv[group] = self.get_group(group, ptype)
return rv
- @Bcfg2.Server.Plugin.track_statistics()
+ @track_statistics()
def get_group(self, group, ptype=None):
""" Get the list of packages of the given type in a package
group.
@@ -308,7 +289,7 @@ class Collection(list, Bcfg2.Server.Plugin.Debuggable):
return any(source.is_virtual_package(self.metadata, package)
for source in self)
- def get_deps(self, package):
+ def get_deps(self, package, recs=None):
""" Get a list of the dependencies of the given package.
The base implementation simply aggregates the results of
@@ -318,9 +299,14 @@ class Collection(list, Bcfg2.Server.Plugin.Debuggable):
:type package: string
:returns: list of strings, but see :ref:`pkg-objects`
"""
+ recommended = None
+ if recs and package in recs:
+ recommended = recs[package]
+
for source in self:
if source.is_package(self.metadata, package):
- return source.get_deps(self.metadata, package)
+ return source.get_deps(self.metadata, package, recommended)
+
return []
def get_essential(self):
@@ -386,20 +372,6 @@ class Collection(list, Bcfg2.Server.Plugin.Debuggable):
for source in self:
source.filter_unknown(unknown)
- def magic_groups_match(self):
- """ Returns True if the client's
- :ref:`server-plugins-generators-packages-magic-groups` match
- the magic groups for any of the sources contained in this
- Collection.
-
- The base implementation returns True if any source
- :func:`Bcfg2.Server.Plugins.Packages.Source.Source.magic_groups_match`
- returns True.
-
- :returns: bool
- """
- return any(s.magic_groups_match(self.metadata) for s in self)
-
def build_extra_structures(self, independent):
""" Add additional entries to the ``<Independent/>`` section
of the final configuration. This can be used to handle, e.g.,
@@ -476,9 +448,7 @@ class Collection(list, Bcfg2.Server.Plugin.Debuggable):
"""
for pkg in pkglist:
lxml.etree.SubElement(entry, 'BoundPackage', name=pkg,
- version=self.setup.cfp.get("packages",
- "version",
- default="auto"),
+ version=Bcfg2.Options.setup.packages_version,
type=self.ptype, origin='Packages')
def get_new_packages(self, initial, complete):
@@ -499,8 +469,9 @@ class Collection(list, Bcfg2.Server.Plugin.Debuggable):
"""
return list(complete.difference(initial))
- @Bcfg2.Server.Plugin.track_statistics()
- def complete(self, packagelist): # pylint: disable=R0912,R0914
+ @track_statistics()
+ def complete(self, packagelist, # pylint: disable=R0912,R0914
+ recommended=None):
""" Build a complete list of all packages and their dependencies.
:param packagelist: Set of initial packages computed from the
@@ -564,7 +535,7 @@ class Collection(list, Bcfg2.Server.Plugin.Debuggable):
self.debug_log("Packages: handling package requirement %s" %
(current,))
packages.add(current)
- deps = self.get_deps(current)
+ deps = self.get_deps(current, recommended)
newdeps = set(deps).difference(examined)
if newdeps:
self.debug_log("Packages: Package %s added requirements %s"
@@ -630,22 +601,8 @@ def get_collection_class(source_type):
:type source_type: string
:returns: type - the Collection subclass that should be used to
instantiate an object to contain sources of the given type. """
- modname = "Bcfg2.Server.Plugins.Packages.%s" % source_type.title()
- try:
- module = sys.modules[modname]
- except KeyError:
- try:
- module = __import__(modname).Server.Plugins.Packages
- except ImportError:
- msg = "Packages: Unknown source type %s" % source_type
- LOGGER.error(msg)
- raise Bcfg2.Server.Plugin.PluginExecutionError(msg)
-
- try:
- cclass = getattr(module, source_type.title() + "Collection")
- except AttributeError:
- msg = "Packages: No collection class found for %s sources" % \
- source_type
- LOGGER.error(msg)
- raise Bcfg2.Server.Plugin.PluginExecutionError(msg)
- return cclass
+ for mod in Bcfg2.Options.setup.packages_backends:
+ if mod.__name__.endswith(".%s" % source_type.title()):
+ return getattr(mod, "%sCollection" % source_type.title())
+ raise Bcfg2.Server.Plugin.PluginExecutionError(
+ "Packages: No collection class found for %s sources" % source_type)
diff --git a/src/lib/Bcfg2/Server/Plugins/Packages/Pac.py b/src/lib/Bcfg2/Server/Plugins/Packages/Pac.py
index 99aed5ce5..0e15d2e15 100644
--- a/src/lib/Bcfg2/Server/Plugins/Packages/Pac.py
+++ b/src/lib/Bcfg2/Server/Plugins/Packages/Pac.py
@@ -12,8 +12,7 @@ class PacCollection(Collection):
overrides nothing, and defers all operations to :class:`PacSource`
"""
- def __init__(self, metadata, sources, cachepath, basepath, fam,
- debug=False):
+ def __init__(self, metadata, sources, cachepath, basepath, debug=False):
# we define an __init__ that just calls the parent __init__,
# so that we can set the docstring on __init__ to something
# different from the parent __init__ -- namely, the parent
@@ -21,7 +20,7 @@ class PacCollection(Collection):
# which we use to delineate the actual docs from the
# .. autoattribute hacks we have to do to get private
# attributes included in sphinx 1.0 """
- Collection.__init__(self, metadata, sources, cachepath, basepath, fam,
+ Collection.__init__(self, metadata, sources, cachepath, basepath,
debug=debug)
__init__.__doc__ = Collection.__init__.__doc__.split(".. -----")[0]
@@ -29,10 +28,6 @@ class PacCollection(Collection):
class PacSource(Source):
""" Handle Pacman sources """
- #: :ref:`server-plugins-generators-packages-magic-groups` for
- #: ``PacSource`` are "arch" and "parabola"
- basegroups = ['arch', 'parabola']
-
#: PacSource sets the ``type`` on Package entries to "pacman"
ptype = 'pacman'
@@ -72,7 +67,7 @@ class PacSource(Source):
try:
self.debug_log("Packages: try to read %s" % fname)
tar = tarfile.open(fname, "r")
- except:
+ except (IOError, tarfile.TarError):
self.logger.error("Packages: Failed to read file %s" % fname)
raise
diff --git a/src/lib/Bcfg2/Server/Plugins/Packages/PackagesSources.py b/src/lib/Bcfg2/Server/Plugins/Packages/PackagesSources.py
index c47e18201..1af046ec0 100644
--- a/src/lib/Bcfg2/Server/Plugins/Packages/PackagesSources.py
+++ b/src/lib/Bcfg2/Server/Plugins/Packages/PackagesSources.py
@@ -4,12 +4,12 @@
import os
import sys
import Bcfg2.Server.Plugin
+from Bcfg2.Server.Statistics import track_statistics
from Bcfg2.Server.Plugins.Packages.Source import SourceInitError
# pylint: disable=E0012,R0924
-class PackagesSources(Bcfg2.Server.Plugin.StructFile,
- Bcfg2.Server.Plugin.Debuggable):
+class PackagesSources(Bcfg2.Server.Plugin.StructFile):
""" PackagesSources handles parsing of the
:mod:`Bcfg2.Server.Plugins.Packages` ``sources.xml`` file, and the
creation of the appropriate
@@ -19,7 +19,7 @@ class PackagesSources(Bcfg2.Server.Plugin.StructFile,
__identifier__ = None
create = "Sources"
- def __init__(self, filename, cachepath, fam, packages, setup):
+ def __init__(self, filename, cachepath, packages):
"""
:param filename: The full path to ``sources.xml``
:type filename: string
@@ -27,21 +27,15 @@ class PackagesSources(Bcfg2.Server.Plugin.StructFile,
:class:`Bcfg2.Server.Plugins.Packages.Source.Source`
data will be cached
:type cachepath: string
- :param fam: The file access monitor to use to create watches
- on ``sources.xml`` and any XIncluded files.
- :type fam: Bcfg2.Server.FileMonitor.FileMonitor
:param packages: The Packages plugin object ``sources.xml`` is
being parsed on behalf of (i.e., the calling
object)
:type packages: Bcfg2.Server.Plugins.Packages.Packages
- :param setup: A Bcfg2 options dict
- :type setup: dict
:raises: :class:`Bcfg2.Server.Plugin.exceptions.PluginInitError` -
If ``sources.xml`` cannot be read
"""
- Bcfg2.Server.Plugin.Debuggable.__init__(self)
- Bcfg2.Server.Plugin.StructFile.__init__(self, filename, fam=fam,
+ Bcfg2.Server.Plugin.StructFile.__init__(self, filename,
should_monitor=True)
#: The full path to the directory where
@@ -57,8 +51,6 @@ class PackagesSources(Bcfg2.Server.Plugin.StructFile,
err = sys.exc_info()[1]
self.logger.error("Could not create Packages cache at %s: %s" %
(self.cachepath, err))
- #: The Bcfg2 options dict
- self.setup = setup
#: The :class:`Bcfg2.Server.Plugins.Packages.Packages` that
#: instantiated this ``PackagesSources`` object
@@ -72,10 +64,9 @@ class PackagesSources(Bcfg2.Server.Plugin.StructFile,
self.parsed = set()
def set_debug(self, debug):
- Bcfg2.Server.Plugin.Debuggable.set_debug(self, debug)
+ Bcfg2.Server.Plugin.StructFile.set_debug(self, debug)
for source in self.entries:
source.set_debug(debug)
- set_debug.__doc__ = Bcfg2.Server.Plugin.Plugin.set_debug.__doc__
def HandleEvent(self, event=None):
""" HandleEvent is called whenever the FAM registers an event.
@@ -106,7 +97,7 @@ class PackagesSources(Bcfg2.Server.Plugin.StructFile,
load its data. """
return sorted(list(self.parsed)) == sorted(self.extras)
- @Bcfg2.Server.Plugin.track_statistics()
+ @track_statistics()
def Index(self):
Bcfg2.Server.Plugin.StructFile.Index(self)
self.entries = []
@@ -120,7 +111,7 @@ class PackagesSources(Bcfg2.Server.Plugin.StructFile,
``Index`` is responsible for calling :func:`source_from_xml`
for each ``Source`` tag in each file. """
- @Bcfg2.Server.Plugin.track_statistics()
+ @track_statistics()
def source_from_xml(self, xsource):
""" Create a
:class:`Bcfg2.Server.Plugins.Packages.Source.Source` subclass
@@ -141,19 +132,17 @@ class PackagesSources(Bcfg2.Server.Plugin.StructFile,
xsource.get("url"))))
return None
- try:
- module = getattr(__import__("Bcfg2.Server.Plugins.Packages.%s" %
- stype.title()).Server.Plugins.Packages,
- stype.title())
- cls = getattr(module, "%sSource" % stype.title())
- except (ImportError, AttributeError):
- err = sys.exc_info()[1]
- self.logger.error("Packages: Unknown source type %s (%s)" % (stype,
- err))
+ cls = None
+ for mod in Bcfg2.Options.setup.packages_backends:
+ if mod.__name__.endswith(".%s" % stype.title()):
+ cls = getattr(mod, "%sSource" % stype.title())
+ break
+ else:
+ self.logger.error("Packages: Unknown source type %s" % stype)
return None
try:
- source = cls(self.cachepath, xsource, self.setup)
+ source = cls(self.cachepath, xsource)
except SourceInitError:
err = sys.exc_info()[1]
self.logger.error("Packages: %s" % err)
diff --git a/src/lib/Bcfg2/Server/Plugins/Packages/Pkgng.py b/src/lib/Bcfg2/Server/Plugins/Packages/Pkgng.py
new file mode 100644
index 000000000..736cdcdd4
--- /dev/null
+++ b/src/lib/Bcfg2/Server/Plugins/Packages/Pkgng.py
@@ -0,0 +1,86 @@
+""" pkgng backend for :mod:`Bcfg2.Server.Plugins.Packages` """
+
+import lzma
+import tarfile
+
+try:
+ import json
+ # py2.4 json library is structured differently
+ json.loads # pylint: disable=W0104
+except (ImportError, AttributeError):
+ import simplejson as json
+
+from Bcfg2.Server.Plugins.Packages.Collection import Collection
+from Bcfg2.Server.Plugins.Packages.Source import Source
+
+
+class PkgngCollection(Collection):
+ """ Handle collections of pkgng sources. This is a no-op object
+ that simply inherits from
+ :class:`Bcfg2.Server.Plugins.Packages.Collection.Collection`,
+    overrides nothing, and defers all operations to :class:`PkgngSource`
+ """
+
+ def __init__(self, metadata, sources, cachepath, basepath, debug=False):
+ # we define an __init__ that just calls the parent __init__,
+ # so that we can set the docstring on __init__ to something
+ # different from the parent __init__ -- namely, the parent
+ # __init__ docstring, minus everything after ``.. -----``,
+ # which we use to delineate the actual docs from the
+ # .. autoattribute hacks we have to do to get private
+ # attributes included in sphinx 1.0 """
+ Collection.__init__(self, metadata, sources, cachepath, basepath,
+ debug=debug)
+ __init__.__doc__ = Collection.__init__.__doc__.split(".. -----")[0]
+
+
+class PkgngSource(Source):
+ """ Handle pkgng sources """
+
+ #: PkgngSource sets the ``type`` on Package entries to "pkgng"
+ ptype = 'pkgng'
+
+ @property
+ def urls(self):
+ """ A list of URLs to the base metadata file for each
+ repository described by this source. """
+ if not self.rawurl:
+ rv = []
+ for part in self.components:
+ for arch in self.arches:
+ rv.append("%s/freebsd:%s:%s/%s/packagesite.txz" %
+ (self.url, self.version, arch, part))
+ return rv
+ else:
+ return ["%s/packagesite.txz" % self.rawurl]
+
+ def read_files(self):
+ bdeps = dict()
+ for fname in self.files:
+ if not self.rawurl:
+ abi = [x
+ for x in fname.split('@')
+ if x.startswith('freebsd:')][0][8:]
+ barch = ':'.join(abi.split(':')[1:])
+ else:
+ # RawURL entries assume that they only have one <Arch></Arch>
+ # element and that it is the architecture of the source.
+ barch = self.arches[0]
+ if barch not in bdeps:
+ bdeps[barch] = dict()
+ try:
+ tar = tarfile.open(fileobj=lzma.LZMAFile(fname))
+ reader = tar.extractfile('packagesite.yaml')
+ except (IOError, tarfile.TarError):
+ self.logger.error("Packages: Failed to read file %s" % fname)
+ raise
+ for line in reader.readlines():
+ if not isinstance(line, str):
+ line = line.decode('utf-8')
+ pkg = json.loads(line)
+ pkgname = pkg['name']
+ self.pkgnames.add(pkgname)
+ if 'deps' in pkg:
+ bdeps[barch][pkgname] = pkg['deps'].keys()
+ self.process_files(bdeps, dict())
+ read_files.__doc__ = Source.read_files.__doc__
diff --git a/src/lib/Bcfg2/Server/Plugins/Packages/Source.py b/src/lib/Bcfg2/Server/Plugins/Packages/Source.py
index 7aa688f12..86f7698f7 100644
--- a/src/lib/Bcfg2/Server/Plugins/Packages/Source.py
+++ b/src/lib/Bcfg2/Server/Plugins/Packages/Source.py
@@ -27,7 +27,6 @@ in your ``Source`` subclass:
* :func:`Source.urls`
* :func:`Source.read_files`
-* :attr:`Source.basegroups`
Additionally, you may want to consider overriding the following
methods and attributes:
@@ -50,10 +49,11 @@ in your ``Source`` subclass. For an example of this kind of
import os
import re
import sys
-import Bcfg2.Server.Plugin
+from Bcfg2.Logger import Debuggable
from Bcfg2.Compat import HTTPError, HTTPBasicAuthHandler, \
HTTPPasswordMgrWithDefaultRealm, install_opener, build_opener, urlopen, \
cPickle, md5
+from Bcfg2.Server.Statistics import track_statistics
def fetch_url(url):
@@ -92,7 +92,7 @@ class SourceInitError(Exception):
REPO_RE = re.compile(r'(?:pulp/repos/|/RPMS\.|/)([^/]+)/?$')
-class Source(Bcfg2.Server.Plugin.Debuggable): # pylint: disable=R0902
+class Source(Debuggable): # pylint: disable=R0902
""" ``Source`` objects represent a single <Source> tag in
``sources.xml``. Note that a single Source tag can itself
describe multiple repositories (if it uses the "url" attribute
@@ -106,28 +106,21 @@ class Source(Bcfg2.Server.Plugin.Debuggable): # pylint: disable=R0902
those features.
"""
- #: The list of
- #: :ref:`server-plugins-generators-packages-magic-groups` that
- #: make sources of this type available to clients.
- basegroups = []
-
#: The Package type handled by this Source class. The ``type``
#: attribute of Package entries will be set to the value ``ptype``
#: when they are handled by :mod:`Bcfg2.Server.Plugins.Packages`.
ptype = None
- def __init__(self, basepath, xsource, setup): # pylint: disable=R0912
+ def __init__(self, basepath, xsource): # pylint: disable=R0912
"""
:param basepath: The base filesystem path under which cache
data for this source should be stored
:type basepath: string
:param xsource: The XML tag that describes this source
:type source: lxml.etree._Element
- :param setup: A Bcfg2 options dict
- :type setup: dict
:raises: :class:`Bcfg2.Server.Plugins.Packages.Source.SourceInitError`
"""
- Bcfg2.Server.Plugin.Debuggable.__init__(self)
+ Debuggable.__init__(self)
#: The base filesystem path under which cache data for this
#: source should be stored
@@ -136,9 +129,6 @@ class Source(Bcfg2.Server.Plugin.Debuggable): # pylint: disable=R0902
#: The XML tag that describes this source
self.xsource = xsource
- #: A Bcfg2 options dict
- self.setup = setup
-
#: A set of package names that are deemed "essential" by this
#: source
self.essentialpkgs = set()
@@ -230,6 +220,10 @@ class Source(Bcfg2.Server.Plugin.Debuggable): # pylint: disable=R0902
#: :class:`Bcfg2.Server.Plugins.Packages.Collection.Collection`
self.provides = dict()
+ #: A dict of ``<package name>`` -> ``<list of recommended
+ #: symbols>``. This will not necessarily be populated.
+ self.recommends = dict()
+
self._init_attributes(xsource)
#: The file (or directory) used for this source's cache data
@@ -348,8 +342,7 @@ class Source(Bcfg2.Server.Plugin.Debuggable): # pylint: disable=R0902
:return: list of strings - group names
"""
return sorted(list(set([g for g in metadata.groups
- if (g in self.basegroups or
- g in self.groups or
+ if (g in self.groups or
g in self.arches)])))
def load_state(self):
@@ -361,7 +354,7 @@ class Source(Bcfg2.Server.Plugin.Debuggable): # pylint: disable=R0902
:raises: cPickle.UnpicklingError - If the saved data is corrupt """
data = open(self.cachefile, 'rb')
(self.pkgnames, self.deps, self.provides,
- self.essentialpkgs) = cPickle.load(data)
+ self.essentialpkgs, self.recommends) = cPickle.load(data)
def save_state(self):
""" Save state to :attr:`cachefile`. If caching and
@@ -369,15 +362,16 @@ class Source(Bcfg2.Server.Plugin.Debuggable): # pylint: disable=R0902
does not need to be implemented. """
cache = open(self.cachefile, 'wb')
cPickle.dump((self.pkgnames, self.deps, self.provides,
- self.essentialpkgs), cache, 2)
+ self.essentialpkgs, self.recommends), cache, 2)
cache.close()
- @Bcfg2.Server.Plugin.track_statistics()
+ @track_statistics()
def setup_data(self, force_update=False):
- """ Perform all data fetching and setup tasks. For most
- backends, this involves downloading all metadata from the
- repository, parsing it, and caching the parsed data locally.
- The order of operations is:
+ """Perform all data fetching and setup tasks.
+
+ For most backends, this involves downloading all metadata from
+ the repository, parsing it, and caching the parsed data
+ locally. The order of operations is:
#. Call :func:`load_state` to try to load data from the local
cache.
@@ -395,6 +389,15 @@ class Source(Bcfg2.Server.Plugin.Debuggable): # pylint: disable=R0902
upstream repository.
:type force_update: bool
"""
+ # there are a bunch of wildcard except statements here,
+ # because the various functions called herein (``load_state``,
+ # ``read_files``, ``update``) are defined entirely by the
+ # Packages plugins that implement them.
+ #
+ # TODO: we should define an exception subclass that each of
+ # these functions can raise when an *expected* error condition
+ # is encountered.
+ #
# pylint: disable=W0702
if not force_update:
if os.path.exists(self.cachefile):
@@ -569,13 +572,14 @@ class Source(Bcfg2.Server.Plugin.Debuggable): # pylint: disable=R0902
as its final step."""
pass
- def process_files(self, dependencies, provides):
+ def process_files(self, dependencies, # pylint: disable=R0912,W0102
+ provides, recommends=dict()):
""" Given dicts of depends and provides generated by
:func:`read_files`, this generates :attr:`deps` and
:attr:`provides` and calls :func:`save_state` to save the
cached data to disk.
- Both arguments are dicts of dicts of lists. Keys are the
+ All arguments are dicts of dicts of lists. Keys are the
arches of packages contained in this source; values are dicts
whose keys are package names and values are lists of either
dependencies for each package the symbols provided by each
@@ -587,14 +591,20 @@ class Source(Bcfg2.Server.Plugin.Debuggable): # pylint: disable=R0902
:param provides: A dict of symbols provided by packages in
this repository.
:type provides: dict; see above.
+ :param recommends: A dict of recommended dependencies
+ found for this source.
+ :type recommends: dict; see above.
"""
self.deps['global'] = dict()
+ self.recommends['global'] = dict()
self.provides['global'] = dict()
for barch in dependencies:
self.deps[barch] = dict()
+ self.recommends[barch] = dict()
self.provides[barch] = dict()
for pkgname in self.pkgnames:
pset = set()
+ rset = set()
for barch in dependencies:
if pkgname not in dependencies[barch]:
dependencies[barch][pkgname] = []
@@ -604,6 +614,18 @@ class Source(Bcfg2.Server.Plugin.Debuggable): # pylint: disable=R0902
else:
for barch in dependencies:
self.deps[barch][pkgname] = dependencies[barch][pkgname]
+
+ for barch in recommends:
+ if pkgname not in recommends[barch]:
+ recommends[barch][pkgname] = []
+ rset.add(tuple(recommends[barch][pkgname]))
+ if len(rset) == 1:
+ self.recommends['global'][pkgname] = rset.pop()
+ else:
+ for barch in recommends:
+ self.recommends[barch][pkgname] = \
+ recommends[barch][pkgname]
+
provided = set()
for bprovided in list(provides.values()):
provided.update(set(bprovided))
@@ -681,16 +703,15 @@ class Source(Bcfg2.Server.Plugin.Debuggable): # pylint: disable=R0902
def applies(self, metadata):
""" Return true if this source applies to the given client,
- i.e., the client is in all necessary groups and
- :ref:`server-plugins-generators-packages-magic-groups`.
+ i.e., the client is in all necessary groups.
:param metadata: The client metadata to check to see if this
source applies
:type metadata: Bcfg2.Server.Plugins.Metadata.ClientMetadata
:returns: bool
"""
- # check base groups
- if not self.magic_groups_match(metadata):
+ # check arch groups
+ if not self.arch_groups_match(metadata):
return False
# check Group/Client tags from sources.xml
@@ -712,17 +733,24 @@ class Source(Bcfg2.Server.Plugin.Debuggable): # pylint: disable=R0902
"""
return ['global'] + [a for a in self.arches if a in metadata.groups]
- def get_deps(self, metadata, package):
+ def get_deps(self, metadata, package, recommended=None):
""" Get a list of the dependencies of the given package.
:param package: The name of the symbol
:type package: string
:returns: list of strings
"""
+ recs = []
+ if ((recommended is None and self.recommended) or
+ (recommended and recommended.lower() == 'true')):
+ for arch in self.get_arches(metadata):
+ if package in self.recommends[arch]:
+ recs.extend(self.recommends[arch][package])
+
for arch in self.get_arches(metadata):
if package in self.deps[arch]:
- return self.deps[arch][package]
- return []
+ recs.extend(self.deps[arch][package])
+ return recs
def get_provides(self, metadata, package):
""" Get a list of all symbols provided by the given package.
@@ -761,29 +789,13 @@ class Source(Bcfg2.Server.Plugin.Debuggable): # pylint: disable=R0902
"""
return []
- def magic_groups_match(self, metadata):
- """ Returns True if the client's
- :ref:`server-plugins-generators-packages-magic-groups` match
- the magic groups this source. Also returns True if magic
- groups are off in the configuration and the client's
- architecture matches (i.e., architecture groups are *always*
- checked).
+ def arch_groups_match(self, metadata):
+ """ Returns True if the client is in an arch group that
+ matches the arch of this source.
:returns: bool
"""
- found_arch = False
for arch in self.arches:
if arch in metadata.groups:
- found_arch = True
- break
- if not found_arch:
- return False
-
- if not self.setup.cfp.getboolean("packages", "magic_groups",
- default=False):
- return True
- else:
- for group in self.basegroups:
- if group in metadata.groups:
- return True
- return False
+ return True
+ return False
diff --git a/src/lib/Bcfg2/Server/Plugins/Packages/Yum.py b/src/lib/Bcfg2/Server/Plugins/Packages/Yum.py
index b877e7541..14d6db8a0 100644
--- a/src/lib/Bcfg2/Server/Plugins/Packages/Yum.py
+++ b/src/lib/Bcfg2/Server/Plugins/Packages/Yum.py
@@ -60,8 +60,10 @@ import socket
import logging
import lxml.etree
import Bcfg2.Server.Plugin
+import Bcfg2.Server.FileMonitor
from lockfile import FileLock
from Bcfg2.Utils import Executor
+from distutils.spawn import find_executable # pylint: disable=E0611
# pylint: disable=W0622
from Bcfg2.Compat import StringIO, cPickle, HTTPError, URLError, \
ConfigParser, any
@@ -69,6 +71,7 @@ from Bcfg2.Compat import StringIO, cPickle, HTTPError, URLError, \
from Bcfg2.Server.Plugins.Packages.Collection import Collection
from Bcfg2.Server.Plugins.Packages.Source import SourceInitError, Source, \
fetch_url
+from Bcfg2.Server.Statistics import track_statistics
LOGGER = logging.getLogger(__name__)
@@ -107,13 +110,36 @@ PULPSERVER = None
PULPCONFIG = None
-def _setup_pulp(setup):
+options = [ # pylint: disable=C0103
+ Bcfg2.Options.Common.client_timeout,
+ Bcfg2.Options.PathOption(
+ cf=("packages:yum", "helper"), dest="yum_helper",
+ help="Path to the bcfg2-yum-helper executable"),
+ Bcfg2.Options.BooleanOption(
+ cf=("packages:yum", "use_yum_libraries"),
+ help="Use Python yum libraries"),
+ Bcfg2.Options.PathOption(
+ cf=("packages:yum", "gpg_keypath"), default="/etc/pki/rpm-gpg",
+ help="GPG key path on the client"),
+ Bcfg2.Options.Option(
+ cf=("packages:yum", "*"), dest="yum_options",
+ help="Other yum options to include in generated yum configs")]
+if HAS_PULP:
+ options.append(
+ Bcfg2.Options.Option(
+ cf=("packages:pulp", "username"), dest="pulp_username",
+ help="Username for Pulp authentication"))
+ options.append(
+ Bcfg2.Options.Option(
+ cf=("packages:pulp", "password"), dest="pulp_password",
+ help="Password for Pulp authentication"))
+
+
+def _setup_pulp():
""" Connect to a Pulp server and pass authentication credentials.
This only needs to be called once, but multiple calls won't hurt
anything.
- :param setup: A Bcfg2 options dict
- :type setup: dict
:returns: :class:`pulp.client.api.server.PulpServer`
"""
global PULPSERVER, PULPCONFIG
@@ -124,19 +150,6 @@ def _setup_pulp(setup):
raise Bcfg2.Server.Plugin.PluginInitError(msg)
if PULPSERVER is None:
- try:
- username = setup.cfp.get("packages:pulp", "username")
- password = setup.cfp.get("packages:pulp", "password")
- except ConfigParser.NoSectionError:
- msg = "Packages: No [pulp] section found in bcfg2.conf"
- LOGGER.error(msg)
- raise Bcfg2.Server.Plugin.PluginInitError(msg)
- except ConfigParser.NoOptionError:
- msg = "Packages: Required option not found in bcfg2.conf: %s" % \
- sys.exc_info()[1]
- LOGGER.error(msg)
- raise Bcfg2.Server.Plugin.PluginInitError(msg)
-
PULPCONFIG = ConsumerConfig()
serveropts = PULPCONFIG.server
@@ -144,7 +157,9 @@ def _setup_pulp(setup):
int(serveropts['port']),
serveropts['scheme'],
serveropts['path'])
- PULPSERVER.set_basic_auth_credentials(username, password)
+ PULPSERVER.set_basic_auth_credentials(
+ Bcfg2.Options.setup.pulp_username,
+ Bcfg2.Options.setup.pulp_password)
server.set_active_server(PULPSERVER)
return PULPSERVER
@@ -175,7 +190,7 @@ class PulpCertificateSet(Bcfg2.Server.Plugin.EntrySet):
#: The path to certificates on consumer machines
certpath = "/etc/pki/consumer/cert.pem"
- def __init__(self, path, fam):
+ def __init__(self, path):
"""
:param path: The path to the directory where Pulp consumer
certificates will be stored
@@ -193,7 +208,7 @@ class PulpCertificateSet(Bcfg2.Server.Plugin.EntrySet):
important='true',
sensitive='true',
paranoid=self.metadata['paranoid'])
- self.fam = fam
+ self.fam = Bcfg2.Server.FileMonitor.get_fam()
self.fam.AddMonitor(path, self)
def HandleEvent(self, event):
@@ -274,9 +289,8 @@ class YumCollection(Collection):
#: :class:`PulpCertificateSet` object used to handle Pulp certs
pulp_cert_set = None
- def __init__(self, metadata, sources, cachepath, basepath, fam,
- debug=False):
- Collection.__init__(self, metadata, sources, cachepath, basepath, fam,
+ def __init__(self, metadata, sources, cachepath, basepath, debug=False):
+ Collection.__init__(self, metadata, sources, cachepath, basepath,
debug=debug)
self.keypath = os.path.join(self.cachepath, "keys")
@@ -297,13 +311,15 @@ class YumCollection(Collection):
if not os.path.exists(self.cachefile):
self.debug_log("Creating common cache %s" % self.cachefile)
os.mkdir(self.cachefile)
- if not self.disableMetaData:
+ if Bcfg2.Options.setup.packages_metadata:
self.setup_data()
+ self.cmd = Executor()
else:
self.cachefile = None
+ self.cmd = None
if HAS_PULP and self.has_pulp_sources:
- _setup_pulp(self.setup)
+ _setup_pulp()
if self.pulp_cert_set is None:
certdir = os.path.join(
self.basepath,
@@ -319,28 +335,7 @@ class YumCollection(Collection):
self.logger.error("Could not create Pulp consumer "
"cert directory at %s: %s" %
(certdir, err))
- self.__class__.pulp_cert_set = PulpCertificateSet(certdir,
- self.fam)
-
- @property
- def disableMetaData(self): # pylint: disable=C0103
- """ Report whether or not metadata processing is enabled.
- This duplicates code in Packages/__init__.py, and can probably
- be removed in Bcfg2 1.4 when we have a module-level setup
- object. """
- if self.setup is None:
- return True
- try:
- return not self.setup.cfp.getboolean("packages", "resolver")
- except (ConfigParser.NoSectionError, ConfigParser.NoOptionError):
- return False
- except ValueError:
- # for historical reasons we also accept "enabled" and
- # "disabled"
- return self.setup.cfp.get(
- "packages",
- "metadata",
- default="enabled").lower() == "disabled"
+ self.__class__.pulp_cert_set = PulpCertificateSet(certdir)
@property
def __package_groups__(self):
@@ -348,24 +343,18 @@ class YumCollection(Collection):
@property
def helper(self):
- """ The full path to :file:`bcfg2-yum-helper`. First, we
- check in the config file to see if it has been explicitly
- specified; next we see if it's in $PATH (which we do by making
- a call to it; I wish there was a way to do this without
- forking, but apparently not); finally we check in /usr/sbin,
- the default location. """
+ """The full path to :file:`bcfg2-yum-helper`. First, we check in the
+ config file to see if it has been explicitly specified; next
+ we see if it's in $PATH; finally we default to /usr/sbin, the
+ default location. """
# pylint: disable=W0212
- if not self.__class__._helper:
- try:
- self.__class__._helper = self.setup.cfp.get("packages:yum",
- "helper")
- except (ConfigParser.NoOptionError, ConfigParser.NoSectionError):
+ if not self._helper:
+ self.__class__._helper = Bcfg2.Options.setup.yum_helper
+ if not self.__class__._helper:
# first see if bcfg2-yum-helper is in PATH
- try:
- self.debug_log("Checking for bcfg2-yum-helper in $PATH")
- self.cmd.run(['bcfg2-yum-helper'])
- self.__class__._helper = 'bcfg2-yum-helper'
- except OSError:
+ self.debug_log("Checking for bcfg2-yum-helper in $PATH")
+ self.__class__._helper = find_executable('bcfg2-yum-helper')
+ if not self.__class__._helper:
self.__class__._helper = "/usr/sbin/bcfg2-yum-helper"
return self.__class__._helper
# pylint: enable=W0212
@@ -374,9 +363,7 @@ class YumCollection(Collection):
def use_yum(self):
""" True if we should use the yum Python libraries, False
otherwise """
- return HAS_YUM and self.setup.cfp.getboolean("packages:yum",
- "use_yum_libraries",
- default=False)
+ return HAS_YUM and Bcfg2.Options.setup.use_yum_libraries
@property
def has_pulp_sources(self):
@@ -393,7 +380,7 @@ class YumCollection(Collection):
cachefiles.add(self.cachefile)
return list(cachefiles)
- @Bcfg2.Server.Plugin.track_statistics()
+ @track_statistics()
def write_config(self):
""" Write the server-side config file to :attr:`cfgfile` based
on the data from :func:`get_config`"""
@@ -412,15 +399,15 @@ class YumCollection(Collection):
debuglevel="0",
sslverify="0",
reposdir="/dev/null")
- if self.setup['debug']:
+ if Bcfg2.Options.setup.debug:
mainopts['debuglevel'] = "5"
- elif self.setup['verbose']:
+ elif Bcfg2.Options.setup.verbose:
mainopts['debuglevel'] = "2"
try:
- for opt in self.setup.cfp.options("packages:yum"):
+ for opt, val in Bcfg2.Options.setup.yum_options.items():
if opt not in self.option_blacklist:
- mainopts[opt] = self.setup.cfp.get("packages:yum", opt)
+ mainopts[opt] = val
except ConfigParser.NoSectionError:
pass
@@ -515,7 +502,7 @@ class YumCollection(Collection):
return "# This config was generated automatically by the Bcfg2 " \
"Packages plugin\n\n" + buf.getvalue()
- @Bcfg2.Server.Plugin.track_statistics()
+ @track_statistics()
def build_extra_structures(self, independent):
""" Add additional entries to the ``<Independent/>`` section
of the final configuration. This adds several kinds of
@@ -561,8 +548,7 @@ class YumCollection(Collection):
for key in needkeys:
# figure out the path of the key on the client
- keydir = self.setup.cfp.get("global", "gpg_keypath",
- default="/etc/pki/rpm-gpg")
+ keydir = Bcfg2.Options.setup.gpg_keypath
remotekey = os.path.join(keydir, os.path.basename(key))
localkey = os.path.join(self.keypath, os.path.basename(key))
kdata = open(localkey).read()
@@ -607,7 +593,7 @@ class YumCollection(Collection):
# each pulp source can only have one arch, so we don't
# have to check the arch in url_map
if (source.pulp_id and
- source.pulp_id not in consumer['repoids']):
+ source.pulp_id not in consumer['repoids']):
try:
consumerapi.bind(self.metadata.hostname,
source.pulp_id)
@@ -622,7 +608,7 @@ class YumCollection(Collection):
name=self.pulp_cert_set.certpath)
self.pulp_cert_set.bind_entry(crt, self.metadata)
- @Bcfg2.Server.Plugin.track_statistics()
+ @track_statistics()
def _get_pulp_consumer(self, consumerapi=None):
""" Get a Pulp consumer object for the client.
@@ -645,13 +631,9 @@ class YumCollection(Collection):
err = sys.exc_info()[1]
self.logger.error("Packages: Could not contact Pulp server: %s" %
err)
- except:
- err = sys.exc_info()[1]
- self.logger.error("Packages: Unknown error querying Pulp server: "
- "%s" % err)
return consumer
- @Bcfg2.Server.Plugin.track_statistics()
+ @track_statistics()
def _add_gpg_instances(self, keyentry, localkey, remotekey, keydata=None):
""" Add GPG keys instances to a ``Package`` entry. This is
called from :func:`build_extra_structures` to add GPG keys to
@@ -694,7 +676,7 @@ class YumCollection(Collection):
self.logger.error("Packages: Could not read GPG key %s: %s" %
(localkey, err))
- @Bcfg2.Server.Plugin.track_statistics()
+ @track_statistics()
def get_groups(self, grouplist):
""" If using the yum libraries, given a list of package group
names, return a dict of ``<group name>: <list of packages>``.
@@ -781,8 +763,7 @@ class YumCollection(Collection):
""" Given a package tuple, return a dict of attributes
suitable for applying to either a Package or an Instance
tag """
- attrs = dict(version=self.setup.cfp.get("packages", "version",
- default="auto"))
+ attrs = dict(version=Bcfg2.Options.setup.packages_version)
if attrs['version'] == 'any' or not isinstance(pkgtup, tuple):
return attrs
@@ -871,8 +852,8 @@ class YumCollection(Collection):
new.append(pkg)
return new
- @Bcfg2.Server.Plugin.track_statistics()
- def complete(self, packagelist):
+ @track_statistics()
+ def complete(self, packagelist, recommended=None):
""" Build a complete list of all packages and their dependencies.
When using the Python yum libraries, this defers to the
@@ -890,7 +871,7 @@ class YumCollection(Collection):
resolved.
"""
if not self.use_yum:
- return Collection.complete(self, packagelist)
+ return Collection.complete(self, packagelist, recommended)
lock = FileLock(os.path.join(self.cachefile, "lock"))
slept = 0
@@ -925,7 +906,7 @@ class YumCollection(Collection):
else:
return set(), set()
- @Bcfg2.Server.Plugin.track_statistics()
+ @track_statistics()
def call_helper(self, command, inputdata=None):
""" Make a call to :ref:`bcfg2-yum-helper`. The yum libs have
horrific memory leaks, so apparently the right way to get
@@ -943,22 +924,20 @@ class YumCollection(Collection):
``bcfg2-yum-helper`` command.
"""
cmd = [self.helper, "-c", self.cfgfile]
- if self.setup['verbose']:
+ if Bcfg2.Options.setup.verbose:
cmd.append("-v")
if self.debug_flag:
- if not self.setup['verbose']:
- # ensure that running in debug gets -vv, even if
- # verbose is not enabled
- cmd.append("-v")
- cmd.append("-v")
+ cmd.append("-d")
cmd.append(command)
self.debug_log("Packages: running %s" % " ".join(cmd))
if inputdata:
- result = self.cmd.run(cmd, timeout=self.setup['client_timeout'],
+ result = self.cmd.run(cmd,
+ timeout=Bcfg2.Options.setup.client_timeout,
inputdata=json.dumps(inputdata))
else:
- result = self.cmd.run(cmd, timeout=self.setup['client_timeout'])
+ result = self.cmd.run(cmd,
+ timeout=Bcfg2.Options.setup.client_timeout)
if not result.success:
self.logger.error("Packages: error running bcfg2-yum-helper: %s" %
result.error)
@@ -1021,14 +1000,10 @@ class YumCollection(Collection):
class YumSource(Source):
""" Handle yum sources """
- #: :ref:`server-plugins-generators-packages-magic-groups` for
- #: ``YumSource`` are "yum", "redhat", "centos", and "fedora"
- basegroups = ['yum', 'redhat', 'centos', 'fedora']
-
#: YumSource sets the ``type`` on Package entries to "yum"
ptype = 'yum'
- def __init__(self, basepath, xsource, setup):
+ def __init__(self, basepath, xsource):
self.filemap = dict()
self.file_to_arch = dict()
self.needed_paths = set()
@@ -1037,7 +1012,7 @@ class YumSource(Source):
self.pulp_id = None
self.repo = None
- Source.__init__(self, basepath, xsource, setup)
+ Source.__init__(self, basepath, xsource)
__init__.__doc__ = Source.__init__.__doc__
def _init_attributes(self, xsource):
@@ -1046,7 +1021,7 @@ class YumSource(Source):
if HAS_PULP and xsource.get("pulp_id"):
self.pulp_id = xsource.get("pulp_id")
- _setup_pulp(self.setup)
+ _setup_pulp()
repoapi = RepositoryAPI()
try:
self.repo = repoapi.repository(self.pulp_id)
@@ -1067,10 +1042,6 @@ class YumSource(Source):
err = sys.exc_info()[1]
raise SourceInitError("Could not contact Pulp server: %s" %
err)
- except:
- err = sys.exc_info()[1]
- raise SourceInitError("Unknown error querying Pulp server: %s"
- % err)
self.rawurl = "%s/%s" % (PULPCONFIG.cds['baseurl'],
self.repo['relative_path'])
self.arches = [self.repo['arch']]
@@ -1085,9 +1056,7 @@ class YumSource(Source):
def use_yum(self):
""" True if we should use the yum Python libraries, False
otherwise """
- return HAS_YUM and self.setup.cfp.getboolean("packages:yum",
- "use_yum_libraries",
- default=False)
+ return HAS_YUM and Bcfg2.Options.setup.use_yum_libraries
def save_state(self):
""" If using the builtin yum parser, save state to
@@ -1257,7 +1226,7 @@ class YumSource(Source):
return 0
# pylint: enable=R0911,R0912
- @Bcfg2.Server.Plugin.track_statistics()
+ @track_statistics()
def read_files(self):
""" When using the builtin yum parser, read and parse locally
downloaded metadata files. This diverges from the stock
@@ -1305,7 +1274,7 @@ class YumSource(Source):
self.packages[key].difference(self.packages['global'])
self.save_state()
- @Bcfg2.Server.Plugin.track_statistics()
+ @track_statistics()
def parse_filelist(self, data, arch):
""" parse filelists.xml.gz data """
if arch not in self.filemap:
@@ -1319,7 +1288,7 @@ class YumSource(Source):
self.filemap[arch][fentry.text] = \
set([pkg.get('name')])
- @Bcfg2.Server.Plugin.track_statistics()
+ @track_statistics()
def parse_primary(self, data, arch):
""" parse primary.xml.gz data """
if arch not in self.packages:
@@ -1370,7 +1339,7 @@ class YumSource(Source):
self.provides[arch][prov] = list()
self.provides[arch][prov].append(pkgname)
- @Bcfg2.Server.Plugin.track_statistics()
+ @track_statistics()
def parse_group(self, data):
""" parse comps.xml.gz data """
for group in data.getchildren():
diff --git a/src/lib/Bcfg2/Server/Plugins/Packages/YumHelper.py b/src/lib/Bcfg2/Server/Plugins/Packages/YumHelper.py
new file mode 100644
index 000000000..b2e43bde7
--- /dev/null
+++ b/src/lib/Bcfg2/Server/Plugins/Packages/YumHelper.py
@@ -0,0 +1,399 @@
+""" Libraries for bcfg2-yum-helper plugin, used if yum library support
+is enabled. The yum libs have horrific memory leaks, so apparently
+the right way to get around that in long-running processes it to have
+a short-lived helper. No, seriously -- check out the yum-updatesd
+code. It's pure madness. """
+
+import os
+import sys
+import yum
+import logging
+import Bcfg2.Options
+import Bcfg2.Logger
+from Bcfg2.Compat import wraps
+from lockfile import FileLock, LockTimeout
+try:
+ import json
+except ImportError:
+ import simplejson as json
+
+
def pkg_to_tuple(package):
    """ JSON makes no distinction between tuples and lists, but yum
    does, so coerce a package given in list form into tuple form.
    Anything that is not a list is passed through unchanged. """
    return tuple(package) if isinstance(package, list) else package
+
+
def pkgtup_to_string(package):
    """ Given a package tuple of the form
    ``(name, arch, epoch, version, release)``, return a human-readable
    string describing the package.  A version of 'auto' or 'any' means
    the version is unconstrained, so only the name is returned. """
    name, arch, epoch = package[0], package[1], package[2]
    version, release = package[3], package[4]
    if version in ('auto', 'any'):
        return name

    parts = [name, "-"]
    if epoch:
        parts += [epoch, ':']
    parts += [version, '-', release]
    if arch:
        parts += ['.', arch]
    return ''.join(str(part) for part in parts)
+
+
class YumHelper(object):
    """ Base class for yum helper objects.  Instantiates and
    configures a :class:`yum.YumBase` from the given config file and
    sets up a per-class logger. """

    def __init__(self, cfgfile, verbose=1):
        # path to the yum config file generated by the Packages plugin
        self.cfgfile = cfgfile
        self.yumbase = yum.YumBase()
        # pylint: disable=E1121,W0212
        try:
            # newer yum exposes a preconf object that must be populated
            # before the (private) _getConfig() call
            self.yumbase.preconf.debuglevel = verbose
            self.yumbase.preconf.fn = cfgfile
            self.yumbase._getConfig()
        except AttributeError:
            # older yum has no preconf attribute; pass the config file
            # and debug level directly to _getConfig() instead
            self.yumbase._getConfig(cfgfile, debuglevel=verbose)
        # pylint: enable=E1121,W0212
        self.logger = logging.getLogger(self.__class__.__name__)
+
+
class DepSolver(YumHelper):
    """ Yum dependency solver. This is used for operations that only
    read from the yum cache, and thus operates in cacheonly mode. """

    def __init__(self, cfgfile, verbose=1):
        YumHelper.__init__(self, cfgfile, verbose=verbose)
        # internally, yum uses an integer, not a boolean, for conf.cache
        self.yumbase.conf.cache = 1
        # backing store for the ``groups`` property; None until set
        self._groups = None

    def get_groups(self):
        """ getter for the groups property """
        if self._groups is not None:
            return self._groups
        else:
            # no groups have been set yet; "noarch" always applies
            return ["noarch"]

    def set_groups(self, groups):
        """ setter for the groups property; "noarch" is always
        included in the stored set """
        self._groups = set(groups).union(["noarch"])

    groups = property(get_groups, set_groups)

    def get_package_object(self, pkgtup, silent=False):
        """ given a package tuple, get a yum package object

        :param pkgtup: (name, arch, epoch, version, release) tuple
        :param silent: suppress the warning for unknown packages
        :returns: a yum package object, or None if no package matched
        """
        try:
            matches = yum.packageSack.packagesNewestByName(
                self.yumbase.pkgSack.searchPkgTuple(pkgtup))
        except yum.Errors.PackageSackError:
            if not silent:
                self.logger.warning("Package '%s' not found" %
                                    self.get_package_name(pkgtup))
            matches = []
        except yum.Errors.RepoError:
            err = sys.exc_info()[1]
            self.logger.error("Temporary failure loading metadata for %s: %s" %
                              (self.get_package_name(pkgtup), err))
            matches = []

        # keep only packages whose arch matches this client's groups
        pkgs = self._filter_arch(matches)
        if pkgs:
            return pkgs[0]
        else:
            return None

    def get_group(self, group, ptype="default"):
        """ Resolve a package group name into a list of packages

        :param group: group name, with or without a leading "@"
        :param ptype: which members to return: "default", "mandatory",
                      or "optional"/"all"
        :returns: list of package names (empty on any error)
        """
        if group.startswith("@"):
            group = group[1:]

        try:
            if self.yumbase.comps.has_group(group):
                group = self.yumbase.comps.return_group(group)
            else:
                self.logger.error("%s is not a valid group" % group)
                return []
        except yum.Errors.GroupsError:
            err = sys.exc_info()[1]
            self.logger.warning(err)
            return []

        if ptype == "default":
            return [p
                    for p, d in list(group.default_packages.items())
                    if d]
        elif ptype == "mandatory":
            return [p
                    for p, m in list(group.mandatory_packages.items())
                    if m]
        elif ptype == "optional" or ptype == "all":
            return group.packages
        else:
            self.logger.warning("Unknown group package type '%s'" % ptype)
            return []

    def _filter_arch(self, packages):
        """ filter packages in the given list that do not have an
        architecture in the list of groups for this client """
        matching = []
        for pkg in packages:
            if pkg.arch in self.groups:
                matching.append(pkg)
            else:
                self.logger.debug("%s has non-matching architecture (%s)" %
                                  (pkg, pkg.arch))
        if matching:
            return matching
        else:
            # no packages match architecture; we'll assume that the
            # user knows what s/he is doing and this is a multiarch
            # box.
            return packages

    def get_package_name(self, package):
        """ get the name of a package or virtual package from the
        internal representation used by this Collection class """
        if isinstance(package, tuple):
            if len(package) == 3:
                # 3-tuples are PRCO (provides/requires/conflicts/
                # obsoletes) tuples, not package tuples
                return yum.misc.prco_tuple_to_string(package)
            else:
                return pkgtup_to_string(package)
        else:
            return str(package)

    def complete(self, packagelist):
        """ resolve dependencies and generate a complete package list
        from the given list of initial packages

        :param packagelist: iterable of package names or package tuples
        :returns: (complete package tuple list, unknown package list)
        """
        packages = set()
        unknown = set()
        for pkg in packagelist:
            if isinstance(pkg, tuple):
                pkgtup = pkg
            else:
                # bare name: build a tuple with all other fields free
                pkgtup = (pkg, None, None, None, None)
            pkgobj = self.get_package_object(pkgtup)
            if not pkgobj:
                self.logger.debug("Unknown package %s" %
                                  self.get_package_name(pkg))
                unknown.add(pkg)
            else:
                if self.yumbase.tsInfo.exists(pkgtup=pkgobj.pkgtup):
                    self.logger.debug("%s added to transaction multiple times"
                                      % pkgobj)
                else:
                    self.logger.debug("Adding %s to transaction" % pkgobj)
                    self.yumbase.tsInfo.addInstall(pkgobj)
        # let yum pull the full dependency closure into the transaction
        self.yumbase.resolveDeps()

        for txmbr in self.yumbase.tsInfo:
            packages.add(txmbr.pkgtup)
        return list(packages), list(unknown)
+
+
def acquire_lock(func):
    """ Decorator for :class:`CacheManager` methods that acquires the
    cache lock before the wrapped method runs and releases it
    afterwards, even if the method raises.

    Stale locks (e.g., left by a crashed helper) are broken after a
    60-second timeout and taken over.

    :param func: the method to wrap; ``self`` must provide ``lock``,
                 ``lockfile`` and ``logger`` attributes
    :returns: the wrapped method
    """
    @wraps(func)
    def inner(self, *args, **kwargs):
        """ Get and release a lock while running the function this
        wraps. """
        self.logger.debug("Acquiring lock at %s" % self.lockfile)
        while not self.lock.i_am_locking():
            try:
                self.lock.acquire(timeout=60)  # wait up to 60 seconds
            except LockTimeout:
                # assume the lock holder is dead; steal the lock
                self.lock.break_lock()
                self.lock.acquire()
        try:
            # fix: propagate the wrapped method's return value instead
            # of silently discarding it
            return func(self, *args, **kwargs)
        finally:
            self.lock.release()
            self.logger.debug("Released lock at %s" % self.lockfile)

    return inner
+
+
class CacheManager(YumHelper):
    """ Yum cache manager. Unlike :class:`DepSolver`, this can write
    to the yum cache, and so is used for operations that muck with the
    cache. (Technically, :func:`CacheManager.clean_cache` could be in
    either DepSolver or CacheManager, but for consistency I've put it
    here.) """

    def __init__(self, cfgfile, verbose=1):
        YumHelper.__init__(self, cfgfile, verbose=verbose)
        # the lock file lives next to the yum config so that all
        # helpers for the same cache share one lock
        self.lockfile = \
            os.path.join(os.path.dirname(self.yumbase.conf.config_file_path),
                         "lock")
        self.lock = FileLock(self.lockfile)

    @acquire_lock
    def clean_cache(self):
        """ clean the yum cache """
        for mdtype in ["Headers", "Packages", "Sqlite", "Metadata",
                       "ExpireCache"]:
            # for reasons that are entirely obvious, all of the yum
            # API clean* methods return a tuple of 0 (zero, always
            # zero) and a list containing a single message about how
            # many files were deleted. so useful. thanks, yum.
            msg = getattr(self.yumbase, "clean%s" % mdtype)()[1][0]
            if not msg.startswith("0 "):
                self.logger.info(msg)

    @acquire_lock
    def populate_cache(self):
        """ populate the yum cache """
        # force every repo's metadata to be refetched in full
        for repo in self.yumbase.repos.findRepos('*'):
            repo.metadata_expire = 0
            repo.mdpolicy = "group:all"
        self.yumbase.doRepoSetup()
        self.yumbase.repos.doSetup()
        for repo in self.yumbase.repos.listEnabled():
            # this populates the cache as a side effect
            repo.repoXML  # pylint: disable=W0104
            try:
                repo.getGroups()
            except yum.Errors.RepoMDError:
                pass  # this repo has no groups
        self.yumbase.repos.populateSack(mdtype='metadata', cacheonly=1)
        self.yumbase.repos.populateSack(mdtype='filelists', cacheonly=1)
        self.yumbase.repos.populateSack(mdtype='otherdata', cacheonly=1)
        # this does something with the groups cache as a side effect
        self.yumbase.comps  # pylint: disable=W0104
+
+
class HelperSubcommand(Bcfg2.Options.Subcommand):
    """ Base class for all yum helper subcommands """

    # the value to JSON encode and print out if the command fails
    fallback = None

    # whether or not this command accepts input on stdin
    accept_input = True

    def __init__(self):
        Bcfg2.Options.Subcommand.__init__(self)
        # map the global debug/verbose flags onto a yum-style numeric
        # debug level
        self.verbosity = 0
        if Bcfg2.Options.setup.debug:
            self.verbosity = 5
        elif Bcfg2.Options.setup.verbose:
            self.verbosity = 1

    def run(self, setup):
        """ Read JSON input from stdin, dispatch to :func:`_run`, and
        print the JSON-encoded result on stdout.

        :returns: 0 on success, 2 on any error (``fallback`` is
                  printed so the caller always receives valid JSON)
        """
        # NOTE(review): stdin is read even when accept_input is False
        # -- confirm every caller supplies input on stdin
        try:
            data = json.loads(sys.stdin.read())
        except ValueError:
            self.logger.error("Error decoding JSON input: %s" %
                              sys.exc_info()[1])
            print(json.dumps(self.fallback))
            return 2

        try:
            print(json.dumps(self._run(setup, data)))
        except:  # pylint: disable=W0702
            # fix: the format arguments must be passed as a tuple; the
            # original passed them as separate positional arguments,
            # which raised TypeError ("not enough arguments for format
            # string") instead of logging the real error
            self.logger.error("Unexpected error running %s: %s" %
                              (self.__class__.__name__.lower(),
                               sys.exc_info()[1]), exc_info=1)
            print(json.dumps(self.fallback))
            return 2
        return 0

    def _run(self, setup, data):
        """ Actually run the command; subclasses must override """
        raise NotImplementedError
+
+
class DepSolverSubcommand(HelperSubcommand):  # pylint: disable=W0223
    """ Base class for helper commands that resolve dependencies via
    the depsolver and therefore never modify the yum cache. """

    def __init__(self):
        HelperSubcommand.__init__(self)
        cfgfile = Bcfg2.Options.setup.yum_config
        self.depsolver = DepSolver(cfgfile, self.verbosity)
+
+
class CacheManagerSubcommand(HelperSubcommand):  # pylint: disable=W0223
    """ Base class for helper commands that modify the yum cache via
    the cache manager. """
    fallback = False
    accept_input = False

    def __init__(self):
        HelperSubcommand.__init__(self)
        cfgfile = Bcfg2.Options.setup.yum_config
        self.cachemgr = CacheManager(cfgfile, self.verbosity)
+
+
class Clean(CacheManagerSubcommand):
    """ Clean the yum cache """

    def _run(self, setup, data):  # pylint: disable=W0613
        """ Flush the on-disk cache and report success. """
        self.cachemgr.clean_cache()
        return True
+
+
class MakeCache(CacheManagerSubcommand):
    """ Update the on-disk cache """

    def _run(self, setup, data):  # pylint: disable=W0613
        """ Repopulate the on-disk cache and report success. """
        self.cachemgr.populate_cache()
        return True
+
+
class Complete(DepSolverSubcommand):
    """ Given an initial set of packages, get a complete set of
    packages with all dependencies resolved """
    # the value printed if resolution fails; 'unknown' is filled in
    # per-request with the input package list
    fallback = dict(packages=[], unknown=[])

    def _run(self, _, data):
        """ Resolve dependencies for the requested package list.

        :param data: dict with ``groups`` (arch group list) and
                     ``packages`` (initial package list)
        :returns: dict with complete ``packages`` and ``unknown``
        """
        self.depsolver.groups = data['groups']
        # fix: build a fresh instance-level fallback rather than
        # mutating the shared class attribute, which would leak this
        # request's package list into every other instance
        self.fallback = dict(packages=[], unknown=data['packages'])
        (packages, unknown) = self.depsolver.complete(
            [pkg_to_tuple(p) for p in data['packages']])
        return dict(packages=list(packages), unknown=list(unknown))
+
+
class GetGroups(DepSolverSubcommand):
    """ Resolve the given package groups """

    def _run(self, _, data):
        """ Map each requested group name to its member package list. """
        results = dict()
        for gdata in data:
            gname = gdata['group']
            if "type" in gdata:
                pkgs = self.depsolver.get_group(gname,
                                                ptype=gdata['type'])
            else:
                pkgs = self.depsolver.get_group(gname)
            results[gname] = list(pkgs)
        return results


Get_Groups = GetGroups  # pylint: disable=C0103
+
+
class CLI(Bcfg2.Options.CommandRegistry):
    """ The bcfg2-yum-helper CLI """

    # command-line options for the helper itself; each subcommand may
    # add its own options via register_commands() below
    options = [
        Bcfg2.Options.PathOption(
            "-c", "--yum-config", help="Yum config file"),
        Bcfg2.Options.PositionalArgument(
            "command", help="Yum helper command",
            choices=['clean', 'complete', 'get_groups'])]

    def __init__(self):
        Bcfg2.Options.CommandRegistry.__init__(self)
        # register every HelperSubcommand subclass defined in this
        # module; must happen before the parser is built so that
        # subcommand options are known
        self.register_commands(globals().values(), parent=HelperSubcommand)
        parser = Bcfg2.Options.get_parser("Bcfg2 yum helper",
                                          components=[self])
        parser.add_options(self.subcommand_options)
        parser.parse()
        self.logger = logging.getLogger(parser.prog)

    def run(self):
        """ Run bcfg2-yum-helper

        :returns: 1 if the yum config file is missing, otherwise the
                  exit code of the selected subcommand
        """
        if not os.path.exists(Bcfg2.Options.setup.yum_config):
            self.logger.error("Config file %s not found" %
                              Bcfg2.Options.setup.yum_config)
            return 1
        return self.runcommand()
diff --git a/src/lib/Bcfg2/Server/Plugins/Packages/__init__.py b/src/lib/Bcfg2/Server/Plugins/Packages/__init__.py
index 4b58c0fdb..3aa5c415f 100644
--- a/src/lib/Bcfg2/Server/Plugins/Packages/__init__.py
+++ b/src/lib/Bcfg2/Server/Plugins/Packages/__init__.py
@@ -7,20 +7,33 @@ import sys
import glob
import shutil
import lxml.etree
-import Bcfg2.Logger
+import Bcfg2.Options
+import Bcfg2.Server.Cache
import Bcfg2.Server.Plugin
-from Bcfg2.Compat import ConfigParser, urlopen, HTTPError, URLError, \
- MutableMapping
+from Bcfg2.Compat import urlopen, HTTPError, URLError, MutableMapping
from Bcfg2.Server.Plugins.Packages.Collection import Collection, \
get_collection_class
from Bcfg2.Server.Plugins.Packages.PackagesSources import PackagesSources
+from Bcfg2.Server.Statistics import track_statistics
-#: The default path for generated yum configs
-YUM_CONFIG_DEFAULT = "/etc/yum.repos.d/bcfg2.repo"
-#: The default path for generated apt configs
-APT_CONFIG_DEFAULT = \
- "/etc/apt/sources.list.d/bcfg2-packages-generated-sources.list"
+def packages_boolean(value):
+ """ For historical reasons, the Packages booleans 'resolver' and
+ 'metadata' both accept "enabled" in addition to the normal boolean
+ values. """
+ if value == 'disabled':
+ return False
+ elif value == 'enabled':
+ return True
+ else:
+ return value
+
+
+class PackagesBackendAction(Bcfg2.Options.ComponentAction):
+ """ ComponentAction to load Packages backends """
+ bases = ['Bcfg2.Server.Plugins.Packages']
+ module = True
+ fail_silently = True
class OnDemandDict(MutableMapping):
@@ -70,7 +83,6 @@ class OnDemandDict(MutableMapping):
class Packages(Bcfg2.Server.Plugin.Plugin,
- Bcfg2.Server.Plugin.Caching,
Bcfg2.Server.Plugin.StructureValidator,
Bcfg2.Server.Plugin.Generator,
Bcfg2.Server.Plugin.Connector,
@@ -85,6 +97,39 @@ class Packages(Bcfg2.Server.Plugin.Plugin,
.. private-include: _build_packages"""
+ options = [
+ Bcfg2.Options.Option(
+ cf=("packages", "backends"), dest="packages_backends",
+ help="Packages backends to load",
+ type=Bcfg2.Options.Types.comma_list,
+ action=PackagesBackendAction,
+ default=['Yum', 'Apt', 'Pac', 'Pkgng']),
+ Bcfg2.Options.PathOption(
+ cf=("packages", "cache"), dest="packages_cache",
+ help="Path to the Packages cache",
+ default='<repository>/Packages/cache'),
+ Bcfg2.Options.Option(
+ cf=("packages", "resolver"), dest="packages_resolver",
+ help="Disable the Packages resolver",
+ type=packages_boolean, default=True),
+ Bcfg2.Options.Option(
+ cf=("packages", "metadata"), dest="packages_metadata",
+ help="Disable all Packages metadata processing",
+ type=packages_boolean, default=True),
+ Bcfg2.Options.Option(
+ cf=("packages", "version"), dest="packages_version",
+ help="Set default Package entry version", default="auto",
+ choices=["auto", "any"]),
+ Bcfg2.Options.PathOption(
+ cf=("packages", "yum_config"),
+ help="The default path for generated yum configs",
+ default="/etc/yum.repos.d/bcfg2.repo"),
+ Bcfg2.Options.PathOption(
+ cf=("packages", "apt_config"),
+ help="The default path for generated apt configs",
+ default="/etc/apt/sources.list.d/"
+ "bcfg2-packages-generated-sources.list")]
+
#: Packages is an alternative to
#: :mod:`Bcfg2.Server.Plugins.Pkgmgr` and conflicts with it.
conflicts = ['Pkgmgr']
@@ -93,12 +138,8 @@ class Packages(Bcfg2.Server.Plugin.Plugin,
#: and :func:`Reload`
__rmi__ = Bcfg2.Server.Plugin.Plugin.__rmi__ + ['Refresh', 'Reload']
- __child_rmi__ = Bcfg2.Server.Plugin.Plugin.__child_rmi__ + \
- [('Refresh', 'expire_cache'), ('Reload', 'expire_cache')]
-
- def __init__(self, core, datastore):
- Bcfg2.Server.Plugin.Plugin.__init__(self, core, datastore)
- Bcfg2.Server.Plugin.Caching.__init__(self)
+ def __init__(self, core):
+ Bcfg2.Server.Plugin.Plugin.__init__(self, core)
Bcfg2.Server.Plugin.StructureValidator.__init__(self)
Bcfg2.Server.Plugin.Generator.__init__(self)
Bcfg2.Server.Plugin.Connector.__init__(self)
@@ -107,9 +148,7 @@ class Packages(Bcfg2.Server.Plugin.Plugin,
#: Packages does a potentially tremendous amount of on-disk
#: caching. ``cachepath`` holds the base directory to where
#: data should be cached.
- self.cachepath = \
- self.core.setup.cfp.get("packages", "cache",
- default=os.path.join(self.data, 'cache'))
+ self.cachepath = Bcfg2.Options.setup.packages_cache
#: Where Packages should store downloaded GPG key files
self.keypath = os.path.join(self.cachepath, 'keys')
@@ -117,14 +156,6 @@ class Packages(Bcfg2.Server.Plugin.Plugin,
# create key directory if needed
os.makedirs(self.keypath)
- # warn about deprecated magic groups
- if self.core.setup.cfp.getboolean("packages", "magic_groups",
- default=False):
- self.logger.warning("Packages: Magic groups are deprecated and "
- "will be removed in a future release")
- self.logger.warning("You can disable magic groups by setting "
- "magic_groups=0 in [packages] in bcfg2.conf")
-
# pylint: disable=C0301
#: The
#: :class:`Bcfg2.Server.Plugins.Packages.PackagesSources.PackagesSources`
@@ -132,8 +163,7 @@ class Packages(Bcfg2.Server.Plugin.Plugin,
#: :class:`Bcfg2.Server.Plugins.Packages.Source.Source` objects for
#: this plugin.
self.sources = PackagesSources(os.path.join(self.data, "sources.xml"),
- self.cachepath, core.fam, self,
- self.core.setup)
+ self.cachepath, self)
#: We cache
#: :class:`Bcfg2.Server.Plugins.Packages.Collection.Collection`
@@ -153,7 +183,7 @@ class Packages(Bcfg2.Server.Plugin.Plugin,
#: :attr:`Bcfg2.Server.Plugins.Packages.Collection.Collection.cachekey`,
#: a unique key identifying the collection by its *config*,
#: which could be shared among multiple clients.
- self.collections = dict()
+ self.collections = Bcfg2.Server.Cache.Cache("Packages", "collections")
#: clients is a cache mapping of hostname ->
#: :attr:`Bcfg2.Server.Plugins.Packages.Collection.Collection.cachekey`
@@ -161,21 +191,8 @@ class Packages(Bcfg2.Server.Plugin.Plugin,
#: :class:`Bcfg2.Server.Plugins.Packages.Collection.Collection`
#: object when one is requested, so each entry is very
#: short-lived -- it's purged at the end of each client run.
- self.clients = dict()
-
- #: groupcache caches group lookups. It maps Collections (via
- #: :attr:`Bcfg2.Server.Plugins.Packages.Collection.Collection.cachekey`)
- #: to sets of package groups, and thence to the packages
- #: indicated by those groups.
- self.groupcache = dict()
-
- #: pkgcache caches complete package sets. It maps Collections
- #: (via
- #: :attr:`Bcfg2.Server.Plugins.Packages.Collection.Collection.cachekey`)
- #: to sets of initial packages, and thence to the final
- #: (complete) package selections resolved from the initial
- #: packages
- self.pkgcache = dict()
+ self.clients = Bcfg2.Server.Cache.Cache("Packages", "cache")
+
# pylint: enable=C0301
__init__.__doc__ = Bcfg2.Server.Plugin.Plugin.__init__.__doc__
@@ -187,48 +204,6 @@ class Packages(Bcfg2.Server.Plugin.Plugin,
return rv
set_debug.__doc__ = Bcfg2.Server.Plugin.Plugin.set_debug.__doc__
- @property
- def disableResolver(self): # pylint: disable=C0103
- """ Report the state of the resolver. This can be disabled in
- the configuration. Note that disabling metadata (see
- :attr:`disableMetaData`) implies disabling the resolver.
-
- This property cannot be set. """
- if self.disableMetaData:
- # disabling metadata without disabling the resolver Breaks
- # Things
- return True
- try:
- return not self.core.setup.cfp.getboolean("packages", "resolver")
- except (ConfigParser.NoSectionError, ConfigParser.NoOptionError):
- return False
- except ValueError:
- # for historical reasons we also accept "enabled" and
- # "disabled", which are not handled according to the
- # Python docs but appear to be handled properly by
- # ConfigParser in at least some versions
- return self.core.setup.cfp.get(
- "packages",
- "resolver",
- default="enabled").lower() == "disabled"
-
- @property
- def disableMetaData(self): # pylint: disable=C0103
- """ Report whether or not metadata processing is enabled.
-
- This property cannot be set. """
- try:
- return not self.core.setup.cfp.getboolean("packages", "resolver")
- except (ConfigParser.NoSectionError, ConfigParser.NoOptionError):
- return False
- except ValueError:
- # for historical reasons we also accept "enabled" and
- # "disabled"
- return self.core.setup.cfp.get(
- "packages",
- "metadata",
- default="enabled").lower() == "disabled"
-
def create_config(self, entry, metadata):
""" Create yum/apt config for the specified client.
@@ -276,9 +251,7 @@ class Packages(Bcfg2.Server.Plugin.Plugin,
"""
if entry.tag == 'Package':
collection = self.get_collection(metadata)
- entry.set('version', self.core.setup.cfp.get("packages",
- "version",
- default="auto"))
+ entry.set('version', Bcfg2.Options.setup.packages_version)
entry.set('type', collection.ptype)
elif entry.tag == 'Path':
self.create_config(entry, metadata)
@@ -304,27 +277,15 @@ class Packages(Bcfg2.Server.Plugin.Plugin,
:raises: :class:`Bcfg2.Server.Plugin.exceptions.PluginExecutionError`
"""
if entry.tag == 'Package':
- if self.core.setup.cfp.getboolean("packages", "magic_groups",
- default=False):
- collection = self.get_collection(metadata)
- if collection.magic_groups_match():
- return True
- else:
- return True
+ return True
elif entry.tag == 'Path':
# managed entries for yum/apt configs
- if (entry.get("name") ==
- self.core.setup.cfp.get("packages",
- "yum_config",
- default=YUM_CONFIG_DEFAULT) or
- entry.get("name") ==
- self.core.setup.cfp.get("packages",
- "apt_config",
- default=APT_CONFIG_DEFAULT)):
+ if entry.get("name") in [Bcfg2.Options.setup.apt_config,
+ Bcfg2.Options.setup.yum_config]:
return True
return False
- @Bcfg2.Server.Plugin.track_statistics()
+ @track_statistics()
def validate_structures(self, metadata, structures):
""" Do the real work of Packages. This does two things:
@@ -353,15 +314,15 @@ class Packages(Bcfg2.Server.Plugin.Plugin,
:returns: None
"""
collection = self.get_collection(metadata)
- indep = lxml.etree.Element('Independent')
+ indep = lxml.etree.Element('Independent', name=self.__class__.__name__)
self._build_packages(metadata, indep, structures,
collection=collection)
collection.build_extra_structures(indep)
structures.append(indep)
- @Bcfg2.Server.Plugin.track_statistics()
- def _build_packages(self, metadata, independent, structures,
- collection=None):
+ @track_statistics()
+ def _build_packages(self, metadata, independent, # pylint: disable=R0914
+ structures, collection=None):
""" Perform dependency resolution and build the complete list
of packages that need to be included in the specification by
:func:`validate_structures`, based on the initial list of
@@ -382,8 +343,10 @@ class Packages(Bcfg2.Server.Plugin.Plugin,
:func:`get_collection`
:type collection: Bcfg2.Server.Plugins.Packages.Collection.Collection
"""
- if self.disableResolver:
- # Config requests no resolver
+ if (not Bcfg2.Options.setup.packages_metadata or
+ not Bcfg2.Options.setup.packages_resolver):
+ # Config requests no resolver. Note that disabling
+ # metadata implies disabling the resolver.
for struct in structures:
for pkg in struct.xpath('//Package | //BoundPackage'):
if pkg.get("group"):
@@ -396,10 +359,15 @@ class Packages(Bcfg2.Server.Plugin.Plugin,
initial = set()
to_remove = []
groups = []
+ recommended = dict()
+
for struct in structures:
for pkg in struct.xpath('//Package | //BoundPackage'):
if pkg.get("name"):
initial.update(collection.packages_from_entry(pkg))
+
+ if pkg.get("recommended"):
+ recommended[pkg.get("name")] = pkg.get("recommended")
elif pkg.get("group"):
groups.append((pkg.get("group"),
pkg.get("type")))
@@ -422,11 +390,12 @@ class Packages(Bcfg2.Server.Plugin.Plugin,
groups.sort()
# check for this set of groups in the group cache
+ gcache = Bcfg2.Server.Cache.Cache("Packages", "pkg_groups",
+ collection.cachekey)
gkey = hash(tuple(groups))
- if gkey not in self.groupcache[collection.cachekey]:
- self.groupcache[collection.cachekey][gkey] = \
- collection.get_groups(groups)
- for pkgs in self.groupcache[collection.cachekey][gkey].values():
+ if gkey not in gcache:
+ gcache[gkey] = collection.get_groups(groups)
+ for pkgs in gcache[gkey].values():
base.update(pkgs)
# essential pkgs are those marked as such by the distribution
@@ -434,10 +403,11 @@ class Packages(Bcfg2.Server.Plugin.Plugin,
# check for this set of packages in the package cache
pkey = hash(tuple(base))
- if pkey not in self.pkgcache[collection.cachekey]:
- self.pkgcache[collection.cachekey][pkey] = \
- collection.complete(base)
- packages, unknown = self.pkgcache[collection.cachekey][pkey]
+ pcache = Bcfg2.Server.Cache.Cache("Packages", "pkg_sets",
+ collection.cachekey)
+ if pkey not in pcache:
+ pcache[pkey] = collection.complete(base, recommended)
+ packages, unknown = pcache[pkey]
if unknown:
self.logger.info("Packages: Got %d unknown entries" % len(unknown))
self.logger.info("Packages: %s" % list(unknown))
@@ -447,7 +417,7 @@ class Packages(Bcfg2.Server.Plugin.Plugin,
newpkgs.sort()
collection.packages_to_entry(newpkgs, independent)
- @Bcfg2.Server.Plugin.track_statistics()
+ @track_statistics()
def Refresh(self):
""" Packages.Refresh() => True|False
@@ -455,7 +425,7 @@ class Packages(Bcfg2.Server.Plugin.Plugin,
self._load_config(force_update=True)
return True
- @Bcfg2.Server.Plugin.track_statistics()
+ @track_statistics()
def Reload(self):
""" Packages.Refresh() => True|False
@@ -463,7 +433,8 @@ class Packages(Bcfg2.Server.Plugin.Plugin,
self._load_config()
return True
- def expire_cache(self, _=None):
+ def child_reload(self, _=None):
+ """ Reload the Packages configuration on a child process. """
self.Reload()
def _load_config(self, force_update=False):
@@ -490,18 +461,15 @@ class Packages(Bcfg2.Server.Plugin.Plugin,
for collection in list(self.collections.values()):
cachefiles.update(collection.cachefiles)
- if not self.disableMetaData:
+ if Bcfg2.Options.setup.packages_metadata:
collection.setup_data(force_update)
# clear Collection and package caches
- self.clients = dict()
- self.collections = dict()
- self.groupcache = dict()
- self.pkgcache = dict()
+ Bcfg2.Server.Cache.expire("Packages")
for source in self.sources.entries:
cachefiles.add(source.cachefile)
- if not self.disableMetaData:
+ if Bcfg2.Options.setup.packages_metadata:
source.setup_data(force_update)
for cfile in glob.glob(os.path.join(self.cachepath, "cache-*")):
@@ -533,7 +501,7 @@ class Packages(Bcfg2.Server.Plugin.Plugin,
if localfile not in keyfiles:
keyfiles.append(localfile)
if ((force_update and key not in keys) or
- not os.path.exists(localfile)):
+ not os.path.exists(localfile)):
self.logger.info("Packages: Downloading and parsing %s" %
key)
try:
@@ -547,16 +515,12 @@ class Packages(Bcfg2.Server.Plugin.Plugin,
err = sys.exc_info()[1]
self.logger.error("Packages: Error writing %s to %s: "
"%s" % (key, localfile, err))
- except:
- err = sys.exc_info()[1]
- self.logger.error("Packages: Unknown error fetching "
- "%s: %s" % (key, err))
for kfile in glob.glob(os.path.join(self.keypath, "*")):
if kfile not in keyfiles:
os.unlink(kfile)
- @Bcfg2.Server.Plugin.track_statistics()
+ @track_statistics()
def get_collection(self, metadata):
""" Get a
:class:`Bcfg2.Server.Plugins.Packages.Collection.Collection`
@@ -573,12 +537,7 @@ class Packages(Bcfg2.Server.Plugin.Plugin,
if not self.sources.loaded:
# if sources.xml has not received a FAM event yet, defer;
# instantiate a dummy Collection object
- collection = Collection(metadata, [], self.cachepath, self.data,
- self.core.fam)
- ckey = collection.cachekey
- self.groupcache.setdefault(ckey, dict())
- self.pkgcache.setdefault(ckey, dict())
- return collection
+ return Collection(metadata, [], self.cachepath, self.data)
if metadata.hostname in self.clients:
return self.collections[self.clients[metadata.hostname]]
@@ -610,13 +569,11 @@ class Packages(Bcfg2.Server.Plugin.Plugin,
"for %s" % (cclass.__name__, metadata.hostname))
collection = cclass(metadata, relevant, self.cachepath, self.data,
- self.core.fam, debug=self.debug_flag)
+ debug=self.debug_flag)
ckey = collection.cachekey
if cclass != Collection:
self.clients[metadata.hostname] = ckey
self.collections[ckey] = collection
- self.groupcache.setdefault(ckey, dict())
- self.pkgcache.setdefault(ckey, dict())
return collection
def get_additional_data(self, metadata):
@@ -665,8 +622,7 @@ class Packages(Bcfg2.Server.Plugin.Plugin,
:param metadata: The client metadata
:type metadata: Bcfg2.Server.Plugins.Metadata.ClientMetadata
"""
- if metadata.hostname in self.clients:
- del self.clients[metadata.hostname]
+ self.clients.expire(metadata.hostname)
def end_statistics(self, metadata):
""" Hook to clear the cache for this client in :attr:`clients`
diff --git a/src/lib/Bcfg2/Server/Plugins/Pkgmgr.py b/src/lib/Bcfg2/Server/Plugins/Pkgmgr.py
index a1dcb575f..7c6ab0ed7 100644
--- a/src/lib/Bcfg2/Server/Plugins/Pkgmgr.py
+++ b/src/lib/Bcfg2/Server/Plugins/Pkgmgr.py
@@ -1,24 +1,18 @@
'''This module implements a package management scheme for all images'''
-import os
import re
-import glob
+import sys
import logging
import lxml.etree
import Bcfg2.Server.Plugin
-import Bcfg2.Server.Lint
+from Bcfg2.Server.Plugin import PluginExecutionError
-try:
- set
-except NameError:
- # deprecated since python 2.6
- from sets import Set as set
logger = logging.getLogger('Bcfg2.Plugins.Pkgmgr')
class FuzzyDict(dict):
- fuzzy = re.compile('(?P<name>.*):(?P<alist>\S+(,\S+)*)')
+ fuzzy = re.compile(r'(?P<name>.*):(?P<alist>\S+(,\S+)*)')
def __getitem__(self, key):
if isinstance(key, str):
@@ -41,101 +35,219 @@ class FuzzyDict(dict):
def get(self, key, default=None):
try:
return self.__getitem__(key)
- except:
+ except KeyError:
if default:
return default
raise
-class PNode(Bcfg2.Server.Plugin.INode):
+class PNode(object):
"""PNode has a list of packages available at a
particular group intersection.
"""
- splitters = {'rpm': re.compile('^(.*/)?(?P<name>[\w\+\d\.]+(-[\w\+\d\.]+)*)-' + \
- '(?P<version>[\w\d\.]+-([\w\d\.]+))\.(?P<arch>\S+)\.rpm$'),
- 'encap': re.compile('^(?P<name>[\w-]+)-(?P<version>[\w\d\.+-]+).encap.*$')}
+ splitters = dict(
+ rpm=re.compile(
+ r'^(.*/)?(?P<name>[\w\+\d\.]+(-[\w\+\d\.]+)*)-' +
+ r'(?P<version>[\w\d\.]+-([\w\d\.]+))\.(?P<arch>\S+)\.rpm$'),
+ encap=re.compile(
+ r'^(?P<name>[\w-]+)-(?P<version>[\w\d\.+-]+).encap.*$'))
+ raw = dict(
+ Client="lambda m, e:'%(name)s' == m.hostname and predicate(m, e)",
+ Group="lambda m, e:'%(name)s' in m.groups and predicate(m, e)")
+ nraw = dict(
+ Client="lambda m, e:'%(name)s' != m.hostname and predicate(m, e)",
+ Group="lambda m, e:'%(name)s' not in m.groups and predicate(m, e)")
+ containers = ['Group', 'Client']
ignore = ['Package']
- def Match(self, metadata, data, entry=lxml.etree.Element("None")):
- """Return a dictionary of package mappings."""
- if self.predicate(metadata, entry):
- for key in self.contents:
- try:
- data[key].update(self.contents[key])
- except:
- data[key] = FuzzyDict()
- data[key].update(self.contents[key])
- for child in self.children:
- child.Match(metadata, data)
-
def __init__(self, data, pdict, parent=None):
# copy local attributes to all child nodes if no local attribute exists
if 'Package' not in pdict:
pdict['Package'] = set()
for child in data.getchildren():
- attrs = set(data.attrib.keys()).difference(child.attrib.keys() + ['name'])
+ attrs = set(data.attrib.keys()).difference(
+ child.attrib.keys() + ['name'])
for attr in attrs:
try:
child.set(attr, data.get(attr))
except:
- # don't fail on things like comments and other immutable elements
+ # don't fail on things like comments and other
+ # immutable elements
pass
- Bcfg2.Server.Plugin.INode.__init__(self, data, pdict, parent)
+ self.data = data
+ self.contents = {}
+ if parent is None:
+ self.predicate = lambda m, e: True
+ else:
+ predicate = parent.predicate
+ if data.get('negate', 'false').lower() == 'true':
+ psrc = self.nraw
+ else:
+ psrc = self.raw
+ if data.tag in list(psrc.keys()):
+ self.predicate = eval(psrc[data.tag] %
+ {'name': data.get('name')},
+ {'predicate': predicate})
+ else:
+ raise PluginExecutionError("Unknown tag: %s" % data.tag)
+ self.children = []
+ self._load_children(data, pdict)
+
if 'Package' not in self.contents:
self.contents['Package'] = FuzzyDict()
for pkg in data.findall('./Package'):
- if 'name' in pkg.attrib and pkg.get('name') not in pdict['Package']:
+ if ('name' in pkg.attrib and
+ pkg.get('name') not in pdict['Package']):
pdict['Package'].add(pkg.get('name'))
- if pkg.get('name') != None:
+ if pkg.get('name') is not None:
self.contents['Package'][pkg.get('name')] = {}
if pkg.getchildren():
self.contents['Package'][pkg.get('name')]['__children__'] \
- = pkg.getchildren()
+ = pkg.getchildren()
if 'simplefile' in pkg.attrib:
- pkg.set('url', "%s/%s" % (pkg.get('uri'), pkg.get('simplefile')))
+ pkg.set('url',
+ "%s/%s" % (pkg.get('uri'), pkg.get('simplefile')))
self.contents['Package'][pkg.get('name')].update(pkg.attrib)
else:
if 'file' in pkg.attrib:
if 'multiarch' in pkg.attrib:
archs = pkg.get('multiarch').split()
srcs = pkg.get('srcs', pkg.get('multiarch')).split()
- url = ' '.join(["%s/%s" % (pkg.get('uri'),
- pkg.get('file') % {'src':srcs[idx],
- 'arch':archs[idx]})
- for idx in range(len(archs))])
+ url = ' '.join(
+ ["%s/%s" % (pkg.get('uri'),
+ pkg.get('file') % {'src': srcs[idx],
+ 'arch': archs[idx]})
+ for idx in range(len(archs))])
pkg.set('url', url)
else:
pkg.set('url', '%s/%s' % (pkg.get('uri'),
pkg.get('file')))
- if pkg.get('type') in self.splitters and pkg.get('file') != None:
- mdata = self.splitters[pkg.get('type')].match(pkg.get('file'))
+ if (pkg.get('type') in self.splitters and
+ pkg.get('file') is not None):
+ mdata = \
+ self.splitters[pkg.get('type')].match(pkg.get('file'))
if not mdata:
- logger.error("Failed to match pkg %s" % pkg.get('file'))
+ logger.error("Failed to match pkg %s" %
+ pkg.get('file'))
continue
pkgname = mdata.group('name')
self.contents['Package'][pkgname] = mdata.groupdict()
self.contents['Package'][pkgname].update(pkg.attrib)
if pkg.attrib.get('file'):
- self.contents['Package'][pkgname]['url'] = pkg.get('url')
- self.contents['Package'][pkgname]['type'] = pkg.get('type')
+ self.contents['Package'][pkgname]['url'] = \
+ pkg.get('url')
+ self.contents['Package'][pkgname]['type'] = \
+ pkg.get('type')
if pkg.get('verify'):
- self.contents['Package'][pkgname]['verify'] = pkg.get('verify')
+ self.contents['Package'][pkgname]['verify'] = \
+ pkg.get('verify')
if pkg.get('multiarch'):
- self.contents['Package'][pkgname]['multiarch'] = pkg.get('multiarch')
+ self.contents['Package'][pkgname]['multiarch'] = \
+ pkg.get('multiarch')
if pkgname not in pdict['Package']:
pdict['Package'].add(pkgname)
if pkg.getchildren():
- self.contents['Package'][pkgname]['__children__'] = pkg.getchildren()
+ self.contents['Package'][pkgname]['__children__'] = \
+ pkg.getchildren()
else:
- self.contents['Package'][pkg.get('name')].update(pkg.attrib)
+ self.contents['Package'][pkg.get('name')].update(
+ pkg.attrib)
+ def _load_children(self, data, idict):
+ """ load children """
+ for item in data.getchildren():
+ if item.tag in self.ignore:
+ continue
+ elif item.tag in self.containers:
+ self.children.append(self.__class__(item, idict, self))
+ else:
+ try:
+ self.contents[item.tag][item.get('name')] = \
+ dict(item.attrib)
+ except KeyError:
+ self.contents[item.tag] = \
+ {item.get('name'): dict(item.attrib)}
+ if item.text:
+ self.contents[item.tag][item.get('name')]['__text__'] = \
+ item.text
+ if item.getchildren():
+ self.contents[item.tag][item.get('name')]['__children__'] \
+ = item.getchildren()
+ try:
+ idict[item.tag].append(item.get('name'))
+ except KeyError:
+ idict[item.tag] = [item.get('name')]
-class PkgSrc(Bcfg2.Server.Plugin.XMLSrc):
- """PkgSrc files contain a PNode hierarchy that
- returns matching package entries.
- """
+ def Match(self, metadata, data, entry=lxml.etree.Element("None")):
+ """Return a dictionary of package mappings."""
+ if self.predicate(metadata, entry):
+ for key in self.contents:
+ data.setdefault(key, FuzzyDict).update(self.contents[key])
+ for child in self.children:
+ child.Match(metadata, data)
+
+
+class PkgSrc(Bcfg2.Server.Plugin.XMLFileBacked):
+ """ XMLSrc files contain a
+ :class:`Bcfg2.Server.Plugin.helpers.INode` hierarchy that returns
+ matching entries. XMLSrc objects are deprecated and
+ :class:`Bcfg2.Server.Plugin.helpers.StructFile` should be
+ preferred where possible."""
__node__ = PNode
__cacheobj__ = FuzzyDict
+ __priority_required__ = True
+
+ def __init__(self, filename, should_monitor=False):
+ Bcfg2.Server.Plugin.XMLFileBacked.__init__(self, filename,
+ should_monitor)
+ self.items = {}
+ self.cache = None
+ self.pnode = None
+ self.priority = -1
+
+ def HandleEvent(self, _=None):
+ """Read file upon update."""
+ try:
+ data = open(self.name).read()
+ except IOError:
+ msg = "Failed to read file %s: %s" % (self.name, sys.exc_info()[1])
+ logger.error(msg)
+ raise PluginExecutionError(msg)
+ self.items = {}
+ try:
+ xdata = lxml.etree.XML(data, parser=Bcfg2.Server.XMLParser)
+ except lxml.etree.XMLSyntaxError:
+ msg = "Failed to parse file %s: %s" % (self.name,
+ sys.exc_info()[1])
+ logger.error(msg)
+ raise PluginExecutionError(msg)
+ self.pnode = self.__node__(xdata, self.items)
+ self.cache = None
+ try:
+ self.priority = int(xdata.get('priority'))
+ except (ValueError, TypeError):
+ if self.__priority_required__:
+ msg = "Got bogus priority %s for file %s" % \
+ (xdata.get('priority'), self.name)
+ logger.error(msg)
+ raise PluginExecutionError(msg)
+
+ del xdata, data
+
+ def Cache(self, metadata):
+ """Build a package dict for a given host."""
+ if self.cache is None or self.cache[0] != metadata:
+ cache = (metadata, self.__cacheobj__())
+ if self.pnode is None:
+ logger.error("Cache method called early for %s; "
+ "forcing data load" % self.name)
+ self.HandleEvent()
+ return
+ self.pnode.Match(metadata, cache[1])
+ self.cache = cache
+
+ def __str__(self):
+ return str(self.items)
class Pkgmgr(Bcfg2.Server.Plugin.PrioDir):
@@ -165,53 +277,14 @@ class Pkgmgr(Bcfg2.Server.Plugin.PrioDir):
mdata = FuzzyDict.fuzzy.match(pname)
if mdata:
arches = mdata.group('alist').split(',')
- [entry.remove(inst) for inst in \
- entry.findall('Instance') \
- if inst.get('arch') not in arches]
+ for inst in entry.findall('Instance'):
+ if inst.get('arch') not in arches:
+ entry.remove(inst)
def HandlesEntry(self, entry, metadata):
- return entry.tag == 'Package' and entry.get('name').split(':')[0] in list(self.Entries['Package'].keys())
+ return (
+ entry.tag == 'Package' and
+ entry.get('name').split(':')[0] in self.Entries['Package'].keys())
def HandleEntry(self, entry, metadata):
self.BindEntry(entry, metadata)
-
-
-class PkgmgrLint(Bcfg2.Server.Lint.ServerlessPlugin):
- """ Find duplicate :ref:`Pkgmgr
- <server-plugins-generators-pkgmgr>` entries with the same
- priority. """
-
- def Run(self):
- pset = set()
- for pfile in glob.glob(os.path.join(self.config['repo'], 'Pkgmgr',
- '*.xml')):
- if self.HandlesFile(pfile):
- xdata = lxml.etree.parse(pfile).getroot()
- # get priority, type, group
- priority = xdata.get('priority')
- ptype = xdata.get('type')
- for pkg in xdata.xpath("//Package"):
- if pkg.getparent().tag == 'Group':
- grp = pkg.getparent().get('name')
- if (type(grp) is not str and
- grp.getparent().tag == 'Group'):
- pgrp = grp.getparent().get('name')
- else:
- pgrp = 'none'
- else:
- grp = 'none'
- pgrp = 'none'
- ptuple = (pkg.get('name'), priority, ptype, grp, pgrp)
- # check if package is already listed with same
- # priority, type, grp
- if ptuple in pset:
- self.LintError(
- "duplicate-package",
- "Duplicate Package %s, priority:%s, type:%s" %
- (pkg.get('name'), priority, ptype))
- else:
- pset.add(ptuple)
-
- @classmethod
- def Errors(cls):
- return {"duplicate-packages": "error"}
diff --git a/src/lib/Bcfg2/Server/Plugins/Probes.py b/src/lib/Bcfg2/Server/Plugins/Probes.py
index 5d846b4bb..76aab69b5 100644
--- a/src/lib/Bcfg2/Server/Plugins/Probes.py
+++ b/src/lib/Bcfg2/Server/Plugins/Probes.py
@@ -8,15 +8,33 @@ import copy
import operator
import lxml.etree
import Bcfg2.Server
+import Bcfg2.Server.Cache
import Bcfg2.Server.Plugin
-from Bcfg2.Compat import any, unicode # pylint: disable=W0622
-
-try:
- from django.db import models
- from django.core.exceptions import MultipleObjectsReturned
- HAS_DJANGO = True
+from Bcfg2.Compat import unicode, any # pylint: disable=W0622
+import Bcfg2.Server.FileMonitor
+from Bcfg2.Logger import Debuggable
+from Bcfg2.Server.Statistics import track_statistics
+
+HAS_DJANGO = False
+# pylint: disable=C0103
+ProbesDataModel = None
+ProbesGroupsModel = None
+# pylint: enable=C0103
+
+
+def load_django_models():
+ """ Load models for Django after option parsing has completed """
+ # pylint: disable=W0602
+ global ProbesDataModel, ProbesGroupsModel, HAS_DJANGO
+ # pylint: enable=W0602
+ try:
+ from django.db import models
+ HAS_DJANGO = True
+ except ImportError:
+ HAS_DJANGO = False
+ return
- class ProbesDataModel(models.Model,
+ class ProbesDataModel(models.Model, # pylint: disable=W0621,W0612
Bcfg2.Server.Plugin.PluginDatabaseModel):
""" The database model for storing probe data """
hostname = models.CharField(max_length=255)
@@ -24,13 +42,12 @@ try:
timestamp = models.DateTimeField(auto_now=True)
data = models.TextField(null=True)
- class ProbesGroupsModel(models.Model,
+ class ProbesGroupsModel(models.Model, # pylint: disable=W0621,W0612
Bcfg2.Server.Plugin.PluginDatabaseModel):
""" The database model for storing probe groups """
hostname = models.CharField(max_length=255)
group = models.CharField(max_length=255)
-except ImportError:
- HAS_DJANGO = False
+
try:
import json
@@ -51,6 +68,226 @@ except ImportError:
HAS_YAML = False
+class ProbeStore(Debuggable):
+ """ Caching abstraction layer between persistent probe data
+ storage and the Probes plugin."""
+
+ def __init__(self, core, datadir): # pylint: disable=W0613
+ Debuggable.__init__(self)
+ self._groupcache = Bcfg2.Server.Cache.Cache("Probes", "probegroups")
+ self._datacache = Bcfg2.Server.Cache.Cache("Probes", "probedata")
+
+ def get_groups(self, hostname):
+ """ Get the list of groups for the given host """
+ if hostname not in self._groupcache:
+ self._load_groups(hostname)
+ return self._groupcache.get(hostname, [])
+
+ def set_groups(self, hostname, groups):
+ """ Set the list of groups for the given host """
+ raise NotImplementedError
+
+ def get_data(self, hostname):
+ """ Get a dict of probe data for the given host """
+ if hostname not in self._datacache:
+ self._load_data(hostname)
+ return self._datacache.get(hostname, dict())
+
+ def set_data(self, hostname, data):
+ """ Set probe data for the given host """
+ raise NotImplementedError
+
+ def _load_groups(self, hostname):
+ """ When probe groups are not found in the cache, this
+ function is called to load them from the backend (XML or
+ database). """
+ raise NotImplementedError
+
+ def _load_data(self, hostname):
+ """ When probe groups are not found in the cache, this
+ function is called to load them from the backend (XML or
+ database). """
+ raise NotImplementedError
+
+ def commit(self):
+ """ Commit the current data in the cache to the persistent
+ backend store. This is not used with the
+ :class:`Bcfg2.Server.Plugins.Probes.DBProbeStore`, because it
+ commits on every change. """
+ pass
+
+
+class DBProbeStore(ProbeStore, Bcfg2.Server.Plugin.DatabaseBacked):
+ """ Caching abstraction layer between the database and the Probes
+ plugin. """
+ create = False
+
+ def __init__(self, core, datadir):
+ Bcfg2.Server.Plugin.DatabaseBacked.__init__(self, core)
+ ProbeStore.__init__(self, core, datadir)
+
+ @property
+ def _use_db(self):
+ return True
+
+ def _load_groups(self, hostname):
+ Bcfg2.Server.Cache.expire("Probes", "probegroups", hostname)
+ groupdata = ProbesGroupsModel.objects.filter(hostname=hostname)
+ self._groupcache[hostname] = list(set(r.group for r in groupdata))
+ Bcfg2.Server.Cache.expire("Metadata", hostname)
+
+ @Bcfg2.Server.Plugin.DatabaseBacked.get_db_lock
+ def set_groups(self, hostname, groups):
+ Bcfg2.Server.Cache.expire("Probes", "probegroups", hostname)
+ olddata = self._groupcache.get(hostname, [])
+ self._groupcache[hostname] = groups
+ for group in groups:
+ try:
+ ProbesGroupsModel.objects.get_or_create(
+ hostname=hostname,
+ group=group)
+ except ProbesGroupsModel.MultipleObjectsReturned:
+ ProbesGroupsModel.objects.filter(hostname=hostname,
+ group=group).delete()
+ ProbesGroupsModel.objects.get_or_create(
+ hostname=hostname,
+ group=group)
+ ProbesGroupsModel.objects.filter(
+ hostname=hostname).exclude(group__in=groups).delete()
+ if olddata != groups:
+ Bcfg2.Server.Cache.expire("Metadata", hostname)
+
+ def _load_data(self, hostname):
+ Bcfg2.Server.Cache.expire("Probes", "probegroups", hostname)
+ Bcfg2.Server.Cache.expire("Probes", "probedata", hostname)
+ self._datacache[hostname] = ClientProbeDataSet()
+ ts_set = False
+ for pdata in ProbesDataModel.objects.filter(hostname=hostname):
+ if not ts_set:
+ self._datacache[hostname].timestamp = \
+ time.mktime(pdata.timestamp.timetuple())
+ ts_set = True
+ self._datacache[hostname][pdata.probe] = ProbeData(pdata.data)
+ Bcfg2.Server.Cache.expire("Metadata", hostname)
+
+ @Bcfg2.Server.Plugin.DatabaseBacked.get_db_lock
+ def set_data(self, hostname, data):
+ Bcfg2.Server.Cache.expire("Probes", "probedata", hostname)
+ self._datacache[hostname] = ClientProbeDataSet()
+ expire_metadata = False
+ for probe, pdata in data.items():
+ self._datacache[hostname][probe] = pdata
+ try:
+ record, created = ProbesDataModel.objects.get_or_create(
+ hostname=hostname,
+ probe=probe)
+ except ProbesDataModel.MultipleObjectsReturned:
+ ProbesDataModel.objects.filter(hostname=hostname,
+ probe=probe).delete()
+ record, created = ProbesDataModel.objects.get_or_create(
+ hostname=hostname,
+ probe=probe)
+ expire_metadata |= created
+ if record.data != pdata:
+ record.data = pdata
+ record.save()
+ expire_metadata = True
+ qset = ProbesDataModel.objects.filter(
+ hostname=hostname).exclude(probe__in=data.keys())
+ if len(qset):
+ qset.delete()
+ expire_metadata = True
+ if expire_metadata:
+ Bcfg2.Server.Cache.expire("Metadata", hostname)
+
+
+class XMLProbeStore(ProbeStore):
+ """ Caching abstraction layer between ``probed.xml`` and the
+ Probes plugin."""
+ def __init__(self, core, datadir):
+ ProbeStore.__init__(self, core, datadir)
+ self._fname = os.path.join(datadir, 'probed.xml')
+ self._load_data()
+
+ def _load_data(self, _=None):
+ """ Load probe data from probed.xml """
+ Bcfg2.Server.Cache.expire("Probes", "probegroups")
+ Bcfg2.Server.Cache.expire("Probes", "probedata")
+ if not os.path.exists(self._fname):
+ self.commit()
+ try:
+ data = lxml.etree.parse(self._fname,
+ parser=Bcfg2.Server.XMLParser).getroot()
+ except (IOError, lxml.etree.XMLSyntaxError):
+ err = sys.exc_info()[1]
+ self.logger.error("Failed to read file probed.xml: %s" % err)
+ return
+ for client in data.getchildren():
+ self._datacache[client.get('name')] = \
+ ClientProbeDataSet(timestamp=client.get("timestamp"))
+ self._groupcache[client.get('name')] = []
+ for pdata in client:
+ if pdata.tag == 'Probe':
+ self._datacache[client.get('name')][pdata.get('name')] = \
+ ProbeData(pdata.get("value"))
+ elif pdata.tag == 'Group':
+ self._groupcache[client.get('name')].append(
+ pdata.get('name'))
+
+ Bcfg2.Server.Cache.expire("Metadata")
+
+ def _load_groups(self, hostname):
+ self._load_data(hostname)
+
+ def commit(self):
+ """ Write received probe data to probed.xml """
+ top = lxml.etree.Element("Probed")
+ for client, probed in sorted(self._datacache.items()):
+ # make a copy of probe data for this client in case it
+ # submits probe data while we're trying to write
+ # probed.xml
+ probedata = copy.copy(probed)
+ ctag = \
+ lxml.etree.SubElement(top, 'Client', name=client,
+ timestamp=str(int(probedata.timestamp)))
+ for probe in sorted(probedata):
+ try:
+ lxml.etree.SubElement(
+ ctag, 'Probe', name=probe,
+ value=self._datacache[client][probe].decode('utf-8'))
+ except AttributeError:
+ lxml.etree.SubElement(
+ ctag, 'Probe', name=probe,
+ value=self._datacache[client][probe])
+ for group in sorted(self._groupcache[client]):
+ lxml.etree.SubElement(ctag, "Group", name=group)
+ try:
+ top.getroottree().write(self._fname,
+ xml_declaration=False,
+ pretty_print='true')
+ except IOError:
+ err = sys.exc_info()[1]
+ self.logger.error("Failed to write %s: %s" % (self._fname, err))
+
+ def set_groups(self, hostname, groups):
+ Bcfg2.Server.Cache.expire("Probes", "probegroups", hostname)
+ olddata = self._groupcache.get(hostname, [])
+ self._groupcache[hostname] = groups
+ if olddata != groups:
+ Bcfg2.Server.Cache.expire("Metadata", hostname)
+
+ def set_data(self, hostname, data):
+ Bcfg2.Server.Cache.expire("Probes", "probedata", hostname)
+ self._datacache[hostname] = ClientProbeDataSet()
+ expire_metadata = False
+ for probe, pdata in data.items():
+ olddata = self._datacache[hostname].get(probe, ProbeData(''))
+ self._datacache[hostname][probe] = pdata
+ expire_metadata |= olddata != data
+ if expire_metadata:
+ Bcfg2.Server.Cache.expire("Metadata", hostname)
+
+
class ClientProbeDataSet(dict):
""" dict of probe => [probe data] that records a timestamp for
each host """
@@ -124,17 +361,16 @@ class ProbeSet(Bcfg2.Server.Plugin.EntrySet):
bangline = re.compile(r'^#!\s*(?P<interpreter>.*)$')
basename_is_regex = True
- def __init__(self, path, fam, encoding, plugin_name):
+ def __init__(self, path, plugin_name):
self.plugin_name = plugin_name
Bcfg2.Server.Plugin.EntrySet.__init__(self, r'[0-9A-Za-z_\-]+', path,
- Bcfg2.Server.Plugin.SpecificData,
- encoding)
- fam.AddMonitor(path, self)
+ Bcfg2.Server.Plugin.SpecificData)
+ Bcfg2.Server.FileMonitor.get_fam().AddMonitor(path, self)
def HandleEvent(self, event):
""" handle events on everything but probed.xml """
if (event.filename != self.path and
- not event.filename.endswith("probed.xml")):
+ not event.filename.endswith("probed.xml")):
return self.handle_event(event)
def get_probe_data(self, metadata):
@@ -161,7 +397,7 @@ class ProbeSet(Bcfg2.Server.Plugin.EntrySet):
probe.set('name', os.path.basename(name))
probe.set('source', self.plugin_name)
if (metadata.version_info and
- metadata.version_info > (1, 3, 1, '', 0)):
+ metadata.version_info > (1, 3, 1, '', 0)):
try:
probe.text = entry.data.decode('utf-8')
except AttributeError:
@@ -169,7 +405,7 @@ class ProbeSet(Bcfg2.Server.Plugin.EntrySet):
else:
try:
probe.text = entry.data
- except: # pylint: disable=W0702
+ except ValueError:
self.logger.error("Client unable to handle unicode "
"probes. Skipping %s" %
probe.get('name'))
@@ -187,235 +423,85 @@ class ProbeSet(Bcfg2.Server.Plugin.EntrySet):
class Probes(Bcfg2.Server.Plugin.Probing,
- Bcfg2.Server.Plugin.Caching,
Bcfg2.Server.Plugin.Connector,
Bcfg2.Server.Plugin.DatabaseBacked):
""" A plugin to gather information from a client machine """
__author__ = 'bcfg-dev@mcs.anl.gov'
- def __init__(self, core, datastore):
+ groupline_re = re.compile(r'^group:\s*(?P<groupname>\S+)\s*')
+
+ options = [
+ Bcfg2.Options.BooleanOption(
+ cf=('probes', 'use_database'), dest="probes_db",
+ help="Use database capabilities of the Probes plugin"),
+ Bcfg2.Options.Option(
+ cf=('probes', 'allowed_groups'), dest="probes_allowed_groups",
+ help="Whitespace-separated list of group name regexps to which "
+ "probes can assign a client",
+ default=[re.compile('.*')],
+ type=Bcfg2.Options.Types.anchored_regex_list)]
+ options_parsed_hook = staticmethod(load_django_models)
+
+ def __init__(self, core):
Bcfg2.Server.Plugin.Probing.__init__(self)
- Bcfg2.Server.Plugin.Caching.__init__(self)
Bcfg2.Server.Plugin.Connector.__init__(self)
- Bcfg2.Server.Plugin.DatabaseBacked.__init__(self, core, datastore)
-
- try:
- self.probes = ProbeSet(self.data, core.fam, core.setup['encoding'],
- self.name)
- except:
- err = sys.exc_info()[1]
- raise Bcfg2.Server.Plugin.PluginInitError(err)
-
- self.allowed_cgroups = core.setup['probe_allowed_groups']
- self.probedata = dict()
- self.cgroups = dict()
- self.load_data()
- __init__.__doc__ = Bcfg2.Server.Plugin.DatabaseBacked.__init__.__doc__
+ Bcfg2.Server.Plugin.DatabaseBacked.__init__(self, core)
- @Bcfg2.Server.Plugin.track_statistics()
- def write_data(self, client):
- """ Write probe data out for use with bcfg2-info """
+ self.probes = ProbeSet(self.data, self.name)
if self._use_db:
- return self._write_data_db(client)
+ self.probestore = DBProbeStore(core, self.data)
else:
- return self._write_data_xml(client)
+ self.probestore = XMLProbeStore(core, self.data)
- def _write_data_xml(self, _):
- """ Write received probe data to probed.xml """
- top = lxml.etree.Element("Probed")
- for client, probed in sorted(self.probedata.items()):
- # make a copy of probe data for this client in case it
- # submits probe data while we're trying to write
- # probed.xml
- probedata = copy.copy(probed)
- ctag = \
- lxml.etree.SubElement(top, 'Client', name=client,
- timestamp=str(int(probedata.timestamp)))
- for probe in sorted(probedata):
- try:
- lxml.etree.SubElement(
- ctag, 'Probe', name=probe,
- value=str(
- self.probedata[client][probe]).decode('utf-8'))
- except AttributeError:
- lxml.etree.SubElement(
- ctag, 'Probe', name=probe,
- value=str(self.probedata[client][probe]))
- for group in sorted(self.cgroups[client]):
- lxml.etree.SubElement(ctag, "Group", name=group)
- try:
- top.getroottree().write(os.path.join(self.data, 'probed.xml'),
- xml_declaration=False,
- pretty_print='true')
- except IOError:
- err = sys.exc_info()[1]
- self.logger.error("Failed to write probed.xml: %s" % err)
+ @track_statistics()
+ def GetProbes(self, metadata):
+ return self.probes.get_probe_data(metadata)
- @Bcfg2.Server.Plugin.DatabaseBacked.get_db_lock
- def _write_data_db(self, client):
- """ Write received probe data to the database """
- for probe, data in self.probedata[client.hostname].items():
- try:
- pdata = ProbesDataModel.objects.get_or_create(
- hostname=client.hostname,
- probe=probe)[0]
- except MultipleObjectsReturned:
- ProbesDataModel.objects.filter(hostname=client.hostname,
- probe=probe).delete()
- ProbesDataModel.objects.get_or_create(
- hostname=client.hostname,
- probe=probe)
- if pdata.data != data:
- pdata.data = data
- pdata.save()
-
- ProbesDataModel.objects.filter(
- hostname=client.hostname).exclude(
- probe__in=self.probedata[client.hostname]).delete()
-
- for group in self.cgroups[client.hostname]:
- try:
- ProbesGroupsModel.objects.get_or_create(
- hostname=client.hostname,
- group=group)
- except MultipleObjectsReturned:
- ProbesGroupsModel.objects.filter(hostname=client.hostname,
- group=group).delete()
- ProbesGroupsModel.objects.get_or_create(
- hostname=client.hostname,
- group=group)
- ProbesGroupsModel.objects.filter(
- hostname=client.hostname).exclude(
- group__in=self.cgroups[client.hostname]).delete()
-
- def expire_cache(self, key=None):
- self.load_data(client=key)
-
- def load_data(self, client=None):
- """ Load probe data from the appropriate backend (probed.xml
- or the database) """
- if self._use_db:
- return self._load_data_db(client=client)
- else:
- # the XML backend doesn't support loading data for single
- # clients, so it reloads all data
- return self._load_data_xml()
-
- def _load_data_xml(self):
- """ Load probe data from probed.xml """
- try:
- data = lxml.etree.parse(os.path.join(self.data, 'probed.xml'),
- parser=Bcfg2.Server.XMLParser).getroot()
- except (IOError, lxml.etree.XMLSyntaxError):
- err = sys.exc_info()[1]
- self.logger.error("Failed to read file probed.xml: %s" % err)
- return
- self.probedata = {}
- self.cgroups = {}
- for client in data.getchildren():
- self.probedata[client.get('name')] = \
- ClientProbeDataSet(timestamp=client.get("timestamp"))
- self.cgroups[client.get('name')] = []
- for pdata in client:
- if pdata.tag == 'Probe':
- self.probedata[client.get('name')][pdata.get('name')] = \
- ProbeData(pdata.get("value"))
- elif pdata.tag == 'Group':
- self.cgroups[client.get('name')].append(pdata.get('name'))
-
- if self.core.metadata_cache_mode in ['cautious', 'aggressive']:
- self.core.expire_caches_by_type(Bcfg2.Server.Plugin.Metadata)
-
- def _load_data_db(self, client=None):
- """ Load probe data from the database """
- if client is None:
- self.probedata = {}
- self.cgroups = {}
- probedata = ProbesDataModel.objects.all()
- groupdata = ProbesGroupsModel.objects.all()
- else:
- self.probedata.pop(client, None)
- self.cgroups.pop(client, None)
- probedata = ProbesDataModel.objects.filter(hostname=client)
- groupdata = ProbesGroupsModel.objects.filter(hostname=client)
-
- for pdata in probedata:
- if pdata.hostname not in self.probedata:
- self.probedata[pdata.hostname] = ClientProbeDataSet(
- timestamp=time.mktime(pdata.timestamp.timetuple()))
- self.probedata[pdata.hostname][pdata.probe] = ProbeData(pdata.data)
- for pgroup in groupdata:
- if pgroup.hostname not in self.cgroups:
- self.cgroups[pgroup.hostname] = []
- self.cgroups[pgroup.hostname].append(pgroup.group)
-
- if self.core.metadata_cache_mode in ['cautious', 'aggressive']:
- self.core.expire_caches_by_type(Bcfg2.Server.Plugin.Metadata,
- key=client)
-
- @Bcfg2.Server.Plugin.track_statistics()
- def GetProbes(self, meta):
- return self.probes.get_probe_data(meta)
- GetProbes.__doc__ = Bcfg2.Server.Plugin.Probing.GetProbes.__doc__
-
- @Bcfg2.Server.Plugin.track_statistics()
def ReceiveData(self, client, datalist):
- if self.core.metadata_cache_mode in ['cautious', 'aggressive']:
- if client.hostname in self.cgroups:
- olddata = copy.copy(self.cgroups[client.hostname])
- else:
- olddata = []
-
- cgroups = []
- cprobedata = ClientProbeDataSet()
+ cgroups = set()
+ cdata = dict()
for data in datalist:
- self.ReceiveDataItem(client, data, cgroups, cprobedata)
- self.cgroups[client.hostname] = cgroups
- self.probedata[client.hostname] = cprobedata
-
- if (self.core.metadata_cache_mode in ['cautious', 'aggressive'] and
- olddata != self.cgroups[client.hostname]):
- self.core.metadata_cache.expire(client.hostname)
- self.write_data(client)
- ReceiveData.__doc__ = Bcfg2.Server.Plugin.Probing.ReceiveData.__doc__
-
- def ReceiveDataItem(self, client, data, cgroups, cprobedata):
- """Receive probe results pertaining to client."""
+ groups, cdata[data.get("name")] = \
+ self.ReceiveDataItem(client, data)
+ cgroups.update(groups)
+ self.probestore.set_groups(client.hostname, list(cgroups))
+ self.probestore.set_data(client.hostname, cdata)
+ self.probestore.commit()
+
+ def ReceiveDataItem(self, client, data):
+ """ Receive probe results pertaining to client. Returns a
+ tuple of (<probe groups>, <probe data>). """
if data.text is None:
self.logger.info("Got null response to probe %s from %s" %
(data.get('name'), client.hostname))
- cprobedata[data.get('name')] = ProbeData('')
- return
+        return [], ProbeData('')
dlines = data.text.split('\n')
self.logger.debug("Processing probe from %s: %s:%s" %
(client.hostname, data.get('name'),
[line.strip() for line in dlines]))
+ groups = []
for line in dlines[:]:
- if line.split(':')[0] == 'group':
- newgroup = line.split(':')[1].strip()
- if newgroup not in cgroups:
- if self._group_allowed(newgroup):
- cgroups.append(newgroup)
- else:
- self.logger.info(
- "Disallowed group assignment %s from %s" %
- (newgroup, client.hostname))
+ match = self.groupline_re.match(line)
+ if match:
+ newgroup = match.group("groupname")
+ if self._group_allowed(newgroup):
+ groups.append(newgroup)
+ else:
+ self.logger.warning(
+ "Disallowed group assignment %s from %s" %
+ (newgroup, client.hostname))
dlines.remove(line)
- dobj = ProbeData("\n".join(dlines))
- cprobedata[data.get('name')] = dobj
+ return (groups, ProbeData("\n".join(dlines)))
+
+ def get_additional_groups(self, metadata):
+ return self.probestore.get_groups(metadata.hostname)
+
+ def get_additional_data(self, metadata):
+ return self.probestore.get_data(metadata.hostname)
def _group_allowed(self, group):
""" Determine if the named group can be set as a probe group
by checking the regexes listed in the [probes] groups_allowed
setting """
- return any(r.match(group) for r in self.allowed_cgroups)
-
- def get_additional_groups(self, meta):
- return self.cgroups.get(meta.hostname, list())
- get_additional_groups.__doc__ = \
- Bcfg2.Server.Plugin.Connector.get_additional_groups.__doc__
-
- def get_additional_data(self, meta):
- return self.probedata.get(meta.hostname, ClientProbeDataSet())
- get_additional_data.__doc__ = \
- Bcfg2.Server.Plugin.Connector.get_additional_data.__doc__
+ return any(r.match(group)
+ for r in Bcfg2.Options.setup.probes_allowed_groups)
diff --git a/src/lib/Bcfg2/Server/Plugins/Properties.py b/src/lib/Bcfg2/Server/Plugins/Properties.py
index bbca01ead..c4dd75e60 100644
--- a/src/lib/Bcfg2/Server/Plugins/Properties.py
+++ b/src/lib/Bcfg2/Server/Plugins/Properties.py
@@ -7,13 +7,9 @@ import sys
import copy
import logging
import lxml.etree
+import Bcfg2.Options
import Bcfg2.Server.Plugin
from Bcfg2.Server.Plugin import PluginExecutionError
-try:
- import Bcfg2.Encryption
- HAS_CRYPTO = True
-except ImportError:
- HAS_CRYPTO = False
try:
import json
@@ -35,8 +31,6 @@ except ImportError:
LOGGER = logging.getLogger(__name__)
-SETUP = None
-
class PropertyFile(object):
""" Base Properties file handler """
@@ -53,12 +47,9 @@ class PropertyFile(object):
""" Write the data in this data structure back to the property
file. This public method performs checking to ensure that
writing is possible and then calls :func:`_write`. """
- if not SETUP.cfp.getboolean("properties", "writes_enabled",
- default=True):
- msg = "Properties files write-back is disabled in the " + \
- "configuration"
- LOGGER.error(msg)
- raise PluginExecutionError(msg)
+ if not Bcfg2.Options.setup.writes_enabled:
+ raise PluginExecutionError("Properties files write-back is "
+ "disabled in the configuration")
try:
self.validate_data()
except PluginExecutionError:
@@ -88,13 +79,12 @@ class PropertyFile(object):
class JSONPropertyFile(Bcfg2.Server.Plugin.FileBacked, PropertyFile):
- """ Handle JSON Properties files. """
+ """Handle JSON Properties files."""
- def __init__(self, name, fam=None):
- Bcfg2.Server.Plugin.FileBacked.__init__(self, name, fam=fam)
+ def __init__(self, name):
+ Bcfg2.Server.Plugin.FileBacked.__init__(self, name)
PropertyFile.__init__(self, name)
self.json = None
- __init__.__doc__ = Bcfg2.Server.Plugin.FileBacked.__init__.__doc__
def Index(self):
try:
@@ -103,21 +93,18 @@ class JSONPropertyFile(Bcfg2.Server.Plugin.FileBacked, PropertyFile):
err = sys.exc_info()[1]
raise PluginExecutionError("Could not load JSON data from %s: %s" %
(self.name, err))
- Index.__doc__ = Bcfg2.Server.Plugin.FileBacked.Index.__doc__
def _write(self):
json.dump(self.json, open(self.name, 'wb'))
return True
- _write.__doc__ = PropertyFile._write.__doc__
def validate_data(self):
try:
json.dumps(self.json)
- except:
+ except TypeError:
err = sys.exc_info()[1]
raise PluginExecutionError("Data for %s cannot be dumped to JSON: "
"%s" % (self.name, err))
- validate_data.__doc__ = PropertyFile.validate_data.__doc__
def __str__(self):
return str(self.json)
@@ -129,8 +116,8 @@ class JSONPropertyFile(Bcfg2.Server.Plugin.FileBacked, PropertyFile):
class YAMLPropertyFile(Bcfg2.Server.Plugin.FileBacked, PropertyFile):
""" Handle YAML Properties files. """
- def __init__(self, name, fam=None):
- Bcfg2.Server.Plugin.FileBacked.__init__(self, name, fam=fam)
+ def __init__(self, name):
+ Bcfg2.Server.Plugin.FileBacked.__init__(self, name)
PropertyFile.__init__(self, name)
self.yaml = None
__init__.__doc__ = Bcfg2.Server.Plugin.FileBacked.__init__.__doc__
@@ -168,8 +155,8 @@ class YAMLPropertyFile(Bcfg2.Server.Plugin.FileBacked, PropertyFile):
class XMLPropertyFile(Bcfg2.Server.Plugin.StructFile, PropertyFile):
""" Handle XML Properties files. """
- def __init__(self, name, fam=None, should_monitor=False):
- Bcfg2.Server.Plugin.StructFile.__init__(self, name, fam=fam,
+ def __init__(self, name, should_monitor=False):
+ Bcfg2.Server.Plugin.StructFile.__init__(self, name,
should_monitor=should_monitor)
PropertyFile.__init__(self, name)
@@ -202,45 +189,8 @@ class XMLPropertyFile(Bcfg2.Server.Plugin.StructFile, PropertyFile):
else:
return True
- def Index(self):
- Bcfg2.Server.Plugin.StructFile.Index(self)
- if HAS_CRYPTO:
- for el in self.xdata.xpath("//*[@encrypted]"):
- try:
- el.text = self._decrypt(el).encode('ascii',
- 'xmlcharrefreplace')
- except UnicodeDecodeError:
- self.logger.info("Properties: Decrypted %s to gibberish, "
- "skipping" % el.tag)
- except (TypeError, Bcfg2.Encryption.EVPError):
- strict = self.xdata.get(
- "decrypt",
- SETUP.cfp.get(Bcfg2.Encryption.CFG_SECTION, "decrypt",
- default="strict")) == "strict"
- msg = "Properties: Failed to decrypt %s element in %s" % \
- (el.tag, self.name)
- if strict:
- raise PluginExecutionError(msg)
- else:
- self.logger.debug(msg)
-
- def _decrypt(self, element):
- """ Decrypt a single encrypted properties file element """
- if not element.text or not element.text.strip():
- return
- passes = Bcfg2.Encryption.get_passphrases(SETUP)
- try:
- passphrase = passes[element.get("encrypted")]
- return Bcfg2.Encryption.ssl_decrypt(
- element.text, passphrase,
- algorithm=Bcfg2.Encryption.get_algorithm(SETUP))
- except KeyError:
- raise Bcfg2.Encryption.EVPError("No passphrase named '%s'" %
- element.get("encrypted"))
- raise Bcfg2.Encryption.EVPError("Failed to decrypt")
-
def get_additional_data(self, metadata):
- if SETUP.cfp.getboolean("properties", "automatch", default=False):
+ if Bcfg2.Options.setup.automatch:
default_automatch = "true"
else:
default_automatch = "false"
@@ -262,6 +212,13 @@ class Properties(Bcfg2.Server.Plugin.Plugin,
Bcfg2.Server.Plugin.DirectoryBacked):
""" The properties plugin maps property files into client metadata
instances. """
+ options = [
+ Bcfg2.Options.BooleanOption(
+ cf=("properties", "writes_enabled"), default=True,
+ help="Enable or disable Properties write-back"),
+ Bcfg2.Options.BooleanOption(
+ cf=("properties", "automatch"),
+ help="Enable Properties automatch")]
#: Extensions that are understood by Properties.
extensions = ["xml"]
@@ -280,12 +237,10 @@ class Properties(Bcfg2.Server.Plugin.Plugin,
#: Ignore XML schema (``.xsd``) files
ignore = re.compile(r'.*\.xsd$')
- def __init__(self, core, datastore):
- global SETUP # pylint: disable=W0603
- Bcfg2.Server.Plugin.Plugin.__init__(self, core, datastore)
+ def __init__(self, core):
+ Bcfg2.Server.Plugin.Plugin.__init__(self, core)
Bcfg2.Server.Plugin.Connector.__init__(self)
- Bcfg2.Server.Plugin.DirectoryBacked.__init__(self, self.data, core.fam)
- SETUP = core.setup
+ Bcfg2.Server.Plugin.DirectoryBacked.__init__(self, self.data)
#: Instead of creating children of this object with a static
#: object, we use :func:`property_dispatcher` to create a
@@ -293,23 +248,21 @@ class Properties(Bcfg2.Server.Plugin.Plugin,
self.__child__ = self.property_dispatcher
__init__.__doc__ = Bcfg2.Server.Plugin.Plugin.__init__.__doc__
- def property_dispatcher(self, fname, fam):
+ def property_dispatcher(self, fname):
""" Dispatch an event on a Properties file to the
appropriate object.
:param fname: The name of the file that received the event
:type fname: string
- :param fam: The file monitor the event was received by
- :type fam: Bcfg2.Server.FileMonitor.FileMonitor
:returns: An object of the appropriate subclass of
:class:`PropertyFile`
"""
if fname.endswith(".xml"):
- return XMLPropertyFile(fname, fam)
+ return XMLPropertyFile(fname)
elif HAS_JSON and fname.endswith(".json"):
- return JSONPropertyFile(fname, fam)
+ return JSONPropertyFile(fname)
elif HAS_YAML and (fname.endswith(".yaml") or fname.endswith(".yml")):
- return YAMLPropertyFile(fname, fam)
+ return YAMLPropertyFile(fname)
else:
raise Bcfg2.Server.Plugin.PluginExecutionError(
"Properties: Unknown extension %s" % fname)
diff --git a/src/lib/Bcfg2/Server/Plugins/PuppetENC.py b/src/lib/Bcfg2/Server/Plugins/PuppetENC.py
index 072f3f7e7..59fbe6f03 100644
--- a/src/lib/Bcfg2/Server/Plugins/PuppetENC.py
+++ b/src/lib/Bcfg2/Server/Plugins/PuppetENC.py
@@ -4,7 +4,7 @@ import os
import sys
import Bcfg2.Server
import Bcfg2.Server.Plugin
-from subprocess import Popen, PIPE
+from Bcfg2.Utils import Executor
try:
from syck import load as yaml_load, error as yaml_error
@@ -28,16 +28,15 @@ class PuppetENC(Bcfg2.Server.Plugin.Plugin,
Bcfg2.Server.Plugin.DirectoryBacked):
""" A plugin to run Puppet external node classifiers
(http://docs.puppetlabs.com/guides/external_nodes.html) """
- experimental = True
__child__ = PuppetENCFile
- def __init__(self, core, datastore):
- Bcfg2.Server.Plugin.Plugin.__init__(self, core, datastore)
+ def __init__(self, core):
+ Bcfg2.Server.Plugin.Plugin.__init__(self, core)
Bcfg2.Server.Plugin.Connector.__init__(self)
Bcfg2.Server.Plugin.ClientRunHooks.__init__(self)
- Bcfg2.Server.Plugin.DirectoryBacked.__init__(self, self.data,
- self.core.fam)
+ Bcfg2.Server.Plugin.DirectoryBacked.__init__(self, self.data)
self.cache = dict()
+ self.cmd = Executor()
def _run_encs(self, metadata):
""" Run all Puppet ENCs """
@@ -46,20 +45,17 @@ class PuppetENC(Bcfg2.Server.Plugin.Plugin,
epath = os.path.join(self.data, enc)
self.debug_log("PuppetENC: Running ENC %s for %s" %
(enc, metadata.hostname))
- proc = Popen([epath, metadata.hostname], stdin=PIPE, stdout=PIPE,
- stderr=PIPE)
- (out, err) = proc.communicate()
- rv = proc.wait()
- if rv != 0:
- msg = "PuppetENC: Error running ENC %s for %s (%s): %s" % \
- (enc, metadata.hostname, rv, err)
+ result = self.cmd.run([epath, metadata.hostname])
+ if not result.success:
+ msg = "PuppetENC: Error running ENC %s for %s: %s" % \
+ (enc, metadata.hostname, result.error)
self.logger.error(msg)
raise Bcfg2.Server.Plugin.PluginExecutionError(msg)
- if err:
- self.debug_log("ENC Error: %s" % err)
+ if result.stderr:
+ self.debug_log("ENC Error: %s" % result.stderr)
try:
- yaml = yaml_load(out)
+ yaml = yaml_load(result.stdout)
self.debug_log("Loaded data from %s for %s: %s" %
(enc, metadata.hostname, yaml))
except yaml_error:
@@ -69,13 +65,7 @@ class PuppetENC(Bcfg2.Server.Plugin.Plugin,
self.logger.error(msg)
raise Bcfg2.Server.Plugin.PluginExecutionError(msg)
- groups = dict()
- if "classes" in yaml:
- # stock Puppet ENC output format
- groups = yaml['classes']
- elif "groups" in yaml:
- # more Bcfg2-ish output format
- groups = yaml['groups']
+ groups = yaml.get("classes", yaml.get("groups", dict()))
if groups:
if isinstance(groups, list):
self.debug_log("ENC %s adding groups to %s: %s" %
diff --git a/src/lib/Bcfg2/Server/Plugins/Reporting.py b/src/lib/Bcfg2/Server/Plugins/Reporting.py
index fa11d9250..5c73546b4 100644
--- a/src/lib/Bcfg2/Server/Plugins/Reporting.py
+++ b/src/lib/Bcfg2/Server/Plugins/Reporting.py
@@ -3,13 +3,11 @@
import sys
import time
import platform
-import traceback
import lxml.etree
-from Bcfg2.Reporting.Transport import load_transport_from_config, \
- TransportError
-from Bcfg2.Options import REPORTING_COMMON_OPTIONS
+import Bcfg2.Options
+from Bcfg2.Reporting.Transport.base import TransportError
from Bcfg2.Server.Plugin import Statistics, PullSource, Threaded, \
- Debuggable, PluginInitError, PluginExecutionError
+ PluginInitError, PluginExecutionError
# required for reporting
try:
@@ -33,25 +31,23 @@ def _rpc_call(method):
# pylint: disable=W0223
-class Reporting(Statistics, Threaded, PullSource, Debuggable):
+class Reporting(Statistics, Threaded, PullSource):
""" Unified statistics and reporting plugin """
- __rmi__ = Debuggable.__rmi__ + ['Ping', 'GetExtra', 'GetCurrentEntry']
+ __rmi__ = Statistics.__rmi__ + ['Ping', 'GetExtra', 'GetCurrentEntry']
+
+ options = [Bcfg2.Options.Common.reporting_transport]
CLIENT_METADATA_FIELDS = ('profile', 'bundles', 'aliases', 'addresses',
'groups', 'categories', 'uuid', 'version')
- def __init__(self, core, datastore):
- Statistics.__init__(self, core, datastore)
+ def __init__(self, core):
+ Statistics.__init__(self, core)
PullSource.__init__(self)
Threaded.__init__(self)
- Debuggable.__init__(self)
self.whoami = platform.node()
self.transport = None
- core.setup.update(REPORTING_COMMON_OPTIONS)
- core.setup.reparse()
-
if not HAS_SOUTH:
msg = "Django south is required for Reporting"
self.logger.error(msg)
@@ -59,20 +55,19 @@ class Reporting(Statistics, Threaded, PullSource, Debuggable):
# This must be loaded here for bcfg2-admin
try:
- self.transport = load_transport_from_config(self.core.setup)
+ self.transport = Bcfg2.Options.setup.reporting_transport()
except TransportError:
- msg = "%s: Failed to load transport: %s" % \
- (self.name, traceback.format_exc().splitlines()[-1])
- self.logger.error(msg)
- raise PluginInitError(msg)
+ raise PluginInitError("%s: Failed to instantiate transport: %s" %
+ (self.name, sys.exc_info()[1]))
if self.debug_flag:
self.transport.set_debug(self.debug_flag)
def start_threads(self):
+ """Nothing to do here"""
pass
def set_debug(self, debug):
- rv = Debuggable.set_debug(self, debug)
+ rv = Statistics.set_debug(self, debug)
if self.transport is not None:
self.transport.set_debug(debug)
return rv
@@ -106,11 +101,11 @@ class Reporting(Statistics, Threaded, PullSource, Debuggable):
except TransportError:
continue
except:
- self.logger.error("%s: Attempt %s: Failed to add statistic %s"
+ self.logger.error("%s: Attempt %s: Failed to add statistic: %s"
% (self.__class__.__name__, i,
- traceback.format_exc().splitlines()[-1]))
- self.logger.error("%s: Retry limit reached for %s" %
- (self.__class__.__name__, client.hostname))
+ sys.exc_info()[1]))
+ raise PluginExecutionError("%s: Retry limit reached for %s" %
+ (self.__class__.__name__, client.hostname))
def shutdown(self):
super(Reporting, self).shutdown()
diff --git a/src/lib/Bcfg2/Server/Plugins/Rules.py b/src/lib/Bcfg2/Server/Plugins/Rules.py
index 21862c5db..a3f682ed6 100644
--- a/src/lib/Bcfg2/Server/Plugins/Rules.py
+++ b/src/lib/Bcfg2/Server/Plugins/Rules.py
@@ -1,6 +1,7 @@
"""This generator provides rule-based entry mappings."""
import re
+import Bcfg2.Options
import Bcfg2.Server.Plugin
@@ -8,45 +9,44 @@ class Rules(Bcfg2.Server.Plugin.PrioDir):
"""This is a generator that handles service assignments."""
__author__ = 'bcfg-dev@mcs.anl.gov'
- def __init__(self, core, datastore):
- Bcfg2.Server.Plugin.PrioDir.__init__(self, core, datastore)
+ options = Bcfg2.Server.Plugin.PrioDir.options + [
+ Bcfg2.Options.BooleanOption(
+ cf=("rules", "regex"), dest="rules_regex",
+ help="Allow regular expressions in Rules")]
+
+ def __init__(self, core):
+ Bcfg2.Server.Plugin.PrioDir.__init__(self, core)
self._regex_cache = dict()
def HandlesEntry(self, entry, metadata):
- if entry.tag in self.Entries:
- return self._matches(entry, metadata,
- self.Entries[entry.tag].keys())
+ for src in self.entries.values():
+ for candidate in src.XMLMatch(metadata).xpath("//%s" % entry.tag):
+ if self._matches(entry, metadata, candidate):
+ return True
return False
- def BindEntry(self, entry, metadata):
- attrs = self.get_attrs(entry, metadata)
- for key, val in list(attrs.items()):
- if key not in entry.attrib:
- entry.attrib[key] = val
+ HandleEntry = Bcfg2.Server.Plugin.PrioDir.BindEntry
- HandleEntry = BindEntry
-
- def _matches(self, entry, metadata, rules):
- if Bcfg2.Server.Plugin.PrioDir._matches(self, entry, metadata, rules):
+ def _matches(self, entry, metadata, candidate):
+ if Bcfg2.Server.Plugin.PrioDir._matches(self, entry, metadata,
+ candidate):
return True
elif (entry.tag == "Path" and
- ((entry.get('name').endswith("/") and
- entry.get('name').rstrip("/") in rules) or
- (not entry.get('name').endswith("/") and
- entry.get('name') + '/' in rules))):
+ entry.get('name').rstrip("/") ==
+ candidate.get("name").rstrip("/")):
# special case for Path tags:
# http://trac.mcs.anl.gov/projects/bcfg2/ticket/967
return True
elif self._regex_enabled:
# attempt regular expression matching
- for rule in rules:
- if rule not in self._regex_cache:
- self._regex_cache[rule] = re.compile("%s$" % rule)
- if self._regex_cache[rule].match(entry.get('name')):
- return True
+ rule = candidate.get("name")
+ if rule not in self._regex_cache:
+ self._regex_cache[rule] = re.compile("%s$" % rule)
+ if self._regex_cache[rule].match(entry.get('name')):
+ return True
return False
@property
def _regex_enabled(self):
""" Return True if rules regexes are enabled, False otherwise """
- return self.core.setup.cfp.getboolean("rules", "regex", default=False)
+ return Bcfg2.Options.setup.rules_regex
diff --git a/src/lib/Bcfg2/Server/Plugins/SEModules.py b/src/lib/Bcfg2/Server/Plugins/SEModules.py
index fa47f9496..248b662f9 100644
--- a/src/lib/Bcfg2/Server/Plugins/SEModules.py
+++ b/src/lib/Bcfg2/Server/Plugins/SEModules.py
@@ -43,9 +43,6 @@ class SEModules(Bcfg2.Server.Plugin.GroupSpool):
#: SEModules manages ``SEModule`` entries
entry_type = 'SEModule'
- #: The SEModules plugin is experimental
- experimental = True
-
def _get_module_filename(self, entry):
""" GroupSpool stores entries as /foo.pp, but we want people
to be able to specify module entries as name='foo' or
diff --git a/src/lib/Bcfg2/Server/Plugins/SSHbase.py b/src/lib/Bcfg2/Server/Plugins/SSHbase.py
index 0152fcf70..7736bd050 100644
--- a/src/lib/Bcfg2/Server/Plugins/SSHbase.py
+++ b/src/lib/Bcfg2/Server/Plugins/SSHbase.py
@@ -5,27 +5,25 @@ import os
import sys
import socket
import shutil
-import logging
import tempfile
-from itertools import chain
-from subprocess import Popen, PIPE
+import lxml.etree
+import Bcfg2.Options
import Bcfg2.Server.Plugin
+from itertools import chain
+from Bcfg2.Utils import Executor
from Bcfg2.Server.Plugin import PluginExecutionError
from Bcfg2.Compat import any, u_str, b64encode # pylint: disable=W0622
-
-LOGGER = logging.getLogger(__name__)
+try:
+ from Bcfg2.Server.Encryption import ssl_encrypt, bruteforce_decrypt, \
+ EVPError
+ HAS_CRYPTO = True
+except ImportError:
+ HAS_CRYPTO = False
class KeyData(Bcfg2.Server.Plugin.SpecificData):
""" class to handle key data for HostKeyEntrySet """
- def __init__(self, name, specific, encoding):
- Bcfg2.Server.Plugin.SpecificData.__init__(self,
- name,
- specific,
- encoding)
- self.encoding = encoding
-
def __lt__(self, other):
return self.name < other.name
@@ -42,49 +40,62 @@ class KeyData(Bcfg2.Server.Plugin.SpecificData):
entry.text = b64encode(self.data)
else:
try:
- entry.text = u_str(self.data, self.encoding)
+ entry.text = u_str(self.data, Bcfg2.Options.setup.encoding)
except UnicodeDecodeError:
msg = "Failed to decode %s: %s" % (entry.get('name'),
sys.exc_info()[1])
- LOGGER.error(msg)
- LOGGER.error("Please verify you are using the proper encoding")
+ self.logger.error(msg)
+ self.logger.error("Please verify you are using the proper "
+ "encoding")
raise Bcfg2.Server.Plugin.PluginExecutionError(msg)
except ValueError:
msg = "Error in specification for %s: %s" % (entry.get('name'),
sys.exc_info()[1])
- LOGGER.error(msg)
- LOGGER.error("You need to specify base64 encoding for %s" %
- entry.get('name'))
+ self.logger.error(msg)
+ self.logger.error("You need to specify base64 encoding for %s"
+ % entry.get('name'))
raise Bcfg2.Server.Plugin.PluginExecutionError(msg)
if entry.text in ['', None]:
entry.set('empty', 'true')
+ def handle_event(self, event):
+ Bcfg2.Server.Plugin.SpecificData.handle_event(self, event)
+ if event.filename.endswith(".crypt"):
+ if self.data is None:
+ return
+ # todo: let the user specify a passphrase by name
+ try:
+ self.data = bruteforce_decrypt(self.data)
+ except EVPError:
+ raise PluginExecutionError("Failed to decrypt %s" % self.name)
+
class HostKeyEntrySet(Bcfg2.Server.Plugin.EntrySet):
""" EntrySet to handle all kinds of host keys """
def __init__(self, basename, path):
- if basename.startswith("ssh_host_key"):
- encoding = "base64"
- else:
- encoding = None
- Bcfg2.Server.Plugin.EntrySet.__init__(self, basename, path, KeyData,
- encoding)
+ Bcfg2.Server.Plugin.EntrySet.__init__(self, basename, path, KeyData)
self.metadata = {'owner': 'root',
'group': 'root',
'type': 'file'}
- if encoding is not None:
- self.metadata['encoding'] = encoding
+ if basename.startswith("ssh_host_key"):
+ self.metadata['encoding'] = "base64"
if basename.endswith('.pub'):
self.metadata['mode'] = '0644'
else:
self.metadata['mode'] = '0600'
+ def specificity_from_filename(self, fname, specific=None):
+ if fname.endswith(".crypt"):
+ fname = fname[0:-6]
+ return Bcfg2.Server.Plugin.EntrySet.specificity_from_filename(
+ self, fname, specific=specific)
+
class KnownHostsEntrySet(Bcfg2.Server.Plugin.EntrySet):
""" EntrySet to handle the ssh_known_hosts file """
def __init__(self, path):
Bcfg2.Server.Plugin.EntrySet.__init__(self, "ssh_known_hosts", path,
- KeyData, None)
+ KeyData)
self.metadata = {'owner': 'root',
'group': 'root',
'type': 'file',
@@ -92,7 +103,6 @@ class KnownHostsEntrySet(Bcfg2.Server.Plugin.EntrySet):
class SSHbase(Bcfg2.Server.Plugin.Plugin,
- Bcfg2.Server.Plugin.Caching,
Bcfg2.Server.Plugin.Connector,
Bcfg2.Server.Plugin.Generator,
Bcfg2.Server.Plugin.PullTarget):
@@ -125,9 +135,13 @@ class SSHbase(Bcfg2.Server.Plugin.Plugin,
"ssh_host_rsa_key.pub",
"ssh_host_key.pub"]
- def __init__(self, core, datastore):
- Bcfg2.Server.Plugin.Plugin.__init__(self, core, datastore)
- Bcfg2.Server.Plugin.Caching.__init__(self)
+ options = [
+ Bcfg2.Options.Option(
+ cf=("sshbase", "passphrase"), dest="sshbase_passphrase",
+ help="Passphrase used to encrypt generated private SSH host keys")]
+
+ def __init__(self, core):
+ Bcfg2.Server.Plugin.Plugin.__init__(self, core)
Bcfg2.Server.Plugin.Connector.__init__(self)
Bcfg2.Server.Plugin.Generator.__init__(self)
Bcfg2.Server.Plugin.PullTarget.__init__(self)
@@ -139,7 +153,8 @@ class SSHbase(Bcfg2.Server.Plugin.Plugin,
# do so once
self.badnames = dict()
- core.fam.AddMonitor(self.data, self)
+ self.fam = Bcfg2.Server.FileMonitor.get_fam()
+ self.fam.AddMonitor(self.data, self)
self.static = dict()
self.entries = dict()
@@ -152,9 +167,15 @@ class SSHbase(Bcfg2.Server.Plugin.Plugin,
self.entries["/etc/ssh/" + keypattern] = \
HostKeyEntrySet(keypattern, self.data)
self.Entries['Path']["/etc/ssh/" + keypattern] = self.build_hk
+ self.cmd = Executor()
- def expire_cache(self, key=None):
- self.__skn = False
+ @property
+ def passphrase(self):
+ """ The passphrase used to encrypt private keys """
+ if HAS_CRYPTO and Bcfg2.Options.setup.sshbase_passphrase:
+ return Bcfg2.Options.setup.passphrases[
+ Bcfg2.Options.setup.sshbase_passphrase]
+ return None
def get_skn(self):
"""Build memory cache of the ssh known hosts file."""
@@ -180,20 +201,19 @@ class SSHbase(Bcfg2.Server.Plugin.Plugin,
newnames.add(name.split('.')[0])
try:
newips.update(self.get_ipcache_entry(name)[0])
- except: # pylint: disable=W0702
+ except PluginExecutionError:
continue
names[cmeta.hostname].update(newnames)
names[cmeta.hostname].update(cmeta.addresses)
names[cmeta.hostname].update(newips)
# TODO: Only perform reverse lookups on IPs if an
# option is set.
- if True:
- for ip in newips:
- try:
- names[cmeta.hostname].update(
- self.get_namecache_entry(ip))
- except: # pylint: disable=W0702
- continue
+ for ip in newips:
+ try:
+ names[cmeta.hostname].update(
+ self.get_namecache_entry(ip))
+ except socket.herror:
+ continue
names[cmeta.hostname] = sorted(names[cmeta.hostname])
pubkeys = [pubk for pubk in list(self.entries.keys())
@@ -254,7 +274,11 @@ class SSHbase(Bcfg2.Server.Plugin.Plugin,
return
for entry in list(self.entries.values()):
- if entry.specific.match(event.filename):
+ if event.filename.endswith(".crypt"):
+ fname = event.filename[0:-6]
+ else:
+ fname = event.filename
+ if entry.specific.match(fname):
entry.handle_event(event)
if any(event.filename.startswith(kp)
for kp in self.keypatterns
@@ -264,7 +288,7 @@ class SSHbase(Bcfg2.Server.Plugin.Plugin,
self.skn = False
return
- if event.filename in ['info', 'info.xml', ':info']:
+ if event.filename == 'info.xml':
for entry in list(self.entries.values()):
entry.handle_event(event)
return
@@ -291,7 +315,8 @@ class SSHbase(Bcfg2.Server.Plugin.Plugin,
if self.ipcache[client]:
return self.ipcache[client]
else:
- raise socket.gaierror
+ raise PluginExecutionError("No cached IP address for %s" %
+ client)
else:
# need to add entry
try:
@@ -300,14 +325,17 @@ class SSHbase(Bcfg2.Server.Plugin.Plugin,
self.ipcache[client] = (ipaddr, client)
return (ipaddr, client)
except socket.gaierror:
- ipaddr = Popen(["getent", "hosts", client],
- stdout=PIPE).stdout.read().strip().split()
- if ipaddr:
- self.ipcache[client] = (ipaddr, client)
- return (ipaddr, client)
+ result = self.cmd.run(["getent", "hosts", client])
+ if result.success:
+ ipaddr = result.stdout.strip().split()
+ if ipaddr:
+ self.ipcache[client] = (ipaddr, client)
+ return (ipaddr, client)
self.ipcache[client] = False
- self.logger.error("Failed to find IP address for %s" % client)
- raise socket.gaierror
+ msg = "Failed to find IP address for %s: %s" % (client,
+ result.error)
+ self.logger.error(msg)
+ raise PluginExecutionError(msg)
def get_namecache_entry(self, cip):
"""Build a cache of name lookups from client IP addresses."""
@@ -316,7 +344,7 @@ class SSHbase(Bcfg2.Server.Plugin.Plugin,
if self.namecache[cip]:
return self.namecache[cip]
else:
- raise socket.gaierror
+ raise socket.herror
else:
# add an entry that has not been cached
try:
@@ -327,7 +355,7 @@ class SSHbase(Bcfg2.Server.Plugin.Plugin,
self.namecache[cip] = []
self.namecache[cip].extend(rvlookup[1])
return self.namecache[cip]
- except socket.gaierror:
+ except socket.herror:
self.namecache[cip] = False
self.logger.error("Failed to find any names associated with "
"IP address %s" % cip)
@@ -377,13 +405,15 @@ class SSHbase(Bcfg2.Server.Plugin.Plugin,
msg = "%s still not registered" % filename
self.logger.error(msg)
raise Bcfg2.Server.Plugin.PluginExecutionError(msg)
- self.core.fam.handle_events_in_interval(1)
+ self.fam.handle_events_in_interval(1)
tries += 1
try:
self.entries[entry.get('name')].bind_entry(entry, metadata)
is_bound = True
except Bcfg2.Server.Plugin.PluginExecutionError:
- pass
+            print("Failed to bind %s: %s" % (
+                lxml.etree.tostring(entry),
+                sys.exc_info()[1]))
def GenerateHostKeyPair(self, client, filename):
"""Generate new host key pair for client."""
@@ -406,19 +436,34 @@ class SSHbase(Bcfg2.Server.Plugin.Plugin,
cmd = ["ssh-keygen", "-q", "-f", temploc, "-N", "",
"-t", keytype, "-C", "root@%s" % client]
self.debug_log("SSHbase: Running: %s" % " ".join(cmd))
- proc = Popen(cmd, stdout=PIPE, stdin=PIPE)
- err = proc.communicate()[1]
- if proc.wait():
+ result = self.cmd.run(cmd)
+ if not result.success:
raise PluginExecutionError("SSHbase: Error running ssh-keygen: %s"
- % err)
+ % result.error)
+
+ if self.passphrase:
+ self.debug_log("SSHbase: Encrypting private key for %s" % fileloc)
+ try:
+ data = ssl_encrypt(open(temploc).read(), self.passphrase)
+ except IOError:
+ raise PluginExecutionError("Unable to read temporary SSH key: "
+ "%s" % sys.exc_info()[1])
+ except EVPError:
+ raise PluginExecutionError("Unable to encrypt SSH key: %s" %
+ sys.exc_info()[1])
+ try:
+ open("%s.crypt" % fileloc, "wb").write(data)
+ except IOError:
+ raise PluginExecutionError("Unable to write encrypted SSH "
+ "key: %s" % sys.exc_info()[1])
try:
- shutil.copy(temploc, fileloc)
+ if not self.passphrase:
+ shutil.copy(temploc, fileloc)
shutil.copy("%s.pub" % temploc, publoc)
except IOError:
- err = sys.exc_info()[1]
- raise PluginExecutionError("Temporary SSH keys not found: %s" %
- err)
+ raise PluginExecutionError("Unable to copy temporary SSH key: %s" %
+ sys.exc_info()[1])
try:
os.unlink(temploc)
diff --git a/src/lib/Bcfg2/Server/Plugins/SSLCA.py b/src/lib/Bcfg2/Server/Plugins/SSLCA.py
deleted file mode 100644
index a33b2f448..000000000
--- a/src/lib/Bcfg2/Server/Plugins/SSLCA.py
+++ /dev/null
@@ -1,372 +0,0 @@
-""" The SSLCA generator handles the creation and management of ssl
-certificates and their keys. """
-
-import os
-import sys
-import logging
-import tempfile
-import lxml.etree
-from subprocess import Popen, PIPE, STDOUT
-import Bcfg2.Options
-import Bcfg2.Server.Plugin
-from Bcfg2.Compat import ConfigParser
-from Bcfg2.Server.Plugin import PluginExecutionError
-
-LOGGER = logging.getLogger(__name__)
-
-
-class SSLCAXMLSpec(Bcfg2.Server.Plugin.StructFile):
- """ Base class to handle key.xml and cert.xml """
- attrs = dict()
- tag = None
-
- def get_spec(self, metadata):
- """ Get a specification for the type of object described by
- this SSLCA XML file for the given client metadata object """
- entries = [e for e in self.Match(metadata) if e.tag == self.tag]
- if len(entries) == 0:
- raise PluginExecutionError("No matching %s entry found for %s "
- "in %s" % (self.tag,
- metadata.hostname,
- self.name))
- elif len(entries) > 1:
- LOGGER.warning("More than one matching %s entry found for %s in "
- "%s; using first match" % (self.tag,
- metadata.hostname,
- self.name))
- rv = dict()
- for attr, default in self.attrs.items():
- val = entries[0].get(attr.lower(), default)
- if default in ['true', 'false']:
- rv[attr] = val == 'true'
- else:
- rv[attr] = val
- return rv
-
-
-class SSLCAKeySpec(SSLCAXMLSpec):
- """ Handle key.xml files """
- attrs = dict(bits='2048', type='rsa')
- tag = 'Key'
-
-
-class SSLCACertSpec(SSLCAXMLSpec):
- """ Handle cert.xml files """
- attrs = dict(ca='default',
- format='pem',
- key=None,
- days='365',
- C=None,
- L=None,
- ST=None,
- OU=None,
- O=None,
- emailAddress=None,
- append_chain='false')
- tag = 'Cert'
-
- def get_spec(self, metadata):
- rv = SSLCAXMLSpec.get_spec(self, metadata)
- rv['subjectaltname'] = [e.text for e in self.Match(metadata)
- if e.tag == "subjectAltName"]
- return rv
-
-
-class SSLCADataFile(Bcfg2.Server.Plugin.SpecificData):
- """ Handle key and cert files """
- def bind_entry(self, entry, _):
- """ Bind the data in the file to the given abstract entry """
- entry.text = self.data
- entry.set("type", "file")
- return entry
-
-
-class SSLCAEntrySet(Bcfg2.Server.Plugin.EntrySet):
- """ Entry set to handle SSLCA entries and XML files """
- def __init__(self, _, path, entry_type, encoding, parent=None):
- Bcfg2.Server.Plugin.EntrySet.__init__(self, os.path.basename(path),
- path, entry_type, encoding)
- self.parent = parent
- self.key = None
- self.cert = None
-
- def handle_event(self, event):
- action = event.code2str()
- fpath = os.path.join(self.path, event.filename)
-
- if event.filename == 'key.xml':
- if action in ['exists', 'created', 'changed']:
- self.key = SSLCAKeySpec(fpath)
- self.key.HandleEvent(event)
- elif event.filename == 'cert.xml':
- if action in ['exists', 'created', 'changed']:
- self.cert = SSLCACertSpec(fpath)
- self.cert.HandleEvent(event)
- else:
- Bcfg2.Server.Plugin.EntrySet.handle_event(self, event)
-
- def build_key(self, entry, metadata):
- """
- either grabs a prexisting key hostfile, or triggers the generation
- of a new key if one doesn't exist.
- """
- # TODO: verify key fits the specs
- filename = "%s.H_%s" % (os.path.basename(entry.get('name')),
- metadata.hostname)
- self.logger.info("SSLCA: Generating new key %s" % filename)
- key_spec = self.key.get_spec(metadata)
- ktype = key_spec['type']
- bits = key_spec['bits']
- if ktype == 'rsa':
- cmd = ["openssl", "genrsa", bits]
- elif ktype == 'dsa':
- cmd = ["openssl", "dsaparam", "-noout", "-genkey", bits]
- self.debug_log("SSLCA: Generating new key: %s" % " ".join(cmd))
- proc = Popen(cmd, stdout=PIPE, stderr=PIPE)
- key, err = proc.communicate()
- if proc.wait():
- raise PluginExecutionError("SSLCA: Failed to generate key %s for "
- "%s: %s" % (entry.get("name"),
- metadata.hostname, err))
- open(os.path.join(self.path, filename), 'w').write(key)
- return key
-
- def build_cert(self, entry, metadata, keyfile):
- """ generate a new cert """
- filename = "%s.H_%s" % (os.path.basename(entry.get('name')),
- metadata.hostname)
- self.logger.info("SSLCA: Generating new cert %s" % filename)
- cert_spec = self.cert.get_spec(metadata)
- ca = self.parent.get_ca(cert_spec['ca'])
- req_config = None
- req = None
- try:
- req_config = self.build_req_config(metadata)
- req = self.build_request(keyfile, req_config, metadata)
- days = cert_spec['days']
- cmd = ["openssl", "ca", "-config", ca['config'], "-in", req,
- "-days", days, "-batch"]
- passphrase = ca.get('passphrase')
- if passphrase:
- cmd.extend(["-passin", "pass:%s" % passphrase])
-
- def _scrub_pass(arg):
- """ helper to scrub the passphrase from the
- argument list for debugging. """
- if arg.startswith("pass:"):
- return "pass:******"
- else:
- return arg
-
- self.debug_log("SSLCA: Generating new certificate: %s" %
- " ".join(_scrub_pass(a) for a in cmd))
- proc = Popen(cmd, stdin=PIPE, stdout=PIPE, stderr=PIPE)
- (cert, err) = proc.communicate()
- if proc.wait():
- # pylint: disable=E1103
- raise PluginExecutionError("SSLCA: Failed to generate cert: %s"
- % err.splitlines()[-1])
- # pylint: enable=E1103
- finally:
- try:
- if req_config and os.path.exists(req_config):
- os.unlink(req_config)
- if req and os.path.exists(req):
- os.unlink(req)
- except OSError:
- self.logger.error("SSLCA: Failed to unlink temporary files: %s"
- % sys.exc_info()[1])
- if cert_spec['append_chain'] and 'chaincert' in ca:
- cert += open(ca['chaincert']).read()
-
- open(os.path.join(self.path, filename), 'w').write(cert)
- return cert
-
- def build_req_config(self, metadata):
- """
- generates a temporary openssl configuration file that is
- used to generate the required certificate request
- """
- # create temp request config file
- fd, fname = tempfile.mkstemp()
- cfp = ConfigParser.ConfigParser({})
- cfp.optionxform = str
- defaults = {
- 'req': {
- 'default_md': 'sha1',
- 'distinguished_name': 'req_distinguished_name',
- 'req_extensions': 'v3_req',
- 'x509_extensions': 'v3_req',
- 'prompt': 'no'
- },
- 'req_distinguished_name': {},
- 'v3_req': {
- 'subjectAltName': '@alt_names'
- },
- 'alt_names': {}
- }
- for section in list(defaults.keys()):
- cfp.add_section(section)
- for key in defaults[section]:
- cfp.set(section, key, defaults[section][key])
- cert_spec = self.cert.get_spec(metadata)
- altnamenum = 1
- altnames = cert_spec['subjectaltname']
- altnames.extend(list(metadata.aliases))
- altnames.append(metadata.hostname)
- for altname in altnames:
- cfp.set('alt_names', 'DNS.' + str(altnamenum), altname)
- altnamenum += 1
- for item in ['C', 'L', 'ST', 'O', 'OU', 'emailAddress']:
- if cert_spec[item]:
- cfp.set('req_distinguished_name', item, cert_spec[item])
- cfp.set('req_distinguished_name', 'CN', metadata.hostname)
- self.debug_log("SSLCA: Writing temporary request config to %s" % fname)
- try:
- cfp.write(os.fdopen(fd, 'w'))
- except IOError:
- raise PluginExecutionError("SSLCA: Failed to write temporary CSR "
- "config file: %s" % sys.exc_info()[1])
- return fname
-
- def build_request(self, keyfile, req_config, metadata):
- """
- creates the certificate request
- """
- fd, req = tempfile.mkstemp()
- os.close(fd)
- days = self.cert.get_spec(metadata)['days']
- cmd = ["openssl", "req", "-new", "-config", req_config,
- "-days", days, "-key", keyfile, "-text", "-out", req]
- self.debug_log("SSLCA: Generating new CSR: %s" % " ".join(cmd))
- proc = Popen(cmd, stdout=PIPE, stderr=PIPE)
- err = proc.communicate()[1]
- if proc.wait():
- raise PluginExecutionError("SSLCA: Failed to generate CSR: %s" %
- err)
- return req
-
- def verify_cert(self, filename, keyfile, entry, metadata):
- """ Perform certification verification against the CA and
- against the key """
- ca = self.parent.get_ca(self.cert.get_spec(metadata)['ca'])
- do_verify = ca.get('chaincert')
- if do_verify:
- return (self.verify_cert_against_ca(filename, entry, metadata) and
- self.verify_cert_against_key(filename, keyfile))
- return True
-
- def verify_cert_against_ca(self, filename, entry, metadata):
- """
- check that a certificate validates against the ca cert,
- and that it has not expired.
- """
- ca = self.parent.get_ca(self.cert.get_spec(metadata)['ca'])
- chaincert = ca.get('chaincert')
- cert = os.path.join(self.path, filename)
- cmd = ["openssl", "verify"]
- is_root = ca.get('root_ca', "false").lower() == 'true'
- if is_root:
- cmd.append("-CAfile")
- else:
- # verifying based on an intermediate cert
- cmd.extend(["-purpose", "sslserver", "-untrusted"])
- cmd.extend([chaincert, cert])
- self.debug_log("SSLCA: Verifying %s against CA: %s" %
- (entry.get("name"), " ".join(cmd)))
- res = Popen(cmd, stdout=PIPE, stderr=STDOUT).stdout.read()
- if res == cert + ": OK\n":
- self.debug_log("SSLCA: %s verified successfully against CA" %
- entry.get("name"))
- return True
- self.logger.warning("SSLCA: %s failed verification against CA: %s" %
- (entry.get("name"), res))
- return False
-
- def verify_cert_against_key(self, filename, keyfile):
- """
- check that a certificate validates against its private key.
- """
- def _modulus(fname, ftype="x509"):
- """ get the modulus from the given file """
- cmd = ["openssl", ftype, "-noout", "-modulus", "-in", fname]
- self.debug_log("SSLCA: Getting modulus of %s for verification: %s"
- % (fname, " ".join(cmd)))
- proc = Popen(cmd, stdout=PIPE, stderr=PIPE)
- rv, err = proc.communicate()
- if proc.wait():
- self.logger.warning("SSLCA: Failed to get modulus of %s: %s" %
- (fname, err))
- return rv.strip() # pylint: disable=E1103
-
- certfile = os.path.join(self.path, filename)
- cert = _modulus(certfile)
- key = _modulus(keyfile, ftype="rsa")
- if cert == key:
- self.debug_log("SSLCA: %s verified successfully against key %s" %
- (filename, keyfile))
- return True
- self.logger.warning("SSLCA: %s failed verification against key %s" %
- (filename, keyfile))
- return False
-
- def bind_entry(self, entry, metadata):
- if self.key:
- self.bind_info_to_entry(entry, metadata)
- try:
- return self.best_matching(metadata).bind_entry(entry, metadata)
- except PluginExecutionError:
- entry.text = self.build_key(entry, metadata)
- entry.set("type", "file")
- return entry
- elif self.cert:
- key = self.cert.get_spec(metadata)['key']
- cleanup_keyfile = False
- try:
- keyfile = self.parent.entries[key].best_matching(metadata).name
- except PluginExecutionError:
- cleanup_keyfile = True
- # create a temp file with the key in it
- fd, keyfile = tempfile.mkstemp()
- os.chmod(keyfile, 384) # 0600
- el = lxml.etree.Element('Path', name=key)
- self.parent.core.Bind(el, metadata)
- os.fdopen(fd, 'w').write(el.text)
-
- try:
- self.bind_info_to_entry(entry, metadata)
- try:
- best = self.best_matching(metadata)
- if self.verify_cert(best.name, keyfile, entry, metadata):
- return best.bind_entry(entry, metadata)
- except PluginExecutionError:
- pass
- # if we get here, it's because either a) there was no best
- # matching entry; or b) the existing cert did not verify
- entry.text = self.build_cert(entry, metadata, keyfile)
- entry.set("type", "file")
- return entry
- finally:
- if cleanup_keyfile:
- try:
- os.unlink(keyfile)
- except OSError:
- err = sys.exc_info()[1]
- self.logger.error("SSLCA: Failed to unlink temporary "
- "key %s: %s" % (keyfile, err))
-
-
-class SSLCA(Bcfg2.Server.Plugin.GroupSpool):
- """ The SSLCA generator handles the creation and management of ssl
- certificates and their keys. """
- __author__ = 'g.hagger@gmail.com'
- es_child_cls = SSLCADataFile
-
- def es_cls(self, *args):
- """Fake entry set 'class' that sets this as the parent."""
- # python 2.5 doesn't support mixing *magic and keyword arguments
- return SSLCAEntrySet(*args, **dict(parent=self))
-
- def get_ca(self, name):
- """ get a dict describing a CA from the config file """
- return dict(self.core.setup.cfp.items("sslca_%s" % name))
diff --git a/src/lib/Bcfg2/Server/Plugins/Snapshots.py b/src/lib/Bcfg2/Server/Plugins/Snapshots.py
deleted file mode 100644
index cc5946bb2..000000000
--- a/src/lib/Bcfg2/Server/Plugins/Snapshots.py
+++ /dev/null
@@ -1,129 +0,0 @@
-import logging
-import difflib
-import Bcfg2.Server.Plugin
-import Bcfg2.Server.Snapshots
-import Bcfg2.Logger
-from Bcfg2.Server.Snapshots.model import Snapshot
-import sys
-import time
-import threading
-
-# Compatibility import
-from Bcfg2.Compat import Queue, u_str, b64decode
-
-logger = logging.getLogger('Snapshots')
-
-ftypes = ['ConfigFile', 'SymLink', 'Directory']
-datafields = {
- 'Package': ['version'],
- 'Path': ['type'],
- 'Service': ['status'],
- 'ConfigFile': ['owner', 'group', 'mode'],
- 'Directory': ['owner', 'group', 'mode'],
- 'SymLink': ['to'],
- }
-
-
-def build_snap_ent(entry):
- basefields = []
- if entry.tag in ['Package', 'Service']:
- basefields += ['type']
- desired = dict([(key, u_str(entry.get(key))) for key in basefields])
- state = dict([(key, u_str(entry.get(key))) for key in basefields])
- desired.update([(key, u_str(entry.get(key))) for key in \
- datafields[entry.tag]])
- if entry.tag == 'ConfigFile' or \
- ((entry.tag == 'Path') and (entry.get('type') == 'file')):
- if entry.text == None:
- desired['contents'] = None
- else:
- if entry.get('encoding', 'ascii') == 'ascii':
- desired['contents'] = u_str(entry.text)
- else:
- desired['contents'] = u_str(b64decode(entry.text))
-
- if 'current_bfile' in entry.attrib:
- state['contents'] = u_str(b64decode(entry.get('current_bfile')))
- elif 'current_bdiff' in entry.attrib:
- diff = b64decode(entry.get('current_bdiff'))
- state['contents'] = u_str( \
- '\n'.join(difflib.restore(diff.split('\n'), 1)))
-
- state.update([(key, u_str(entry.get('current_' + key, entry.get(key)))) \
- for key in datafields[entry.tag]])
- if entry.tag in ['ConfigFile', 'Path'] and entry.get('exists', 'true') == 'false':
- state = None
- return [desired, state]
-
-
-class Snapshots(Bcfg2.Server.Plugin.Statistics):
- name = 'Snapshots'
- deprecated = True
-
- def __init__(self, core, datastore):
- Bcfg2.Server.Plugin.Statistics.__init__(self, core, datastore)
- self.session = Bcfg2.Server.Snapshots.setup_session(core.cfile)
- self.work_queue = Queue()
- self.loader = threading.Thread(target=self.load_snapshot)
-
- def start_threads(self):
- self.loader.start()
-
- def load_snapshot(self):
- while self.running:
- try:
- (metadata, data) = self.work_queue.get(block=True, timeout=5)
- except:
- continue
- self.statistics_from_old_stats(metadata, data)
-
- def process_statistics(self, metadata, data):
- return self.work_queue.put((metadata, data))
-
- def statistics_from_old_stats(self, metadata, xdata):
- # entries are name -> (modified, correct, start, desired, end)
- # not sure we can get all of this from old format stats
- t1 = time.time()
- entries = dict([('Package', dict()),
- ('Service', dict()), ('Path', dict())])
- extra = dict([('Package', dict()), ('Service', dict()),
- ('Path', dict())])
- bad = []
- state = xdata.find('.//Statistics')
- correct = state.get('state') == 'clean'
- revision = u_str(state.get('revision', '-1'))
- for entry in state.find('.//Bad'):
- data = [False, False, u_str(entry.get('name'))] \
- + build_snap_ent(entry)
- if entry.tag in ftypes:
- etag = 'Path'
- else:
- etag = entry.tag
- entries[etag][entry.get('name')] = data
- for entry in state.find('.//Modified'):
- if entry.tag in ftypes:
- etag = 'Path'
- else:
- etag = entry.tag
- if entry.get('name') in entries[etag]:
- data = [True, False, u_str(entry.get('name'))] + \
- build_snap_ent(entry)
- else:
- data = [True, False, u_str(entry.get('name'))] + \
- build_snap_ent(entry)
- for entry in state.find('.//Extra'):
- if entry.tag in datafields:
- data = build_snap_ent(entry)[1]
- ename = u_str(entry.get('name'))
- data['name'] = ename
- extra[entry.tag][ename] = data
- else:
- print("extra", entry.tag, entry.get('name'))
- t2 = time.time()
- snap = Snapshot.from_data(self.session, correct, revision,
- metadata, entries, extra)
- self.session.add(snap)
- self.session.commit()
- t3 = time.time()
- logger.info("Snapshot storage took %fs" % (t3 - t2))
- return True
diff --git a/src/lib/Bcfg2/Server/Plugins/Statistics.py b/src/lib/Bcfg2/Server/Plugins/Statistics.py
deleted file mode 100644
index 7fae445d0..000000000
--- a/src/lib/Bcfg2/Server/Plugins/Statistics.py
+++ /dev/null
@@ -1,160 +0,0 @@
-'''This file manages the statistics collected by the BCFG2 Server'''
-
-import copy
-import difflib
-import logging
-import lxml.etree
-import os
-import sys
-from time import asctime, localtime, time, strptime, mktime
-import threading
-from Bcfg2.Compat import b64decode
-import Bcfg2.Server.Plugin
-
-
-class StatisticsStore(object):
- """Manages the memory and file copy of statistics collected about client runs."""
- __min_write_delay__ = 0
-
- def __init__(self, filename):
- self.filename = filename
- self.element = lxml.etree.Element('Dummy')
- self.dirty = 0
- self.lastwrite = 0
- self.logger = logging.getLogger('Bcfg2.Server.Statistics')
- self.ReadFromFile()
-
- def WriteBack(self, force=0):
- """Write statistics changes back to persistent store."""
- if (self.dirty and (self.lastwrite + self.__min_write_delay__ <= time())) \
- or force:
- try:
- fout = open(self.filename + '.new', 'w')
- except IOError:
- ioerr = sys.exc_info()[1]
- self.logger.error("Failed to open %s for writing: %s" % (self.filename + '.new', ioerr))
- else:
- fout.write(lxml.etree.tostring(self.element,
- xml_declaration=False).decode('UTF-8'))
- fout.close()
- os.rename(self.filename + '.new', self.filename)
- self.dirty = 0
- self.lastwrite = time()
-
- def ReadFromFile(self):
- """Reads current state regarding statistics."""
- try:
- fin = open(self.filename, 'r')
- data = fin.read()
- fin.close()
- self.element = lxml.etree.XML(data)
- self.dirty = 0
- except (IOError, lxml.etree.XMLSyntaxError):
- self.logger.error("Creating new statistics file %s"%(self.filename))
- self.element = lxml.etree.Element('ConfigStatistics')
- self.WriteBack()
- self.dirty = 0
-
- def updateStats(self, xml, client):
- """Updates the statistics of a current node with new data."""
-
- # Current policy:
- # - Keep anything less than 24 hours old
- # - Keep latest clean run for clean nodes
- # - Keep latest clean and dirty run for dirty nodes
- newstat = xml.find('Statistics')
-
- if newstat.get('state') == 'clean':
- node_dirty = 0
- else:
- node_dirty = 1
-
- # Find correct node entry in stats data
- # The following list comprehension should be guarenteed to return at
- # most one result
- nodes = [elem for elem in self.element.findall('Node') \
- if elem.get('name') == client]
- nummatch = len(nodes)
- if nummatch == 0:
- # Create an entry for this node
- node = lxml.etree.SubElement(self.element, 'Node', name=client)
- elif nummatch == 1 and not node_dirty:
- # Delete old instance
- node = nodes[0]
- [node.remove(elem) for elem in node.findall('Statistics') \
- if self.isOlderThan24h(elem.get('time'))]
- elif nummatch == 1 and node_dirty:
- # Delete old dirty statistics entry
- node = nodes[0]
- [node.remove(elem) for elem in node.findall('Statistics') \
- if (elem.get('state') == 'dirty' \
- and self.isOlderThan24h(elem.get('time')))]
- else:
- # Shouldn't be reached
- self.logger.error("Duplicate node entry for %s"%(client))
-
- # Set current time for stats
- newstat.set('time', asctime(localtime()))
-
- # Add statistic
- node.append(copy.copy(newstat))
-
- # Set dirty
- self.dirty = 1
- self.WriteBack(force=1)
-
- def isOlderThan24h(self, testTime):
- """Helper function to determine if <time> string is older than 24 hours."""
- now = time()
- utime = mktime(strptime(testTime))
- secondsPerDay = 60*60*24
-
- return (now-utime) > secondsPerDay
-
-
-class Statistics(Bcfg2.Server.Plugin.ThreadedStatistics,
- Bcfg2.Server.Plugin.PullSource):
- name = 'Statistics'
- deprecated = True
-
- def __init__(self, core, datastore):
- Bcfg2.Server.Plugin.ThreadedStatistics.__init__(self, core, datastore)
- Bcfg2.Server.Plugin.PullSource.__init__(self)
- fpath = "%s/etc/statistics.xml" % datastore
- self.data_file = StatisticsStore(fpath)
-
- def handle_statistic(self, metadata, data):
- self.data_file.updateStats(data, metadata.hostname)
-
- def FindCurrent(self, client):
- rt = self.data_file.element.xpath('//Node[@name="%s"]' % client)[0]
- maxtime = max([strptime(stat.get('time')) for stat \
- in rt.findall('Statistics')])
- return [stat for stat in rt.findall('Statistics') \
- if strptime(stat.get('time')) == maxtime][0]
-
- def GetExtra(self, client):
- return [(entry.tag, entry.get('name')) for entry \
- in self.FindCurrent(client).xpath('.//Extra/*')]
-
- def GetCurrentEntry(self, client, e_type, e_name):
- curr = self.FindCurrent(client)
- entry = curr.xpath('.//Bad/%s[@name="%s"]' % (e_type, e_name))
- if not entry:
- raise Bcfg2.Server.Plugin.PluginExecutionError
- cfentry = entry[-1]
-
- owner = cfentry.get('current_owner', cfentry.get('owner'))
- group = cfentry.get('current_group', cfentry.get('group'))
- mode = cfentry.get('current_mode', cfentry.get('mode'))
- if cfentry.get('sensitive') in ['true', 'True']:
- raise Bcfg2.Server.Plugin.PluginExecutionError
- elif 'current_bfile' in cfentry.attrib:
- contents = b64decode(cfentry.get('current_bfile'))
- elif 'current_bdiff' in cfentry.attrib:
- diff = b64decode(cfentry.get('current_bdiff'))
- contents = '\n'.join(difflib.restore(diff.split('\n'), 1))
- else:
- contents = None
-
- return (owner, group, mode, contents)
diff --git a/src/lib/Bcfg2/Server/Plugins/Svn.py b/src/lib/Bcfg2/Server/Plugins/Svn.py
index 240fd7f89..2ca518e53 100644
--- a/src/lib/Bcfg2/Server/Plugins/Svn.py
+++ b/src/lib/Bcfg2/Server/Plugins/Svn.py
@@ -4,19 +4,39 @@ additional XML-RPC methods for committing data to the repository and
updating the repository. """
import sys
+import Bcfg2.Options
import Bcfg2.Server.Plugin
-from Bcfg2.Compat import ConfigParser
try:
import pysvn
HAS_SVN = True
except ImportError:
- import pipes
- from subprocess import Popen, PIPE
+ from Bcfg2.Utils import Executor
HAS_SVN = False
class Svn(Bcfg2.Server.Plugin.Version):
"""Svn is a version plugin for dealing with Bcfg2 repos."""
+ options = Bcfg2.Server.Plugin.Version.options + [
+ Bcfg2.Options.Option(
+ cf=("svn", "user"), dest="svn_user", help="SVN username"),
+ Bcfg2.Options.Option(
+ cf=("svn", "password"), dest="svn_password", help="SVN password"),
+ Bcfg2.Options.BooleanOption(
+ cf=("svn", "always_trust"), dest="svn_trust_ssl",
+ help="Always trust SSL certs from SVN server")]
+
+ if HAS_SVN:
+ options.append(
+ Bcfg2.Options.Option(
+ cf=("svn", "conflict_resolution"),
+ dest="svn_conflict_resolution",
+ type=lambda v: v.replace("-", "_"),
+ # pylint: disable=E1101
+ choices=dir(pysvn.wc_conflict_choice),
+ default=pysvn.wc_conflict_choice.postpone,
+ # pylint: enable=E1101
+ help="SVN conflict resolution method"))
+
__author__ = 'bcfg-dev@mcs.anl.gov'
__vcs_metadata_path__ = ".svn"
if HAS_SVN:
@@ -24,73 +44,42 @@ class Svn(Bcfg2.Server.Plugin.Version):
else:
__vcs_metadata_path__ = ".svn"
- def __init__(self, core, datastore):
- Bcfg2.Server.Plugin.Version.__init__(self, core, datastore)
+ def __init__(self, core):
+ Bcfg2.Server.Plugin.Version.__init__(self, core)
self.revision = None
self.svn_root = None
+ self.client = None
+ self.cmd = None
if not HAS_SVN:
self.logger.debug("Svn: PySvn not found, using CLI interface to "
"SVN")
- self.client = None
+ self.cmd = Executor()
else:
self.client = pysvn.Client()
- # pylint: disable=E1101
- choice = pysvn.wc_conflict_choice.postpone
- try:
- resolution = self.core.setup.cfp.get(
- "svn",
- "conflict_resolution").replace('-', '_')
- if resolution in ["edit", "launch", "working"]:
- self.logger.warning("Svn: Conflict resolver %s requires "
- "manual intervention, using %s" %
- choice)
- else:
- choice = getattr(pysvn.wc_conflict_choice, resolution)
- except AttributeError:
- self.logger.warning("Svn: Conflict resolver %s does not "
- "exist, using %s" % choice)
- except (ConfigParser.NoSectionError, ConfigParser.NoOptionError):
- self.logger.info("Svn: No conflict resolution method "
- "selected, using %s" % choice)
- # pylint: enable=E1101
self.debug_log("Svn: Conflicts will be resolved with %s" %
- choice)
- self.client.callback_conflict_resolver = \
- self.get_conflict_resolver(choice)
+ Bcfg2.Options.setup.svn_conflict_resolution)
+ self.client.callback_conflict_resolver = self.conflict_resolver
- try:
- if self.core.setup.cfp.get(
- "svn",
- "always_trust").lower() == "true":
- self.client.callback_ssl_server_trust_prompt = \
- self.ssl_server_trust_prompt
- except (ConfigParser.NoSectionError, ConfigParser.NoOptionError):
- self.logger.debug("Svn: Using subversion cache for SSL "
- "certificate trust")
+ if Bcfg2.Options.setup.svn_trust_ssl:
+ self.client.callback_ssl_server_trust_prompt = \
+ self.ssl_server_trust_prompt
- try:
- if (self.core.setup.cfp.get("svn", "user") and
- self.core.setup.cfp.get("svn", "password")):
- self.client.callback_get_login = \
- self.get_login
- except (ConfigParser.NoSectionError, ConfigParser.NoOptionError):
- self.logger.info("Svn: Using subversion cache for "
- "password-based authetication")
+ if (Bcfg2.Options.setup.svn_user and
+ Bcfg2.Options.setup.svn_password):
+ self.client.callback_get_login = self.get_login
self.logger.debug("Svn: Initialized svn plugin with SVN directory %s" %
self.vcs_path)
- # pylint: disable=W0613
- def get_login(self, realm, username, may_save):
+ def get_login(self, realm, username, may_save): # pylint: disable=W0613
""" PySvn callback to get credentials for HTTP basic authentication """
self.logger.debug("Svn: Logging in with username: %s" %
- self.core.setup.cfp.get("svn", "user"))
- return True, \
- self.core.setup.cfp.get("svn", "user"), \
- self.core.setup.cfp.get("svn", "password"), \
- False
- # pylint: enable=W0613
+ Bcfg2.Options.setup.svn_user)
+ return (True,
+ Bcfg2.Options.setup.svn_user,
+ Bcfg2.Options.setup.svn_password,
+ False)
def ssl_server_trust_prompt(self, trust_dict):
""" PySvn callback to always trust SSL certificates from SVN server """
@@ -101,37 +90,35 @@ class Svn(Bcfg2.Server.Plugin.Version):
trust_dict['realm']))
return True, trust_dict['failures'], False
- def get_conflict_resolver(self, choice):
- """ Get a PySvn conflict resolution callback """
- def callback(conflict_description):
- """ PySvn callback function to resolve conflicts """
- self.logger.info("Svn: Resolving conflict for %s with %s" %
- (conflict_description['path'], choice))
- return choice, None, False
-
- return callback
+ def conflict_resolver(self, conflict_description):
+ """ PySvn callback function to resolve conflicts """
+ self.logger.info("Svn: Resolving conflict for %s with %s" %
+ (conflict_description['path'],
+ Bcfg2.Options.setup.svn_conflict_resolution))
+ return Bcfg2.Options.setup.svn_conflict_resolution, None, False
def get_revision(self):
"""Read svn revision information for the Bcfg2 repository."""
msg = None
if HAS_SVN:
try:
- info = self.client.info(self.vcs_root)
+ info = self.client.info(Bcfg2.Options.setup.vcs_root)
self.revision = info.revision
self.svn_root = info.url
return str(self.revision.number)
except pysvn.ClientError: # pylint: disable=E1101
msg = "Svn: Failed to get revision: %s" % sys.exc_info()[1]
else:
- try:
- data = Popen("env LC_ALL=C svn info %s" %
- pipes.quote(self.vcs_root), shell=True,
- stdout=PIPE).communicate()[0].split('\n')
- return [line.split(': ')[1] for line in data
- if line[:9] == 'Revision:'][-1]
- except IndexError:
- msg = "Failed to read svn info"
- self.logger.error('Ran command "svn info %s"' % self.vcs_root)
+ result = self.cmd.run(["env LC_ALL=C", "svn", "info",
+ Bcfg2.Options.setup.vcs_root],
+ shell=True)
+ if result.success:
+ self.revision = [line.split(': ')[1]
+ for line in result.stdout.splitlines()
+ if line.startswith('Revision:')][-1]
+ return self.revision
+ else:
+ msg = "Failed to read svn info: %s" % result.error
self.revision = None
raise Bcfg2.Server.Plugin.PluginExecutionError(msg)
@@ -139,7 +126,8 @@ class Svn(Bcfg2.Server.Plugin.Version):
'''Svn.Update() => True|False\nUpdate svn working copy\n'''
try:
old_revision = self.revision.number
- self.revision = self.client.update(self.vcs_root, recurse=True)[0]
+ self.revision = self.client.update(Bcfg2.Options.setup.vcs_root,
+ recurse=True)[0]
except pysvn.ClientError: # pylint: disable=E1101
err = sys.exc_info()[1]
# try to be smart about the error we got back
@@ -161,7 +149,7 @@ class Svn(Bcfg2.Server.Plugin.Version):
self.logger.debug("repository is current")
else:
self.logger.info("Updated %s from revision %s to %s" %
- (self.vcs_root, old_revision,
+ (Bcfg2.Options.setup.vcs_root, old_revision,
self.revision.number))
return True
@@ -174,10 +162,11 @@ class Svn(Bcfg2.Server.Plugin.Version):
return False
try:
- self.revision = self.client.checkin([self.vcs_root],
+ self.revision = self.client.checkin([Bcfg2.Options.setup.vcs_root],
'Svn: autocommit',
recurse=True)
- self.revision = self.client.update(self.vcs_root, recurse=True)[0]
+ self.revision = self.client.update(Bcfg2.Options.setup.vcs_root,
+ recurse=True)[0]
self.logger.info("Svn: Commited changes. At %s" %
self.revision.number)
return True
diff --git a/src/lib/Bcfg2/Server/Plugins/TCheetah.py b/src/lib/Bcfg2/Server/Plugins/TCheetah.py
deleted file mode 100644
index f2c59ce29..000000000
--- a/src/lib/Bcfg2/Server/Plugins/TCheetah.py
+++ /dev/null
@@ -1,79 +0,0 @@
-'''This module implements a templating generator based on Cheetah'''
-
-import logging
-import sys
-import traceback
-import Bcfg2.Server.Plugin
-
-from Bcfg2.Compat import unicode, b64encode
-
-logger = logging.getLogger('Bcfg2.Plugins.TCheetah')
-
-try:
- import Cheetah.Template
- import Cheetah.Parser
-except:
- logger.error("TCheetah: Failed to import Cheetah. Is it installed?")
- raise
-
-
-class TemplateFile:
- """Template file creates Cheetah template structures for the loaded file."""
-
- def __init__(self, name, specific, encoding):
- self.name = name
- self.specific = specific
- self.encoding = encoding
- self.template = None
- self.searchlist = dict()
-
- def handle_event(self, event):
- """Handle all fs events for this template."""
- if event.code2str() == 'deleted':
- return
- try:
- s = {'useStackFrames': False}
- self.template = Cheetah.Template.Template(open(self.name).read(),
- compilerSettings=s,
- searchList=self.searchlist)
- except Cheetah.Parser.ParseError:
- perror = sys.exc_info()[1]
- logger.error("Cheetah parse error for file %s" % (self.name))
- logger.error(perror.report())
-
- def bind_entry(self, entry, metadata):
- """Build literal file information."""
- self.template.metadata = metadata
- self.searchlist['metadata'] = metadata
- self.template.path = entry.get('realname', entry.get('name'))
- self.searchlist['path'] = entry.get('realname', entry.get('name'))
- self.template.source_path = self.name
- self.searchlist['source_path'] = self.name
-
- if entry.tag == 'Path':
- entry.set('type', 'file')
- try:
- if type(self.template) == unicode:
- entry.text = self.template
- else:
- if entry.get('encoding') == 'base64':
- # take care of case where file needs base64 encoding
- entry.text = b64encode(self.template)
- else:
- entry.text = unicode(str(self.template), self.encoding)
- except:
- (a, b, c) = sys.exc_info()
- msg = traceback.format_exception(a, b, c, limit=2)[-1][:-1]
- logger.error(msg)
- logger.error("TCheetah template error for %s" % self.searchlist['path'])
- del a, b, c
- raise Bcfg2.Server.Plugin.PluginExecutionError
-
-
-class TCheetah(Bcfg2.Server.Plugin.GroupSpool):
- """The TCheetah generator implements a templating mechanism for configuration files."""
- name = 'TCheetah'
- __author__ = 'bcfg-dev@mcs.anl.gov'
- filename_pattern = 'template'
- es_child_cls = TemplateFile
- deprecated = True
diff --git a/src/lib/Bcfg2/Server/Plugins/TGenshi.py b/src/lib/Bcfg2/Server/Plugins/TGenshi.py
deleted file mode 100644
index 809587d91..000000000
--- a/src/lib/Bcfg2/Server/Plugins/TGenshi.py
+++ /dev/null
@@ -1,139 +0,0 @@
-"""This module implements a templating generator based on Genshi."""
-
-import logging
-import sys
-import Bcfg2.Server.Plugin
-
-from Bcfg2.Compat import unicode, b64encode
-
-logger = logging.getLogger('Bcfg2.Plugins.TGenshi')
-
-# try to import genshi stuff
-try:
- import genshi.core
- import genshi.input
- from genshi.template import TemplateLoader, \
- TextTemplate, MarkupTemplate, TemplateError
-except ImportError:
- logger.error("TGenshi: Failed to import Genshi. Is it installed?")
- raise
-try:
- from genshi.template import NewTextTemplate
- have_ntt = True
-except:
- have_ntt = False
-
-def removecomment(stream):
- """A genshi filter that removes comments from the stream."""
- for kind, data, pos in stream:
- if kind is genshi.core.COMMENT:
- continue
- yield kind, data, pos
-
-
-class TemplateFile(object):
- """Template file creates Genshi template structures for the loaded file."""
-
- def __init__(self, name, specific, encoding):
- self.name = name
- self.specific = specific
- self.encoding = encoding
- if self.specific.all:
- matchname = self.name
- elif self.specific.group:
- matchname = self.name[:self.name.find('.G')]
- else:
- matchname = self.name[:self.name.find('.H')]
- if matchname.endswith('.txt'):
- self.template_cls = TextTemplate
- elif matchname.endswith('.newtxt'):
- if not have_ntt:
- logger.error("Genshi NewTextTemplates not supported by this version of Genshi")
- else:
- self.template_cls = NewTextTemplate
- else:
- self.template_cls = MarkupTemplate
- self.HandleEvent = self.handle_event
-
- def handle_event(self, event=None):
- """Handle all fs events for this template."""
- if event and event.code2str() == 'deleted':
- return
- try:
- loader = TemplateLoader()
- try:
- self.template = loader.load(self.name, cls=self.template_cls,
- encoding=self.encoding)
- except LookupError:
- lerror = sys.exc_info()[1]
- logger.error('Genshi lookup error: %s' % lerror)
- except TemplateError:
- terror = sys.exc_info()[1]
- logger.error('Genshi template error: %s' % terror)
- except genshi.input.ParseError:
- perror = sys.exc_info()[1]
- logger.error('Genshi parse error: %s' % perror)
-
- def bind_entry(self, entry, metadata):
- """Build literal file information."""
- fname = entry.get('realname', entry.get('name'))
- if entry.tag == 'Path':
- entry.set('type', 'file')
- try:
- stream = self.template.generate( \
- name=fname, metadata=metadata,
- path=self.name).filter(removecomment)
- if have_ntt:
- ttypes = [TextTemplate, NewTextTemplate]
- else:
- ttypes = [TextTemplate]
- if True in [isinstance(self.template, t) for t in ttypes]:
- try:
- textdata = stream.render('text', strip_whitespace=False)
- except TypeError:
- textdata = stream.render('text')
- if type(textdata) == unicode:
- entry.text = textdata
- else:
- if entry.get('encoding') == 'base64':
- # take care of case where file needs base64 encoding
- entry.text = b64encode(textdata)
- else:
- entry.text = unicode(textdata, self.encoding)
- else:
- try:
- xmldata = stream.render('xml', strip_whitespace=False)
- except TypeError:
- xmldata = stream.render('xml')
- if type(xmldata) == unicode:
- entry.text = xmldata
- else:
- entry.text = unicode(xmldata, self.encoding)
- if entry.text == '':
- entry.set('empty', 'true')
- except TemplateError:
- err = sys.exc_info()[1]
- logger.exception('Genshi template error')
- raise Bcfg2.Server.Plugin.PluginExecutionError('Genshi template error: %s' % err)
- except AttributeError:
- err = sys.exc_info()[1]
- logger.exception('Genshi template loading error')
- raise Bcfg2.Server.Plugin.PluginExecutionError('Genshi template loading error: %s' % err)
-
-
-class TemplateEntrySet(Bcfg2.Server.Plugin.EntrySet):
- basename_is_regex = True
-
-
-class TGenshi(Bcfg2.Server.Plugin.GroupSpool):
- """
- The TGenshi generator implements a templating
- mechanism for configuration files.
-
- """
- name = 'TGenshi'
- __author__ = 'jeff@ocjtech.us'
- filename_pattern = 'template\.(txt|newtxt|xml)'
- es_cls = TemplateEntrySet
- es_child_cls = TemplateFile
- deprecated = True
diff --git a/src/lib/Bcfg2/Server/Plugins/TemplateHelper.py b/src/lib/Bcfg2/Server/Plugins/TemplateHelper.py
index db7370f01..cec2de297 100644
--- a/src/lib/Bcfg2/Server/Plugins/TemplateHelper.py
+++ b/src/lib/Bcfg2/Server/Plugins/TemplateHelper.py
@@ -3,11 +3,10 @@
import re
import imp
import sys
-import logging
-import Bcfg2.Server.Lint
-import Bcfg2.Server.Plugin
-
-LOGGER = logging.getLogger(__name__)
+import lxml.etree
+from Bcfg2.Server.Plugin import Plugin, Connector, DirectoryBacked, \
+ TemplateDataProvider, DefaultTemplateDataProvider
+from Bcfg2.Logger import Debuggable
MODULE_RE = re.compile(r'(?P<filename>(?P<module>[^\/]+)\.py)$')
@@ -20,12 +19,12 @@ def safe_module_name(module):
return '__TemplateHelper_%s' % module
-class HelperModule(object):
+class HelperModule(Debuggable):
""" Representation of a TemplateHelper module """
- def __init__(self, name, fam=None):
+ def __init__(self, name):
+ Debuggable.__init__(self)
self.name = name
- self.fam = fam
#: The name of the module as used by get_additional_data().
#: the name of the file with .py stripped off.
@@ -34,6 +33,14 @@ class HelperModule(object):
#: The attributes exported by this module
self._attrs = []
+ #: The attributes added to the template namespace by this module
+ self.defaults = []
+
+ default_prov = DefaultTemplateDataProvider()
+ self.reserved_defaults = default_prov.get_template_data(
+ lxml.etree.Element("Path", name="/dummy"),
+ None, None).keys() + ["path"]
+
def HandleEvent(self, event=None):
""" HandleEvent is called whenever the FAM registers an event.
@@ -48,121 +55,81 @@ class HelperModule(object):
module = imp.load_source(safe_module_name(self._module_name),
self.name)
except: # pylint: disable=W0702
+ # this needs to be a blanket except because the
+ # imp.load_source() call can raise literally any error,
+ # since it imports the module and just passes through any
+ # exceptions raised.
err = sys.exc_info()[1]
- LOGGER.error("TemplateHelper: Failed to import %s: %s" %
- (self.name, err))
+ self.logger.error("TemplateHelper: Failed to import %s: %s" %
+ (self.name, err))
return
if not hasattr(module, "__export__"):
- LOGGER.error("TemplateHelper: %s has no __export__ list" %
- self.name)
+ self.logger.error("TemplateHelper: %s has no __export__ list" %
+ self.name)
return
newattrs = []
- for sym in module.__export__:
+ for sym in module.__export__ + getattr(module, "__default__", []):
+ if sym in newattrs:
+ # already added to attribute list
+ continue
if sym not in self._attrs and hasattr(self, sym):
- LOGGER.warning("TemplateHelper: %s: %s is a reserved keyword, "
- "skipping export" % (self.name, sym))
+ self.logger.warning(
+ "TemplateHelper: %s: %s is a reserved keyword, "
+ "skipping export" % (self.name, sym))
continue
try:
setattr(self, sym, getattr(module, sym))
newattrs.append(sym)
except AttributeError:
- LOGGER.warning("TemplateHelper: %s exports %s, but has no "
- "such attribute" % (self.name, sym))
+ self.logger.warning(
+ "TemplateHelper: %s exports %s, but has no such attribute"
+ % (self.name, sym))
+
# remove old exports
for sym in set(self._attrs) - set(newattrs):
delattr(self, sym)
self._attrs = newattrs
+ self.defaults = []
+ for sym in getattr(module, "__default__", []):
+ if sym in self.reserved_defaults:
+ self.logger.warning(
+ "TemplateHelper: %s: %s is a reserved keyword, not adding "
+ "as default" % (self.name, sym))
+ self.defaults.append(sym)
+
-class TemplateHelper(Bcfg2.Server.Plugin.Plugin,
- Bcfg2.Server.Plugin.Connector,
- Bcfg2.Server.Plugin.DirectoryBacked):
+class TemplateHelper(Plugin, Connector, DirectoryBacked, TemplateDataProvider):
""" A plugin to provide helper classes and functions to templates """
__author__ = 'chris.a.st.pierre@gmail.com'
ignore = re.compile(r'^(\.#.*|.*~|\..*\.(sw[px])|.*\.py[co])$')
patterns = MODULE_RE
__child__ = HelperModule
- def __init__(self, core, datastore):
- Bcfg2.Server.Plugin.Plugin.__init__(self, core, datastore)
- Bcfg2.Server.Plugin.Connector.__init__(self)
- Bcfg2.Server.Plugin.DirectoryBacked.__init__(self, self.data, core.fam)
+ def __init__(self, core):
+ Plugin.__init__(self, core)
+ Connector.__init__(self)
+ DirectoryBacked.__init__(self, self.data)
+ TemplateDataProvider.__init__(self)
def get_additional_data(self, _):
return dict([(h._module_name, h) # pylint: disable=W0212
for h in self.entries.values()])
-
-class TemplateHelperLint(Bcfg2.Server.Lint.ServerPlugin):
- """ ``bcfg2-lint`` plugin to ensure that all :ref:`TemplateHelper
- <server-plugins-connectors-templatehelper>` modules are valid.
- This can check for:
-
- * A TemplateHelper module that cannot be imported due to syntax or
- other compile-time errors;
- * A TemplateHelper module that does not have an ``__export__``
- attribute, or whose ``__export__`` is not a list;
- * Bogus symbols listed in ``__export__``, including symbols that
- don't exist, that are reserved, or that start with underscores.
- """
-
- def __init__(self, *args, **kwargs):
- Bcfg2.Server.Lint.ServerPlugin.__init__(self, *args, **kwargs)
- self.reserved_keywords = dir(HelperModule("foo.py"))
-
- def Run(self):
- for helper in self.core.plugins['TemplateHelper'].entries.values():
- if self.HandlesFile(helper.name):
- self.check_helper(helper.name)
-
- def check_helper(self, helper):
- """ Check a single helper module.
-
- :param helper: The filename of the helper module
- :type helper: string
- """
- module_name = MODULE_RE.search(helper).group(1)
-
- try:
- module = imp.load_source(safe_module_name(module_name), helper)
- except: # pylint: disable=W0702
- err = sys.exc_info()[1]
- self.LintError("templatehelper-import-error",
- "Failed to import %s: %s" %
- (helper, err))
- return
-
- if not hasattr(module, "__export__"):
- self.LintError("templatehelper-no-export",
- "%s has no __export__ list" % helper)
- return
- elif not isinstance(module.__export__, list):
- self.LintError("templatehelper-nonlist-export",
- "__export__ is not a list in %s" % helper)
- return
-
- for sym in module.__export__:
- if not hasattr(module, sym):
- self.LintError("templatehelper-nonexistent-export",
- "%s: exported symbol %s does not exist" %
- (helper, sym))
- elif sym in self.reserved_keywords:
- self.LintError("templatehelper-reserved-export",
- "%s: exported symbol %s is reserved" %
- (helper, sym))
- elif sym.startswith("_"):
- self.LintError("templatehelper-underscore-export",
- "%s: exported symbol %s starts with underscore"
- % (helper, sym))
-
- @classmethod
- def Errors(cls):
- return {"templatehelper-import-error": "error",
- "templatehelper-no-export": "error",
- "templatehelper-nonlist-export": "error",
- "templatehelper-nonexistent-export": "error",
- "templatehelper-reserved-export": "error",
- "templatehelper-underscore-export": "warning"}
+ def get_template_data(self, *_):
+ rv = dict()
+ source = dict()
+ for helper in self.entries.values():
+ for key in helper.defaults:
+ if key not in rv:
+ rv[key] = getattr(helper, key)
+ source[key] = helper
+ else:
+ self.logger.warning(
+ "TemplateHelper: Duplicate default variable %s "
+ "provided by both %s and %s" %
+ (key, helper.name, source[key].name))
+ return rv
diff --git a/src/lib/Bcfg2/Server/Plugins/Trigger.py b/src/lib/Bcfg2/Server/Plugins/Trigger.py
index f7c82fdb3..12672de7d 100644
--- a/src/lib/Bcfg2/Server/Plugins/Trigger.py
+++ b/src/lib/Bcfg2/Server/Plugins/Trigger.py
@@ -3,18 +3,14 @@
import os
import pipes
import Bcfg2.Server.Plugin
-from subprocess import Popen, PIPE
+from Bcfg2.Utils import Executor
class TriggerFile(Bcfg2.Server.Plugin.FileBacked):
""" Representation of a trigger script file """
-
def HandleEvent(self, event=None):
return
- def __str__(self):
- return "%s: %s" % (self.__class__.__name__, self.name)
-
class Trigger(Bcfg2.Server.Plugin.Plugin,
Bcfg2.Server.Plugin.ClientRunHooks,
@@ -22,11 +18,11 @@ class Trigger(Bcfg2.Server.Plugin.Plugin,
"""Trigger is a plugin that calls external scripts (on the server)."""
__author__ = 'bcfg-dev@mcs.anl.gov'
- def __init__(self, core, datastore):
- Bcfg2.Server.Plugin.Plugin.__init__(self, core, datastore)
+ def __init__(self, core):
+ Bcfg2.Server.Plugin.Plugin.__init__(self, core)
Bcfg2.Server.Plugin.ClientRunHooks.__init__(self)
- Bcfg2.Server.Plugin.DirectoryBacked.__init__(self, self.data,
- self.core.fam)
+ Bcfg2.Server.Plugin.DirectoryBacked.__init__(self, self.data)
+ self.cmd = Executor()
def async_run(self, args):
""" Run the trigger script asynchronously in a forked process
@@ -39,14 +35,12 @@ class Trigger(Bcfg2.Server.Plugin.Plugin,
if not dpid:
self.debug_log("Running %s" % " ".join(pipes.quote(a)
for a in args))
- proc = Popen(args, stdin=PIPE, stdout=PIPE, stderr=PIPE)
- err = proc.communicate()[1]
- rv = proc.wait()
- if rv != 0:
- self.logger.error("Trigger: Error running %s (%s): %s" %
- (args[0], rv, err))
- elif err:
- self.debug_log("Trigger: Error: %s" % err)
+ result = self.cmd.run(args)
+ if not result.success:
+ self.logger.error("Trigger: Error running %s: %s" %
+ (args[0], result.error))
+ elif result.stderr:
+ self.debug_log("Trigger: Error: %s" % result.stderr)
os._exit(0) # pylint: disable=W0212
def end_client_run(self, metadata):
diff --git a/src/lib/Bcfg2/Server/Plugins/__init__.py b/src/lib/Bcfg2/Server/Plugins/__init__.py
index ad51cf368..fdb20ed0a 100644
--- a/src/lib/Bcfg2/Server/Plugins/__init__.py
+++ b/src/lib/Bcfg2/Server/Plugins/__init__.py
@@ -1,5 +1 @@
"""Imports for Bcfg2.Server.Plugins."""
-
-from Bcfg2.Compat import walk_packages
-
-__all__ = [m[1] for m in walk_packages(path=__path__)]
diff --git a/src/lib/Bcfg2/Server/Reports/reports/models.py b/src/lib/Bcfg2/Server/Reports/reports/models.py
index c43c3cee7..ac4c8eac4 100644
--- a/src/lib/Bcfg2/Server/Reports/reports/models.py
+++ b/src/lib/Bcfg2/Server/Reports/reports/models.py
@@ -53,7 +53,7 @@ class ClientManager(models.Manager):
yet been expired as of optional timestmamp argument. Timestamp
should be a datetime object."""
- if timestamp == None:
+ if timestamp is None:
timestamp = datetime.now()
elif not isinstance(timestamp, datetime):
raise ValueError('Expected a datetime object')
@@ -64,8 +64,9 @@ class ClientManager(models.Manager):
except ValueError:
return self.none()
- return self.filter(Q(expiration__gt=timestamp) | Q(expiration__isnull=True),
- creation__lt=timestamp)
+ return self.filter(
+ Q(expiration__gt=timestamp) | Q(expiration__isnull=True),
+ creation__lt=timestamp)
class Client(models.Model):
@@ -101,7 +102,8 @@ class InteractiveManager(models.Manager):
if maxdate and not isinstance(maxdate, datetime):
raise ValueError('Expected a datetime object')
- return self.filter(id__in=self.get_interaction_per_client_ids(maxdate, active_only))
+ return self.filter(
+ id__in=self.get_interaction_per_client_ids(maxdate, active_only))
def get_interaction_per_client_ids(self, maxdate=None, active_only=True):
"""
@@ -116,15 +118,17 @@ class InteractiveManager(models.Manager):
cursor = connection.cursor()
cfilter = "expiration is null"
- sql = 'select reports_interaction.id, x.client_id from (select client_id, MAX(timestamp) ' + \
- 'as timer from reports_interaction'
+ sql = 'select reports_interaction.id, x.client_id ' + \
+ 'from (select client_id, MAX(timestamp) ' + \
+ 'as timer from reports_interaction'
if maxdate:
if not isinstance(maxdate, datetime):
raise ValueError('Expected a datetime object')
sql = sql + " where timestamp <= '%s' " % maxdate
cfilter = "(expiration is null or expiration > '%s') and creation <= '%s'" % (maxdate, maxdate)
sql = sql + ' GROUP BY client_id) x, reports_interaction where ' + \
- 'reports_interaction.client_id = x.client_id AND reports_interaction.timestamp = x.timer'
+ 'reports_interaction.client_id = x.client_id AND ' + \
+ 'reports_interaction.timestamp = x.timer'
if active_only:
sql = sql + " and x.client_id in (select id from reports_client where %s)" % \
cfilter
@@ -138,14 +142,16 @@ class InteractiveManager(models.Manager):
class Interaction(models.Model):
- """Models each reconfiguration operation interaction between client and server."""
+ """Models each reconfiguration operation
+ interaction between client and server."""
client = models.ForeignKey(Client, related_name="interactions")
- timestamp = models.DateTimeField(db_index=True) # Timestamp for this record
+ timestamp = models.DateTimeField(db_index=True) # record timestamp
state = models.CharField(max_length=32) # good/bad/modified/etc
- repo_rev_code = models.CharField(max_length=64) # repo revision at time of interaction
+ # repository revision at the time of the latest interaction
+ repo_rev_code = models.CharField(max_length=64)
goodcount = models.IntegerField() # of good config-items
totalcount = models.IntegerField() # of total config-items
- server = models.CharField(max_length=256) # Name of the server used for the interaction
+ server = models.CharField(max_length=256) # server used for interaction
bad_entries = models.IntegerField(default=-1)
modified_entries = models.IntegerField(default=-1)
extra_entries = models.IntegerField(default=-1)
@@ -391,5 +397,3 @@ class InteractionMetadata(models.Model):
profile = models.ForeignKey(Group, related_name="+")
groups = models.ManyToManyField(Group)
bundles = models.ManyToManyField(Bundle)
-
-
diff --git a/src/lib/Bcfg2/Server/Reports/updatefix.py b/src/lib/Bcfg2/Server/Reports/updatefix.py
index cb131c29d..91c370994 100644
--- a/src/lib/Bcfg2/Server/Reports/updatefix.py
+++ b/src/lib/Bcfg2/Server/Reports/updatefix.py
@@ -1,5 +1,4 @@
-import Bcfg2.settings
-
+import Bcfg2.DBSettings
from django.db import connection
import django.core.management
import sys
@@ -16,9 +15,9 @@ def _merge_database_table_entries():
find_cursor = connection.cursor()
cursor.execute("""
Select name, kind from reports_bad
- union
+ union
select name, kind from reports_modified
- union
+ union
select name, kind from reports_extra
""")
# this fetch could be better done
@@ -43,20 +42,26 @@ def _merge_database_table_entries():
if entries_map.get(key, None):
entry_id = entries_map[key]
else:
- find_cursor.execute("Select id from reports_entries where name=%s and kind=%s", key)
+ find_cursor.execute("Select id from reports_entries where "
+ "name=%s and kind=%s", key)
rowe = find_cursor.fetchone()
entry_id = rowe[0]
- insert_cursor.execute("insert into reports_entries_interactions \
- (entry_id, interaction_id, reason_id, type) values (%s, %s, %s, %s)", (entry_id, row[3], row[2], row[4]))
+ insert_cursor.execute("insert into reports_entries_interactions "
+ "(entry_id, interaction_id, reason_id, type) "
+ "values (%s, %s, %s, %s)",
+ (entry_id, row[3], row[2], row[4]))
def _interactions_constraint_or_idx():
'''sqlite doesn't support alter tables.. or constraints'''
cursor = connection.cursor()
try:
- cursor.execute('alter table reports_interaction add constraint reports_interaction_20100601 unique (client_id,timestamp)')
+ cursor.execute('alter table reports_interaction '
+ 'add constraint reports_interaction_20100601 '
+ 'unique (client_id,timestamp)')
except:
- cursor.execute('create unique index reports_interaction_20100601 on reports_interaction (client_id,timestamp)')
+ cursor.execute('create unique index reports_interaction_20100601 '
+ 'on reports_interaction (client_id,timestamp)')
def _populate_interaction_entry_counts():
@@ -67,13 +72,16 @@ def _populate_interaction_entry_counts():
3: 'extra_entries'}
for type in list(count_field.keys()):
- cursor.execute("select count(type), interaction_id " +
- "from reports_entries_interactions where type = %s group by interaction_id" % type)
+ cursor.execute("select count(type), interaction_id "
+ "from reports_entries_interactions "
+ "where type = %s group by interaction_id" % type)
updates = []
for row in cursor.fetchall():
updates.append(row)
try:
- cursor.executemany("update reports_interaction set " + count_field[type] + "=%s where id = %s", updates)
+ cursor.executemany("update reports_interaction set " +
+ count_field[type] +
+ "=%s where id = %s", updates)
except Exception:
e = sys.exc_info()[1]
print(e)
@@ -106,9 +114,8 @@ _fixes = [_merge_database_table_entries,
_interactions_constraint_or_idx,
'alter table reports_reason add is_binary bool NOT NULL default False;',
'alter table reports_reason add is_sensitive bool NOT NULL default False;',
- update_noop, #_remove_table_column('reports_interaction', 'client_version'),
- "alter table reports_reason add unpruned varchar(1280) not null default 'N/A';",
-]
+ update_noop, # _remove_table_column('reports_interaction', 'client_version'),
+ "alter table reports_reason add unpruned varchar(1280) not null default 'N/A';"]
# this will calculate the last possible version of the database
lastversion = len(_fixes)
@@ -127,8 +134,10 @@ def rollupdate(current_version):
else:
_fixes[i]()
except:
- logger.error("Failed to perform db update %s" % (_fixes[i]), exc_info=1)
- # since array start at 0 but version start at 1 we add 1 to the normal count
+ logger.error("Failed to perform db update %s" % (_fixes[i]),
+ exc_info=1)
+ # since array start at 0 but version start at 1
+ # we add 1 to the normal count
ret = InternalDatabaseVersion.objects.create(version=i + 1)
return ret
else:
diff --git a/src/lib/Bcfg2/Server/SSLServer.py b/src/lib/Bcfg2/Server/SSLServer.py
new file mode 100644
index 000000000..1f8febd0e
--- /dev/null
+++ b/src/lib/Bcfg2/Server/SSLServer.py
@@ -0,0 +1,464 @@
+""" Bcfg2 SSL server used by the builtin server core
+(:mod:`Bcfg2.Server.BuiltinCore`). This needs to be documented
+better. """
+
+import os
+import sys
+import socket
+import signal
+import logging
+import ssl
+import threading
+import time
+from Bcfg2.Compat import xmlrpclib, SimpleXMLRPCServer, SocketServer, \
+ b64decode
+
+
+class XMLRPCACLCheckException(Exception):
+ """ Raised when ACL checks fail on an RPC request """
+
+
+class XMLRPCDispatcher(SimpleXMLRPCServer.SimpleXMLRPCDispatcher):
+ """ An XML-RPC dispatcher. """
+
+ def __init__(self, allow_none, encoding):
+ try:
+ SimpleXMLRPCServer.SimpleXMLRPCDispatcher.__init__(self,
+ allow_none,
+ encoding)
+ except:
+ # Python 2.4?
+ SimpleXMLRPCServer.SimpleXMLRPCDispatcher.__init__(self)
+
+ self.logger = logging.getLogger(self.__class__.__name__)
+ self.allow_none = allow_none
+ self.encoding = encoding
+
+ def _marshaled_dispatch(self, address, data):
+ params, method = xmlrpclib.loads(data)
+ if not self.instance.check_acls(address, method):
+ raise XMLRPCACLCheckException
+ try:
+ if '.' not in method:
+ params = (address, ) + params
+ response = self.instance._dispatch(method, params, self.funcs)
+ # py3k compatibility
+ if type(response) not in [bool, str, list, dict, set, type(None)]:
+ response = (response.decode('utf-8'), )
+ elif type(response) == set:
+ response = (list(response), )
+ else:
+ response = (response, )
+ raw_response = xmlrpclib.dumps(response, methodresponse=True,
+ allow_none=self.allow_none,
+ encoding=self.encoding)
+ except xmlrpclib.Fault:
+ fault = sys.exc_info()[1]
+ raw_response = xmlrpclib.dumps(fault, methodresponse=True,
+ allow_none=self.allow_none,
+ encoding=self.encoding)
+ except:
+ err = sys.exc_info()
+ self.logger.error("Unexpected handler error", exc_info=1)
+ # report exception back to server
+ raw_response = xmlrpclib.dumps(
+ xmlrpclib.Fault(1, "%s:%s" % (err[0].__name__, err[1])),
+ methodresponse=True, allow_none=self.allow_none,
+ encoding=self.encoding)
+ return raw_response
+
+
+class SSLServer(SocketServer.TCPServer, object):
+ """ TCP server supporting SSL encryption. """
+ allow_reuse_address = True
+
+ def __init__(self, listen_all, server_address, RequestHandlerClass,
+ keyfile=None, certfile=None, reqCert=False, ca=None,
+ timeout=None, protocol='xmlrpc/tlsv1'):
+ """
+ :param listen_all: Listen on all interfaces
+ :type listen_all: bool
+ :param server_address: Address to bind to the server
+ :param RequestHandlerClass: Request handler used by TCP server
+ :param keyfile: Full path to SSL encryption key file
+ :type keyfile: string
+ :param certfile: Full path to SSL certificate file
+ :type certfile: string
+ :param reqCert: Require client to present certificate
+ :type reqCert: bool
+ :param ca: Full path to SSL CA that signed the key and cert
+ :type ca: string
+ :param timeout: Timeout for non-blocking request handling
+ :param protocol: The protocol to serve. Supported values are
+ ``xmlrpc/ssl`` and ``xmlrpc/tlsv1``.
+ :type protocol: string
+ """
+ # check whether or not we should listen on all interfaces
+ if listen_all:
+ listen_address = ('', server_address[1])
+ else:
+ listen_address = (server_address[0], server_address[1])
+
+ # check for IPv6 address
+ if ':' in server_address[0]:
+ self.address_family = socket.AF_INET6
+
+ self.logger = logging.getLogger(self.__class__.__name__)
+
+ try:
+ SocketServer.TCPServer.__init__(self, listen_address,
+ RequestHandlerClass)
+ except socket.gaierror:
+ e = sys.exc_info()[1]
+ self.logger.error("Failed to bind to socket: %s" % e)
+ raise
+ except socket.error:
+ self.logger.error("Failed to bind to socket")
+ raise
+
+ self.timeout = timeout
+ self.socket.settimeout(timeout)
+ self.keyfile = keyfile
+ if (keyfile is not None and
+ (keyfile is False or
+ not os.path.exists(keyfile) or
+ not os.access(keyfile, os.R_OK))):
+ msg = "Keyfile %s does not exist or is not readable" % keyfile
+ self.logger.error(msg)
+ raise Exception(msg)
+ self.certfile = certfile
+ if (certfile is not None and
+ (certfile is False or
+ not os.path.exists(certfile) or
+ not os.access(certfile, os.R_OK))):
+ msg = "Certfile %s does not exist or is not readable" % certfile
+ self.logger.error(msg)
+ raise Exception(msg)
+ self.ca = ca
+ if (ca is not None and
+ (ca is False or
+ not os.path.exists(ca) or
+ not os.access(ca, os.R_OK))):
+ msg = "CA %s does not exist or is not readable" % ca
+ self.logger.error(msg)
+ raise Exception(msg)
+ self.reqCert = reqCert
+ if ca and certfile:
+ self.mode = ssl.CERT_OPTIONAL
+ else:
+ self.mode = ssl.CERT_NONE
+ if protocol == 'xmlrpc/ssl':
+ self.ssl_protocol = ssl.PROTOCOL_SSLv23
+ elif protocol == 'xmlrpc/tlsv1':
+ self.ssl_protocol = ssl.PROTOCOL_TLSv1
+ else:
+ self.logger.error("Unknown protocol %s" % (protocol))
+ raise Exception("unknown protocol %s" % protocol)
+
+ def get_request(self):
+ (sock, sockinfo) = self.socket.accept()
+ sock.settimeout(self.timeout) # pylint: disable=E1101
+ sslsock = ssl.wrap_socket(sock,
+ server_side=True,
+ certfile=self.certfile,
+ keyfile=self.keyfile,
+ cert_reqs=self.mode,
+ ca_certs=self.ca,
+ ssl_version=self.ssl_protocol)
+ return sslsock, sockinfo
+
+ def close_request(self, request):
+ try:
+ request.unwrap()
+ except:
+ pass
+ try:
+ request.close()
+ except:
+ pass
+
+ def _get_url(self):
+ port = self.socket.getsockname()[1]
+ hostname = socket.gethostname()
+ protocol = "https"
+ return "%s://%s:%i" % (protocol, hostname, port)
+ url = property(_get_url)
+
+
+class XMLRPCRequestHandler(SimpleXMLRPCServer.SimpleXMLRPCRequestHandler):
+ """ XML-RPC request handler.
+
+ Adds support for HTTP authentication.
+ """
+
+ def __init__(self, *args, **kwargs):
+ self.logger = logging.getLogger(self.__class__.__name__)
+ SimpleXMLRPCServer.SimpleXMLRPCRequestHandler.__init__(self, *args,
+ **kwargs)
+
+ def authenticate(self):
+ try:
+ header = self.headers['Authorization']
+ except KeyError:
+ self.logger.error("No authentication data presented")
+ return False
+ auth_content = b64decode(header.split()[1])
+ try:
+ # py3k compatibility
+ try:
+ username, password = auth_content.split(":")
+ except TypeError:
+ # pylint: disable=E0602
+ username, pw = auth_content.split(bytes(":", encoding='utf-8'))
+ password = pw.decode('utf-8')
+ # pylint: enable=E0602
+ except ValueError:
+ username = auth_content
+ password = ""
+ cert = self.request.getpeercert()
+ client_address = self.request.getpeername()
+ return self.server.instance.authenticate(cert, username,
+ password, client_address)
+
+ def parse_request(self):
+ """Extends parse_request.
+
+ Optionally check HTTP authentication when parsing.
+ """
+ if not SimpleXMLRPCServer.SimpleXMLRPCRequestHandler.parse_request(self):
+ return False
+ try:
+ if not self.authenticate():
+ self.logger.error("Authentication Failure")
+ self.send_error(401, self.responses[401][0])
+ return False
+ except: # pylint: disable=W0702
+ self.logger.error("Unexpected Authentication Failure", exc_info=1)
+ self.send_error(401, self.responses[401][0])
+ return False
+ return True
+
+ def do_POST(self):
+ try:
+ max_chunk_size = 10 * 1024 * 1024
+ size_remaining = int(self.headers["content-length"])
+ L = []
+ while size_remaining:
+ chunk_size = min(size_remaining, max_chunk_size)
+ chunk = self.rfile.read(chunk_size).decode('utf-8')
+ if not chunk:
+ break
+ L.append(chunk)
+ size_remaining -= len(L[-1])
+ data = ''.join(L)
+ if data is None:
+ return # response has been sent
+
+ response = self.server._marshaled_dispatch(self.client_address,
+ data)
+ if sys.hexversion >= 0x03000000:
+ response = response.encode('utf-8')
+ except XMLRPCACLCheckException:
+ self.send_error(401, self.responses[401][0])
+ self.end_headers()
+ except: # pylint: disable=W0702
+ self.logger.error("Unexpected dispatch error for %s: %s" %
+ (self.client_address, sys.exc_info()[1]))
+ try:
+ self.send_response(500)
+ self.send_header("Content-length", "0")
+ self.end_headers()
+ except:
+ (etype, msg) = sys.exc_info()[:2]
+ self.logger.error("Error sending 500 response (%s): %s" %
+ (etype.__name__, msg))
+ raise
+ else:
+ # got a valid XML RPC response
+ client_address = self.request.getpeername()
+ try:
+ self.send_response(200)
+ self.send_header("Content-type", "text/xml")
+ self.send_header("Content-length", str(len(response)))
+ self.end_headers()
+ failcount = 0
+ while True:
+ try:
+ # If we hit SSL3_WRITE_PENDING here try to resend.
+ self.wfile.write(response)
+ break
+ except ssl.SSLError:
+ e = sys.exc_info()[1]
+ if str(e).find("SSL3_WRITE_PENDING") < 0:
+ raise
+ self.logger.error("SSL3_WRITE_PENDING")
+ failcount += 1
+ if failcount < 5:
+ continue
+ raise
+ except socket.error:
+ err = sys.exc_info()[1]
+ if isinstance(err, socket.timeout):
+ self.logger.warning("Connection timed out for %s" %
+ self.client_address[0])
+ elif err[0] == 32:
+ self.logger.warning("Connection dropped from %s" %
+ self.client_address[0])
+ elif err[0] == 104:
+ self.logger.warning("Connection reset by peer: %s" %
+ self.client_address[0])
+ else:
+ self.logger.warning("Socket error sending response to %s: "
+ "%s" % (self.client_address[0], err))
+ except ssl.SSLError:
+ err = sys.exc_info()[1]
+ self.logger.warning("SSLError handling client %s: %s" %
+ (self.client_address[0], err))
+ except:
+ etype, err = sys.exc_info()[:2]
+ self.logger.error("Unknown error sending response to %s: "
+ "%s (%s)" %
+ (self.client_address[0], err,
+ etype.__name__))
+
+ def finish(self):
+ # shut down the connection
+ try:
+ SimpleXMLRPCServer.SimpleXMLRPCRequestHandler.finish(self)
+ except socket.error:
+ err = sys.exc_info()[1]
+ self.logger.warning("Error closing connection: %s" % err)
+
+
+class XMLRPCServer(SocketServer.ThreadingMixIn, SSLServer,
+ XMLRPCDispatcher, object):
+ """ Component XMLRPCServer. """
+
+ def __init__(self, listen_all, server_address, RequestHandlerClass=None,
+ keyfile=None, certfile=None, ca=None, protocol='xmlrpc/tlsv1',
+ timeout=10, logRequests=False,
+ register=True, allow_none=True, encoding=None):
+ """
+ :param listen_all: Listen on all interfaces
+ :type listen_all: bool
+ :param server_address: Address to bind to the server
+ :param RequestHandlerClass: request handler used by TCP server
+ :param keyfile: Full path to SSL encryption key file
+ :type keyfile: string
+ :param certfile: Full path to SSL certificate file
+ :type certfile: string
+ :param ca: Full path to SSL CA that signed the key and cert
+ :type ca: string
+ :param logRequests: Log all requests
+ :type logRequests: bool
+ :param register: Presence should be reported to service-location
+ :type register: bool
+ :param allow_none: Allow None values in XML-RPC
+ :type allow_none: bool
+ :param encoding: Encoding to use for XML-RPC
+ """
+
+ XMLRPCDispatcher.__init__(self, allow_none, encoding)
+
+ if not RequestHandlerClass:
+ # pylint: disable=E0102
+ class RequestHandlerClass(XMLRPCRequestHandler):
+ """A subclassed request handler to prevent
+ class-attribute conflicts."""
+ # pylint: enable=E0102
+
+ SSLServer.__init__(self,
+ listen_all,
+ server_address,
+ RequestHandlerClass,
+ ca=ca,
+ timeout=timeout,
+ keyfile=keyfile,
+ certfile=certfile,
+ protocol=protocol)
+ self.logRequests = logRequests
+ self.serve = False
+ self.register = register
+ self.register_introspection_functions()
+ self.register_function(self.ping)
+ self.logger.info("service available at %s" % self.url)
+ self.timeout = timeout
+
+ def _tasks_thread(self):
+ try:
+ while self.serve:
+ try:
+ if self.instance and hasattr(self.instance, 'do_tasks'):
+ self.instance.do_tasks()
+ except:
+ self.logger.error("Unexpected task failure", exc_info=1)
+ time.sleep(self.timeout)
+ except:
+ self.logger.error("tasks_thread failed", exc_info=1)
+
+ def server_close(self):
+ SSLServer.server_close(self)
+ self.logger.info("server_close()")
+
+ def _get_require_auth(self):
+ return getattr(self.RequestHandlerClass, "require_auth", False)
+
+ def _set_require_auth(self, value):
+ self.RequestHandlerClass.require_auth = value
+ require_auth = property(_get_require_auth, _set_require_auth)
+
+ def _get_credentials(self):
+ try:
+ return self.RequestHandlerClass.credentials
+ except AttributeError:
+ return dict()
+
+ def _set_credentials(self, value):
+ self.RequestHandlerClass.credentials = value
+ credentials = property(_get_credentials, _set_credentials)
+
+ def register_instance(self, instance, *args, **kwargs):
+ XMLRPCDispatcher.register_instance(self, instance, *args, **kwargs)
+ try:
+ name = instance.name
+ except AttributeError:
+ name = "unknown"
+ if hasattr(instance, '_get_rmi'):
+ for fname, func in instance._get_rmi().items():
+ self.register_function(func, name=fname)
+ self.logger.info("serving %s at %s" % (name, self.url))
+
+ def serve_forever(self):
+ """Serve single requests until (self.serve == False)."""
+ self.serve = True
+ self.task_thread = \
+ threading.Thread(name="%sThread" % self.__class__.__name__,
+ target=self._tasks_thread)
+ self.task_thread.start()
+ self.logger.info("serve_forever() [start]")
+ signal.signal(signal.SIGINT, self._handle_shutdown_signal)
+ signal.signal(signal.SIGTERM, self._handle_shutdown_signal)
+
+ try:
+ while self.serve:
+ try:
+ self.handle_request()
+ except socket.timeout:
+ pass
+ except:
+ self.logger.error("Got unexpected error in handle_request",
+ exc_info=1)
+ finally:
+ self.logger.info("serve_forever() [stop]")
+
+ def shutdown(self):
+ """Signal that automatic service should stop."""
+ self.serve = False
+
+ def _handle_shutdown_signal(self, *_):
+ self.shutdown()
+
+ def ping(self, *args):
+ """Echo response."""
+ self.logger.info("ping(%s)" % (", ".join([repr(arg) for arg in args])))
+ return args
diff --git a/src/lib/Bcfg2/Server/Snapshots/__init__.py b/src/lib/Bcfg2/Server/Snapshots/__init__.py
deleted file mode 100644
index d42aa0525..000000000
--- a/src/lib/Bcfg2/Server/Snapshots/__init__.py
+++ /dev/null
@@ -1,31 +0,0 @@
-__all__ = ['models', 'db_from_config', 'setup_session']
-
-import sqlalchemy
-import sqlalchemy.orm
-# Compatibility import
-from Bcfg2.Compat import ConfigParser
-
-
-def db_from_config(cfile):
- cp = ConfigParser.ConfigParser()
- cp.read([cfile])
- driver = cp.get('snapshots', 'driver')
- if driver == 'sqlite':
- path = cp.get('snapshots', 'database')
- return 'sqlite:///%s' % path
- elif driver in ['mysql', 'postgres']:
- user = cp.get('snapshots', 'user')
- password = cp.get('snapshots', 'password')
- host = cp.get('snapshots', 'host')
- db = cp.get('snapshots', 'database')
- return '%s://%s:%s@%s/%s' % (driver, user, password, host, db)
- else:
- raise Exception("unsupported db driver %s" % driver)
-
-
-def setup_session(cfile, debug=False):
- engine = sqlalchemy.create_engine(db_from_config(cfile),
- echo=debug)
- Session = sqlalchemy.orm.sessionmaker()
- Session.configure(bind=engine)
- return Session()
diff --git a/src/lib/Bcfg2/Server/Snapshots/model.py b/src/lib/Bcfg2/Server/Snapshots/model.py
deleted file mode 100644
index d578cd2c0..000000000
--- a/src/lib/Bcfg2/Server/Snapshots/model.py
+++ /dev/null
@@ -1,323 +0,0 @@
-import sys
-from sqlalchemy import Table, Column, Integer, Unicode, ForeignKey, Boolean, \
- DateTime, UnicodeText, desc
-import datetime
-import sqlalchemy.exceptions
-from sqlalchemy.orm import relation, backref
-from sqlalchemy.ext.declarative import declarative_base
-
-from Bcfg2.Compat import u_str
-
-
-class Uniquer(object):
- force_rt = True
-
- @classmethod
- def by_value(cls, session, **kwargs):
- if cls.force_rt:
- try:
- return session.query(cls).filter_by(**kwargs).one()
- except sqlalchemy.exceptions.InvalidRequestError:
- return cls(**kwargs)
- else:
- return cls(**kwargs)
-
- @classmethod
- def from_record(cls, session, data):
- return cls.by_value(session, **data)
-
-Base = declarative_base()
-
-
-class Administrator(Uniquer, Base):
- __tablename__ = 'administrator'
- id = Column(Integer, primary_key=True)
- name = Column(Unicode(20), unique=True)
- email = Column(Unicode(64))
-
-admin_client = Table('admin_client', Base.metadata,
- Column('admin_id',
- Integer,
- ForeignKey('administrator.id')),
- Column('client_id',
- Integer,
- ForeignKey('client.id')))
-
-admin_group = Table('admin_group', Base.metadata,
- Column('admin_id',
- Integer,
- ForeignKey('administrator.id')),
- Column('group_id',
- Integer,
- ForeignKey('group.id')))
-
-
-class Client(Uniquer, Base):
- __tablename__ = 'client'
- id = Column(Integer, primary_key=True)
- name = Column(Unicode(64), unique=True)
- admins = relation("Administrator", secondary=admin_client,
- backref='clients')
- active = Column(Boolean, default=True)
- online = Column(Boolean, default=True)
- online_ts = Column(DateTime)
-
-
-class Group(Uniquer, Base):
- __tablename__ = 'group'
- id = Column(Integer, primary_key=True)
- name = Column(Unicode(32), unique=True)
- admins = relation("Administrator", secondary=admin_group,
- backref='groups')
-
-
-class ConnectorKeyVal(Uniquer, Base):
- __tablename__ = 'connkeyval'
- id = Column(Integer, primary_key=True)
- connector = Column(Unicode(16))
- key = Column(Unicode(32))
- value = Column(UnicodeText)
-
-meta_group = Table('meta_group', Base.metadata,
- Column('metadata_id',
- Integer,
- ForeignKey('metadata.id')),
- Column('group_id',
- Integer,
- ForeignKey('group.id')))
-
-meta_conn = Table('meta_conn', Base.metadata,
- Column('metadata_id',
- Integer,
- ForeignKey('metadata.id')),
- Column('connkeyval_id',
- Integer,
- ForeignKey('connkeyval.id')))
-
-
-class Metadata(Base):
- __tablename__ = 'metadata'
- id = Column(Integer, primary_key=True)
- client_id = Column(Integer, ForeignKey('client.id'))
- client = relation(Client)
- groups = relation("Group", secondary=meta_group)
- keyvals = relation(ConnectorKeyVal, secondary=meta_conn)
- timestamp = Column(DateTime)
-
- @classmethod
- def from_metadata(cls, mysession, mymetadata):
- client = Client.by_value(mysession, name=u_str(mymetadata.hostname))
- m = cls(client=client)
- for group in mymetadata.groups:
- m.groups.append(Group.by_value(mysession, name=u_str(group)))
- for connector in mymetadata.connectors:
- data = getattr(mymetadata, connector)
- if not isinstance(data, dict):
- continue
- for key, value in list(data.items()):
- if not isinstance(value, str):
- continue
- m.keyvals.append(ConnectorKeyVal.by_value(mysession,
- connector=u_str(connector),
- key=u_str(key),
- value=u_str(value)))
- return m
-
-
-class Package(Base, Uniquer):
- __tablename__ = 'package'
- id = Column(Integer, primary_key=True)
- name = Column(Unicode(24))
- type = Column(Unicode(16))
- version = Column(Unicode(16))
- verification_status = Column(Boolean)
-
-
-class CorrespondenceType(object):
- mtype = Package
-
- @classmethod
- def from_record(cls, mysession, record):
- (mod, corr, name, s_dict, e_dict) = record
- if not s_dict:
- start = None
- else:
- start = cls.mtype.by_value(mysession, name=name, **s_dict)
- if s_dict != e_dict:
- end = cls.mtype.by_value(mysession, name=name, **e_dict)
- else:
- end = start
- return cls(start=start, end=end, modified=mod, correct=corr)
-
-
-class PackageCorrespondence(Base, CorrespondenceType):
- mtype = Package
- __tablename__ = 'package_pair'
- id = Column(Integer, primary_key=True)
- start_id = Column(Integer, ForeignKey('package.id'))
- start = relation(Package, primaryjoin=start_id == Package.id)
- end_id = Column(Integer, ForeignKey('package.id'), nullable=True)
- end = relation(Package, primaryjoin=end_id == Package.id)
- modified = Column(Boolean)
- correct = Column(Boolean)
-
-package_snap = Table('package_snap', Base.metadata,
- Column('ppair_id',
- Integer,
- ForeignKey('package_pair.id')),
- Column('snapshot_id',
- Integer,
- ForeignKey('snapshot.id')))
-
-
-class Service(Base, Uniquer):
- __tablename__ = 'service'
- id = Column(Integer, primary_key=True)
- name = Column(Unicode(16))
- type = Column(Unicode(12))
- status = Column(Boolean)
-
-
-class ServiceCorrespondence(Base, CorrespondenceType):
- mtype = Service
- __tablename__ = 'service_pair'
- id = Column(Integer, primary_key=True)
- start_id = Column(Integer, ForeignKey('service.id'))
- start = relation(Service, primaryjoin=start_id == Service.id)
- end_id = Column(Integer, ForeignKey('service.id'), nullable=True)
- end = relation(Service, primaryjoin=end_id == Service.id)
- modified = Column(Boolean)
- correct = Column(Boolean)
-
-service_snap = Table('service_snap', Base.metadata,
- Column('spair_id',
- Integer,
- ForeignKey('service_pair.id')),
- Column('snapshot_id',
- Integer,
- ForeignKey('snapshot.id')))
-
-
-class File(Base, Uniquer):
- __tablename__ = 'file'
- id = Column(Integer, primary_key=True)
- name = Column(UnicodeText)
- type = Column(Unicode(12))
- owner = Column(Unicode(12))
- group = Column(Unicode(16))
- perms = Column(Integer)
- contents = Column(UnicodeText)
-
-
-class FileCorrespondence(Base, CorrespondenceType):
- mtype = File
- __tablename__ = 'file_pair'
- id = Column(Integer, primary_key=True)
- start_id = Column(Integer, ForeignKey('file.id'))
- start = relation(File, primaryjoin=start_id == File.id)
- end_id = Column(Integer, ForeignKey('file.id'), nullable=True)
- end = relation(File, primaryjoin=end_id == File.id)
- modified = Column(Boolean)
- correct = Column(Boolean)
-
-file_snap = Table('file_snap', Base.metadata,
- Column('fpair_id',
- Integer,
- ForeignKey('file_pair.id')),
- Column('snapshot_id',
- Integer,
- ForeignKey('snapshot.id')))
-
-extra_pkg_snap = Table('extra_pkg_snap', Base.metadata,
- Column('package_id',
- Integer,
- ForeignKey('package.id')),
- Column('snapshot_id',
- Integer,
- ForeignKey('snapshot.id')))
-
-extra_file_snap = Table('extra_file_snap', Base.metadata,
- Column('file_id',
- Integer,
- ForeignKey('file.id')),
- Column('snapshot_id',
- Integer,
- ForeignKey('snapshot.id')))
-
-extra_service_snap = Table('extra_service_snap', Base.metadata,
- Column('service_id',
- Integer,
- ForeignKey('service.id')),
- Column('snapshot_id',
- Integer,
- ForeignKey('snapshot.id')))
-
-
-class Action(Base):
- __tablename__ = 'action'
- id = Column(Integer, primary_key=True)
- command = Column(UnicodeText)
- return_code = Column(Integer)
- output = Column(UnicodeText)
-
-action_snap = Table('action_snap', Base.metadata,
- Column('action_id', Integer, ForeignKey('action.id')),
- Column('snapshot_id', Integer, ForeignKey('snapshot.id')))
-
-
-class Snapshot(Base):
- __tablename__ = 'snapshot'
- id = Column(Integer, primary_key=True)
- correct = Column(Boolean)
- revision = Column(Unicode(36))
- metadata_id = Column(Integer, ForeignKey('metadata.id'))
- client_metadata = relation(Metadata, primaryjoin=metadata_id == Metadata.id)
- timestamp = Column(DateTime, default=datetime.datetime.now)
- client_id = Column(Integer, ForeignKey('client.id'))
- client = relation(Client, backref=backref('snapshots'))
- packages = relation(PackageCorrespondence, secondary=package_snap)
- services = relation(ServiceCorrespondence, secondary=service_snap)
- files = relation(FileCorrespondence, secondary=file_snap)
- actions = relation(Action, secondary=action_snap)
- extra_packages = relation(Package, secondary=extra_pkg_snap)
- extra_services = relation(Service, secondary=extra_service_snap)
- extra_files = relation(File, secondary=extra_file_snap)
-
- c_dispatch = dict([('Package', ('packages', PackageCorrespondence)),
- ('Service', ('services', ServiceCorrespondence)),
- ('Path', ('files', FileCorrespondence))])
- e_dispatch = dict([('Package', ('extra_packages', Package)),
- ('Service', ('extra_services', Service)),
- ('Path', ('extra_files', File))])
-
- @classmethod
- def from_data(cls, session, correct, revision, metadata, entries, extra):
- dbm = Metadata.from_metadata(session, metadata)
- snap = cls(correct=correct, client_metadata=dbm, revision=revision,
- timestamp=datetime.datetime.now(), client=dbm.client)
- for (dispatch, data) in [(cls.c_dispatch, entries),
- (cls.e_dispatch, extra)]:
- for key in dispatch:
- dest, ecls = dispatch[key]
- for edata in list(data[key].values()):
- getattr(snap, dest).append(ecls.from_record(session, edata))
- return snap
-
- @classmethod
- def by_client(cls, session, clientname):
- return session.query(cls).join(cls.client_metadata,
- Metadata.client).filter(Client.name == clientname)
-
- @classmethod
- def get_current(cls, session, clientname):
- return session.query(Snapshot).join(Snapshot.client_metadata,
- Metadata.client).filter(Client.name == clientname).order_by(desc(Snapshot.timestamp)).first()
-
- @classmethod
- def get_by_date(cls, session, clientname, timestamp):
- return session.query(Snapshot)\
- .join(Snapshot.client_metadata, Metadata.client)\
- .filter(Snapshot.timestamp < timestamp)\
- .filter(Client.name == clientname)\
- .order_by(desc(Snapshot.timestamp))\
- .first()
diff --git a/src/lib/Bcfg2/Server/Statistics.py b/src/lib/Bcfg2/Server/Statistics.py
new file mode 100644
index 000000000..e34135d4b
--- /dev/null
+++ b/src/lib/Bcfg2/Server/Statistics.py
@@ -0,0 +1,124 @@
+""" Module for tracking execution time statistics from the Bcfg2
+server core. This data is exposed by
+:func:`Bcfg2.Server.Core.BaseCore.get_statistics`."""
+
+import time
+from Bcfg2.Compat import wraps
+
+
+class Statistic(object):
+ """ A single named statistic, tracking minimum, maximum, and
+ average execution time, and number of invocations. """
+
+ def __init__(self, name, initial_value):
+ """
+ :param name: The name of this statistic
+ :type name: string
+ :param initial_value: The initial value to be added to this
+ statistic
+ :type initial_value: int or float
+ """
+ self.name = name
+ self.min = float(initial_value)
+ self.max = float(initial_value)
+ self.ave = float(initial_value)
+ self.count = 1
+
+ def add_value(self, value):
+ """ Add a value to the statistic, recalculating the various
+ metrics.
+
+ :param value: The value to add to this statistic
+ :type value: int or float
+ """
+ self.min = min(self.min, float(value))
+ self.max = max(self.max, float(value))
+ self.count += 1
+ self.ave = (((self.ave * (self.count - 1)) + value) / self.count)
+
+ def get_value(self):
+ """ Get a tuple of all the stats tracked on this named item.
+ The tuple is in the format::
+
+ (<name>, (min, max, average, number of values))
+
+ This makes it very easy to cast to a dict in
+ :func:`Statistics.display`.
+
+ :returns: tuple
+ """
+ return (self.name, (self.min, self.max, self.ave, self.count))
+
+ def __repr__(self):
+ return "%s(%s, (min=%s, avg=%s, max=%s, count=%s))" % (
+ self.__class__.__name__,
+ self.name, self.min, self.ave, self.max, self.count)
+
+
+class Statistics(object):
+ """ A collection of named :class:`Statistic` objects. """
+
+ def __init__(self):
+ self.data = dict()
+
+ def add_value(self, name, value):
+ """ Add a value to the named :class:`Statistic`. This just
+ proxies to :func:`Statistic.add_value` or the
+ :class:`Statistic` constructor as appropriate.
+
+ :param name: The name of the :class:`Statistic` to add the
+ value to
+ :type name: string
+ :param value: The value to add to the Statistic
+ :type value: int or float
+ """
+ if name not in self.data:
+ self.data[name] = Statistic(name, value)
+ else:
+ self.data[name].add_value(value)
+
+ def display(self):
+ """ Return a dict of all :class:`Statistic` object values.
+ Keys are the statistic names, and values are tuples of the
+ statistic metrics as returned by
+ :func:`Statistic.get_value`. """
+ return dict([value.get_value() for value in list(self.data.values())])
+
+
+#: A module-level :class:`Statistics` object used to track all
+#: execution time metrics for the server.
+stats = Statistics() # pylint: disable=C0103
+
+
+class track_statistics(object): # pylint: disable=C0103
+ """ Decorator that tracks execution time for the given method with
+ :mod:`Bcfg2.Server.Statistics` for reporting via ``bcfg2-admin
+ perf`` """
+
+ def __init__(self, name=None):
+ """
+ :param name: The name under which statistics for this function
+ will be tracked. By default, the name will be
+ the name of the function concatenated with the
+ name of the class the function is a member of.
+ :type name: string
+ """
+ # if this is None, it will be set later during __call__
+ self.name = name
+
+ def __call__(self, func):
+ if self.name is None:
+ self.name = func.__name__
+
+ @wraps(func)
+ def inner(obj, *args, **kwargs):
+ """ The decorated function """
+ name = "%s:%s" % (obj.__class__.__name__, self.name)
+
+ start = time.time()
+ try:
+ return func(obj, *args, **kwargs)
+ finally:
+ stats.add_value(name, time.time() - start)
+
+ return inner
diff --git a/src/lib/Bcfg2/Server/Test.py b/src/lib/Bcfg2/Server/Test.py
new file mode 100644
index 000000000..9a1d43d12
--- /dev/null
+++ b/src/lib/Bcfg2/Server/Test.py
@@ -0,0 +1,287 @@
+""" bcfg2-test libraries and CLI """
+
+import os
+import sys
+import shlex
+import signal
+import fnmatch
+import logging
+import Bcfg2.Logger
+import Bcfg2.Server.Core
+from math import ceil
+from nose.core import TestProgram
+from nose.suite import LazySuite
+from unittest import TestCase
+
+try:
+ from multiprocessing import Process, Queue, active_children
+ HAS_MULTIPROC = True
+except ImportError:
+ HAS_MULTIPROC = False
+
+ def active_children():
+ """active_children() when multiprocessing lib is missing."""
+ return []
+
+
+def get_sigint_handler(core):
+ """ Get a function that handles SIGINT/Ctrl-C by shutting down the
+ core and exiting properly."""
+
+ def hdlr(sig, frame): # pylint: disable=W0613
+ """ Handle SIGINT/Ctrl-C by shutting down the core and exiting
+ properly. """
+ core.shutdown()
+ os._exit(1) # pylint: disable=W0212
+
+ return hdlr
+
+
+class CapturingLogger(object):
+ """ Fake logger that captures logging output so that errors are
+ only displayed for clients that fail tests """
+ def __init__(self, *args, **kwargs): # pylint: disable=W0613
+ self.output = []
+
+ def error(self, msg):
+ """ discard error messages """
+ self.output.append(msg)
+
+ def warning(self, msg):
+ """ discard error messages """
+ self.output.append(msg)
+
+ def info(self, msg):
+ """ discard error messages """
+ self.output.append(msg)
+
+ def debug(self, msg):
+ """ discard error messages """
+ if Bcfg2.Options.setup.debug:
+ self.output.append(msg)
+
+ def reset_output(self):
+ """ Reset the captured output """
+ self.output = []
+
+
+class ClientTestFromQueue(TestCase):
+ """ A test case that tests a value that has been enqueued by a
+ child test process. ``client`` is the name of the client that has
+ been tested; ``result`` is the result from the :class:`ClientTest`
+ test. ``None`` indicates a successful test; a string value
+ indicates a failed test; and an exception indicates an error while
+ running the test. """
+ __test__ = False # Do not collect
+
+ def __init__(self, client, result):
+ TestCase.__init__(self)
+ self.client = client
+ self.result = result
+
+ def shortDescription(self):
+ return "Building configuration for %s" % self.client
+
+ def runTest(self):
+ """ parse the result from this test """
+ if isinstance(self.result, Exception):
+ raise self.result
+ assert self.result is None, self.result
+
+
+class ClientTest(TestCase):
+ """ A test case representing the build of all of the configuration for
+ a single host. Checks that none of the build config entities has
+ had a failure when it is building. Optionally ignores some config
+ files that we know will cause errors (because they are private
+ files we don't have access to, for instance) """
+ __test__ = False # Do not collect
+ divider = "-" * 70
+
+ def __init__(self, core, client, ignore=None):
+ TestCase.__init__(self)
+ self.core = core
+ self.core.logger = CapturingLogger()
+ self.client = client
+ if ignore is None:
+ self.ignore = dict()
+ else:
+ self.ignore = ignore
+
+ def ignore_entry(self, tag, name):
+ """ return True if an error on a given entry should be ignored
+ """
+ if tag in self.ignore:
+ if name in self.ignore[tag]:
+ return True
+ else:
+ # try wildcard matching
+ for pattern in self.ignore[tag]:
+ if fnmatch.fnmatch(name, pattern):
+ return True
+ return False
+
+ def shortDescription(self):
+ return "Building configuration for %s" % self.client
+
+ def runTest(self):
+ """ run this individual test """
+ config = self.core.BuildConfiguration(self.client)
+ output = self.core.logger.output[:]
+ if output:
+ output.append(self.divider)
+ self.core.logger.reset_output()
+
+ # check for empty client configuration
+ assert len(config.findall("Bundle")) > 0, \
+ "\n".join(output + ["%s has no content" % self.client])
+
+ # check for missing bundles
+ metadata = self.core.build_metadata(self.client)
+ sbundles = [el.get('name') for el in config.findall("Bundle")]
+ missing = [b for b in metadata.bundles if b not in sbundles]
+ assert len(missing) == 0, \
+ "\n".join(output + ["Configuration is missing bundle(s): %s" %
+ ':'.join(missing)])
+
+ # check for unknown packages
+ unknown_pkgs = [el.get("name")
+ for el in config.xpath('//Package[@type="unknown"]')
+ if not self.ignore_entry(el.tag, el.get("name"))]
+ assert len(unknown_pkgs) == 0, \
+ "Configuration contains unknown packages: %s" % \
+ ", ".join(unknown_pkgs)
+
+ failures = []
+ msg = output + ["Failures:"]
+ for failure in config.xpath('//*[@failure]'):
+ if not self.ignore_entry(failure.tag, failure.get('name')):
+ failures.append(failure)
+ msg.append("%s:%s: %s" % (failure.tag, failure.get("name"),
+ failure.get("failure")))
+
+ assert len(failures) == 0, "\n".join(msg)
+
+ def __str__(self):
+ return "ClientTest(%s)" % self.client
+
+ id = __str__
+
+
+class CLI(object):
+ """ The bcfg2-test CLI """
+ options = [
+ Bcfg2.Options.PositionalArgument(
+ "clients", help="Specific clients to build", nargs="*"),
+ Bcfg2.Options.Option(
+ "--nose-options", cf=("bcfg2_test", "nose_options"),
+ type=shlex.split, default=[],
+ help='Options to pass to nosetests. Only honored with '
+ '--children 0'),
+ Bcfg2.Options.Option(
+ "--ignore", cf=('bcfg2_test', 'ignore_entries'), default=[],
+ dest="test_ignore", type=Bcfg2.Options.Types.comma_list,
+ help='Ignore these entries if they fail to build'),
+ Bcfg2.Options.Option(
+ "--children", cf=('bcfg2_test', 'children'), default=0, type=int,
+ help='Spawn this number of children for bcfg2-test (python 2.6+)')]
+
+ def __init__(self):
+ parser = Bcfg2.Options.get_parser(
+ description="Verify that all clients build without failures",
+ components=[Bcfg2.Server.Core.Core, self])
+ parser.parse()
+ self.logger = logging.getLogger(parser.prog)
+
+ if Bcfg2.Options.setup.children and not HAS_MULTIPROC:
+ self.logger.warning("Python multiprocessing library not found, "
+ "running with no children")
+ Bcfg2.Options.setup.children = 0
+
+ def get_core(self):
+ """ Get a server core, with events handled """
+ core = Bcfg2.Server.Core.Core()
+ core.load_plugins()
+ core.block_for_fam_events(handle_events=True)
+ signal.signal(signal.SIGINT, get_sigint_handler(core))
+ return core
+
+ def get_ignore(self):
+ """ Get a dict of entry tags and names to
+ ignore errors from """
+ ignore = dict()
+ for entry in Bcfg2.Options.setup.test_ignore:
+ tag, name = entry.split(":")
+ try:
+ ignore[tag].append(name)
+ except KeyError:
+ ignore[tag] = [name]
+ return ignore
+
+ def run_child(self, clients, queue):
+ """ Run tests for the given clients in a child process, returning
+ results via the given Queue """
+ core = self.get_core()
+ ignore = self.get_ignore()
+ for client in clients:
+ try:
+ ClientTest(core, client, ignore).runTest()
+ queue.put((client, None))
+ except AssertionError:
+ queue.put((client, str(sys.exc_info()[1])))
+ except:
+ queue.put((client, sys.exc_info()[1]))
+
+ core.shutdown()
+
+ def run(self):
+ """ Run bcfg2-test """
+ core = self.get_core()
+ clients = Bcfg2.Options.setup.clients or core.metadata.clients
+ ignore = self.get_ignore()
+
+ if Bcfg2.Options.setup.children:
+ if Bcfg2.Options.setup.children > len(clients):
+ self.logger.info("Refusing to spawn more children than "
+ "clients to test, setting children=%s" %
+ len(clients))
+ Bcfg2.Options.setup.children = len(clients)
+ perchild = int(ceil(len(clients) /
+ float(Bcfg2.Options.setup.children + 1)))
+ queue = Queue()
+ for child in range(Bcfg2.Options.setup.children):
+ start = child * perchild
+ end = (child + 1) * perchild
+ child = Process(target=self.run_child,
+ args=(clients[start:end], queue))
+ child.start()
+
+ def generate_tests():
+ """ Read test results for the clients """
+ start = Bcfg2.Options.setup.children * perchild
+ for client in clients[start:]:
+ yield ClientTest(core, client, ignore)
+
+ for i in range(start): # pylint: disable=W0612
+ yield ClientTestFromQueue(*queue.get())
+ else:
+ def generate_tests():
+ """ Run tests for the clients """
+ for client in clients:
+ yield ClientTest(core, client, ignore)
+
+ result = TestProgram(
+ argv=sys.argv[:1] + Bcfg2.Options.setup.nose_options,
+ suite=LazySuite(generate_tests), exit=False)
+
+ # block until all children have completed -- should be
+ # immediate since we've already gotten all the results we
+ # expect
+ for child in active_children():
+ child.join()
+
+ core.shutdown()
+ if result.success:
+ os._exit(0) # pylint: disable=W0212
+ else:
+ os._exit(1) # pylint: disable=W0212
diff --git a/src/lib/Bcfg2/Server/__init__.py b/src/lib/Bcfg2/Server/__init__.py
index 0678e4579..39ed2ec91 100644
--- a/src/lib/Bcfg2/Server/__init__.py
+++ b/src/lib/Bcfg2/Server/__init__.py
@@ -1,12 +1,11 @@
"""This is the set of modules for Bcfg2.Server."""
import lxml.etree
-from Bcfg2.Compat import walk_packages
-
-__all__ = [m[1] for m in walk_packages(path=__path__)]
XI = 'http://www.w3.org/2001/XInclude'
XI_NAMESPACE = '{%s}' % XI
# pylint: disable=C0103
XMLParser = lxml.etree.XMLParser(remove_blank_text=True)
+
+core = None
diff --git a/src/lib/Bcfg2/Server/models.py b/src/lib/Bcfg2/Server/models.py
index 7e2f5b09d..8d6642a25 100644
--- a/src/lib/Bcfg2/Server/models.py
+++ b/src/lib/Bcfg2/Server/models.py
@@ -1,34 +1,44 @@
""" Django database models for all plugins """
import sys
-import copy
import logging
import Bcfg2.Options
import Bcfg2.Server.Plugins
-from django.db import models
-LOGGER = logging.getLogger('Bcfg2.Server.models')
+LOGGER = logging.getLogger(__name__)
MODELS = []
-def load_models(plugins=None, cfile='/etc/bcfg2.conf', quiet=True):
+class _OptionContainer(object):
+ """Options for Bcfg2 database models."""
+
+ # we want to provide a different default plugin list --
+ # namely, _all_ plugins, so that the database is guaranteed to
+ # work, even if /etc/bcfg2.conf isn't set up properly
+ options = [Bcfg2.Options.Common.plugins]
+
+ @staticmethod
+ def options_parsed_hook():
+ # basic invocation to ensure that a default set of models is
+ # loaded, and thus that this module will always work.
+ load_models()
+
+Bcfg2.Options.get_parser().add_component(_OptionContainer)
+
+
+def load_models(plugins=None):
""" load models from plugins specified in the config """
+ # this has to be imported after options are parsed, because Django
+ # finalizes its settings as soon as it's loaded, which means that
+ # if we import this before Bcfg2.DBSettings has been populated,
+ # Django gets a null configuration, and subsequent updates to
+ # Bcfg2.DBSettings won't help.
+ from django.db import models
global MODELS
- if plugins is None:
- # we want to provide a different default plugin list --
- # namely, _all_ plugins, so that the database is guaranteed to
- # work, even if /etc/bcfg2.conf isn't set up properly
- plugin_opt = copy.deepcopy(Bcfg2.Options.SERVER_PLUGINS)
- plugin_opt.default = Bcfg2.Server.Plugins.__all__
-
- setup = \
- Bcfg2.Options.OptionParser(dict(plugins=plugin_opt,
- configfile=Bcfg2.Options.CFILE),
- quiet=quiet)
- setup.parse([Bcfg2.Options.CFILE.cmd, cfile])
- plugins = setup['plugins']
+ if not plugins:
+ plugins = Bcfg2.Options.setup.plugins
if MODELS:
# load_models() has been called once, so first unload all of
@@ -39,45 +49,22 @@ def load_models(plugins=None, cfile='/etc/bcfg2.conf', quiet=True):
delattr(sys.modules[__name__], model)
MODELS = []
- for plugin in plugins:
- try:
- mod = getattr(__import__("Bcfg2.Server.Plugins.%s" %
- plugin).Server.Plugins, plugin)
- except ImportError:
- try:
- err = sys.exc_info()[1]
- mod = __import__(plugin)
- except: # pylint: disable=W0702
- if plugins != Bcfg2.Server.Plugins.__all__:
- # only produce errors if the default plugin list
- # was not used -- i.e., if the config file was set
- # up. don't produce errors when trying to load
- # all plugins, IOW. the error from the first
- # attempt to import is probably more accurate than
- # the second attempt.
- LOGGER.error("Failed to load plugin %s: %s" % (plugin,
- err))
- continue
+ for mod in plugins:
for sym in dir(mod):
obj = getattr(mod, sym)
- if hasattr(obj, "__bases__") and models.Model in obj.__bases__:
+ if isinstance(obj, type) and issubclass(obj, models.Model):
setattr(sys.modules[__name__], sym, obj)
MODELS.append(sym)
-# basic invocation to ensure that a default set of models is loaded,
-# and thus that this module will always work.
-load_models(quiet=True)
-
-
-class InternalDatabaseVersion(models.Model):
- """ Object that tell us to which version the database is """
- version = models.IntegerField()
- updated = models.DateTimeField(auto_now_add=True)
+ class InternalDatabaseVersion(models.Model):
+ """ Object that tell us to which version the database is """
+ version = models.IntegerField()
+ updated = models.DateTimeField(auto_now_add=True)
- def __str__(self):
- return "version %d updated the %s" % (self.version,
+ def __str__(self):
+ return "version %d updated %s" % (self.version,
self.updated.isoformat())
- class Meta: # pylint: disable=C0111,W0232
- app_label = "reports"
- get_latest_by = "version"
+ class Meta: # pylint: disable=C0111,W0232
+ app_label = "reports"
+ get_latest_by = "version"