summaryrefslogtreecommitdiffstats
path: root/src/sbin
diff options
context:
space:
mode:
authorChris St. Pierre <chris.a.st.pierre@gmail.com>2011-04-20 09:41:07 -0400
committerChris St. Pierre <chris.a.st.pierre@gmail.com>2011-04-20 09:41:07 -0400
commitb5810882e8c6b1e6b76a8239f70a129d415ecee6 (patch)
tree8c2df3610bebd92f52b70b7f37a7197c9ec2a3e9 /src/sbin
parent20974e1311168b75e621cad14894fe7b217b61a2 (diff)
downloadbcfg2-b5810882e8c6b1e6b76a8239f70a129d415ecee6.tar.gz
bcfg2-b5810882e8c6b1e6b76a8239f70a129d415ecee6.tar.bz2
bcfg2-b5810882e8c6b1e6b76a8239f70a129d415ecee6.zip
Rewrote bcfg2-repo-validate as bcfg2-lint, which uses a plugin
interface to be lots more flexible and extensible. Added several more tests. If bcfg2-lint is run as bcfg2-repo-validate, it roughly emulates the functionality of that program. TODO: Need to figure out correct way to symlink bcfg2-repo-validate to bcfg2-lint on install.
Diffstat (limited to 'src/sbin')
-rwxr-xr-xsrc/sbin/bcfg2-lint167
-rwxr-xr-xsrc/sbin/bcfg2-repo-validate328
2 files changed, 167 insertions, 328 deletions
diff --git a/src/sbin/bcfg2-lint b/src/sbin/bcfg2-lint
new file mode 100755
index 000000000..42c077d63
--- /dev/null
+++ b/src/sbin/bcfg2-lint
@@ -0,0 +1,167 @@
+#!/usr/bin/env python
+
+"""This tool examines your Bcfg2 specifications for errors."""
+__revision__ = '$Revision$'
+
+import sys
+import inspect
+import logging
+import ConfigParser
+import Bcfg2.Logger
+import Bcfg2.Options
+import Bcfg2.Server.Core
+import Bcfg2.Server.Lint
+
+logger = logging.getLogger('bcfg2-lint')
+
+class Parser(ConfigParser.ConfigParser):
+ def get(self, section, option, default):
+        """ Override ConfigParser.get: if the requested option is not
+        in the config file, return the value of default rather than
+        raising an exception. We still raise exceptions on missing
+        sections.
+        """
+ try:
+ return ConfigParser.ConfigParser.get(self, section, option)
+ except (ConfigParser.NoOptionError, ConfigParser.NoSectionError):
+ return default
+
+def run_serverless_plugins(plugins, config=None, setup=None):
+ logger.debug("Running serverless plugins")
+ errors = (0, 0)
+ for plugin_name, plugin in plugins.items():
+ plugin_errors = run_plugin(plugin, plugin_name,
+ setup=setup, config=config, files=files)
+ errors = [errors[n] + plugin_errors[n]
+ for n in range(0, len(errors))]
+ return errors
+
+def run_server_plugins(plugins, config=None, setup=None):
+ core = load_server(setup)
+ logger.debug("Running server plugins")
+ errors = (0, 0)
+ for plugin_name, plugin in plugins.items():
+ plugin_errors = run_plugin(plugin, plugin_name, args=[core],
+ setup=setup, config=config, files=files)
+ errors = [errors[n] + plugin_errors[n]
+ for n in range(0, len(errors))]
+ return errors
+
+def run_plugin(plugin, plugin_name, setup=None, args=None, config=None,
+ files=None):
+ logger.debug(" Running %s" % plugin_name)
+ if args is None:
+ args = []
+
+ if config is not None and config.has_section(plugin_name):
+ args.append(dict(config.items(plugin_name), **setup))
+ else:
+ args.append(setup)
+
+ return plugin(*args, files=files).Run()
+ # try:
+ # return plugin(*args, files=files).Run()
+ # except Exception, err:
+ # logger.error("Failed to run plugin %s: %s" % (plugin, err))
+ # raise SystemExit(1)
+
+def load_server(setup):
+    """ Load and return a Bcfg2 server core for use by server-aware plugins. """
+ core = Bcfg2.Server.Core.Core(setup['repo'], setup['plugins'],
+ setup['password'], setup['encoding'])
+ if setup['event debug']:
+ core.fam.debug = True
+ core.fam.handle_events_in_interval(4)
+ return core
+
+if __name__ == '__main__':
+ optinfo = {
+ 'configfile': Bcfg2.Options.CFILE,
+ 'help': Bcfg2.Options.HELP,
+ 'verbose': Bcfg2.Options.VERBOSE,
+ }
+ optinfo.update({
+ 'event debug': Bcfg2.Options.DEBUG,
+ 'encoding': Bcfg2.Options.ENCODING,
+ # Server options
+ 'repo': Bcfg2.Options.SERVER_REPOSITORY,
+ 'plugins': Bcfg2.Options.SERVER_PLUGINS,
+ 'mconnect': Bcfg2.Options.SERVER_MCONNECT,
+ 'filemonitor': Bcfg2.Options.SERVER_FILEMONITOR,
+ 'location': Bcfg2.Options.SERVER_LOCATION,
+ 'static': Bcfg2.Options.SERVER_STATIC,
+ 'key': Bcfg2.Options.SERVER_KEY,
+ 'cert': Bcfg2.Options.SERVER_CERT,
+ 'ca': Bcfg2.Options.SERVER_CA,
+ 'password': Bcfg2.Options.SERVER_PASSWORD,
+ 'protocol': Bcfg2.Options.SERVER_PROTOCOL,
+ # More options
+ 'logging': Bcfg2.Options.LOGGING_FILE_PATH,
+ 'stdin': Bcfg2.Options.FILES_ON_STDIN,
+ 'schema': Bcfg2.Options.SCHEMA_PATH,
+ 'config': Bcfg2.Options.Option('Specify bcfg2-lint configuration file',
+ '/etc/bcfg2-lint.conf',
+ cmd='--lint-config',
+ odesc='<conffile>',
+ long_arg = True),
+ })
+ setup = Bcfg2.Options.OptionParser(optinfo)
+ setup.parse(sys.argv[1:])
+
+ log_args = dict(to_syslog=False, to_console=logging.WARNING)
+ if setup['verbose']:
+ log_args['to_console'] = logging.DEBUG
+ Bcfg2.Logger.setup_logging('bcfg2-info', **log_args)
+
+ config = Parser()
+ config.read(setup['config'])
+
+ # get list of plugins to run
+ if setup['args']:
+ allplugins = setup['args']
+ elif "bcfg2-repo-validate" in sys.argv[0]:
+ allplugins = 'Duplicates,RequiredAttrs,Validate'.split(',')
+ else:
+ allplugins = config.get('main', 'plugins',
+ ",".join(Bcfg2.Server.Lint.__all__)).split(',')
+
+ if setup['stdin']:
+ files = [s.strip() for s in sys.stdin.readlines()]
+ else:
+ files = None
+
+ # load plugins
+ serverplugins = {}
+ serverlessplugins = {}
+ for plugin_name in allplugins:
+ try:
+ mod = getattr(__import__("Bcfg2.Server.Lint.%s" %
+ (plugin_name)).Server.Lint, plugin_name)
+ except ImportError:
+ try:
+ mod = __import__(plugin_name)
+ except Exception, err:
+ logger.error("Failed to load plugin %s: %s" % (plugin_name,
+ err))
+ raise SystemExit(1)
+ plugin = getattr(mod, plugin_name)
+ if [c for c in inspect.getmro(plugin)
+ if c == Bcfg2.Server.Lint.ServerPlugin]:
+ serverplugins[plugin_name] = plugin
+ else:
+ serverlessplugins[plugin_name] = plugin
+
+    # errors is an (error count, warning count) pair
+ errors = run_serverless_plugins(serverlessplugins,
+ config=config, setup=setup)
+
+ if serverplugins:
+ perrors = run_server_plugins(serverplugins, config=config, setup=setup)
+ errors = [errors[n] + perrors[n] for n in range(0, len(errors))]
+
+ print "%d errors" % errors[0]
+ print "%d warnings" % errors[1]
+ if errors[0]:
+ raise SystemExit(2)
+ elif errors[1]:
+ raise SystemExit(3)
diff --git a/src/sbin/bcfg2-repo-validate b/src/sbin/bcfg2-repo-validate
deleted file mode 100755
index e82b57659..000000000
--- a/src/sbin/bcfg2-repo-validate
+++ /dev/null
@@ -1,328 +0,0 @@
-#!/usr/bin/env python
-
-"""
-bcfg2-repo-validate checks all xml files in Bcfg2
-repos against their respective XML schemas.
-"""
-__revision__ = '$Revision$'
-
-import fnmatch
-import glob
-import lxml.etree
-import os
-import sys
-import fnmatch
-import logging
-import Bcfg2.Options
-from subprocess import Popen, PIPE, STDOUT
-
-def follow_xinclude(xfile, file_list=None):
- """ follow xincludes in the given file """
- xdata = lxml.etree.parse(xfile)
- included = set([ent.get('href') for ent in
- xdata.findall('./{http://www.w3.org/2001/XInclude}include')])
- rv = []
-
- while included:
- try:
- filename = included.pop()
- except KeyError:
- continue
-
- path = os.path.join(os.path.dirname(xfile), filename)
- if file_list is not None and path in file_list:
- rv.append(path)
- groupdata = lxml.etree.parse(path)
- [included.add(el.get('href'))
- for el in
- groupdata.findall('./{http://www.w3.org/2001/XInclude}include')]
- included.discard(filename)
-
- return rv
-
-def validate(filename, schemafile, schema=None, xinclude=True):
- """validate a fail against the given lxml.etree.Schema. return
- True on success, False on failure"""
- if schema is None:
- # if no schema object was provided, instantiate one
- try:
- schema = lxml.etree.XMLSchema(lxml.etree.parse(schemafile))
- except:
- logging.warn("Failed to process schema %s", schemafile)
- return False
-
- try:
- datafile = lxml.etree.parse(filename)
- except SyntaxError:
- logging.warn("%s ***FAILS*** to parse \t\t<----", filename)
- lint = Popen(["xmllint", filename], stdout=PIPE, stderr=STDOUT)
- logging.warn(lint.communicate()[0])
- lint.wait()
- return False
- except IOError:
- logging.warn("Failed to open file %s \t\t<---", filename)
- return False
-
- if schema.validate(datafile):
- logging.info("%s checks out", filename)
- else:
- cmd = ["xmllint"]
- if xinclude:
- cmd.append("--xinclude")
- cmd.extend(["--noout", "--schema", schemafile, filename])
- lint = Popen(cmd, stdout=PIPE, stderr=STDOUT)
- output = lint.communicate()[0]
- if lint.wait():
- logging.warn("%s ***FAILS*** to verify \t\t<----", filename)
- logging.warn(output)
- return False
- else:
- logging.info("%s checks out", filename)
- return True
-
-if __name__ == '__main__':
- opts = {'repo': Bcfg2.Options.SERVER_REPOSITORY,
- 'verbose': Bcfg2.Options.VERBOSE,
- 'configfile': Bcfg2.Options.CFILE,
- 'require-schema': Bcfg2.Options.REQUIRE_SCHEMA,
- 'schema': Bcfg2.Options.SCHEMA_PATH,
- 'stdin': Bcfg2.Options.FILES_ON_STDIN}
- setup = Bcfg2.Options.OptionParser(opts)
- setup.parse(sys.argv[1:])
- verbose = setup['verbose']
- cpath = setup['configfile']
- schemadir = setup['schema']
- os.chdir(schemadir)
- repo = setup['repo']
-
- # set up logging
- level = logging.WARNING
- if verbose:
- level = logging.INFO
- logging.basicConfig(level=level, format="%(message)s")
-
- if setup['stdin']:
- file_list = [s.strip() for s in sys.stdin.readlines()]
- info_list = [f for f in file_list if os.path.basename(f) == 'info.xml']
- metadata_list = fnmatch.filter(file_list, "*/Metadata/groups.xml")
- clients_list = fnmatch.filter(file_list, "*/Metadata/clients.xml")
- bundle_list = fnmatch.filter(file_list, "*/Bundler/*.xml")
- genshibundle_list = fnmatch.filter(file_list, "*/Bundler/*.genshi")
- pkg_list = fnmatch.filter(file_list, "*/Pkgmgr/*.xml")
- base_list = fnmatch.filter(file_list, "*/Base/*.xml")
- rules_list = fnmatch.filter(file_list, "*/Rules/*.xml")
- imageinfo_list = fnmatch.filter(file_list,
- "*/etc/report-configuration.xml")
- services_list = fnmatch.filter(file_list, "*/Svcmgr/*.xml")
- deps_list = fnmatch.filter(file_list, "*/Deps/*.xml")
- dec_list = fnmatch.filter(file_list, "*/Decisions/*")
- pkgcfg_list = fnmatch.filter(file_list, "*/Packages/config.xml")
- gp_list = fnmatch.filter(file_list, "*/GroupPatterns/config.xml")
- props_list = [f
- for f in fnmatch.filter(file_list, "*/Properties/*.xml")
- if "%s.xsd" % os.path.splitext(f)[0] in file_list]
-
- # attempt to follow XIncludes in groups.xml and clients.xml.
- # if those top-level files aren't listed in file_list, though,
- # there's really nothing we can do to guess what a file in
- # Metadata is
- if metadata_list:
- metadata_list.extend(follow_xinclude(metadata_list[0],
- file_list=file_list))
- if clients_list:
- clients_list.extend(follow_xinclude(clients_list[0],
- file_list=file_list))
-
- # if there are other files in Metadata in file_list that
- # aren't listed in metadata_list or clients_list, we can't
- # verify them. warn about those.
- for fname in fnmatch.filter(file_list, "*/Metadata/*.xml"):
- if fname not in metadata_list and fname not in clients_list:
- logging.warn("Broken XInclude chain: Could not determine file type of %s", fname)
- else:
- # not reading files from stdin
-
- # Get a list of all info.xml files in the bcfg2 repository
- info_list = []
- for infodir in ['Cfg', 'TGenshi', 'TCheetah']:
- for root, dirs, files in os.walk('%s/%s' % (repo, infodir)):
- info_list.extend([os.path.join(root, f) for f in files
- if f == 'info.xml'])
-
- # get metadata list
- metadata_list = glob.glob("%s/Metadata/groups.xml" % repo)
-
- # get other file lists
- clients_list = glob.glob("%s/Metadata/clients.xml" % repo)
- bundle_list = glob.glob("%s/Bundler/*.xml" % repo)
- genshibundle_list = glob.glob("%s/Bundler/*.genshi" % repo)
- pkg_list = glob.glob("%s/Pkgmgr/*.xml" % repo)
- base_list = glob.glob("%s/Base/*.xml" % repo)
- rules_list = glob.glob("%s/Rules/*.xml" % repo)
- imageinfo_list = glob.glob("%s/etc/report-configuration.xml" % repo)
- services_list = glob.glob("%s/Svcmgr/*.xml" % repo)
- deps_list = glob.glob("%s/Deps/*.xml" % repo)
- dec_list = glob.glob("%s/Decisions/*" % repo)
- pkgcfg_list = glob.glob("%s/Packages/config.xml" % repo)
- gp_list = glob.glob('%s/GroupPatterns/config.xml' % repo)
- props_list = glob.glob("%s/Properties/*.xml" % repo)
-
- metadata_list.extend(follow_xinclude("%s/Metadata/groups.xml" % repo))
- clients_list.extend(follow_xinclude("%s/Metadata/clients.xml" % repo))
-
- # get all bundles
- ref_bundles = set()
- xdata = lxml.etree.parse("%s/Metadata/groups.xml" % repo)
- xdata.xinclude()
- for bundle in xdata.findall("//Bundle"):
- ref_bundles.add("%s/Bundler/%s" % (repo, bundle.get('name')))
- included = set([ent.get('href') for ent in
- xdata.findall('./{http://www.w3.org/2001/XInclude}include')])
-
- # check for multiple default group definitions
- if "%s/Metadata/groups.xml" % repo in metadata_list:
- default_groups = [g for g in lxml.etree.parse("%s/Metadata/groups.xml" %
- repo).findall('.//Group')
- if g.get('default') == 'true']
- if len(default_groups) > 1:
- logging.warn("*** Warning: Multiple default groups defined")
- for grp in default_groups:
- logging.warn(" %s", grp.get('name'))
-
- # verify attributes for configuration entries
- # (as defined in doc/server/configurationentries)
- # TODO: See if it is possible to do this in the schema instead
- required_configuration_attrs = {
- 'device': ['name', 'owner', 'group', 'dev_type'],
- 'directory': ['name', 'owner', 'group', 'perms'],
- 'file': ['name', 'owner', 'group', 'perms'],
- 'hardlink': ['name', 'to'],
- 'symlink': ['name', 'to'],
- 'ignore': ['name'],
- 'nonexistent': ['name'],
- 'permissions': ['name', 'owner', 'group', 'perms']}
- for rfile in rules_list:
- try:
- xdata = lxml.etree.parse(rfile)
- except lxml.etree.XMLSyntaxError, e:
- logging.warn("Failed to parse %s: %s", rfile, e)
- for posixpath in xdata.findall("//Path"):
- pathname = posixpath.get('name')
- pathtype = posixpath.get('type')
- pathset = set(posixpath.attrib.keys())
- try:
- required_attrs = set(required_configuration_attrs[pathtype] \
- + ['type'])
- except KeyError:
- continue
- if 'dev_type' in required_attrs:
- dev_type = posixpath.get('dev_type')
- if dev_type in ['block', 'char']:
- # check if major/minor are specified
- required_attrs |= set(['major', 'minor'])
- if pathset.issuperset(required_attrs):
- continue
- else:
- logging.warn("The following required attributes are missing for"
- " Path %s in %s: %s",
- pathname, rfile,
- [attr
- for attr in required_attrs.difference(pathset)])
-
- # warn on duplicate Pkgmgr entries with the same priority
- pset = set()
- for plist in pkg_list:
- try:
- xdata = lxml.etree.parse(plist)
- except lxml.etree.XMLSyntaxError, e:
- logging.warn("Failed to parse %s: %s", plist, e)
- # get priority, type, group
- priority = xdata.getroot().get('priority')
- ptype = xdata.getroot().get('type')
- for pkg in xdata.findall("//Package"):
- if pkg.getparent().tag == 'Group':
- grp = pkg.getparent().get('name')
- if type(grp) is not str and grp.getparent().tag == 'Group':
- pgrp = grp.getparent().get('name')
- else:
- pgrp = 'none'
- else:
- grp = 'none'
- pgrp = 'none'
- ptuple = (pkg.get('name'), priority, ptype, grp, pgrp)
- # check if package is already listed with same priority,
- # type, grp
- if ptuple in pset:
- logging.warn("Duplicate Package %s, priority:%s, type:%s",
- pkg.get('name'), priority, ptype)
- else:
- pset.add(ptuple)
-
- filesets = {"%s/metadata.xsd": metadata_list,
- "%s/clients.xsd": clients_list,
- "%s/info.xsd": info_list,
- "%s/bundle.xsd": bundle_list + genshibundle_list,
- "%s/pkglist.xsd": pkg_list,
- "%s/base.xsd": base_list,
- "%s/rules.xsd": rules_list,
- "%s/report-configuration.xsd": imageinfo_list,
- "%s/services.xsd": services_list,
- "%s/deps.xsd": deps_list,
- "%s/decisions.xsd": dec_list,
- "%s/packages.xsd": pkgcfg_list,
- "%s/grouppatterns.xsd": gp_list}
-
- failures = 0
- for schemaname, filelist in list(filesets.items()):
- if filelist:
- # avoid loading schemas for empty file lists
- try:
- schema = lxml.etree.XMLSchema(lxml.etree.parse(schemaname %
- schemadir))
- except:
- logging.warn("Failed to process schema %s",
- schemaname % schemadir)
- failures = 1
- continue
- for filename in filelist:
- if not validate(filename, schemaname % schemadir,
- schema=schema, xinclude=not setup['stdin']):
- failures = 1
-
- # check Properties files against their schemas
- for filename in props_list:
- logging.info("checking %s" % filename)
- schemafile = "%s.xsd" % os.path.splitext(filename)[0]
- if os.path.exists(schemafile):
- if not validate(filename, schemafile, xinclude=not setup['stdin']):
- failures = 1
- elif setup['require-schema']:
- logging.warn("No schema found for %s", filename)
- failures = 1
-
- # print out missing bundle information
- logging.info("")
- if not setup['stdin']:
- # if we've taken a list of files on stdin, there's an
- # excellent chance that referenced bundles do not exist, so
- # skip this check
- for bundle in ref_bundles:
- # check for both regular and genshi bundles
- xmlbundle = "%s.xml" % bundle
- genshibundle = "%s.genshi" % bundle
- allbundles = bundle_list + genshibundle_list
- if xmlbundle not in allbundles and genshibundle not in allbundles:
- logging.info("*** Warning: Bundle %s referenced, but does not "
- "exist.", bundle)
-
- # verify bundle name attribute matches filename
- for bundle in (bundle_list + genshibundle_list):
- fname = bundle.split('Bundler/')[1].split('.')[0]
- xdata = lxml.etree.parse(bundle)
- bname = xdata.getroot().get('name')
- if fname != bname:
- logging.warn("The following names are inconsistent:")
- logging.warn(" Filename is %s", fname)
- logging.warn(" Bundle name found in %s is %s", fname, bname)
-
- raise SystemExit(failures)