-rw-r--r--  doc/server/plugins/generators/packages.txt | 13
-rw-r--r--  src/lib/Bcfg2/Client/Tools/POSIX/File.py | 17
-rw-r--r--  src/lib/Bcfg2/Client/Tools/POSIX/base.py | 3
-rw-r--r--  src/lib/Bcfg2/Client/Tools/__init__.py | 5
-rw-r--r--  src/lib/Bcfg2/Reporting/Collector.py | 2
-rw-r--r--  src/lib/Bcfg2/Server/BuiltinCore.py | 1
-rw-r--r--  src/lib/Bcfg2/Server/Plugins/Metadata.py | 248
-rw-r--r--  src/lib/Bcfg2/Server/Plugins/NagiosGen.py | 6
-rw-r--r--  src/lib/Bcfg2/Server/Plugins/Probes.py | 6
-rw-r--r--  src/lib/Bcfg2/Server/SSLServer.py | 32
-rw-r--r--  testsuite/Testsrc/Testlib/TestClient/TestTools/TestPOSIX/Testbase.py | 11
-rw-r--r--  testsuite/Testsrc/Testlib/TestServer/TestPlugins/TestMetadata.py | 17
-rw-r--r--  tools/README | 3
-rwxr-xr-x  tools/git_commit.py | 181
14 files changed, 395 insertions, 150 deletions
diff --git a/doc/server/plugins/generators/packages.txt b/doc/server/plugins/generators/packages.txt
index a7cdfad2d..8b317552f 100644
--- a/doc/server/plugins/generators/packages.txt
+++ b/doc/server/plugins/generators/packages.txt
@@ -428,17 +428,18 @@ Benefits to this include:
* Much lower memory usage by the ``bcfg2-server`` process.
* Much faster ``Packages.Refresh`` behavior.
* More accurate dependency resolution.
+* Better use of multiple processors/cores.
Drawbacks include:
-* More disk I/O. In some cases, you may have to raise the open file
+* Resolution of package dependencies is slower and more
+ resource-intensive. At times it can be much slower, particularly
+ after running ``Packages.Refresh``.
+* More disk I/O. This can be alleviated by putting
+ ``/var/lib/bcfg2/Packages/cache`` on tmpfs, but that offsets the
+ lower memory usage. In some cases, you may have to raise the open file
limit for the user who runs your Bcfg2 server process, particularly
if you have a lot of repositories.
-* Resolution of package dependencies is slower in some cases,
- particularly after running ``Packages.Refresh``.
-* If you have a very large number of clients using a very small number
- of repositories, using native yum libraries may actually increase
- memory usage.
Configuring the Yum Helper
--------------------------
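
As a rough sketch of the mitigations mentioned in the hunk above (the cache path comes from the documentation; the ``bcfg2`` user name, size, and limit values are assumptions to adapt for your site), the tmpfs mount and open-file-limit bump might look like:

    # /etc/fstab -- keep the Packages cache in RAM (trades memory for I/O)
    tmpfs  /var/lib/bcfg2/Packages/cache  tmpfs  size=512m,mode=0750  0  0

    # /etc/security/limits.conf -- raise the open file limit for the server user
    bcfg2  soft  nofile  8192
    bcfg2  hard  nofile  8192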
diff --git a/src/lib/Bcfg2/Client/Tools/POSIX/File.py b/src/lib/Bcfg2/Client/Tools/POSIX/File.py
index 482320e0d..c12188e1c 100644
--- a/src/lib/Bcfg2/Client/Tools/POSIX/File.py
+++ b/src/lib/Bcfg2/Client/Tools/POSIX/File.py
@@ -54,6 +54,10 @@ class POSIXFile(POSIXTool):
def verify(self, entry, modlist):
ondisk = self._exists(entry)
tempdata, is_binary = self._get_data(entry)
+ if isinstance(tempdata, str) and str != unicode:
+ tempdatasize = len(tempdata)
+ else:
+ tempdatasize = len(tempdata.encode(self.setup['encoding']))
different = False
content = None
@@ -62,7 +66,7 @@ class POSIXFile(POSIXTool):
# they're clearly different
different = True
content = ""
- elif len(tempdata) != ondisk[stat.ST_SIZE]:
+ elif tempdatasize != ondisk[stat.ST_SIZE]:
# next, see if the size of the target file is different
# from the size of the desired content
different = True
@@ -73,6 +77,9 @@ class POSIXFile(POSIXTool):
# for everything else
try:
content = open(entry.get('name')).read()
+ except UnicodeDecodeError:
+ content = open(entry.get('name'),
+ encoding=self.setup['encoding']).read()
except IOError:
self.logger.error("POSIX: Failed to read %s: %s" %
(entry.get("name"), sys.exc_info()[1]))
@@ -90,7 +97,7 @@ class POSIXFile(POSIXTool):
def _write_tmpfile(self, entry):
""" Write the file data to a temp file """
- filedata, _ = self._get_data(entry)
+ filedata = self._get_data(entry)[0]
# get a temp file to write to that is in the same directory as
# the existing file in order to preserve any permissions
# protections on that directory, and also to avoid issues with
@@ -106,7 +113,11 @@ class POSIXFile(POSIXTool):
(os.path.dirname(entry.get('name')), err))
return False
try:
- os.fdopen(newfd, 'w').write(filedata)
+ if isinstance(filedata, str) and str != unicode:
+ os.fdopen(newfd, 'w').write(filedata)
+ else:
+ os.fdopen(newfd, 'wb').write(
+ filedata.encode(self.setup['encoding']))
except (OSError, IOError):
err = sys.exc_info()[1]
self.logger.error("POSIX: Failed to open temp file %s for writing "
diff --git a/src/lib/Bcfg2/Client/Tools/POSIX/base.py b/src/lib/Bcfg2/Client/Tools/POSIX/base.py
index c9164cb88..bd2f8f87e 100644
--- a/src/lib/Bcfg2/Client/Tools/POSIX/base.py
+++ b/src/lib/Bcfg2/Client/Tools/POSIX/base.py
@@ -525,7 +525,8 @@ class POSIXTool(Bcfg2.Client.Tools.Tool):
if entry.get("secontext") == "__default__":
try:
wanted_secontext = \
- selinux.matchpathcon(path, 0)[1].split(":")[2]
+ selinux.matchpathcon(
+ path, ondisk[stat.ST_MODE])[1].split(":")[2]
except OSError:
errors.append("%s has no default SELinux context" %
entry.get("name"))
diff --git a/src/lib/Bcfg2/Client/Tools/__init__.py b/src/lib/Bcfg2/Client/Tools/__init__.py
index aab2459f2..4a808aa60 100644
--- a/src/lib/Bcfg2/Client/Tools/__init__.py
+++ b/src/lib/Bcfg2/Client/Tools/__init__.py
@@ -590,14 +590,15 @@ class SvcTool(Tool):
if not self.handlesEntry(entry):
continue
+ estatus = entry.get('status')
restart = entry.get("restart", "true").lower()
- if (restart == "false" or
+ if (restart == "false" or estatus == 'ignore' or
(restart == "interactive" and
not Bcfg2.Options.setup.interactive)):
continue
success = False
- if entry.get('status') == 'on':
+ if estatus == 'on':
if Bcfg2.Options.setup.service_mode == 'build':
success = self.stop_service(entry)
elif entry.get('name') not in self.restarted:
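
With the SvcTool change above, a service entry whose status is 'ignore' is never restarted, regardless of its restart attribute. A hypothetical entry that would now be left untouched (the service name is made up; status and restart are the attributes checked in the code above):

    <Service name="example-svc" status="ignore" restart="true"/>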
diff --git a/src/lib/Bcfg2/Reporting/Collector.py b/src/lib/Bcfg2/Reporting/Collector.py
index ccd175dc3..a93f1b0ae 100644
--- a/src/lib/Bcfg2/Reporting/Collector.py
+++ b/src/lib/Bcfg2/Reporting/Collector.py
@@ -78,7 +78,7 @@ class ReportingCollector(object):
"""Startup the processing and go!"""
self.terminate = threading.Event()
atexit.register(self.shutdown)
- self.context = daemon.DaemonContext()
+ self.context = daemon.DaemonContext(detach_process=True)
if Bcfg2.Options.setup.daemon:
self.logger.debug("Daemonizing")
diff --git a/src/lib/Bcfg2/Server/BuiltinCore.py b/src/lib/Bcfg2/Server/BuiltinCore.py
index 179a6aa9f..0023e9313 100644
--- a/src/lib/Bcfg2/Server/BuiltinCore.py
+++ b/src/lib/Bcfg2/Server/BuiltinCore.py
@@ -113,7 +113,6 @@ class BuiltinCore(NetworkCore):
keyfile=Bcfg2.Options.setup.key,
certfile=Bcfg2.Options.setup.cert,
register=False,
- timeout=1,
ca=Bcfg2.Options.setup.ca)
except: # pylint: disable=W0702
err = sys.exc_info()[1]
diff --git a/src/lib/Bcfg2/Server/Plugins/Metadata.py b/src/lib/Bcfg2/Server/Plugins/Metadata.py
index 17352dedc..db104b27e 100644
--- a/src/lib/Bcfg2/Server/Plugins/Metadata.py
+++ b/src/lib/Bcfg2/Server/Plugins/Metadata.py
@@ -545,15 +545,18 @@ class Metadata(Bcfg2.Server.Plugin.Metadata,
self.raliases = {}
# mapping of groupname -> MetadataGroup object
self.groups = {}
- # mappings of predicate -> MetadataGroup object
+ # mappings of groupname -> [predicates]
self.group_membership = dict()
self.negated_groups = dict()
+ # list of group names in document order
+ self.ordered_groups = []
# mapping of hostname -> version string
if self._use_db:
self.versions = ClientVersions(core, # pylint: disable=E1102
datastore)
else:
self.versions = dict()
+
self.uuid = {}
self.session_cache = {}
self.cache = Cache("Metadata")
@@ -561,7 +564,7 @@ class Metadata(Bcfg2.Server.Plugin.Metadata,
self.pdirty = False
self.password = Bcfg2.Options.setup.password
self.query = MetadataQuery(core.build_metadata,
- lambda: list(self.clients),
+ self.list_clients,
self.get_client_names_by_groups,
self.get_client_names_by_profiles,
self.get_all_group_names,
@@ -851,51 +854,34 @@ class Metadata(Bcfg2.Server.Plugin.Metadata,
if self._use_db:
self.clients = self.list_clients()
+ def _get_condition(self, element):
+ """ Return a predicate that returns True if a client meets
+ the condition specified in the given Group or Client
+ element """
+ negate = element.get('negate', 'false').lower() == 'true'
+ pname = element.get("name")
+ if element.tag == 'Group':
+ return lambda c, g, _: negate != (pname in g)
+ elif element.tag == 'Client':
+ return lambda c, g, _: negate != (pname == c)
+
+ def _get_category_condition(self, grpname):
+ """ get a predicate that returns False if a client is already
+ a member of a group in the given group's category, True
+ otherwise"""
+ return lambda client, _, categories: \
+ bool(self._check_category(client, grpname, categories))
+
+ def _aggregate_conditions(self, conditions):
+ """ aggregate all conditions on a given group declaration
+ into a single predicate """
+ return lambda client, groups, cats: \
+ all(cond(client, groups, cats) for cond in conditions)
+
def _handle_groups_xml_event(self, _): # pylint: disable=R0912
""" re-read groups.xml on any event on it """
self.groups = {}
- # these three functions must be separate functions in order to
- # ensure that the scope is right for the closures they return
- def get_condition(element):
- """ Return a predicate that returns True if a client meets
- the condition specified in the given Group or Client
- element """
- negate = element.get('negate', 'false').lower() == 'true'
- pname = element.get("name")
- if element.tag == 'Group':
- return lambda c, g, _: negate != (pname in g)
- elif element.tag == 'Client':
- return lambda c, g, _: negate != (pname == c)
-
- def get_category_condition(category, gname):
- """ get a predicate that returns False if a client is
- already a member of a group in the given category, True
- otherwise """
- def in_cat(client, groups, categories): # pylint: disable=W0613
- """ return True if the client is already a member of a
- group in the category given in the enclosing function,
- False otherwise """
- if category in categories:
- if (gname not in self.groups or
- client not in self.groups[gname].warned):
- self.logger.warning("%s: Group %s suppressed by "
- "category %s; %s already a member "
- "of %s" %
- (self.name, gname, category,
- client, categories[category]))
- if gname in self.groups:
- self.groups[gname].warned.append(client)
- return False
- return True
- return in_cat
-
- def aggregate_conditions(conditions):
- """ aggregate all conditions on a given group declaration
- into a single predicate """
- return lambda client, groups, cats: \
- all(cond(client, groups, cats) for cond in conditions)
-
# first, we get a list of all of the groups declared in the
# file. we do this in two stages because the old way of
# parsing groups.xml didn't support nested groups; in the old
@@ -921,6 +907,7 @@ class Metadata(Bcfg2.Server.Plugin.Metadata,
self.group_membership = dict()
self.negated_groups = dict()
+ self.ordered_groups = []
# confusing loop condition; the XPath query asks for all
# elements under a Group tag under a Groups tag; that is
@@ -931,28 +918,29 @@ class Metadata(Bcfg2.Server.Plugin.Metadata,
# XPath. We do the same thing for Client tags.
for el in self.groups_xml.xdata.xpath("//Groups/Group//*") + \
self.groups_xml.xdata.xpath("//Groups/Client//*"):
- if ((el.tag != 'Group' and el.tag != 'Client') or
- el.getchildren()):
+ if (el.tag != 'Group' and el.tag != 'Client') or el.getchildren():
continue
conditions = []
for parent in el.iterancestors():
- cond = get_condition(parent)
+ cond = self._get_condition(parent)
if cond:
conditions.append(cond)
gname = el.get("name")
if el.get("negate", "false").lower() == "true":
- self.negated_groups[aggregate_conditions(conditions)] = \
- self.groups[gname]
+ self.negated_groups.setdefault(gname, [])
+ self.negated_groups[gname].append(
+ self._aggregate_conditions(conditions))
else:
if self.groups[gname].category:
- conditions.append(
- get_category_condition(self.groups[gname].category,
- gname))
+ conditions.append(self._get_category_condition(gname))
- self.group_membership[aggregate_conditions(conditions)] = \
- self.groups[gname]
+ if gname not in self.ordered_groups:
+ self.ordered_groups.append(gname)
+ self.group_membership.setdefault(gname, [])
+ self.group_membership[gname].append(
+ self._aggregate_conditions(conditions))
self.states['groups.xml'] = True
def HandleEvent(self, event):
@@ -962,6 +950,12 @@ class Metadata(Bcfg2.Server.Plugin.Metadata,
# clear the entire cache when we get an event for any
# metadata file
self.cache.expire()
+
+ # clear out the list of category suppressions that
+ # have been warned about, since this may change when
+ # clients.xml or groups.xml changes.
+ for group in self.groups.values():
+ group.warned = []
event_handler(event)
if False not in list(self.states.values()) and self.debug_flag:
@@ -1100,22 +1094,77 @@ class Metadata(Bcfg2.Server.Plugin.Metadata,
categories = dict()
while numgroups != len(groups):
numgroups = len(groups)
- for predicate, group in self.group_membership.items():
- if group.name in groups:
+ newgroups = set()
+ removegroups = set()
+ for grpname in self.ordered_groups:
+ if grpname in groups:
continue
- if predicate(client, groups, categories):
- groups.add(group.name)
- if group.category:
- categories[group.category] = group.name
- for predicate, group in self.negated_groups.items():
- if group.name not in groups:
+ if any(p(client, groups, categories)
+ for p in self.group_membership[grpname]):
+ newgroups.add(grpname)
+ if (grpname in self.groups and
+ self.groups[grpname].category):
+ categories[self.groups[grpname].category] = grpname
+ groups.update(newgroups)
+ for grpname, predicates in self.negated_groups.items():
+ if grpname not in groups:
continue
- if predicate(client, groups, categories):
- groups.remove(group.name)
- if group.category:
- del categories[group.category]
+ if any(p(client, groups, categories) for p in predicates):
+ removegroups.add(grpname)
+ if (grpname in self.groups and
+ self.groups[grpname].category):
+ del categories[self.groups[grpname].category]
+ groups.difference_update(removegroups)
return (groups, categories)
+ def _check_category(self, client, grpname, categories):
+ """ Determine if the given client is already a member of a
+ group in the same category as the named group.
+
+ The return value is one of three possibilities:
+
+ * If the client is already a member of a group in the same
+ category, then False is returned (i.e., the category check
+ failed);
+ * If the group is not in any categories, then True is returned;
+    * If the client is not a member of a group in the category,
+ then the name of the category is returned. This makes it
+ easy to add the category to the ClientMetadata object (or
+ other category list).
+
+ If a pure boolean value is required, you can do
+ ``bool(self._check_category(...))``.
+ """
+ if grpname not in self.groups:
+ return True
+ category = self.groups[grpname].category
+ if not category:
+ return True
+ if category in categories:
+ if client not in self.groups[grpname].warned:
+ self.logger.warning("%s: Group %s suppressed by category %s; "
+ "%s already a member of %s" %
+ (self.name, grpname, category,
+ client, categories[category]))
+ self.groups[grpname].warned.append(client)
+ return False
+ return category
+
+ def _check_and_add_category(self, client, grpname, categories):
+ """ If the client is not a member of a group in the same
+ category as the named group, then the category is added to
+ ``categories``.
+ :func:`Bcfg2.Server.Plugins.Metadata._check_category` is used
+ to determine if the category can be added.
+
+ If the category check failed, returns False; otherwise,
+ returns True. """
+ rv = self._check_category(client, grpname, categories)
+ if rv and rv is not True:
+ categories[rv] = grpname
+ return True
+ return rv
+
def get_initial_metadata(self, client): # pylint: disable=R0914,R0912
"""Return the metadata for a given client."""
if False in list(self.states.values()):
@@ -1137,30 +1186,29 @@ class Metadata(Bcfg2.Server.Plugin.Metadata,
Handles setting categories and category suppression.
Returns the new profile for the client (which might be
unchanged). """
- groups.add(grpname)
if grpname in self.groups:
- group = self.groups[grpname]
- category = group.category
- if category:
- if category in categories:
- self.logger.warning("%s: Group %s suppressed by "
- "category %s; %s already a member "
- "of %s" %
- (self.name, grpname, category,
- client, categories[category]))
- return
- categories[category] = grpname
- if not profile and group.is_profile:
+ if not self._check_and_add_category(client, grpname,
+ categories):
+ return profile
+ groups.add(grpname)
+ if not profile and self.groups[grpname].is_profile:
return grpname
else:
return profile
+ else:
+ groups.add(grpname)
+ return profile
if client not in self.clients:
pgroup = None
if client in self.clientgroups:
pgroup = self.clientgroups[client][0]
+ self.debug_log("%s: Adding new client with profile %s" %
+ (self.name, pgroup))
elif self.default:
pgroup = self.default
+ self.debug_log("%s: Adding new client with default profile %s"
+ % (self.name, pgroup))
if pgroup:
self.set_profile(client, pgroup, (None, None),
@@ -1177,6 +1225,9 @@ class Metadata(Bcfg2.Server.Plugin.Metadata,
self.groups[cgroup] = MetadataGroup(cgroup)
profile = _add_group(cgroup)
+ # we do this before setting the default because there may be
+ # groups set in <Client> tags in groups.xml that we want to
+ # set
groups, categories = self._merge_groups(client, groups,
categories=categories)
@@ -1225,8 +1276,8 @@ class Metadata(Bcfg2.Server.Plugin.Metadata,
""" return a list of all group names """
all_groups = set()
all_groups.update(self.groups.keys())
- all_groups.update([g.name for g in self.group_membership.values()])
- all_groups.update([g.name for g in self.negated_groups.values()])
+ all_groups.update(self.group_membership.keys())
+ all_groups.update(self.negated_groups.keys())
for grp in self.clientgroups.values():
all_groups.update(grp)
return all_groups
@@ -1239,7 +1290,7 @@ class Metadata(Bcfg2.Server.Plugin.Metadata,
def get_client_names_by_profiles(self, profiles):
""" return a list of names of clients in the given profile groups """
rv = []
- for client in list(self.clients):
+ for client in self.list_clients():
mdata = self.core.build_metadata(client)
if mdata.profile in profiles:
rv.append(client)
@@ -1247,34 +1298,33 @@ class Metadata(Bcfg2.Server.Plugin.Metadata,
def get_client_names_by_groups(self, groups):
""" return a list of names of clients in the given groups """
- mdata = [self.core.build_metadata(client) for client in self.clients]
- return [md.hostname for md in mdata if md.groups.issuperset(groups)]
+ rv = []
+ for client in self.list_clients():
+ mdata = self.core.build_metadata(client)
+ if mdata.groups.issuperset(groups):
+ rv.append(client)
+ return rv
def get_client_names_by_bundles(self, bundles):
""" given a list of bundles, return a list of names of clients
that use those bundles """
- mdata = [self.core.build_metadata(client) for client in self.clients]
- return [md.hostname for md in mdata if md.bundles.issuperset(bundles)]
+ rv = []
+ for client in self.list_clients():
+ mdata = self.core.build_metadata(client)
+ if mdata.bundles.issuperset(bundles):
+ rv.append(client)
+ return rv
def merge_additional_groups(self, imd, groups):
for group in groups:
if group in imd.groups:
continue
- if group in self.groups and self.groups[group].category:
- category = self.groups[group].category
- if self.groups[group].category in imd.categories:
- self.logger.warning("%s: Group %s suppressed by category "
- "%s; %s already a member of %s" %
- (self.name, group, category,
- imd.hostname,
- imd.categories[category]))
- continue
- imd.categories[category] = group
+ if not self._check_and_add_category(imd.hostname, group,
+ imd.categories):
+ continue
imd.groups.add(group)
- self._merge_groups(imd.hostname, imd.groups,
- categories=imd.categories)
-
+ self._merge_groups(imd.hostname, imd.groups, categories=imd.categories)
for group in imd.groups:
if group in self.groups:
imd.bundles.update(self.groups[group].bundles)
@@ -1439,7 +1489,7 @@ class Metadata(Bcfg2.Server.Plugin.Metadata,
instances = {}
rv = []
- for client in list(self.clients):
+ for client in list(self.list_clients()):
if not include_client(client):
continue
if client in self.clientgroups:
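
The Metadata rework keys group_membership and negated_groups by group name, each mapping to a list of predicates that are evaluated in document order and combined with all(). A toy sketch of that aggregation pattern (the names and conditions are illustrative, not the plugin's API):

    def aggregate(conditions):
        # combine all conditions on a group into a single predicate
        return lambda client, groups, cats: \
            all(cond(client, groups, cats) for cond in conditions)

    # e.g. "member of group 'web'" and "not client 'db01'"
    conds = [lambda c, g, _: 'web' in g,
             lambda c, g, _: c != 'db01']
    pred = aggregate(conds)
    print(pred('host1', set(['web']), {}))   # True
    print(pred('db01', set(['web']), {}))    # False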
diff --git a/src/lib/Bcfg2/Server/Plugins/NagiosGen.py b/src/lib/Bcfg2/Server/Plugins/NagiosGen.py
index c807d51f4..a27664215 100644
--- a/src/lib/Bcfg2/Server/Plugins/NagiosGen.py
+++ b/src/lib/Bcfg2/Server/Plugins/NagiosGen.py
@@ -21,9 +21,9 @@ class NagiosGen(Plugin, Generator):
self.config = \
StructFile(os.path.join(self.data, 'config.xml'),
should_monitor=True, create=self.name)
- self.Entries = {'Path':
- {'/etc/nagiosgen.status': self.createhostconfig,
- '/etc/nagios/conf.d/bcfg2.cfg': self.createserverconfig}}
+ self.Entries = {
+ 'Path': {'/etc/nagiosgen.status': self.createhostconfig,
+ '/etc/nagios/conf.d/bcfg2.cfg': self.createserverconfig}}
self.client_attrib = {'encoding': 'ascii',
'owner': 'root',
diff --git a/src/lib/Bcfg2/Server/Plugins/Probes.py b/src/lib/Bcfg2/Server/Plugins/Probes.py
index ef339a34b..010a69dd1 100644
--- a/src/lib/Bcfg2/Server/Plugins/Probes.py
+++ b/src/lib/Bcfg2/Server/Plugins/Probes.py
@@ -10,6 +10,7 @@ import lxml.etree
import Bcfg2.Server
import Bcfg2.Server.Cache
import Bcfg2.Server.Plugin
+from Bcfg2.Compat import unicode # pylint: disable=W0622
import Bcfg2.Server.FileMonitor
from Bcfg2.Logger import Debuggable
from Bcfg2.Server.Statistics import track_statistics
@@ -285,7 +286,10 @@ class ProbeData(str): # pylint: disable=E0012,R0924
.json, and .yaml properties to provide convenient ways to use
ProbeData objects as XML, JSON, or YAML data """
def __new__(cls, data):
- return str.__new__(cls, data)
+ if isinstance(data, unicode):
+ return str.__new__(cls, data.encode('utf-8'))
+ else:
+ return str.__new__(cls, data)
def __init__(self, data): # pylint: disable=W0613
str.__init__(self)
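
On Python 2, str.__new__ applied to a unicode object containing non-ASCII characters raises UnicodeEncodeError via the implicit ascii codec, which is the failure the __new__ change above avoids by encoding first. A standalone sketch of the same guard (the class name is illustrative; on Python 3 the isinstance check is true for str input, so nothing is re-encoded):

    class Data(str):
        def __new__(cls, data):
            if not isinstance(data, str):      # i.e. unicode on Python 2
                data = data.encode('utf-8')
            return str.__new__(cls, data)

    print(Data(u"pr\xfcfung"))                 # u"prüfung"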
diff --git a/src/lib/Bcfg2/Server/SSLServer.py b/src/lib/Bcfg2/Server/SSLServer.py
index 646124fcc..215fd0e2b 100644
--- a/src/lib/Bcfg2/Server/SSLServer.py
+++ b/src/lib/Bcfg2/Server/SSLServer.py
@@ -5,7 +5,6 @@ better. """
import os
import sys
import socket
-import select
import signal
import logging
import ssl
@@ -237,22 +236,23 @@ class XMLRPCRequestHandler(SimpleXMLRPCServer.SimpleXMLRPCRequestHandler):
return False
return True
- ### need to override do_POST here
def do_POST(self):
try:
max_chunk_size = 10 * 1024 * 1024
size_remaining = int(self.headers["content-length"])
L = []
while size_remaining:
- try:
- select.select([self.rfile.fileno()], [], [], 3)
- except select.error:
- self.logger.error("Got select timeout")
- raise
chunk_size = min(size_remaining, max_chunk_size)
- L.append(self.rfile.read(chunk_size).decode('utf-8'))
+ chunk = self.rfile.read(chunk_size)
+ if not chunk:
+ break
+ L.append(chunk)
size_remaining -= len(L[-1])
data = ''.join(L)
+ data = self.decode_request_content(data)
+ if data is None:
+ return # response has been sent
+
response = self.server._marshaled_dispatch(self.client_address,
data)
if sys.hexversion >= 0x03000000:
@@ -265,6 +265,7 @@ class XMLRPCRequestHandler(SimpleXMLRPCServer.SimpleXMLRPCRequestHandler):
(self.client_address, sys.exc_info()[1]))
try:
self.send_response(500)
+ self.send_header("Content-length", "0")
self.end_headers()
except:
(etype, msg) = sys.exc_info()[:2]
@@ -321,14 +322,11 @@ class XMLRPCRequestHandler(SimpleXMLRPCServer.SimpleXMLRPCRequestHandler):
def finish(self):
# shut down the connection
- if not self.wfile.closed:
- try:
- self.wfile.flush()
- self.wfile.close()
- except socket.error:
- err = sys.exc_info()[1]
- self.logger.warning("Error closing connection: %s" % err)
- self.rfile.close()
+ try:
+ SimpleXMLRPCServer.SimpleXMLRPCRequestHandler.finish(self)
+ except socket.error:
+ err = sys.exc_info()[1]
+ self.logger.warning("Error closing connection: %s" % err)
class XMLRPCServer(SocketServer.ThreadingMixIn, SSLServer,
@@ -446,8 +444,6 @@ class XMLRPCServer(SocketServer.ThreadingMixIn, SSLServer,
self.handle_request()
except socket.timeout:
pass
- except select.error:
- pass
except:
self.logger.error("Got unexpected error in handle_request",
exc_info=1)
diff --git a/testsuite/Testsrc/Testlib/TestClient/TestTools/TestPOSIX/Testbase.py b/testsuite/Testsrc/Testlib/TestClient/TestTools/TestPOSIX/Testbase.py
index bb7db5e14..5a752b2ac 100644
--- a/testsuite/Testsrc/Testlib/TestClient/TestTools/TestPOSIX/Testbase.py
+++ b/testsuite/Testsrc/Testlib/TestClient/TestTools/TestPOSIX/Testbase.py
@@ -734,12 +734,13 @@ class TestPOSIXTool(TestTool):
gather_data_rv[idx] = val
ptool._gather_data.return_value = tuple(gather_data_rv)
+ stat_mode = 17407
mtime = 1344430414
+ stat_rv = (stat_mode, Mock(), Mock(), Mock(), Mock(), Mock(), Mock(),
+ Mock(), mtime, Mock())
+ gather_data_rv[0] = stat_rv
entry = reset()
entry.set("mtime", str(mtime))
- stat_rv = MagicMock()
- stat_rv.__getitem__.return_value = mtime
- gather_data_rv[0] = stat_rv
ptool._gather_data.return_value = tuple(gather_data_rv)
self.assertTrue(ptool._verify_metadata(entry))
ptool._gather_data.assert_called_with(entry.get("name"))
@@ -811,7 +812,7 @@ class TestPOSIXTool(TestTool):
ptool._gather_data.assert_called_with(entry.get("name"))
ptool._verify_acls.assert_called_with(entry,
path=entry.get("name"))
- mock_matchpathcon.assert_called_with(entry.get("name"), 0)
+ mock_matchpathcon.assert_called_with(entry.get("name"), stat_mode)
self.assertEqual(entry.get("current_exists", 'true'), 'true')
for attr, idx, val in expected:
self.assertEqual(entry.get(attr), val)
@@ -826,7 +827,7 @@ class TestPOSIXTool(TestTool):
ptool._gather_data.assert_called_with(entry.get("name"))
ptool._verify_acls.assert_called_with(entry,
path=entry.get("name"))
- mock_matchpathcon.assert_called_with(entry.get("name"), 0)
+ mock_matchpathcon.assert_called_with(entry.get("name"), stat_mode)
self.assertEqual(entry.get("current_exists", 'true'), 'true')
for attr, idx, val in expected:
self.assertEqual(entry.get(attr), val)
diff --git a/testsuite/Testsrc/Testlib/TestServer/TestPlugins/TestMetadata.py b/testsuite/Testsrc/Testlib/TestServer/TestPlugins/TestMetadata.py
index f527bb523..20d752afc 100644
--- a/testsuite/Testsrc/Testlib/TestServer/TestPlugins/TestMetadata.py
+++ b/testsuite/Testsrc/Testlib/TestServer/TestPlugins/TestMetadata.py
@@ -831,21 +831,18 @@ class TestMetadata(_TestMetadata, TestClientRunHooks, TestDatabaseBacked):
self.assertEqual(metadata.groups['group4'].category, 'category1')
self.assertEqual(metadata.default, "group1")
- all_groups = []
- negated_groups = []
+ all_groups = set()
+ negated_groups = set()
for group in get_groups_test_tree().xpath("//Groups/Client//*") + \
get_groups_test_tree().xpath("//Groups/Group//*"):
if group.tag == 'Group' and not group.getchildren():
if group.get("negate", "false").lower() == 'true':
- negated_groups.append(group.get("name"))
+ negated_groups.add(group.get("name"))
else:
- all_groups.append(group.get("name"))
- self.assertItemsEqual([g.name
- for g in metadata.group_membership.values()],
- all_groups)
- self.assertItemsEqual([g.name
- for g in metadata.negated_groups.values()],
- negated_groups)
+ all_groups.add(group.get("name"))
+ self.assertItemsEqual(metadata.ordered_groups, all_groups)
+ self.assertItemsEqual(metadata.group_membership.keys(), all_groups)
+ self.assertItemsEqual(metadata.negated_groups.keys(), negated_groups)
@patch("Bcfg2.Server.Plugins.Metadata.XMLMetadataConfig.load_xml", Mock())
def test_set_profile(self):
diff --git a/tools/README b/tools/README
index 9aef03104..6fc99bc6d 100644
--- a/tools/README
+++ b/tools/README
@@ -55,6 +55,9 @@ export.sh
generate-manpages.bash
- Generate man pages from the Sphinx source
+git_commit.py
+ - Trigger script to commit local changes back to a git repository
+
pkgmgr_gen.py
- Generate Pkgmgr XML files from a list of directories that
contain RPMS
diff --git a/tools/git_commit.py b/tools/git_commit.py
new file mode 100755
index 000000000..cc4061f25
--- /dev/null
+++ b/tools/git_commit.py
@@ -0,0 +1,181 @@
+#!/usr/bin/env python
+""" Trigger script to commit selected changes to a local repository
+back to git. To use this script, enable the Trigger plugin, put this
+script in /var/lib/bcfg2/Trigger/, and create /etc/bcfg2-commit.conf.
+
+The config file, /etc/bcfg2-commit.conf, may contain four options in
+the [global] section:
+
+* "config" is the path to the Bcfg2 server config file. (Default:
+ /etc/bcfg2.conf)
+* "commit" is a comma-separated list of globs giving the paths that
+ should be committed back to the repository. Default is 'SSLCA/*,
+ SSHbase/*, Cfg/*', which will commit data back for SSLCA, SSHbase,
+ Cfg, FileProbes, etc., but not, for instance, Probes/probed.xml.
+ You may wish to add Metadata/clients.xml to the commit list.
+* "debug" and "verbose" let you set the log level for git_commit.py
+ itself.
+"""
+
+
+import os
+import sys
+import git
+import logging
+import Bcfg2.Logger
+import Bcfg2.Options
+from Bcfg2.Compat import ConfigParser
+from fnmatch import fnmatch
+
+# config file path
+CONFIG = "/etc/bcfg2-commit.conf"
+
+# config defaults. all config options are in the [global] section
+DEFAULTS = dict(config='/etc/bcfg2.conf',
+ commit="SSLCA/*, SSHbase/*, Cfg/*")
+
+
+def list_changed_files(repo):
+ return [d for d in repo.index.diff(None)
+ if (d.a_blob is not None and not d.deleted_file and
+ not d.renamed and not d.new_file)]
+
+
+def add_to_commit(patterns, path, repo, relpath):
+ progname = os.path.basename(sys.argv[0])
+ logger = logging.getLogger(progname)
+ for pattern in patterns:
+ if fnmatch(path, os.path.join(relpath, pattern)):
+ logger.debug("%s: Adding %s to commit" % (progname, path))
+ repo.index.add([path])
+ return True
+ return False
+
+
+def parse_options():
+ config = ConfigParser.SafeConfigParser(DEFAULTS)
+ config.read(CONFIG)
+
+ optinfo = dict(
+ profile=Bcfg2.Options.CLIENT_PROFILE,
+ dryrun=Bcfg2.Options.CLIENT_DRYRUN,
+ groups=Bcfg2.Options.Option("Groups",
+ default=[],
+ cmd="-g",
+ odesc='<group>:<group>',
+ cook=Bcfg2.Options.colon_split))
+ optinfo.update(Bcfg2.Options.CLI_COMMON_OPTIONS)
+ optinfo.update(Bcfg2.Options.SERVER_COMMON_OPTIONS)
+ argv = [Bcfg2.Options.CFILE.cmd, config.get("global", "config")]
+ argv.extend(sys.argv[1:])
+ setup = Bcfg2.Options.OptionParser(optinfo, argv=argv)
+ setup.parse(argv)
+
+ setup['commit'] = Bcfg2.Options.list_split(config.get("global",
+ "commit"))
+ for opt in ['debug', 'verbose']:
+ try:
+ setup[opt] = config.getboolean("global", opt)
+ except ConfigParser.NoOptionError:
+ pass
+
+ try:
+ hostname = setup['args'][0]
+ except IndexError:
+ print(setup.hm)
+ raise SystemExit(1)
+ return (setup, hostname)
+
+
+def setup_logging(setup):
+ progname = os.path.basename(sys.argv[0])
+ log_args = dict(to_syslog=setup['syslog'], to_console=sys.stdout.isatty(),
+ to_file=setup['logging'], level=logging.WARNING)
+ if setup['debug']:
+ log_args['level'] = logging.DEBUG
+ elif setup['verbose']:
+ log_args['level'] = logging.INFO
+ Bcfg2.Logger.setup_logging(progname, **log_args)
+ return logging.getLogger(progname)
+
+
+def main():
+ progname = os.path.basename(sys.argv[0])
+ setup, hostname = parse_options()
+ logger = setup_logging(setup)
+ if setup['dryrun']:
+ logger.info("%s: In dry-run mode, changes will not be committed" %
+ progname)
+
+ if setup['vcs_root']:
+ gitroot = os.path.realpath(setup['vcs_root'])
+ else:
+ gitroot = os.path.realpath(setup['repo'])
+ logger.info("%s: Using Git repo at %s" % (progname, gitroot))
+ try:
+ repo = git.Repo(gitroot)
+ except: # pylint: disable=W0702
+ logger.error("%s: Error setting up Git repo at %s: %s" %
+ (progname, gitroot, sys.exc_info()[1]))
+ return 1
+
+ # canonicalize the repo path so that git will recognize it as
+ # being inside the git repo
+ bcfg2root = os.path.realpath(setup['repo'])
+
+ if not bcfg2root.startswith(gitroot):
+ logger.error("%s: Bcfg2 repo %s is not inside Git repo %s" %
+ (progname, bcfg2root, gitroot))
+ return 1
+
+ # relative path to Bcfg2 root from VCS root
+ if gitroot == bcfg2root:
+ relpath = ''
+ else:
+ relpath = bcfg2root[len(gitroot) + 1:]
+
+ new = 0
+ changed = 0
+ logger.debug("%s: Untracked files: %s" % (progname, repo.untracked_files))
+ for path in repo.untracked_files:
+ if add_to_commit(setup['commit'], path, repo, relpath):
+ new += 1
+ else:
+ logger.debug("%s: Not adding %s to commit" % (progname, path))
+ logger.debug("%s: Untracked files after building commit: %s" %
+ (progname, repo.untracked_files))
+
+ changes = list_changed_files(repo)
+ logger.info("%s: Changed files: %s" % (progname,
+ [d.a_blob.path for d in changes]))
+ for diff in changes:
+ if add_to_commit(setup['commit'], diff.a_blob.path, repo, relpath):
+ changed += 1
+ else:
+ logger.debug("%s: Not adding %s to commit" % (progname,
+ diff.a_blob.path))
+ logger.info("%s: Changed files after building commit: %s" %
+ (progname, [d.a_blob.path for d in list_changed_files(repo)]))
+
+ if new + changed > 0:
+ logger.debug("%s: Committing %s new files and %s changed files" %
+ (progname, new, changed))
+ if setup['dryrun']:
+ logger.warning("%s: In dry-run mode, skipping commit and push" %
+ progname)
+ else:
+ output = repo.index.commit("Auto-commit with %s from %s run" %
+ (progname, hostname))
+ if output:
+ logger.debug("%s: %s" % (progname, output))
+ remote = repo.remote()
+ logger.debug("%s: Pushing to remote %s at %s" % (progname, remote,
+ remote.url))
+ output = remote.push()
+ if output:
+ logger.debug("%s: %s" % (progname, output))
+ else:
+ logger.info("%s: No changes to commit" % progname)
+
+if __name__ == '__main__':
+ sys.exit(main())
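
For reference, a hypothetical /etc/bcfg2-commit.conf matching the options described in the module docstring above (values are examples, not site defaults, except where noted):

    [global]
    # path to the Bcfg2 server config (this is the script's default)
    config = /etc/bcfg2.conf
    # globs to commit back; clients.xml added per the docstring's suggestion
    commit = SSLCA/*, SSHbase/*, Cfg/*, Metadata/clients.xml
    # log levels for git_commit.py itself
    verbose = true
    debug = false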