-rw-r--r--  .travis.yml | 1
-rw-r--r--  COPYRIGHT | 2
-rw-r--r--  README | 8
-rwxr-xr-x  debian/bcfg2-server.bcfg2-report-collector.init (renamed from debian/bcfg2-report-collector.init) | 0
-rw-r--r--  debian/bcfg2-server.postinst | 17
-rwxr-xr-x  debian/bcfg2.cron.daily | 2
-rwxr-xr-x  debian/bcfg2.cron.hourly | 2
-rw-r--r--  debian/bcfg2.default | 2
-rw-r--r--  debian/changelog | 12
-rw-r--r--  debian/control | 7
-rwxr-xr-x  debian/rules | 9
-rw-r--r--  doc/appendix/files/mysql.txt | 3
-rw-r--r--  doc/appendix/guides/centos.txt | 9
-rw-r--r--  doc/appendix/guides/fedora.txt | 9
-rw-r--r--  doc/appendix/guides/ubuntu.txt | 750
-rw-r--r--  doc/appendix/tools.txt | 2
-rw-r--r--  doc/client/tools/actions.txt | 24
-rw-r--r--  doc/client/tools/augeas.txt | 95
-rw-r--r--  doc/conf.py | 4
-rw-r--r--  doc/contents.txt | 2
-rw-r--r--  doc/development/lint.txt | 5
-rw-r--r--  doc/getting_started/index.txt | 11
-rw-r--r--  doc/installation/distributions.txt | 14
-rw-r--r--  doc/installation/prerequisites.txt | 25
-rw-r--r--  doc/installation/source.txt | 2
-rw-r--r--  doc/man/bcfg2-report-collector.txt | 40
-rw-r--r--  doc/man/bcfg2-server.txt | 3
-rw-r--r--  doc/man/bcfg2.conf.txt | 11
-rw-r--r--  doc/reports/dynamic.txt | 2
-rw-r--r--  doc/server/admin/index.txt | 1
-rw-r--r--  doc/server/admin/query.txt | 15
-rw-r--r--  doc/server/configuration.txt | 45
-rw-r--r--  doc/server/database.txt | 11
-rw-r--r--  doc/server/plugins/connectors/awstags.txt | 124
-rw-r--r--  doc/server/plugins/connectors/properties.txt | 8
-rw-r--r--  doc/server/plugins/generators/cfg.txt | 50
-rw-r--r--  doc/server/plugins/generators/nagiosgen.txt | 2
-rw-r--r--  doc/server/plugins/generators/packages.txt | 13
-rw-r--r--  doc/server/plugins/generators/rules.txt | 21
-rw-r--r--  doc/server/plugins/generators/tcheetah.txt | 2
-rw-r--r--  doc/server/plugins/index.txt | 2
-rw-r--r--  doc/server/plugins/probes/index.txt | 42
-rw-r--r--  doc/server/plugins/structures/bundler/kernel.txt | 5
-rw-r--r--  doc/unsorted/bcfg2.conf-options.txt | 19
-rw-r--r--  doc/unsorted/dynamic_groups.txt | 27
-rw-r--r--  doc/unsorted/howtos.txt | 2
-rw-r--r--  doc/unsorted/python-ssl_1.14-1_amd64.deb | bin 57920 -> 0 bytes
-rw-r--r--  doc/unsorted/python-stdeb_0.3-1_all.deb | bin 17024 -> 0 bytes
-rw-r--r--  doc/unsorted/ssl.txt | 68
-rw-r--r--  man/bcfg2-report-collector.8 | 79
-rw-r--r--  man/bcfg2-server.8 | 5
-rw-r--r--  man/bcfg2.conf.5 | 14
-rw-r--r--  misc/bcfg2-selinux.spec | 20
-rw-r--r--  misc/bcfg2.spec | 978
-rw-r--r--  osx/Makefile | 4
-rw-r--r--  osx/macports/Portfile | 2
-rw-r--r--  redhat/Makefile | 153
-rw-r--r--  redhat/PACKAGE | 1
-rw-r--r--  redhat/RELEASE | 1
-rw-r--r--  redhat/VERSION | 1
-rw-r--r--  redhat/bcfg2.spec.in | 324
-rwxr-xr-x  redhat/scripts/bcfg2-report-collector.init | 4
-rw-r--r--  redhat/systemd/bcfg2.service | 5
-rw-r--r--  schemas/augeas.xsd | 229
-rw-r--r--  schemas/authorizedkeys.xsd | 79
-rw-r--r--  schemas/awstags.xsd | 73
-rw-r--r--  schemas/servicetype.xsd | 15
-rw-r--r--  schemas/types.xsd | 53
-rw-r--r--  solaris-ips/MANIFEST.bcfg2-server.header | 3
-rw-r--r--  solaris-ips/MANIFEST.bcfg2.header | 3
-rw-r--r--  solaris-ips/Makefile | 2
-rw-r--r--  solaris-ips/pkginfo.bcfg2 | 2
-rw-r--r--  solaris-ips/pkginfo.bcfg2-server | 2
-rw-r--r--  solaris/Makefile | 2
-rw-r--r--  solaris/pkginfo.bcfg2 | 2
-rw-r--r--  solaris/pkginfo.bcfg2-server | 2
-rw-r--r--  src/lib/Bcfg2/Client/Frame.py | 35
-rw-r--r--  src/lib/Bcfg2/Client/Tools/Action.py | 16
-rw-r--r--  src/lib/Bcfg2/Client/Tools/Chkconfig.py | 105
-rw-r--r--  src/lib/Bcfg2/Client/Tools/DebInit.py | 103
-rw-r--r--  src/lib/Bcfg2/Client/Tools/POSIX/Augeas.py | 296
-rw-r--r--  src/lib/Bcfg2/Client/Tools/POSIX/File.py | 23
-rw-r--r--  src/lib/Bcfg2/Client/Tools/POSIX/__init__.py | 7
-rw-r--r--  src/lib/Bcfg2/Client/Tools/POSIX/base.py | 30
-rw-r--r--  src/lib/Bcfg2/Client/Tools/POSIXUsers.py | 12
-rw-r--r--  src/lib/Bcfg2/Client/Tools/RcUpdate.py | 108
-rw-r--r--  src/lib/Bcfg2/Client/Tools/VCS.py | 119
-rw-r--r--  src/lib/Bcfg2/Client/Tools/__init__.py | 21
-rw-r--r--  src/lib/Bcfg2/Client/XML.py | 22
-rw-r--r--  src/lib/Bcfg2/Client/__init__.py | 10
-rw-r--r--  src/lib/Bcfg2/Compat.py | 5
-rwxr-xr-x  src/lib/Bcfg2/Encryption.py | 19
-rw-r--r--  src/lib/Bcfg2/Options.py | 56
-rw-r--r--  src/lib/Bcfg2/Proxy.py | 1
-rw-r--r--  src/lib/Bcfg2/Reporting/Collector.py | 70
-rw-r--r--  src/lib/Bcfg2/Reporting/Compat.py | 16
-rw-r--r--  src/lib/Bcfg2/Reporting/Storage/DjangoORM.py | 4
-rw-r--r--  src/lib/Bcfg2/Reporting/models.py | 2
-rw-r--r--  src/lib/Bcfg2/Reporting/templates/base.html | 31
-rw-r--r--  src/lib/Bcfg2/Reporting/templates/clients/detail.html | 32
-rw-r--r--  src/lib/Bcfg2/Reporting/templates/clients/detailed-list.html | 6
-rw-r--r--  src/lib/Bcfg2/Reporting/templates/clients/index.html | 8
-rw-r--r--  src/lib/Bcfg2/Reporting/templates/clients/manage.html | 8
-rw-r--r--  src/lib/Bcfg2/Reporting/templates/config_items/common.html | 5
-rw-r--r--  src/lib/Bcfg2/Reporting/templates/config_items/entry_status.html | 10
-rw-r--r--  src/lib/Bcfg2/Reporting/templates/config_items/item.html | 20
-rw-r--r--  src/lib/Bcfg2/Reporting/templates/config_items/listing.html | 8
-rw-r--r--  src/lib/Bcfg2/Reporting/templates/displays/summary.html | 6
-rw-r--r--  src/lib/Bcfg2/Reporting/templates/displays/timing.html | 14
-rw-r--r--  src/lib/Bcfg2/Reporting/templatetags/bcfg2_tags.py | 36
-rw-r--r--  src/lib/Bcfg2/Reporting/templatetags/syntax_coloring.py | 16
-rw-r--r--  src/lib/Bcfg2/Reporting/urls.py | 2
-rwxr-xr-x  src/lib/Bcfg2/Reporting/utils.py | 1
-rw-r--r--  src/lib/Bcfg2/Reporting/views.py | 2
-rw-r--r--  src/lib/Bcfg2/SSLServer.py | 32
-rw-r--r--  src/lib/Bcfg2/Server/Admin/Client.py | 34
-rw-r--r--  src/lib/Bcfg2/Server/Admin/Compare.py | 4
-rw-r--r--  src/lib/Bcfg2/Server/Admin/Init.py | 11
-rw-r--r--  src/lib/Bcfg2/Server/Admin/Minestruct.py | 15
-rw-r--r--  src/lib/Bcfg2/Server/Admin/Pull.py | 5
-rw-r--r--  src/lib/Bcfg2/Server/Admin/Reports.py | 27
-rw-r--r--  src/lib/Bcfg2/Server/Admin/Snapshots.py | 1
-rw-r--r--  src/lib/Bcfg2/Server/Admin/Syncdb.py | 10
-rw-r--r--  src/lib/Bcfg2/Server/Admin/Viz.py | 1
-rw-r--r--  src/lib/Bcfg2/Server/Admin/Xcmd.py | 17
-rw-r--r--  src/lib/Bcfg2/Server/BuiltinCore.py | 4
-rw-r--r--  src/lib/Bcfg2/Server/Core.py | 176
-rw-r--r--  src/lib/Bcfg2/Server/FileMonitor/__init__.py | 3
-rw-r--r--  src/lib/Bcfg2/Server/Hostbase/hostbase/urls.py | 2
-rw-r--r--  src/lib/Bcfg2/Server/Hostbase/urls.py | 2
-rw-r--r--  src/lib/Bcfg2/Server/Lint/Comments.py | 9
-rwxr-xr-x  src/lib/Bcfg2/Server/Lint/Genshi.py | 6
-rw-r--r--  src/lib/Bcfg2/Server/Lint/RequiredAttrs.py | 2
-rw-r--r--  src/lib/Bcfg2/Server/Lint/TemplateAbuse.py | 75
-rw-r--r--  src/lib/Bcfg2/Server/Lint/Validate.py | 45
-rw-r--r--  src/lib/Bcfg2/Server/Lint/ValidateJSON.py | 72
-rw-r--r--  src/lib/Bcfg2/Server/MultiprocessingCore.py | 433
-rw-r--r--  src/lib/Bcfg2/Server/Plugin/base.py | 29
-rw-r--r--  src/lib/Bcfg2/Server/Plugin/helpers.py | 55
-rw-r--r--  src/lib/Bcfg2/Server/Plugin/interfaces.py | 47
-rw-r--r--  src/lib/Bcfg2/Server/Plugins/AWSTags.py | 217
-rw-r--r--  src/lib/Bcfg2/Server/Plugins/Bundler.py | 15
-rw-r--r--  src/lib/Bcfg2/Server/Plugins/Cfg/CfgAuthorizedKeysGenerator.py | 23
-rw-r--r--  src/lib/Bcfg2/Server/Plugins/Cfg/CfgEncryptedGenerator.py | 9
-rw-r--r--  src/lib/Bcfg2/Server/Plugins/Cfg/CfgPrivateKeyCreator.py | 18
-rw-r--r--  src/lib/Bcfg2/Server/Plugins/Cfg/CfgPublicKeyCreator.py | 53
-rw-r--r--  src/lib/Bcfg2/Server/Plugins/Cfg/__init__.py | 144
-rw-r--r--  src/lib/Bcfg2/Server/Plugins/GroupLogic.py | 33
-rw-r--r--  src/lib/Bcfg2/Server/Plugins/Guppy.py | 1
-rw-r--r--  src/lib/Bcfg2/Server/Plugins/Metadata.py | 368
-rw-r--r--  src/lib/Bcfg2/Server/Plugins/NagiosGen.py | 6
-rw-r--r--  src/lib/Bcfg2/Server/Plugins/Ohai.py | 4
-rw-r--r--  src/lib/Bcfg2/Server/Plugins/POSIXCompat.py | 2
-rw-r--r--  src/lib/Bcfg2/Server/Plugins/Packages/Collection.py | 4
-rw-r--r--  src/lib/Bcfg2/Server/Plugins/Packages/PackagesSources.py | 12
-rw-r--r--  src/lib/Bcfg2/Server/Plugins/Packages/Yum.py | 169
-rw-r--r--  src/lib/Bcfg2/Server/Plugins/Packages/__init__.py | 144
-rw-r--r--  src/lib/Bcfg2/Server/Plugins/Probes.py | 108
-rw-r--r--  src/lib/Bcfg2/Server/Plugins/Properties.py | 6
-rw-r--r--  src/lib/Bcfg2/Server/Plugins/PuppetENC.py | 2
-rw-r--r--  src/lib/Bcfg2/Server/Plugins/SSHbase.py | 5
-rw-r--r--  src/lib/Bcfg2/Server/Plugins/ServiceCompat.py | 4
-rw-r--r--  src/lib/Bcfg2/Server/Plugins/Svn.py | 39
-rw-r--r--  src/lib/Bcfg2/Server/Plugins/TemplateHelper.py | 2
-rw-r--r--  src/lib/Bcfg2/Server/Reports/reports/models.py | 10
-rw-r--r--  src/lib/Bcfg2/Server/models.py | 2
-rw-r--r--  src/lib/Bcfg2/Utils.py | 11
-rw-r--r--  src/lib/Bcfg2/settings.py | 14
-rw-r--r--  src/lib/Bcfg2/version.py | 2
-rwxr-xr-x  src/sbin/bcfg2-admin | 4
-rwxr-xr-x  src/sbin/bcfg2-crypt | 585
-rwxr-xr-x  src/sbin/bcfg2-info | 31
-rwxr-xr-x  src/sbin/bcfg2-lint | 9
-rwxr-xr-x  src/sbin/bcfg2-reports | 10
-rwxr-xr-x  src/sbin/bcfg2-test | 27
-rwxr-xr-x  src/sbin/bcfg2-yum-helper | 161
-rw-r--r--  testsuite/Testschema/test_schema.py | 2
-rw-r--r--  testsuite/Testsrc/Testlib/TestClient/TestTools/TestPOSIX/TestAugeas.py | 247
-rw-r--r--  testsuite/Testsrc/Testlib/TestClient/TestTools/TestPOSIX/Testbase.py | 15
-rw-r--r--  testsuite/Testsrc/Testlib/TestClient/TestTools/TestPOSIXUsers.py | 15
-rw-r--r--  testsuite/Testsrc/Testlib/TestServer/TestPlugin/Testbase.py | 42
-rw-r--r--  testsuite/Testsrc/Testlib/TestServer/TestPlugin/Testhelpers.py | 47
-rw-r--r--  testsuite/Testsrc/Testlib/TestServer/TestPlugins/TestAWSTags.py | 140
-rw-r--r--  testsuite/Testsrc/Testlib/TestServer/TestPlugins/TestCfg/TestCfgEncryptedGenerator.py | 8
-rw-r--r--  testsuite/Testsrc/Testlib/TestServer/TestPlugins/TestCfg/TestCfgPrivateKeyCreator.py | 19
-rw-r--r--  testsuite/Testsrc/Testlib/TestServer/TestPlugins/TestCfg/TestCfgPublicKeyCreator.py | 105
-rw-r--r--  testsuite/Testsrc/Testlib/TestServer/TestPlugins/TestCfg/Test_init.py | 65
-rw-r--r--  testsuite/Testsrc/Testlib/TestServer/TestPlugins/TestMetadata.py | 79
-rw-r--r--  testsuite/Testsrc/Testlib/TestServer/TestPlugins/TestProbes.py | 38
-rw-r--r--  testsuite/Testsrc/test_code_checks.py | 10
-rwxr-xr-x  testsuite/before_install.sh | 5
-rw-r--r--  testsuite/common.py | 60
-rw-r--r--  testsuite/ext/exception_messages.py | 30
-rwxr-xr-x  testsuite/install.sh | 19
-rw-r--r--  testsuite/pylintrc.conf | 2
-rw-r--r--  testsuite/requirements.txt | 2
-rw-r--r--  tools/README | 7
-rwxr-xr-x  tools/bcfg2-cron | 2
-rwxr-xr-x  tools/bcfg2-profile-templates.py | 17
-rwxr-xr-x  tools/bcfg2_local.py | 7
-rwxr-xr-x  tools/export.py | 132
-rwxr-xr-x  tools/export.sh | 50
-rwxr-xr-x  tools/git_commit.py | 181
-rwxr-xr-x  tools/posixusers_baseline.py | 11
-rwxr-xr-x  tools/upgrade/1.3/migrate_configs.py | 8
-rwxr-xr-x  tools/upgrade/1.3/migrate_dbstats.py | 9
-rwxr-xr-x  tools/upgrade/1.3/migrate_perms_to_mode.py | 15
207 files changed, 6864 insertions, 2769 deletions
diff --git a/.travis.yml b/.travis.yml
index 73b8a9594..9ad7dfb19 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -1,6 +1,5 @@
language: python
python:
- - "2.5"
- "2.6"
- "2.7"
env:
diff --git a/COPYRIGHT b/COPYRIGHT
index fa01cb568..379ddaa4b 100644
--- a/COPYRIGHT
+++ b/COPYRIGHT
@@ -159,3 +159,5 @@ add themselves to this file. See LICENSE for the full license.
- Michael Fenn <fennm@deshawresearch.com> fixed various small bugs
related to bcfg2 on CentOS 5
+
+- Alexander Sulfrian <alexander@sulfrian.net> fixed various bugs.
diff --git a/README b/README
index c836961f7..bc2f8951c 100644
--- a/README
+++ b/README
@@ -21,11 +21,11 @@ Installation
------------
For details about the installation of Bcfg2 please refer to the
-following pages in the Bcfg2 wiki.
+following pages in the Bcfg2 online documentation:
-* Prerequisites: http://bcfg2.org/wiki/Prereqs
-* Download: http://bcfg2.org/wiki/Download
-* Installation: http://bcfg2.org/wiki/Install
+* Prerequisites: http://docs.bcfg2.org/installation/prerequisites.html
+* Download: http://bcfg2.org/download/
+* Installation: http://docs.bcfg2.org/installation/index.html
Need help
---------
diff --git a/debian/bcfg2-report-collector.init b/debian/bcfg2-server.bcfg2-report-collector.init
index df7b751cb..df7b751cb 100755
--- a/debian/bcfg2-report-collector.init
+++ b/debian/bcfg2-server.bcfg2-report-collector.init
diff --git a/debian/bcfg2-server.postinst b/debian/bcfg2-server.postinst
index 2f65fe847..77dea5f22 100644
--- a/debian/bcfg2-server.postinst
+++ b/debian/bcfg2-server.postinst
@@ -40,21 +40,4 @@ esac
#DEBHELPER#
-# We do a restart manually here because with autogenerated code
-# we get this traceback (eg something isn't done yet):
-# This happens due to debhelper bug #546293, fixed in version 7.4.2.
-## Setting up bcfg2-server (1.0.0~rc3+r5542-0.1+dctest8) ...
-## Starting Configuration Management Server: Traceback (most recent call last):
-## File "/usr/sbin/bcfg2-server", line 12, in <module>
-## import Bcfg2.Server.Plugins.Metadata
-## ImportError: No module named Server.Plugins.Metadata
-## * bcfg2-server
-if [ -x "/etc/init.d/bcfg2-server" ]; then
- if [ -x "`which invoke-rc.d 2>/dev/null`" ]; then
- invoke-rc.d bcfg2-server start || exit $?
- else
- /etc/init.d/bcfg2-server start || exit $?
- fi
-fi
-
exit 0
diff --git a/debian/bcfg2.cron.daily b/debian/bcfg2.cron.daily
index f2d1efb9f..b872887cb 100755
--- a/debian/bcfg2.cron.daily
+++ b/debian/bcfg2.cron.daily
@@ -10,4 +10,4 @@ else
echo "No bcfg2-cron command found"
exit 1
fi
-$BCFG2CRON --daily 2>&1 | logger -t bcfg2-cron -p daemon.info
+$BCFG2CRON --daily 2>&1 | logger -t bcfg2-cron -p daemon.info -i
diff --git a/debian/bcfg2.cron.hourly b/debian/bcfg2.cron.hourly
index 73aae7606..9f666e083 100755
--- a/debian/bcfg2.cron.hourly
+++ b/debian/bcfg2.cron.hourly
@@ -10,4 +10,4 @@ else
echo "No bcfg2-cron command found"
exit 1
fi
-$BCFG2CRON --hourly 2>&1 | logger -t bcfg2-cron -p daemon.info
+$BCFG2CRON --hourly 2>&1 | logger -t bcfg2-cron -p daemon.info -i
diff --git a/debian/bcfg2.default b/debian/bcfg2.default
index 0164e5531..8ed0da74a 100644
--- a/debian/bcfg2.default
+++ b/debian/bcfg2.default
@@ -20,7 +20,7 @@
#BCFG2_INIT=1
# BCFG2_AGENT:
-# Bcfg2 no longer supports agent mode please use the Agent+SSH method
+# Bcfg2 no longer supports agent mode, please see NEWS.Debian
# BCFG2_CRON:
# Set the frequency of cron runs.
diff --git a/debian/changelog b/debian/changelog
index 298e695c5..7f6e2f637 100644
--- a/debian/changelog
+++ b/debian/changelog
@@ -1,3 +1,15 @@
+bcfg2 (1.3.3-0.0) unstable; urgency=low
+
+ * New upstream release
+
+ -- Sol Jerome <sol.jerome@gmail.com> Thu, 07 Nov 2013 08:09:57 -0600
+
+bcfg2 (1.3.2-0.0) unstable; urgency=low
+
+ * New upstream release
+
+ -- Sol Jerome <sol.jerome@gmail.com> Mon, 01 Jul 2013 16:24:46 -0500
+
bcfg2 (1.3.1-0.0) unstable; urgency=low
* New upstream release
diff --git a/debian/control b/debian/control
index 20cef93c8..aee6f1c24 100644
--- a/debian/control
+++ b/debian/control
@@ -9,11 +9,14 @@ Build-Depends: debhelper (>= 7.0.50~),
python-sphinx (>= 1.0.7+dfsg) | python3-sphinx,
python-lxml,
python-daemon,
- python-cherrypy,
+ python-boto,
+ python-cherrypy3,
python-gamin,
+ python-genshi,
python-pyinotify,
python-m2crypto,
python-doc,
+ python-mock,
python-mock-doc
Build-Depends-Indep: python-support (>= 0.5.3)
Standards-Version: 3.8.0.0
@@ -41,7 +44,7 @@ Description: Configuration management server
Package: bcfg2-web
Architecture: all
-Depends: ${python:Depends}, ${misc:Depends}, bcfg2-server (= ${binary:Version}), python-django,
+Depends: ${python:Depends}, ${misc:Depends}, bcfg2-server (= ${binary:Version}), python-django, python-django-south (>= 0.7.5)
Suggests: python-mysqldb, python-psycopg2, python-sqlite, libapache2-mod-wsgi
Description: Configuration management web interface
Bcfg2 is a configuration management system that generates configuration sets
diff --git a/debian/rules b/debian/rules
index 5694e4e37..eaf80a4d7 100755
--- a/debian/rules
+++ b/debian/rules
@@ -1,13 +1,20 @@
#!/usr/bin/make -f
+# Lucid does not have dh_python2, but we would like to be able to use
+# this rules file to build on lucid as well.
+WITH_PYTHON2 = $(shell test -f /usr/bin/dh_python2 && echo "--with python2")
+WITH_SPHINXDOC = $(shell test -f /usr/bin/dh_sphinxdoc && echo "--with sphinxdoc")
+
%:
- dh $@ --with python-support,sphinxdoc
+ dh $@ ${WITH_PYTHON2} ${WITH_SPHINXDOC}
override_dh_installinit:
# Install bcfg2 initscript without starting it on postinst
dh_installinit --package=bcfg2 --no-start
# Install bcfg2-server initscript without starting it on postinst
dh_installinit --package=bcfg2-server --no-start
+ # Install bcfg2-report-collector initscript without starting it on postinst
+ dh_installinit --package=bcfg2-server --name=bcfg2-report-collector --no-start
override_dh_auto_build:
dh_auto_build
diff --git a/doc/appendix/files/mysql.txt b/doc/appendix/files/mysql.txt
index 81104ec17..5adf2e27f 100644
--- a/doc/appendix/files/mysql.txt
+++ b/doc/appendix/files/mysql.txt
@@ -1,4 +1,5 @@
.. -*- mode: rst -*-
+.. vim: ft=rst
.. _appendix-files-mysql:
@@ -17,7 +18,7 @@ I added a new bundle:
<Bundle name="mysql-server" version="3.0">
<Path name="/root/bcfg2-install/mysql/users.sh"/>
<Path name="/root/bcfg2-install/mysql/users.sql"/>
- <PostInstall name="/root/bcfg2-install/mysql/users.sh"/>
+ <Action name="users.sh"/>
<Package name="mysql-server-4.1"/>
<Service name="mysql"/>
</Bundle>
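The hunk above replaces the deprecated ``<PostInstall>`` element with an abstract ``<Action>`` entry in the bundle. Abstract Action entries still need to be bound somewhere, typically by the Rules plugin. A minimal sketch of such a binding for this bundle is shown below; the timing/when/status attributes follow the pattern used in doc/client/tools/actions.txt, and the command path is simply reused from the bundle's Path entry for illustration.

.. code-block:: xml

    <!-- Rules/mysql-actions.xml (illustrative binding for the Action above) -->
    <Rules priority='1'>
      <Action timing='post' name='users.sh' when='modified' status='check'
              command='/root/bcfg2-install/mysql/users.sh'/>
    </Rules>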
diff --git a/doc/appendix/guides/centos.txt b/doc/appendix/guides/centos.txt
index febdf5769..19354b709 100644
--- a/doc/appendix/guides/centos.txt
+++ b/doc/appendix/guides/centos.txt
@@ -231,10 +231,11 @@ file should look something like this
When editing your xml files by hand, it is useful to occasionally run
`bcfg2-lint` to ensure that your xml validates properly.
-The final thing we need is for the client to have the proper
-arch group membership. For this, we will make use of the
-:ref:`unsorted-dynamic_groups` capabilities of the Probes plugin. Add
-Probes to your plugins line in ``bcfg2.conf`` and create the Probe.::
+The final thing we need is for the client to have the proper arch
+group membership. For this, we will make use of the
+:ref:`server-plugins-probes-dynamic-groups` capabilities of the Probes
+plugin. Add Probes to your plugins line in ``bcfg2.conf`` and create
+the Probe.::
[root@centos ~]# grep plugins /etc/bcfg2.conf
plugins = Base,Bundler,Cfg,...,Probes
diff --git a/doc/appendix/guides/fedora.txt b/doc/appendix/guides/fedora.txt
index 1e49084ef..f89daaf0b 100644
--- a/doc/appendix/guides/fedora.txt
+++ b/doc/appendix/guides/fedora.txt
@@ -256,10 +256,11 @@ file should look something like this
Add a probe
+++++++++++
-The next step for the client will be to have the proper
-arch group membership. For this, we will make use of the
-:ref:`unsorted-dynamic_groups` capabilities of the Probes plugin. Add
-**Probes** to your plugins line in ``bcfg2.conf`` and create the Probe:
+The next step for the client will be to have the proper arch group
+membership. For this, we will make use of the
+:ref:`server-plugins-probes-dynamic-groups` capabilities of the Probes
+plugin. Add **Probes** to your plugins line in ``bcfg2.conf`` and
+create the Probe:
.. code-block:: sh
diff --git a/doc/appendix/guides/ubuntu.txt b/doc/appendix/guides/ubuntu.txt
index 5a67d0a37..9bf851632 100644
--- a/doc/appendix/guides/ubuntu.txt
+++ b/doc/appendix/guides/ubuntu.txt
@@ -1,4 +1,5 @@
.. -*- mode: rst -*-
+.. vim: ft=rst
.. _appendix-guides-ubuntu:
@@ -8,7 +9,7 @@ Ubuntu
.. note::
- This particular how to was done on lucid, but should apply to any
+ This particular how to was done on saucy, but should apply to any
other `stable`__ version of Ubuntu.
__ ubuntu-releases_
@@ -23,11 +24,6 @@ version available in the ubuntu archives, but it is not as up to date).
.. _PPA: https://launchpad.net/~bcfg2/+archive/ppa
-Add the Ubuntu PPA listing to your APT sources
-----------------------------------------------
-
-See http://trac.mcs.anl.gov/projects/bcfg2/wiki/PrecompiledPackages#UbuntuLucid
-
Install bcfg2-server
--------------------
::
@@ -36,7 +32,7 @@ Install bcfg2-server
Remove the default configuration preseeded by the ubuntu package::
- root@lucid:~# rm -rf /etc/bcfg2* /var/lib/bcfg2
+ root@saucy:~# rm -rf /etc/bcfg2* /etc/ssl/bcfg2* /var/lib/bcfg2
Initialize your repository
==========================
@@ -45,63 +41,95 @@ Now that you're done with the install, you need to intialize your
repository and setup your bcfg2.conf. bcfg2-admin init is a tool which
allows you to automate this process.::
- root@lucid:~# bcfg2-admin init
- Store bcfg2 configuration in [/etc/bcfg2.conf]:
- Location of bcfg2 repository [/var/lib/bcfg2]:
+ root@saucy:~# bcfg2-admin init
+ Store Bcfg2 configuration in [/etc/bcfg2.conf]:
+ Location of Bcfg2 repository [/var/lib/bcfg2]:
Input password used for communication verification (without echoing; leave blank for a random):
- What is the server's hostname: [lucid]
- Input the server location [https://lucid:6789]:
+ What is the server's hostname: [saucy]
+ Input the server location (the server listens on a single interface by default) [https://saucy:6789]:
Input base Operating System for clients:
- 1: Redhat/Fedora/RHEL/RHAS/Centos
+ 1: Redhat/Fedora/RHEL/RHAS/CentOS
2: SUSE/SLES
3: Mandrake
4: Debian
5: Ubuntu
6: Gentoo
7: FreeBSD
+ 8: Arch
: 5
+ Path where Bcfg2 server private key will be created [/etc/ssl/bcfg2.key]:
+ Path where Bcfg2 server cert will be created [/etc/ssl/bcfg2.crt]:
+ The following questions affect SSL certificate generation.
+ If no data is provided, the default values are used.
+ Country name (2 letter code) for certificate: US
+ State or Province Name (full name) for certificate: Illinois
+ Locality Name (eg, city) for certificate: Argonne
+ Repository created successfuly in /var/lib/bcfg2
Generating a 2048 bit RSA private key
- ......................................................................................+++
- ...+++
- writing new private key to '/etc/bcfg2.key'
+ ....................................................................................................................+++
+ ..............................+++
+ writing new private key to '/etc/ssl/bcfg2.key'
-----
Signature ok
- subject=/C=US/ST=Illinois/L=Argonne/CN=lucid
+ subject=/C=US/ST=Illinois/L=Argonne/CN=saucy
Getting Private key
- Repository created successfuly in /var/lib/bcfg2
-
Of course, change responses as necessary.
Start the server
================
+Before you start the server, you need to fix your network resolution for
+this host. The short and easy way is to remove the 127.0.1.1 line in
+``/etc/hosts`` and move your hostname to the 127.0.0.1 line.
+
+::
+
+ 127.0.0.1 saucy localhost
+
+ # The following lines are desirable for IPv6 capable hosts
+ ...
+
+.. _Debian Manual: http://www.debian.org/doc/manuals/debian-reference/ch05.en.html#_the_hostname_resolution
+
+.. note::
+
+ This configuration is not recommended except as a quick hack to get
+ you through this guide. Ideally you'd add a line containing the
+ host's actual IP address. More information on why this is broken
+ can be found in the `Debian Manual`_.
+
You are now ready to start your bcfg2 server for the first time.::
- root@lucid:~# /etc/init.d/bcfg2-server start
- root@lucid:~# tail /var/log/syslog
- Dec 17 22:07:02 lucid bcfg2-server[17523]: serving bcfg2-server at https://lucid:6789
- Dec 17 22:07:02 lucid bcfg2-server[17523]: serve_forever() [start]
- Dec 17 22:07:02 lucid bcfg2-server[17523]: Processed 16 fam events in 0.502 seconds. 0 coalesced
+ root@saucy:~# /etc/init.d/bcfg2-server start
+ Starting Configuration Management Server: * bcfg2-server
+ root@saucy:~# tail /var/log/syslog
+ Jul 18 17:50:48 saucy bcfg2-server[5872]: Reconnected to syslog
+ Jul 18 17:50:48 saucy bcfg2-server[5872]: bcfg2-server daemonized
+ Jul 18 17:50:48 saucy bcfg2-server[5872]: service available at https://saucy:6789
+ Jul 18 17:50:48 saucy bcfg2-server[5872]: serving bcfg2-server at https://saucy:6789
+ Jul 18 17:50:48 saucy bcfg2-server[5872]: serve_forever() [start]
+ Jul 18 17:50:48 saucy bcfg2-server[5872]: Handled 13 events in 0.006s
Run bcfg2 to be sure you are able to communicate with the server::
- root@lucid:~# bcfg2 -vqn
+ root@saucy:~# bcfg2 -vqn
+ Starting Bcfg2 client run at 1374188552.53
Loaded tool drivers:
- APT Action DebInit POSIX
-
+ APT Action DebInit POSIX POSIXUsers Upstart VCS
+ Loaded experimental tool drivers:
+ POSIXUsers
Phase: initial
Correct entries: 0
Incorrect entries: 0
Total managed entries: 0
- Unmanaged entries: 382
-
-
+ Unmanaged entries: 590
Phase: final
Correct entries: 0
Incorrect entries: 0
Total managed entries: 0
- Unmanaged entries: 382
+ Unmanaged entries: 590
+ Finished Bcfg2 client run at 1374188563.26
Bring your first machine under Bcfg2 control
============================================
@@ -114,92 +142,101 @@ Setup the :ref:`server-plugins-generators-packages` plugin
Replace Pkgmgr with Packages in the plugins line of ``bcfg2.conf``::
- root@lucid:~# cat /etc/bcfg2.conf
+ root@saucy:~# cat /etc/bcfg2.conf
[server]
repository = /var/lib/bcfg2
- plugins = SSHbase,Cfg,Packages,Rules,Metadata,Base,Bundler
+ plugins = Bundler,Cfg,Metadata,Packages,Rules,SSHbase
+ # Uncomment the following to listen on all interfaces
+ #listen_all = true
[statistics]
sendmailpath = /usr/lib/sendmail
+ #web_debug = False
+ #time_zone =
[database]
- engine = sqlite3
+ #engine = sqlite3
# 'postgresql', 'mysql', 'mysql_old', 'sqlite3' or 'ado_mssql'.
- name =
+ #name =
# Or path to database file if using sqlite3.
- #<repository>/etc/brpt.sqlite is default path if left empty
- user =
+ #<repository>/bcfg2.sqlite is default path if left empty
+ #user =
# Not used with sqlite3.
- password =
+ #password =
# Not used with sqlite3.
- host =
+ #host =
# Not used with sqlite3.
- port =
+ #port =
+
+ [reporting]
+ transport = LocalFilesystem
[communication]
protocol = xmlrpc/ssl
password = secret
- certificate = /etc/bcfg2.crt
- key = /etc/bcfg2.key
- ca = /etc/bcfg2.crt
+ certificate = /etc/ssl/bcfg2.crt
+ key = /etc/ssl/bcfg2.key
+ ca = /etc/ssl/bcfg2.crt
[components]
- bcfg2 = https://lucid:6789
+ bcfg2 = https://saucy:6789
Create Packages layout (as per :ref:`packages-exampleusage`) in
``/var/lib/bcfg2``
.. code-block:: xml
- root@lucid:~# mkdir /var/lib/bcfg2/Packages
- root@lucid:~# cat /var/lib/bcfg2/Packages/packages.conf
+ root@saucy:~# mkdir /var/lib/bcfg2/Packages
+ root@saucy:~# cat /var/lib/bcfg2/Packages/packages.conf
[global]
- root@lucid:~# cat /var/lib/bcfg2/Packages/sources.xml
+ root@saucy:~# cat /var/lib/bcfg2/Packages/sources.xml
<Sources>
- <Group name="ubuntu-lucid">
- <Source type="apt" url="http://archive.ubuntu.com/ubuntu" version="lucid">
+ <Group name="ubuntu-saucy">
+ <Source type="apt" debsrc="true" recommended="true" url="http://archive.ubuntu.com/ubuntu" version="saucy">
<Component>main</Component>
<Component>multiverse</Component>
<Component>restricted</Component>
<Component>universe</Component>
<Arch>amd64</Arch>
+ <Blacklist>bcfg2</Blacklist>
+ <Blacklist>bcfg2-server</Blacklist>
</Source>
- <Source type="apt" url="http://archive.ubuntu.com/ubuntu" version="lucid-updates">
+ <Source type="apt" debsrc="true" recommended="true" url="http://archive.ubuntu.com/ubuntu" version="saucy-updates">
<Component>main</Component>
<Component>multiverse</Component>
<Component>restricted</Component>
<Component>universe</Component>
<Arch>amd64</Arch>
+ <Blacklist>bcfg2</Blacklist>
+ <Blacklist>bcfg2-server</Blacklist>
</Source>
- <Source type="apt" url="http://security.ubuntu.com/ubuntu" version="lucid-security">
+ <Source type="apt" debsrc="true" recommended="true" url="http://security.ubuntu.com/ubuntu" version="saucy-security">
<Component>main</Component>
<Component>multiverse</Component>
<Component>restricted</Component>
<Component>universe</Component>
<Arch>amd64</Arch>
+ <Blacklist>bcfg2</Blacklist>
+ <Blacklist>bcfg2-server</Blacklist>
+ </Source>
+ <Source type="apt" debsrc="true" recommended="true" url="http://ppa.launchpad.net/bcfg2/ppa/ubuntu" version="saucy">
+ <Component>main</Component>
+ <Arch>amd64</Arch>
</Source>
</Group>
</Sources>
-Due to the :ref:`server-plugins-generators-packages-magic-groups`,
-we need to modify our Metadata. Let's add an **ubuntu-lucid**
-group which inherits the **ubuntu** group already present in
-``/var/lib/bcfg2/Metadata/groups.xml``. The resulting file should look
-something like this
-
-.. note::
-
- The reason we are creating a release-specific group in this case is
- that the APTSource above is specific to the lucid release of ubuntu.
- That is, it should not apply to other releases (hardy, maverick, etc).
+Above, we have grouped our package sources under **ubuntu-saucy**. We
+need to add this group to our ``/var/lib/bcfg2/Metadata/groups.xml`` so
+that our client is able to obtain these sources.
.. code-block:: xml
<Groups version='3.0'>
<Group profile='true' public='true' default='true' name='basic'>
- <Group name='ubuntu-lucid'/>
+ <Group name='ubuntu-saucy'/>
</Group>
- <Group name='ubuntu-lucid'>
+ <Group name='ubuntu-saucy'>
<Group name='ubuntu'/>
</Group>
<Group name='ubuntu'/>
@@ -214,22 +251,23 @@ something like this
.. note::
When editing your xml files by hand, it is useful to occasionally run
- `bcfg2-lint` to ensure that your xml validates properly.
+ ``bcfg2-lint -v`` to ensure that your xml validates properly.
-The last thing we need is for the client to have the proper
-arch group membership. For this, we will make use of the
-:ref:`unsorted-dynamic_groups` capabilities of the Probes plugin. Add
-Probes to your plugins line in ``bcfg2.conf`` and create the Probe.
+The last thing we need is for the client to have the proper arch group
+membership. For this, we will make use of the
+:ref:`server-plugins-probes-dynamic-groups` capabilities of the Probes
+plugin. Add Probes to your plugins line in ``bcfg2.conf`` and create
+the Probe.
.. code-block:: sh
- root@lucid:~# grep plugins /etc/bcfg2.conf
- plugins = Base,Bundler,Cfg,...,Probes
- root@lucid:~# mkdir /var/lib/bcfg2/Probes
- root@lucid:~# cat /var/lib/bcfg2/Probes/groups
+ root@saucy:~# grep plugins /etc/bcfg2.conf
+ plugins = Bundler,Cfg,Metadata,...,Probes
+ root@saucy:~# mkdir /var/lib/bcfg2/Probes
+ root@saucy:~# cat /var/lib/bcfg2/Probes/groups
#!/bin/sh
- ARCH=`uname -m`
+ ARCH=$(uname -m)
case "$ARCH" in
"x86_64")
echo "group:amd64"
@@ -241,33 +279,36 @@ Probes to your plugins line in ``bcfg2.conf`` and create the Probe.
Now we restart the bcfg2-server::
- root@lucid:~# /etc/init.d/bcfg2-server restart
+ root@saucy:~# /etc/init.d/bcfg2-server restart
Stopping Configuration Management Server: * bcfg2-server
Starting Configuration Management Server: * bcfg2-server
- root@lucid:~# tail /var/log/syslog
- Dec 17 22:36:47 lucid bcfg2-server[17937]: Packages: File read failed; falling back to file download
- Dec 17 22:36:47 lucid bcfg2-server[17937]: Packages: Updating http://us.archive.ubuntu.com/ubuntu//dists/lucid/main/binary-amd64/Packages.gz
- Dec 17 22:36:54 lucid bcfg2-server[17937]: Packages: Updating http://us.archive.ubuntu.com/ubuntu//dists/lucid/multiverse/binary-amd64/Packages.gz
- Dec 17 22:36:55 lucid bcfg2-server[17937]: Packages: Updating http://us.archive.ubuntu.com/ubuntu//dists/lucid/restricted/binary-amd64/Packages.gz
- Dec 17 22:36:56 lucid bcfg2-server[17937]: Packages: Updating http://us.archive.ubuntu.com/ubuntu//dists/lucid/universe/binary-amd64/Packages.gz
- Dec 17 22:37:27 lucid bcfg2-server[17937]: Failed to read file probed.xml
- Dec 17 22:37:27 lucid bcfg2-server[17937]: Loading experimental plugin(s): Packages
- Dec 17 22:37:27 lucid bcfg2-server[17937]: NOTE: Interfaces subject to change
- Dec 17 22:37:27 lucid bcfg2-server[17937]: service available at https://lucid:6789
- Dec 17 22:37:27 lucid bcfg2-server[17937]: serving bcfg2-server at https://lucid:6789
- Dec 17 22:37:27 lucid bcfg2-server[17937]: serve_forever() [start]
- Dec 17 22:37:28 lucid bcfg2-server[17937]: Processed 17 fam events in 0.502 seconds. 0 coalesced
+ root@saucy:~# tail /var/log/syslog
+ Jul 18 18:43:22 saucy bcfg2-server[6215]: Reconnected to syslog
+ Jul 18 18:43:22 saucy bcfg2-server[6215]: bcfg2-server daemonized
+ Jul 18 18:43:22 saucy bcfg2-server[6215]: service available at https://saucy:6789
+ Jul 18 18:43:22 saucy bcfg2-server[6215]: Failed to read file probed.xml: Error reading file '/var/lib/bcfg2/Probes/probed.xml': failed to load external entity "/var/lib/bcfg2/Probes/probed.xml"
+ Jul 18 18:43:22 saucy bcfg2-server[6215]: serving bcfg2-server at https://saucy:6789
+ Jul 18 18:43:22 saucy bcfg2-server[6215]: serve_forever() [start]
+ Jul 18 18:43:22 saucy bcfg2-server[6215]: Reloading Packages plugin
+ Jul 18 18:43:22 saucy bcfg2-server[6215]: Handled 15 events in 0.205s
+
+.. note::
+
+ The error regarding *probed.xml* is non-fatal and just telling you
+ that the file doesn't yet exist. It will be populated once you have
+ run a client with the Probes plugin enabled.
Start managing packages
-----------------------
-Add a base-packages bundle. Let's see what happens when we just populate
-it with the ubuntu-standard package.
+Add a base-saucy (or whatever release you happen to be using)
+bundle. Let's see what happens when we just populate it with the
+ubuntu-standard package.
.. code-block:: xml
- root@lucid:~# cat /var/lib/bcfg2/Bundler/base-packages.xml
- <Bundle name='base-packages'>
+ root@saucy:~# cat /var/lib/bcfg2/Bundler/base-saucy.xml
+ <Bundle name='base-saucy'>
<Package name='ubuntu-standard'/>
</Bundle>
@@ -277,218 +318,473 @@ profile group might look something like this
.. code-block:: xml
<Group profile='true' public='true' default='true' name='basic'>
- <Bundle name='base-packages'/>
- <Group name='ubuntu-lucid'/>
+ <Bundle name='base-saucy'/>
+ <Group name='ubuntu-saucy'/>
</Group>
Now if we run the client in debug mode (-d), we can see what this has
done for us.::
- root@lucid:~# bcfg2 -vqdn
+ root@saucy:/var/lib/bcfg2# bcfg2 -vqdn
+ Configured logging: DEBUG to console; DEBUG to syslog
+ {'help': False, 'extra': False, 'ppath': '/var/cache/bcfg2', 'ca': '/etc/ssl/bcfg2.crt', 'rpm_version_fail_action': 'upgrade', 'yum_version_fail_action': 'upgrade', 'retry_delay': '1', 'posix_uid_whitelist': [], 'rpm_erase_flags': ['allmatches'], 'verbose': True, 'certificate': '/etc/ssl/bcfg2.crt', 'paranoid': False, 'rpm_installonly': ['kernel', 'kernel-bigmem', 'kernel-enterprise', 'kernel-smp', 'kernel-modules', 'kernel-debug', 'kernel-unsupported', 'kernel-devel', 'kernel-source', 'kernel-default', 'kernel-largesmp-devel', 'kernel-largesmp', 'kernel-xen', 'gpg-pubkey'], 'cache': None, 'yum24_autodep': True, 'yum_pkg_verify': True, 'probe_timeout': None, 'yum_installed_action': 'install', 'rpm_verify_fail_action': 'reinstall', 'dryrun': True, 'retries': '3', 'apt_install_path': '/usr', 'quick': True, 'password': 'secret', 'yum24_installed_action': 'install', 'kevlar': False, 'max_copies': 1, 'syslog': True, 'decision_list': False, 'configfile': '/etc/bcfg2.conf', 'remove': None, 'server': 'https://saucy:6789', 'encoding': 'UTF-8', 'timeout': 90, 'debug': True, 'yum24_installonly': ['kernel', 'kernel-bigmem', 'kernel-enterprise', 'kernel-smp', 'kernel-modules', 'kernel-debug', 'kernel-unsupported', 'kernel-devel', 'kernel-source', 'kernel-default', 'kernel-largesmp-devel', 'kernel-largesmp', 'kernel-xen', 'gpg-pubkey'], 'yum24_erase_flags': ['allmatches'], 'yum24_pkg_checks': True, 'interactive': False, 'apt_etc_path': '/etc', 'rpm_installed_action': 'install', 'yum24_verify_fail_action': 'reinstall', 'omit_lock_check': False, 'yum24_pkg_verify': True, 'serverCN': None, 'file': None, 'apt_var_path': '/var', 'posix_gid_whitelist': [], 'posix_gid_blacklist': [], 'indep': False, 'decision': 'none', 'servicemode': 'default', 'version': False, 'rpm_pkg_checks': True, 'profile': None, 'yum_pkg_checks': True, 'args': [], 'bundle': [], 'posix_uid_blacklist': [], 'user': 'root', 'key': '/etc/ssl/bcfg2.key', 'command_timeout': None, 'probe_exit': True, 'lockfile': '/var/lock/bcfg2.run', 'yum_verify_fail_action': 'reinstall', 'yum24_version_fail_action': 'upgrade', 'yum_verify_flags': [], 'logging': None, 'rpm_pkg_verify': True, 'bundle_quick': False, 'rpm_verify_flags': [], 'yum24_verify_flags': [], 'skipindep': False, 'skipbundle': [], 'portage_binpkgonly': False, 'drivers': ['APK', 'APT', 'Action', 'Blast', 'Chkconfig', 'DebInit', 'Encap', 'FreeBSDInit', 'FreeBSDPackage', 'IPS', 'MacPorts', 'OpenCSW', 'POSIX', 'POSIXUsers', 'Pacman', 'Portage', 'RPM', 'RPMng', 'RcUpdate', 'SELinux', 'SMF', 'SYSV', 'Systemd', 'Upstart', 'VCS', 'YUM', 'YUM24', 'YUMng', 'launchd']}
+ Starting Bcfg2 client run at 1374191628.88
Running probe groups
+ Running: /tmp/tmpEtgdwo
+ < group:amd64
Probe groups has result:
- amd64
+ group:amd64
+
+ POSIX: Handlers loaded: nonexistent, directory, hardlink, symlink, file, device, permissions
Loaded tool drivers:
- APT Action DebInit POSIX
+ APT Action DebInit POSIX POSIXUsers Upstart VCS
+ Loaded experimental tool drivers:
+ POSIXUsers
The following packages are specified in bcfg2:
ubuntu-standard
The following packages are prereqs added by Packages:
- adduser debconf hdparm libdevmapper1.02.1 libk5crypto3 libparted1.8-12 libxml2 passwd upstart
- apt debianutils info libdns53 libkeyutils1 libpci3 logrotate pciutils usbutils
- aptitude dmidecode install-info libelf1 libkrb5-3 libpopt0 lsb-base perl-base wget
- at dnsutils iptables libept0 libkrb5support0 libreadline5 lshw popularity-contest zlib1g
- base-files dosfstools libacl1 libgcc1 liblwres50 libreadline6 lsof psmisc
- base-passwd dpkg libattr1 libgdbm3 libmagic1 libselinux1 ltrace readline-common
- bsdmainutils ed libbind9-50 libgeoip1 libmpfr1ldbl libsigc++-2.0-0c2a man-db rsync
- bsdutils file libc-bin libgmp3c2 libncurses5 libssl0.9.8 memtest86+ sed
- cpio findutils libc6 libgssapi-krb5-2 libncursesw5 libstdc++6 mime-support sensible-utils
- cpp ftp libcap2 libisc50 libpam-modules libusb-0.1-4 ncurses-bin strace
- cpp-4.4 gcc-4.4-base libcomerr2 libisccc50 libpam-runtime libuuid1 netbase time
- cron groff-base libcwidget3 libisccfg50 libpam0g libxapian15 parted tzdata
-
+ accountsservice libdrm2 libusb-1.0-0
+ adduser libedit2 libustr-1.0-1
+ apparmor libelf1 libuuid1
+ apt libexpat1 libwind0-heimdal
+ apt-transport-https libffi6 libx11-6
+ apt-utils libfribidi0 libx11-data
+ base-files libfuse2 libxau6
+ base-passwd libgcc1 libxcb1
+ bash libgck-1-0 libxdmcp6
+ bash-completion libgcr-3-common libxext6
+ bsdmainutils libgcr-base-3-1 libxml2
+ bsdutils libgcrypt11 libxmuu1
+ busybox-initramfs libgdbm3 libxtables10
+ busybox-static libgeoip1 locales
+ ca-certificates libglib2.0-0 login
+ command-not-found libglib2.0-data logrotate
+ command-not-found-data libgnutls26 lsb-base
+ coreutils libgpg-error0 lsb-release
+ cpio libgpm2 lshw
+ cron libgssapi-krb5-2 lsof
+ dash libgssapi3-heimdal ltrace
+ dbus libhcrypto4-heimdal makedev
+ debconf libheimbase1-heimdal man-db
+ debconf-i18n libheimntlm0-heimdal manpages
+ debianutils libhx509-5-heimdal memtest86+
+ diffutils libidn11 mime-support
+ dmidecode libisc92 mlocate
+ dmsetup libisccc90 module-init-tools
+ dnsutils libisccfg90 mount
+ dosfstools libjson-c2 mountall
+ dpkg libjson0 mtr-tiny
+ e2fslibs libk5crypto3 multiarch-support
+ e2fsprogs libkeyutils1 nano
+ ed libklibc ncurses-base
+ file libkmod2 ncurses-bin
+ findutils libkrb5-26-heimdal netbase
+ friendly-recovery libkrb5-3 ntfs-3g
+ ftp libkrb5support0 openssh-client
+ fuse libldap-2.4-2 openssl
+ gcc-4.8-base liblocale-gettext-perl parted
+ geoip-database liblwres90 passwd
+ gettext-base liblzma5 pciutils
+ gnupg libmagic1 perl-base
+ gpgv libmount1 plymouth
+ grep libncurses5 plymouth-theme-ubuntu-text
+ groff-base libncursesw5 popularity-contest
+ gzip libnewt0.52 powermgmt-base
+ hdparm libnfnetlink0 ppp
+ hostname libnih-dbus1 pppconfig
+ ifupdown libnih1 pppoeconf
+ info libnuma1 procps
+ initramfs-tools libp11-kit0 psmisc
+ initramfs-tools-bin libpam-modules python-apt-common
+ initscripts libpam-modules-bin python3
+ insserv libpam-runtime python3-apt
+ install-info libpam-systemd python3-commandnotfound
+ iproute libpam0g python3-dbus
+ iproute2 libparted0debian1 python3-distupgrade
+ iptables libpcap0.8 python3-gdbm
+ iputils-tracepath libpci3 python3-minimal
+ irqbalance libpcre3 python3-update-manager
+ iso-codes libpipeline1 python3.3
+ klibc-utils libplymouth2 python3.3-minimal
+ kmod libpng12-0 readline-common
+ krb5-locales libpolkit-gobject-1-0 rsync
+ language-selector-common libpopt0 sed
+ libaccountsservice0 libprocps0 sensible-utils
+ libacl1 libpython3-stdlib sgml-base
+ libapparmor-perl libpython3.3-minimal shared-mime-info
+ libapparmor1 libpython3.3-stdlib strace
+ libapt-inst1.5 libreadline6 systemd-services
+ libapt-pkg4.12 libroken18-heimdal sysv-rc
+ libasn1-8-heimdal librtmp0 sysvinit-utils
+ libasprintf0c2 libsasl2-2 tar
+ libatm1 libsasl2-modules tcpdump
+ libattr1 libselinux1 telnet
+ libaudit-common libsemanage-common time
+ libaudit1 libsemanage1 tzdata
+ libbind9-90 libsepol1 ubuntu-keyring
+ libblkid1 libslang2 ubuntu-release-upgrader-core
+ libbsd0 libsqlite3-0 ucf
+ libbz2-1.0 libss2 udev
+ libc-bin libssl1.0.0 ufw
+ libc6 libstdc++6 update-manager-core
+ libcap-ng0 libsystemd-daemon0 upstart
+ libcap2 libsystemd-login0 usbutils
+ libcomerr2 libtasn1-3 util-linux
+ libcurl3-gnutls libtext-charwidth-perl uuid-runtime
+ libdb5.1 libtext-iconv-perl wget
+ libdbus-1-3 libtext-wrapi18n-perl whiptail
+ libdbus-glib-1-2 libtinfo5 xauth
+ libdevmapper1.02.1 libudev1 xml-core
+ libdns95 libusb-0.1-4 zlib1g
Phase: initial
- Correct entries: 101
+ Correct entries: 280
Incorrect entries: 0
- Total managed entries: 101
- Unmanaged entries: 281
-
-
+ Total managed entries: 280
+ Unmanaged entries: 313
+ Installing entries in the following bundle(s):
+ base-saucy
+ Bundle base-saucy was not modified
Phase: final
- Correct entries: 101
+ Correct entries: 280
Incorrect entries: 0
- Total managed entries: 101
- Unmanaged entries: 281
+ Total managed entries: 280
+ Unmanaged entries: 313
+ Finished Bcfg2 client run at 1374191642.69
As you can see, the Packages plugin has generated the dependencies
required for the ubuntu-standard package for us automatically. The
ultimate goal should be to move all the packages from the **Unmanaged**
entries section to the **Managed** entries section. So, what exactly *are*
-those Unmanaged entries?::
+those Unmanaged entries?
+
+::
- root@lucid:~# bcfg2 -vqen
+ Starting Bcfg2 client run at 1374192077.76
Running probe groups
Probe groups has result:
- amd64
- Loaded tool drivers:
- APT Action DebInit POSIX
+ group:amd64
+ Loaded tool drivers:
+ APT Action DebInit POSIX POSIXUsers Upstart VCS
+ Loaded experimental tool drivers:
+ POSIXUsers
Phase: initial
- Correct entries: 101
+ Correct entries: 280
Incorrect entries: 0
- Total managed entries: 101
- Unmanaged entries: 281
-
-
+ Total managed entries: 280
+ Unmanaged entries: 313
Phase: final
- Correct entries: 101
+ Correct entries: 280
Incorrect entries: 0
- Total managed entries: 101
- Unmanaged entries: 281
- Package:apparmor
- Package:apparmor-utils
- Package:apport
- ...
-
-Now you can go through these and continue adding the packages you want to
-your Bundle. Note that ``aptitude why`` is useful when trying to figure
-out the reason for a package being installed. Also, deborphan is helpful
-for removing leftover dependencies which are no longer needed. After a
-while, I ended up with a minimal bundle that looks like this
+ Total managed entries: 280
+ Unmanaged entries: 313
+ POSIXGroup:adm
+ POSIXGroup:audio
+ POSIXGroup:backup
+ ...
+ Package:deb:apt-xapian-index
+ Package:deb:aptitude
+ Package:deb:aptitude-common
+ ...
+
+Now you can go through these and continue adding the packages you want
+to your Bundle. Note that ``aptitude why`` is useful when trying to
+figure out the reason for a package being installed. Also, ``deborphan``
+is helpful for removing leftover dependencies which are no longer
+needed. After a while, I ended up with a minimal bundle that looks
+like this:
.. code-block:: xml
- <Bundle name='base-packages'>
- <Package name='bash-completion'/>
+ <Bundle name='base-saucy'>
+ <!-- packages -->
<Package name='bcfg2-server'/>
- <Package name='debconf-i18n'/>
+ <!-- or dependencies -->
+ <Package name='python-pyinotify'/>
+ <Package name='ttf-dejavu-core'/>
+ <Package name='bind9-host'/>
+ <Package name='crda'/>
<Package name='deborphan'/>
- <Package name='diffutils'/>
- <Package name='e2fsprogs'/>
- <Package name='fam'/>
- <Package name='grep'/>
<Package name='grub-pc'/>
- <Package name='gzip'/>
- <Package name='hostname'/>
- <Package name='krb5-config'/>
- <Package name='krb5-user'/>
- <Package name='language-pack-en-base'/>
+ <Package name='language-pack-en'/>
<Package name='linux-generic'/>
<Package name='linux-headers-generic'/>
- <Package name='login'/>
- <Package name='manpages'/>
- <Package name='mlocate'/>
- <Package name='ncurses-base'/>
- <Package name='openssh-server'/>
- <Package name='python-fam'/>
- <Package name='tar'/>
+ <Package name='systemd-shim'/>
+ <Package name='tasksel'/>
<Package name='ubuntu-minimal'/>
<Package name='ubuntu-standard'/>
+ <!-- or dependencies -->
+ <Package name='python3-gi'/>
+ <Package name='wamerican'/>
+ <Package name='wbritish'/>
<Package name='vim'/>
- <Package name='vim-runtime'/>
-
- <!-- PreDepends -->
- <Package name='dash'/>
- <Package name='initscripts'/>
- <Package name='libdbus-1-3'/>
- <Package name='libnih-dbus1'/>
- <Package name='lzma'/>
- <Package name='mountall'/>
- <Package name='sysvinit-utils'/>
- <Package name='sysv-rc'/>
-
- <!-- vim dependencies -->
- <Package name='libgpm2'/>
- <Package name='libpython2.6'/>
</Bundle>
-As you can see below, I no longer have any unmanaged packages. ::
+Once your ``bcfg2 -vqen`` output no longer shows Package entries, you
+can move on to the next section.
- root@lucid:~# bcfg2 -vqen
- Running probe groups
- Probe groups has result:
- amd64
- Loaded tool drivers:
- APT Action DebInit POSIX
+Manage users
+------------
- Phase: initial
- Correct entries: 247
- Incorrect entries: 0
- Total managed entries: 247
- Unmanaged entries: 10
+The default setting in ``login.defs`` is for system accounts to be UIDs
+< 1000. We will ignore those accounts for now (you can manage them if
+you like at a later time).
+To ignore system UID/GIDs, add the following lines to ``bcfg2.conf``
+(we will also ignore the nobody uid and nogroup gid--65534).
- Phase: final
- Correct entries: 247
- Incorrect entries: 0
- Total managed entries: 247
- Unmanaged entries: 10
- Service:bcfg2 Service:fam Service:killprocs Service:rc.local Service:single
- Service:bcfg2-server Service:grub-common Service:ondemand Service:rsync Service:ssh
+::
+
+ [POSIXUsers]
+ uid_blacklist = 0-999,65534
+ gid_blacklist = 0-999,65534
+
+If you run the client again with ``bcfg2 -vqen``, you should now see a
+:ref:`POSIXUser <server-plugins-generators-rules-posixuser-tag>` entry
+and :ref:`POSIXGroup <server-plugins-generators-rules-posixgroup-tag>`
+entry for your user account (assuming this is a fresh install with a
+regular user).
+
+You can manage this user by adding the following to your bundle.
+
+.. code-block:: xml
+
+ <BoundPOSIXUser name='username' uid='1000' gecos="Your Name">
+ <MemberOf>adm</MemberOf>
+ <MemberOf>cdrom</MemberOf>
+ <MemberOf>dip</MemberOf>
+ <MemberOf>lpadmin</MemberOf>
+ <MemberOf>plugdev</MemberOf>
+ <MemberOf>sambashare</MemberOf>
+ <MemberOf>sudo</MemberOf>
+ </BoundPOSIXUser>
Manage services
---------------
-Now let's clear up the unmanaged service entries by adding the following
-entries to our bundle...
+To clear up the unmanaged service entries, you will need to add the
+entries to your bundle. Here's an example of what that might look like.
.. code-block:: xml
- <!-- basic services -->
+ <!-- services -->
<Service name='bcfg2'/>
+ <Service name='bcfg2-report-collector'/>
<Service name='bcfg2-server'/>
- <Service name='fam'/>
+ <Service name='bootmisc.sh'/>
+ <Service name='checkfs.sh'/>
+ <Service name='checkroot-bootclean.sh'/>
+ <Service name='checkroot.sh'/>
+ <Service name='console'/>
+ <Service name='console-font'/>
+ <Service name='console-setup'/>
+ <Service name='container-detect'/>
+ <Service name='control-alt-delete'/>
+ <Service name='cron'/>
+ <Service name='dbus'/>
+ <Service name='dmesg'/>
+ <Service name='dns-clean'/>
+ <Service name='failsafe'/>
+ <Service name='flush-early-job-log'/>
+ <Service name='friendly-recovery'/>
<Service name='grub-common'/>
+ <Service name='hostname'/>
+ <Service name='hwclock'/>
+ <Service name='hwclock-save'/>
+ <Service name='irqbalance'/>
<Service name='killprocs'/>
+ <Service name='kmod'/>
+ <Service name='mountall'/>
+ <Service name='mountall.sh'/>
+ <Service name='mountall-bootclean.sh'/>
+ <Service name='mountall-net'/>
+ <Service name='mountall-reboot'/>
+ <Service name='mountall-shell'/>
+ <Service name='mountdevsubfs.sh'/>
+ <Service name='mounted-debugfs'/>
+ <Service name='mounted-dev'/>
+ <Service name='mounted-proc'/>
+ <Service name='mounted-run'/>
+ <Service name='mounted-tmp'/>
+ <Service name='mounted-var'/>
+ <Service name='mountkernfs.sh'/>
+ <Service name='mountnfs-bootclean.sh'/>
+ <Service name='mountnfs.sh'/>
+ <Service name='mtab.sh'/>
+ <Service name='network-interface'/>
+ <Service name='network-interface-container'/>
+ <Service name='network-interface-security'/>
+ <Service name='networking'/>
<Service name='ondemand'/>
+ <Service name='passwd'/>
+ <Service name='plymouth'/>
+ <Service name='plymouth-log'/>
+ <Service name='plymouth-ready'/>
+ <Service name='plymouth-splash'/>
+ <Service name='plymouth-stop'/>
+ <Service name='plymouth-upstart-bridge'/>
+ <Service name='pppd-dns'/>
+ <Service name='procps'/>
+ <Service name='rc'/>
<Service name='rc.local'/>
+ <Service name='rc-sysinit'/>
+ <Service name='rcS'/>
+ <Service name='resolvconf'/>
<Service name='rsync'/>
+ <Service name='rsyslog'/>
+ <Service name='setvtrgb'/>
+ <Service name='shutdown'/>
<Service name='single'/>
- <Service name='ssh'/>
-
-
-...and bind them in Rules
+ <Service name='startpar-bridge'/>
+ <Service name='sudo'/>
+ <Service name='systemd-logind'/>
+ <Service name='tty1'/>
+ <Service name='tty2'/>
+ <Service name='tty3'/>
+ <Service name='tty4'/>
+ <Service name='tty5'/>
+ <Service name='tty6'/>
+ <Service name='udev'/>
+ <Service name='udev-fallback-graphics'/>
+ <Service name='udev-finish'/>
+ <Service name='udevmonitor'/>
+ <Service name='udevtrigger'/>
+ <Service name='ufw'/>
+ <Service name='upstart-file-bridge'/>
+ <Service name='upstart-socket-bridge'/>
+ <Service name='upstart-udev-bridge'/>
+ <Service name='ureadahead'/>
+ <Service name='ureadahead-other'/>
+ <Service name='wait-for-state'/>
+
+Add the literal entries in Rules to bind the Service entries from above.
.. code-block:: xml
- root@lucid:~# cat /var/lib/bcfg2/Rules/services.xml
+ root@saucy:~# cat /var/lib/bcfg2/Rules/services.xml
<Rules priority='1'>
- <!-- basic services -->
- <Service type='deb' status='on' name='bcfg2'/>
- <Service type='deb' status='on' name='bcfg2-server'/>
- <Service type='deb' status='on' name='fam'/>
- <Service type='deb' status='on' name='grub-common'/>
- <Service type='deb' status='on' name='killprocs'/>
- <Service type='deb' status='on' name='ondemand'/>
- <Service type='deb' status='on' name='rc.local'/>
- <Service type='deb' status='on' name='rsync'/>
- <Service type='deb' status='on' name='single'/>
- <Service type='deb' status='on' name='ssh'/>
+ <!-- sysv services -->
+ <Service name='bcfg2' type='deb' status='on'/>
+ <Service name='bcfg2-server' type='deb' status='on'/>
+ <Service name='dns-clean' type='deb' status='on'/>
+ <Service name='grub-common' type='deb' status='on'/>
+ <Service name='sudo' type='deb' status='on'/>
+
+ <Service name='killprocs' type='deb' bootstatus='on' status='ignore'/>
+ <Service name='ondemand' type='deb' bootstatus='on' status='ignore'/>
+ <Service name='pppd-dns' type='deb' bootstatus='on' status='ignore'/>
+ <Service name='rc.local' type='deb' bootstatus='on' status='ignore'/>
+ <Service name='rsync' type='deb' bootstatus='on' status='ignore'/>
+ <Service name='single' type='deb' bootstatus='on' status='ignore'/>
+
+ <Service name='bcfg2-report-collector' type='deb' status='off'/>
+
+ <!-- upstart services -->
+ <Service name='bootmisc.sh' type='upstart' status='on'/>
+ <Service name='checkfs.sh' type='upstart' status='on'/>
+ <Service name='checkroot-bootclean.sh' type='upstart' status='on'/>
+ <Service name='checkroot.sh' type='upstart' status='on'/>
+ <Service name='cron' type='upstart' status='on'/>
+ <Service name='dbus' type='upstart' status='on'/>
+ <Service name='mountall.sh' type='upstart' status='on'/>
+ <Service name='mountall-bootclean.sh' type='upstart' status='on'/>
+ <Service name='mountdevsubfs.sh' type='upstart' status='on'/>
+ <Service name='mountkernfs.sh' type='upstart' status='on'/>
+ <Service name='mountnfs-bootclean.sh' type='upstart' status='on'/>
+ <Service name='mountnfs.sh' type='upstart' status='on'/>
+ <Service name='mtab.sh' type='upstart' status='on'/>
+ <Service name='network-interface' type='upstart' status='on' parameters='INTERFACE=eth0'/>
+ <Service name='network-interface-security' type='upstart' status='on' parameters='JOB=network-interface/eth0'/>
+ <Service name='networking' type='upstart' status='on'/>
+ <Service name='plymouth-ready' type='upstart' status='ignore'/>
+ <Service name='resolvconf' type='upstart' status='on'/>
+ <Service name='rsyslog' type='upstart' status='on'/>
+ <Service name='startpar-bridge' type='upstart' status='ignore'/>
+ <Service name='systemd-logind' type='upstart' status='on'/>
+ <Service name='tty1' type='upstart' status='on'/>
+ <Service name='tty2' type='upstart' status='on'/>
+ <Service name='tty3' type='upstart' status='on'/>
+ <Service name='tty4' type='upstart' status='on'/>
+ <Service name='tty5' type='upstart' status='on'/>
+ <Service name='tty6' type='upstart' status='on'/>
+ <Service name='udev' type='upstart' status='on'/>
+ <Service name='ufw' type='upstart' status='on'/>
+ <Service name='upstart-file-bridge' type='upstart' status='on'/>
+ <Service name='upstart-socket-bridge' type='upstart' status='on'/>
+ <Service name='upstart-udev-bridge' type='upstart' status='on'/>
+ <Service name='wait-for-state' type='upstart' status='ignore'/>
+
+ <Service name='console' type='upstart' status='off'/>
+ <Service name='console-font' type='upstart' status='off'/>
+ <Service name='console-setup' type='upstart' status='off'/>
+ <Service name='container-detect' type='upstart' status='off'/>
+ <Service name='control-alt-delete' type='upstart' status='off'/>
+ <Service name='dmesg' type='upstart' status='off'/>
+ <Service name='failsafe' type='upstart' status='off'/>
+ <Service name='flush-early-job-log' type='upstart' status='off'/>
+ <Service name='friendly-recovery' type='upstart' status='off'/>
+ <Service name='hostname' type='upstart' status='off'/>
+ <Service name='hwclock' type='upstart' status='off'/>
+ <Service name='hwclock-save' type='upstart' status='off'/>
+ <Service name='irqbalance' type='upstart' status='off'/>
+ <Service name='kmod' type='upstart' status='off'/>
+ <Service name='mountall' type='upstart' status='off'/>
+ <Service name='mountall-net' type='upstart' status='off'/>
+ <Service name='mountall-reboot' type='upstart' status='off'/>
+ <Service name='mountall-shell' type='upstart' status='off'/>
+ <Service name='mounted-debugfs' type='upstart' status='off'/>
+ <Service name='mounted-dev' type='upstart' status='off'/>
+ <Service name='mounted-proc' type='upstart' status='off'/>
+ <Service name='mounted-run' type='upstart' status='off'/>
+ <Service name='mounted-tmp' type='upstart' status='off'/>
+ <Service name='mounted-var' type='upstart' status='off'/>
+ <Service name='network-interface-container' type='upstart' status='off'/>
+ <Service name='passwd' type='upstart' status='off'/>
+ <Service name='plymouth' type='upstart' status='off'/>
+ <Service name='plymouth-log' type='upstart' status='off'/>
+ <Service name='plymouth-splash' type='upstart' status='off'/>
+ <Service name='plymouth-stop' type='upstart' status='off'/>
+ <Service name='plymouth-upstart-bridge' type='upstart' status='off'/>
+ <Service name='procps' type='upstart' status='off'/>
+ <Service name='rc' type='upstart' status='off'/>
+ <Service name='rc-sysinit' type='upstart' status='off'/>
+ <Service name='rcS' type='upstart' status='off'/>
+ <Service name='setvtrgb' type='upstart' status='off'/>
+ <Service name='shutdown' type='upstart' status='off'/>
+ <Service name='udev-fallback-graphics' type='upstart' status='off'/>
+ <Service name='udev-finish' type='upstart' status='off'/>
+ <Service name='udevmonitor' type='upstart' status='off'/>
+ <Service name='udevtrigger' type='upstart' status='off'/>
+ <Service name='ureadahead' type='upstart' status='off'/>
+ <Service name='ureadahead-other' type='upstart' status='off'/>
</Rules>
-Now we run the client and see there are no more unmanaged entries! ::
+Now we run the client and see there are no more unmanaged entries!
- root@lucid:~# bcfg2 -vqn
+::
+
+ root@saucy:~# bcfg2 -vqn
+ Starting Bcfg2 client run at 1374271524.83
Running probe groups
Probe groups has result:
- amd64
- Loaded tool drivers:
- APT Action DebInit POSIX
+ group:amd64
+ Loaded tool drivers:
+ APT Action DebInit POSIX POSIXUsers Upstart VCS
+ Loaded experimental tool drivers:
+ POSIXUsers
Phase: initial
- Correct entries: 257
+ Correct entries: 519
Incorrect entries: 0
- Total managed entries: 257
+ Total managed entries: 519
Unmanaged entries: 0
-
- All entries correct.
-
Phase: final
- Correct entries: 257
+ Correct entries: 519
Incorrect entries: 0
- Total managed entries: 257
+ Total managed entries: 519
Unmanaged entries: 0
-
All entries correct.
+ Finished Bcfg2 client run at 1374271541.56
.. warning::
diff --git a/doc/appendix/tools.txt b/doc/appendix/tools.txt
index 1d7a8dd90..92bde683b 100644
--- a/doc/appendix/tools.txt
+++ b/doc/appendix/tools.txt
@@ -11,4 +11,4 @@ can help you to maintain your Bcfg2 configuration, to make the initial
setup easier, or to do some other tasks.
-http://trac.mcs.anl.gov/projects/bcfg2/browser/tools
+https://github.com/Bcfg2/bcfg2/tree/maint/tools
diff --git a/doc/client/tools/actions.txt b/doc/client/tools/actions.txt
index 81486ecd1..e5fdb1f39 100644
--- a/doc/client/tools/actions.txt
+++ b/doc/client/tools/actions.txt
@@ -31,10 +31,11 @@ central reporting of action failure is desired, set this attribute to
'check'. Also note that Action entries included in Base will not be
executed.
-Actions cannot be completely defined inside of a bundle; they are a bound
-entry, much like Packages, Services or Paths. The Rules plugin can bind
-these entries. For example to include the above action in a bundle,
-first the Action entry must be included in the bundle:
+Actions may be completely defined inside of a bundle with the use of
+:ref:`server-configurationentries`, much like Packages, Services or Paths.
+The Rules plugin can also bind these entries. For example, to include the
+above action in a bundle, first the Action entry must be included in the
+bundle:
.. code-block:: xml
@@ -70,3 +71,18 @@ requires this key.
<Action timing='post' name='apt-key-update' command='apt-key adv --recv-keys --keyserver hkp://pgp.mit.edu 0C5A2783' when='modified' status='check'/>
</Group>
</Rules>
+
+Example BoundAction (add RPM GPG keys)
+======================================
+
+This example will add the RPM-GPG-KEY-redhat-release key to the RPM
+GPG keyring **before** Package entries are handled on the client run.
+
+.. code-block:: xml
+
+ <Bundle name="rpm-gpg-keys">
+ <Group name='rhel'>
+ <Path name="/etc/pki/rpm-gpg/RPM-GPG-KEY-redhat-release"/>
+ <BoundAction timing="pre" name="install rpm key" command="rpm --import /etc/pki/rpm-gpg/RPM-GPG-KEY-redhat-release" when="modified" status="check"/>
+ </Group>
+ </Bundle>
diff --git a/doc/client/tools/augeas.txt b/doc/client/tools/augeas.txt
new file mode 100644
index 000000000..6fed5f5ce
--- /dev/null
+++ b/doc/client/tools/augeas.txt
@@ -0,0 +1,95 @@
+.. -*- mode: rst -*-
+
+.. _client-tools-augeas:
+
+========
+ Augeas
+========
+
+The Augeas tool provides a way to use `Augeas
+<http://www.augeas.net>`_ to edit files that may not be completely
+managed.
+
+In the simplest case, you simply tell Augeas which path to edit, and
+give it a sequence of commands:
+
+.. code-block:: xml
+
+ <Path type="augeas" name="/etc/hosts" owner="root" group="root"
+ mode="0644">
+ <Set path="01/ipaddr" value="192.168.0.1"/>
+ <Set path="01/canonical" value="pigiron.example.com"/>
+ <Set path="01/alias[1]" value="pigiron"/>
+ <Set path="01/alias[2]" value="piggy"/>
+ </Path>
+
+The commands are run in document order. There's no need to do an
+explicit ``save`` at the end.
+
+These commands will be run if any of the paths do not already
+have the given setting. In other words, if any command has not
+already been run, they will all be run.
+
+So, if the first host already has all of the specified settings, then
+that Path will verify successfully and nothing will be changed. But
+suppose the first host looks like this::
+
+ 192.168.0.1 pigiron.example.com pigiron
+
+All that is missing is the second alias, ``piggy``. The entire Augeas
+script will be run in this case. It's important, then, to ensure that
+all commands you use are idempotent. (For instance, the ``Move`` and
+``Insert`` commands are unlikely to be useful.)
+
+The Augeas paths are all relative to ``/files/etc/hosts``.
+
+The Augeas tool understands a subset of ``augtool`` commands. Valid
+tags are: ``Remove``, ``Move``, ``Set``, ``Clear``, ``SetMulti``, and
+``Insert``. Refer to the official Augeas docs or the `Schema`_ below
+for details on the commands.
+
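+For instance, ``Remove`` deletes a node (and everything below it)
+given a ``path``, much like ``Set``. The following is only a sketch;
+the attribute names used here are assumptions, so check them against
+the `Schema`_ below:
+
+.. code-block:: xml
+
+    <Path type="augeas" name="/etc/hosts" owner="root" group="root"
+          mode="0644">
+      <Set path="01/canonical" value="pigiron.example.com"/>
+      <!-- attribute name assumed; drop any stale entry -->
+      <Remove path="*[ipaddr='192.168.0.99']"/>
+    </Path>
+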
+The Augeas tool also supports one additional directive, ``Initial``,
+for setting initial file content when a file does not exist. For
+instance, the ``Xml`` lens fails to parse a file that does not exist,
+and, as a result, you cannot add content to it. You can use
+``Initial`` to circumvent this issue:
+
+.. code-block:: xml
+
+ <Path type="augeas" name="/etc/test.xml" lens="Xml"
+ owner="root" group="root" mode="0640">
+ <Initial>&lt;Test/&gt;</Initial>
+ <Set path="Test/#text" value="text content"/>
+ </Path>
+
+Editing files outside the default load path
+===========================================
+
+If you're using Augeas to edit files outside of its default load path,
+you must manually specify the lens. For instance:
+
+.. code-block:: xml
+
+ <Path type="augeas" name="/opt/jenkins/home/config.xml" lens="Xml"
+ owner="jenkins" group="jenkins" mode="0640">
+ <Set path="hudson/systemMessage/#text"
+ value="This is a Jenkins server."/>
+ </Path>
+
+Note that there's no need to manually modify the load path by setting
+``/augeas/load/<lens>/incl``, nor do you have to call ``load``
+explicitly.
+
+Schema
+======
+
+.. xml:group:: augeasCommands
+
+
+Performance
+===========
+
+The Augeas tool is quite slow to initialize. For each ``<Path
+type="augeas" ... >`` entry you have, it creates a new Augeas object
+internally, which can take several seconds. It's thus important to
+use this tool sparingly.
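+
+If several partially-managed settings live in the same file, it is
+cheaper to consolidate them into a single ``Path`` entry (one Augeas
+object) than to declare one entry per setting. A sketch, using the
+stock ``Sshd`` lens paths:
+
+.. code-block:: xml
+
+    <!-- one Augeas object covers all three edits -->
+    <Path type="augeas" name="/etc/ssh/sshd_config"
+          owner="root" group="root" mode="0600">
+      <Set path="PermitRootLogin" value="no"/>
+      <Set path="PasswordAuthentication" value="no"/>
+      <Set path="X11Forwarding" value="no"/>
+    </Path>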
diff --git a/doc/conf.py b/doc/conf.py
index d3d30687b..0e4009cd3 100644
--- a/doc/conf.py
+++ b/doc/conf.py
@@ -66,7 +66,7 @@ else:
# The short X.Y version.
version = '1.3'
# The full version, including alpha/beta/rc tags.
-release = '1.3.1'
+release = '1.3.3'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
@@ -254,6 +254,8 @@ man_pages = [
[], 8),
('man/bcfg2-lint.conf', 'bcfg2-lint.conf',
'Configuration parameters for bcfg2-lint', [], 5),
+ ('man/bcfg2-report-collector', 'bcfg2-report-collector',
+ 'Reports collection daemon', [], 8),
('man/bcfg2-reports', 'bcfg2-reports',
'Query reporting system for client status', [], 8),
('man/bcfg2-server', 'bcfg2-server',
diff --git a/doc/contents.txt b/doc/contents.txt
index 8af0d808a..8220d0d1d 100644
--- a/doc/contents.txt
+++ b/doc/contents.txt
@@ -7,7 +7,7 @@ Bcfg2 documentation |release|
=============================
.. toctree::
- :maxdepth: 2
+ :maxdepth: 3
introduction/index
installation/index
diff --git a/doc/development/lint.txt b/doc/development/lint.txt
index 6a4651f92..6c0be960d 100644
--- a/doc/development/lint.txt
+++ b/doc/development/lint.txt
@@ -106,6 +106,11 @@ Basics
Existing ``bcfg2-lint`` Plugins
===============================
+AWSTagsLint
+-----------
+
+.. autoclass:: Bcfg2.Server.Plugins.AWSTags.AWSTagsLint
+
BundlerLint
-----------
diff --git a/doc/getting_started/index.txt b/doc/getting_started/index.txt
index a9e91e6b8..a9b1b847f 100644
--- a/doc/getting_started/index.txt
+++ b/doc/getting_started/index.txt
@@ -1,4 +1,5 @@
.. -*- mode: rst -*-
+.. vim: ft=rst
.. _getting_started-index:
@@ -115,7 +116,7 @@ files: ``clients.xml`` and ``groups.xml``. Your current
.. code-block:: xml
- <Clients version="3.0">
+ <Clients>
<Client profile="basic" pingable="Y" pingtime="0" name="bcfg-server.example.com"/>
</Clients>
@@ -132,7 +133,7 @@ Our simple ``groups.xml`` file looks like:
.. code-block:: xml
- <Groups version='3.0'>
+ <Groups>
<Group profile='true' public='false' name='basic'>
<Group name='suse'/>
</Group>
@@ -168,14 +169,14 @@ Next, we create a motd.xml file in the Bundler directory:
.. code-block:: xml
- <Bundle name='motd' version='2.0'>
+ <Bundle name='motd'>
<Path name='/etc/motd' />
</Bundle>
Now when we run the client, we get slightly different output::
Loaded tool drivers:
- Chkconfig POSIX YUMng
+ Chkconfig POSIX YUM
Incomplete information for entry Path:/etc/motd; cannot verify
Phase: initial
@@ -205,7 +206,7 @@ real ``/etc/motd`` file to that location, run the client again, and
you will find that we now have a correct entry::
Loaded tool drivers:
- Chkconfig POSIX PostInstall RPM
+ Chkconfig POSIX YUM
Phase: initial
Correct entries: 1
diff --git a/doc/installation/distributions.txt b/doc/installation/distributions.txt
index 3dcfd7721..9db111682 100644
--- a/doc/installation/distributions.txt
+++ b/doc/installation/distributions.txt
@@ -66,19 +66,7 @@ This way is not recommended on production systems. Only for testing.
Gentoo
======
-Early in July 2008, Bcfg2 was added to the Gentoo portage tree. So far
-it's still keyworded for all architectures, but we are actively working
-to get it marked as stable.
-
-If you don't use portage to install Bcfg2, you'll want to make sure you
-have all the prerequisites installed first. For a server, you'll need:
-
-* ``app-admin/gamin`` or ``app-admin/fam``
-* ``dev-python/lxml``
-
-Clients will need at least:
-
-* ``app-portage/gentoolkit``
+Bcfg2 can be installed via portage.
OS X
====
diff --git a/doc/installation/prerequisites.txt b/doc/installation/prerequisites.txt
index 0cb721bb9..e920f955b 100644
--- a/doc/installation/prerequisites.txt
+++ b/doc/installation/prerequisites.txt
@@ -21,7 +21,7 @@ Bcfg2 Client
+----------------------------+------------------------+--------------------------------+
| libxslt (if lxml is used) | Any | libxml2 |
+----------------------------+------------------------+--------------------------------+
-| python | 2.4 and greater [#f1] | |
+| python | 2.4 and greater [#f1]_ | |
+----------------------------+------------------------+--------------------------------+
| lxml or elementtree [#f2]_ | Any | lxml: libxml2, libxslt, python |
+----------------------------+------------------------+--------------------------------+
@@ -50,12 +50,23 @@ Bcfg2 Server
+-------------------------------+----------+--------------------------------+
| lxml | 0.9+ | lxml: libxml2, libxslt, python |
+-------------------------------+----------+--------------------------------+
-| gamin or fam | Any | |
+| gamin or inotify | Any | |
+-------------------------------+----------+--------------------------------+
-| python-gamin or python-fam | Any | gamin or fam, python |
+| python-gamin or pyinotify | Any | gamin or inotify, python |
+-------------------------------+----------+--------------------------------+
-| M2crypto or python-ssl (note | Any | python, openssl |
-| that the ssl module is | | |
-| included in python versions | | |
-| 2.6 and later | | |
+| python-ssl (python < 2.6) | Any | python, backported ssl module |
++-------------------------------+----------+--------------------------------+
+
+Bcfg2 Reporting
+---------------
+
+Web reporting requires a web server capable of running WSGI
+applications, such as Apache + mod_wsgi or nginx.
+
++-------------------------------+----------+--------------------------------+
+| Software | Version | Requires |
++===============================+==========+================================+
+| django | 1.2.0+ | |
++-------------------------------+----------+--------------------------------+
+| south | 0.7.0+ | |
+-------------------------------+----------+--------------------------------+
diff --git a/doc/installation/source.txt b/doc/installation/source.txt
index 1406a5ceb..064371e87 100644
--- a/doc/installation/source.txt
+++ b/doc/installation/source.txt
@@ -2,7 +2,7 @@
.. _GPG1: http://pgp.mit.edu:11371/pks/lookup?op=get&search=0x75BF2C177F7D197E
.. _GPG2: http://pgp.mit.edu:11371/pks/lookup?op=get&search=0x80B8492FA88FFF4B
-.. _Download: http://trac.mcs.anl.gov/projects/bcfg2/wiki/Download
+.. _Download: http://bcfg2.org/download/
.. _source:
diff --git a/doc/man/bcfg2-report-collector.txt b/doc/man/bcfg2-report-collector.txt
new file mode 100644
index 000000000..07c618537
--- /dev/null
+++ b/doc/man/bcfg2-report-collector.txt
@@ -0,0 +1,40 @@
+.. -*- mode: rst -*-
+.. vim: ft=rst
+
+
+bcfg2-report-collector
+======================
+
+.. program:: bcfg2-report-collector
+
+Synopsis
+--------
+
+**bcfg2-report-collector** [*options*]
+
+Description
+-----------
+
+:program:`bcfg2-report-collector` runs a daemon to collect logs from the
+LocalFilesystem :ref:`Bcfg2 Reports <reports-dynamic>` transport object
+and add them to the Reporting storage backend.
+
+Options
+-------
+
+-C configfile Specify alternate bcfg2.conf location.
+-D pidfile Daemonize, placing the program pid in *pidfile*.
+-E encoding Specify the encoding of config files.
+-Q path Specify the path to the server repository.
+-W configfile Specify the path to the web interface
+ configuration file.
+-d Enable debugging output.
+-h Print usage information.
+-o path Set the path of the log file.
+-v Run in verbose mode.
+--version Print the version and exit.
+
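+For example, to run the collector in the foreground with verbose
+output against the default configuration file (a sketch)::
+
+    bcfg2-report-collector -C /etc/bcfg2.conf -v
+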
+See Also
+--------
+
+:manpage:`bcfg2-server(8)`, :manpage:`bcfg2-reports(8)`
diff --git a/doc/man/bcfg2-server.txt b/doc/man/bcfg2-server.txt
index d5945cad6..3f8f3ea21 100644
--- a/doc/man/bcfg2-server.txt
+++ b/doc/man/bcfg2-server.txt
@@ -23,8 +23,7 @@ Options
-------
-C configfile Specify alternate bcfg2.conf location.
--D pidfile Daemonize, placing the program pid in the specified
- pidfile.
+-D pidfile Daemonize, placing the program pid in *pidfile*.
-E encoding Specify the encoding of config files.
-Q path Specify the path to the server repository.
-S server Manually specify the server location (as opposed to
diff --git a/doc/man/bcfg2.conf.txt b/doc/man/bcfg2.conf.txt
index 3a0217aef..6faf48a1a 100644
--- a/doc/man/bcfg2.conf.txt
+++ b/doc/man/bcfg2.conf.txt
@@ -46,6 +46,12 @@ filemonitor
fam
pseudo
+fam_blocking
+ Whether the server should block at startup until the file monitor
+ backend has processed all events. This can cause a slower startup,
+ but ensures that all files are recognized before the first client
+ is handled.
+
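+ For example (a sketch; like ``filemonitor``, this option lives in the
+ ``[server]`` section)::
+
+     [server]
+     filemonitor = inotify
+     fam_blocking = True
+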
ignore_files
A comma-separated list of globs that should be ignored by the file
monitor. Default values are::
@@ -729,6 +735,11 @@ control the database connection of the server.
port
Port for database connections. Not used for sqlite3.
+ options
+ Various options for the database connection. The value is
+ given as comma-separated key=value pairs. The available keys
+ depend on the database engine.
+
Reporting options
-----------------
diff --git a/doc/reports/dynamic.txt b/doc/reports/dynamic.txt
index 9de3f868f..6b8a1f467 100644
--- a/doc/reports/dynamic.txt
+++ b/doc/reports/dynamic.txt
@@ -39,7 +39,7 @@ Prerequisites
* sqlite3
* pysqlite2 (if using python 2.4)
-* `Django <http://www.djangoproject.com>`_ >= 1.2
+* `Django <http://www.djangoproject.com>`_ >= 1.3
* mod-wsgi
.. warning::
diff --git a/doc/server/admin/index.txt b/doc/server/admin/index.txt
index ee03cedda..c563ead9c 100644
--- a/doc/server/admin/index.txt
+++ b/doc/server/admin/index.txt
@@ -23,7 +23,6 @@ functionality. Available modes are listed below.
minestruct
perf
pull
- query
snapshots
tidy
viz
diff --git a/doc/server/admin/query.txt b/doc/server/admin/query.txt
deleted file mode 100644
index 65851a43d..000000000
--- a/doc/server/admin/query.txt
+++ /dev/null
@@ -1,15 +0,0 @@
-.. -*- mode: rst -*-
-
-.. _server-admin-query:
-
-query
-=====
-
-Query clients.
-
-The default result format is suitable for consumption by `pdsh`_.
-This example queries the server for all clients in the *ubuntu* group::
-
- bcfg2-admin query g=ubuntu
-
-.. _pdsh: http://sourceforge.net/projects/pdsh/
diff --git a/doc/server/configuration.txt b/doc/server/configuration.txt
index 7892c2612..383367a46 100644
--- a/doc/server/configuration.txt
+++ b/doc/server/configuration.txt
@@ -149,7 +149,7 @@ Consequently, you simply have to run:
.. code-block:: bash
chown bcfg2:bcfg2 /var/run/bcfg2-server
- chmod 0644 /var/run/bcfg2-server
+ chmod 0755 /var/run/bcfg2-server
Additionally, the server daemon itself supports dropping privileges
natively in 1.3. Simply add the following lines to ``bcfg2.conf``::
@@ -218,3 +218,46 @@ To select which backend to use, set the ``backend`` option in the
* ``best`` (the default; currently the same as ``builtin``)
``best`` may change in future releases.
+
+Multiprocessing core configuration
+----------------------------------
+
+If you use the multiprocessing core, there are other bits you may wish
+to twiddle.
+
+By default, the server spawns as many children as the host has CPUs.
+(This is determined by ``multiprocessing.cpu_count()``.) To change
+this, set:
+
+.. code-block:: conf
+
+ [server]
+ children = 4
+
+The optimal number of children may vary depending on your workload.
+For instance, if you are using :ref:`native yum
+library support <native-yum-libraries>`, then a separate process is
+spawned for each client to resolve its package dependencies, so
+keeping the children at or below the CPU count is likely a good idea.
+If you're not using native yum library support, though, you may wish
+to oversubscribe the core slightly. It's recommended that you test
+various configurations and use what works best for your workload.
+
+Secondly, if ``tmpwatch`` is enabled, you must either disable it or
+exclude the pattern ``/tmp/pymp-\*``. For instance, on RHEL or CentOS
+you may have a line like the following in
+``/etc/cron.daily/tmpwatch``:
+
+.. code-block:: bash
+
+ /usr/sbin/tmpwatch -x /tmp/.X11-unix -x /tmp/.XIM-unix -x /tmp/.font-unix \
+ -x /tmp/.ICE-unix -x /tmp/.Test-unix 240 /tmp
+
+You would need to add ``-X /tmp/pymp-\*`` to it, like so:
+
+.. code-block:: bash
+
+ /usr/sbin/tmpwatch -x /tmp/.X11-unix -x /tmp/.XIM-unix -x /tmp/.font-unix \
+ -x /tmp/.ICE-unix -x /tmp/.Test-unix -X /tmp/pymp-\* 240 /tmp
+
+See https://bugzilla.redhat.com/show_bug.cgi?id=1058310 for more information.
diff --git a/doc/server/database.txt b/doc/server/database.txt
index 87d3e3afe..3c8970f68 100644
--- a/doc/server/database.txt
+++ b/doc/server/database.txt
@@ -34,9 +34,10 @@ of ``/etc/bcfg2.conf``.
+-------------+------------------------------------------------------------+-------------------------------+
| Option name | Description | Default |
+=============+============================================================+===============================+
-| engine | The full name of the Django database backend to use. See | "django.db.backends.sqlite3" |
+| engine | The name of the Django database backend to use. See | "sqlite3" |
| | https://docs.djangoproject.com/en/dev/ref/settings/#engine | |
-| | for available options | |
+| | for available options (note that django.db.backends is not | |
+| | included in the engine name) | |
+-------------+------------------------------------------------------------+-------------------------------+
| name | The name of the database | "/var/lib/bcfg2/bcfg2.sqlite" |
+-------------+------------------------------------------------------------+-------------------------------+
@@ -48,6 +49,12 @@ of ``/etc/bcfg2.conf``.
+-------------+------------------------------------------------------------+-------------------------------+
| port | The port to connect to | None |
+-------------+------------------------------------------------------------+-------------------------------+
+| options | Extra parameters to use when connecting to the database. | None |
+| | Available parameters vary depending on your database | |
+| | backend. The parameters are supplied as comma separated | |
+| | key=value pairs. | |
++-------------+------------------------------------------------------------+-------------------------------+
+
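+For example, a MySQL configuration might look like this (the values
+are illustrative only)::
+
+    [database]
+    engine = mysql
+    name = bcfg2
+    user = bcfg2
+    password = secret
+    host = db.example.com
+    options = connect_timeout=10,charset=utf8
+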
Database Schema Sync
====================
diff --git a/doc/server/plugins/connectors/awstags.txt b/doc/server/plugins/connectors/awstags.txt
new file mode 100644
index 000000000..b884ca065
--- /dev/null
+++ b/doc/server/plugins/connectors/awstags.txt
@@ -0,0 +1,124 @@
+.. -*- mode: rst -*-
+
+.. _server-plugins-connectors-awstags:
+
+=========
+ AWSTags
+=========
+
+The AWSTags plugin is a connector that retrieves tags from instances
+in EC2, and can optionally assign group membership based on patterns
+in the tags. See `Using Tags
+<http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Tags.html>`_
+for details on using tags in EC2.
+
+AWSTags queries EC2 for instances whose ``private-dns-name`` property
+matches the hostname of the client.
+
+Setup
+=====
+
+#. Add ``AWSTags`` to the ``plugins`` option in ``/etc/bcfg2.conf``
+#. Configure AWS credentials in ``/etc/bcfg2.conf`` (See
+ `Configuration`_ below for details.)
+#. Optionally, create ``AWSTags/config.xml`` (See `Assigning Groups`_
+ below for details.)
+#. Restart the Bcfg2 server.
+
+Using Tag Data
+==============
+
+AWSTags exposes the tag data to templates as a dict, available as
+``metadata.AWSTags``. For example, in a :ref:`Genshi template
+<server-plugins-generators-cfg-genshi>`, you could do:
+
+.. code-block:: genshitext
+
+ Known tags on ${metadata.hostname}:
+ {% for key, val in metadata.AWSTags.items() %}\
+ ${key} ${val}
+ {% end %}\
+
+This would produce something like::
+
+ Known tags on foo.example.com:
+ Name foo.example.com
+ some random tag the value
+
+Assigning Groups
+================
+
+AWSTags can assign groups based on the tag data. This functionality
+is configured in ``AWSTags/config.xml``.
+
+Example
+-------
+
+.. code-block:: xml
+
+ <AWSTags>
+ <Tag name="^foo$">
+ <Group>foo</Group>
+ </Tag>
+ <Tag name="^bar$" value="^bar$">
+ <Group>bar</Group>
+ </Tag>
+ <Tag name="^bcfg2 group$" value="(.*)">
+ <Group>$1</Group>
+ </Tag>
+ </AWSTags>
+
+In this example, any machine with a tag named ``foo`` would be added
+to the ``foo`` group. Any machine with a tag named ``bar`` whose
+value was also ``bar`` would be added to the ``bar`` group. Finally,
+any machine with a tag named ``bcfg2 group`` would be added to the
+group named in the value of that tag.
+
+Note that both the ``name`` and ``value`` attributes are *always*
+regular expressions.
+
+If a ``<Tag/>`` element has only a ``name`` attribute, then it only
+checks for existence of a matching tag. If it has both ``name`` and
+``value``, then it checks for a matching tag with a matching value.
+
+You can use backreferences (``$1``, ``$2``, etc.) in the group names.
+If only ``name`` is specified, then the backreferences will refer to
+groups in the ``name`` regex. If ``name`` and ``value`` are both
+specified, then backreferences will refer to groups in the ``value``
+regex. If you specify both ``name`` and ``value``, it is not possible
+to refer to groups in the ``name`` regex.
+
+Schema Reference
+----------------
+
+.. xml:schema:: awstags.xsd
+
+Configuration
+=============
+
+AWSTags recognizes several options in ``/etc/bcfg2.conf``; at a
+minimum, you must configure an AWS access key ID and secret key. All
+of the following options are in the ``[awstags]`` section:
+
++-----------------------+-----------------------------------------------------+
+| Option | Description |
++=======================+=====================================================+
+| ``access_key_id`` | The AWS access key ID |
++-----------------------+-----------------------------------------------------+
+| ``secret_access_key`` | The AWS secret access key |
++-----------------------+-----------------------------------------------------+
+| ``cache`` | Whether or not to cache tag lookups. See `Caching`_ |
+| | for details. Default is to cache. |
++-----------------------+-----------------------------------------------------+
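+
+For example (the credentials shown are placeholders)::
+
+    [awstags]
+    access_key_id = AKIAIOSFODNN7EXAMPLE
+    secret_access_key = wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY
+    cache = true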
+
+Caching
+=======
+
+Since the AWS API isn't always very quick to respond, AWSTags caches
+its results by default. The cache is fairly short-lived: the cache
+for each host is expired when it starts a client run, so it will start
+the run with fresh data.
+
+If you frequently update tags on your instances, you may wish to
+disable caching. That's probably a bad idea, and would tend to
+suggest that updating tags frequently is perhaps the Wrong Thing.
diff --git a/doc/server/plugins/connectors/properties.txt b/doc/server/plugins/connectors/properties.txt
index da511736d..47e82fdbf 100644
--- a/doc/server/plugins/connectors/properties.txt
+++ b/doc/server/plugins/connectors/properties.txt
@@ -181,6 +181,8 @@ XML tag should be ``<Properties>``.
JSON Property Files
-------------------
+.. versionadded:: 1.3.0
+
The data in a JSON property file can be accessed with the ``json``
attribute, which is the loaded JSON data. The JSON properties
interface does not provide any additional functionality beyond the
@@ -189,6 +191,8 @@ interface does not provide any additional functionality beyond the
YAML Property Files
-------------------
+.. versionadded:: 1.3.0
+
The data in a YAML property file can be accessed with the ``yaml``
attribute, which is the loaded YAML data. Only a single YAML document
may be included in a file.
@@ -229,10 +233,10 @@ simply::
%}
You can also enable automatch for individual Property files by setting
-the attribute ``automatch="true"`` on the top-level ``<Property>``
+the attribute ``automatch="true"`` on the top-level ``<Properties>``
tag. Conversely, if automatch is enabled by default in
``bcfg2.conf``, you can disable it for an individual Property file by
-setting ``automatch="false"`` on the top-level ``<Property>`` tag.
+setting ``automatch="false"`` on the top-level ``<Properties>`` tag.
If you want to see what ``XMLMatch()``/automatch would produce for a
given client on a given Properties file, you can use :ref:`bcfg2-info
diff --git a/doc/server/plugins/generators/cfg.txt b/doc/server/plugins/generators/cfg.txt
index f31923866..0f0601105 100644
--- a/doc/server/plugins/generators/cfg.txt
+++ b/doc/server/plugins/generators/cfg.txt
@@ -541,7 +541,8 @@ Example
</Group>
<Allow from="/root/.ssh/id_rsa.pub" host="foo.example.com"/>
<Allow from="/home/foo_user/.ssh/id_rsa.pub">
- <Params command="/home/foo_user/.ssh/ssh_command_filter"/>
+ <Option name="command" value="/home/foo_user/.ssh/ssh_command_filter"/>
+ <Option name="no-X11-forwarding"/>
</Allow>
<Allow>
ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDw/rgKQeARRAHK5bQQhAAe1b+gzdtqBXWrZIQ6cIaLgxqj76TwZ3DY4A6aW9RgC4zzd0p4a9MfsScUIB4+UeZsx9GopUj4U6H8Vz7S3pXxrr4E9logVLuSfOLFbI/wMWNRuOANqquLYQ+JYWKeP4kagkVp0aAWp7mH5IOI0rp0A6qE2you4ep9N/nKvHDrtypwhYBWprsgTUXXMHnAWGmyuHGYWxNYBV9AARPdAvZfb8ggtuwibcOULlyK4DdVNbDTAN1/BDBE1ve6WZDcrc386KhqUGj/yoRyPjNZ46uZiOjRr3cdY6yUZoCwzzxvm5vle6mEbLjHgjGEMQMArzM9 vendor@example.com
@@ -596,6 +597,11 @@ Deltas
cat file functionality. ``bcfg2-lint`` checks for deltas and
warns about them.
+.. warning::
+
+ In Bcfg2 1.3, deltas **do not** work with `SSH key or
+ authorized_keys generation <SSH Keys>`_.
+
Bcfg2 has finer grained control over how to deliver configuration
files to a host. Let's say we have a Group named file-server. Members
of this group need the exact same ``/etc/motd`` as all other hosts except
@@ -632,23 +638,35 @@ server and we have the following configuration files::
motd.G01_web-server
motd.G01_mail-server.cat
motd.G02_file-server.cat
+ motd.H_bar.example.com
motd.H_foo.example.com.cat
-If our machine **isn't** *foo.example.com* then here's what would happen:
-
-Bcfg2 would choose ``motd.G01_web-server`` as the base file. It is
-the most specific base file for this host. Bcfg2 would apply the
-``motd.G01_mail-server.cat`` delta to the ``motd.G01_web-server``
-base file. It is the least specific delta. Bcfg2 would then apply the
-``motd.G02_file-server.cat`` delta to the result of the delta before
-it. If our machine **is** *foo.example.com* then here's what would happen:
-
-Bcfg2 would choose ``motd.G01_web-server`` as the base file. It
-is the most specific base file for this host. Bcfg2 would apply the
-``motd.H_foo.example.com.cat`` delta to the ``motd.G01_web-server`` base
-file. The reason the other deltas aren't applied to *foo.example.com*
-is because a **.H_** delta is more specific than a **.G##_** delta. Bcfg2
-applies all the deltas at the most specific level.
+If our machine isn't *foo.example.com* or *bar.example.com*, but
+is a web server, then Bcfg2 would choose ``motd.G01_web-server`` as
+the base file. It is the most specific base file for this host. Bcfg2
+would apply the ``motd.G01_mail-server.cat`` delta to the
+``motd.G01_web-server`` base file. It is the least specific
+delta. Bcfg2 would then apply the ``motd.G02_file-server.cat`` delta
+to the result of the delta before it.
+
+If our machine is *foo.example.com* and a web server, then Bcfg2 would
+choose ``motd.G01_web-server`` as the base file. It is the most
+specific base file for this host. Bcfg2 would apply the
+``motd.H_foo.example.com.cat`` delta to the ``motd.G01_web-server``
+base file. The reason the other deltas aren't applied to
+*foo.example.com* is because a **.H_** delta is more specific than a
+**.G##_** delta. Bcfg2 applies all the deltas at the most specific
+level.
+
+If our machine is *bar.example.com*, then Bcfg2 would choose
+``motd.H_foo.example.com`` as the base file because it is the most
+specific base file for this host. Regardless of the groups
+*bar.example.com* is a member of, **no cat files** would be applied,
+because only cat files as specific or more specific than the base file
+are applied. (In other words, if a group-specific base file is
+selected, only group- or host-specific cat files can be applied; if a
+host-specific base file is selected, only host-specific cat files can
+be applied.)
.. _server-plugins-generators-cfg-validation:
diff --git a/doc/server/plugins/generators/nagiosgen.txt b/doc/server/plugins/generators/nagiosgen.txt
index ee99b2dc1..0ae922fa3 100644
--- a/doc/server/plugins/generators/nagiosgen.txt
+++ b/doc/server/plugins/generators/nagiosgen.txt
@@ -8,7 +8,7 @@ NagiosGen
This page describes the installation and use of the `NagiosGen`_ plugin.
-.. _NagiosGen: http://trac.mcs.anl.gov/projects/bcfg2/browser/src/lib/Server/Plugins/NagiosGen.py
+.. _NagiosGen: https://github.com/Bcfg2/bcfg2/blob/maint/src/lib/Bcfg2/Server/Plugins/NagiosGen.py
Update ``/etc/bcfg2.conf``, adding NagiosGen to plugins::
diff --git a/doc/server/plugins/generators/packages.txt b/doc/server/plugins/generators/packages.txt
index cdc4f7282..31f3ccf22 100644
--- a/doc/server/plugins/generators/packages.txt
+++ b/doc/server/plugins/generators/packages.txt
@@ -490,17 +490,18 @@ Benefits to this include:
* Much lower memory usage by the ``bcfg2-server`` process.
* Much faster ``Packages.Refresh`` behavior.
* More accurate dependency resolution.
+* Better use of multiple processors/cores.
Drawbacks include:
-* More disk I/O. In some cases, you may have to raise the open file
+* Resolution of package dependencies is slower and more
+ resource-intensive. At times it can be much slower, particularly
+ after running ``Packages.Refresh``.
+* More disk I/O. This can be alleviated by putting
+ ``/var/lib/bcfg2/Packages/cache`` on tmpfs, but that offsets the
+ lower memory usage. In some cases, you may have to raise the open file
limit for the user who runs your Bcfg2 server process, particularly
if you have a lot of repositories.
-* Resolution of package dependencies is slower in some cases,
- particularly after running ``Packages.Refresh``.
-* If you have a very large number of clients using a very small number
- of repositories, using native yum libraries may actually increase
- memory usage.
Configuring the Yum Helper
--------------------------
diff --git a/doc/server/plugins/generators/rules.txt b/doc/server/plugins/generators/rules.txt
index 2493be53f..a21dd217f 100644
--- a/doc/server/plugins/generators/rules.txt
+++ b/doc/server/plugins/generators/rules.txt
@@ -1,4 +1,5 @@
.. -*- mode: rst -*-
+.. vim: ft=rst
.. _server-plugins-generators-rules:
@@ -59,7 +60,7 @@ Rules Tag
.. xml:element:: Rules
:linktotype:
:noautodep:
- :inlinetypes: PostInstall,RContainerType
+ :inlinetypes: RContainerType
Package Tag
-----------
@@ -135,6 +136,20 @@ Attributes common to all Path tags:
:onlyattrs: name,type
+augeas
+^^^^^^
+
+Run `Augeas <http://www.augeas.net>`_ commands. See
+:ref:`client-tools-augeas` for more details.
+
+.. xml:type:: PathType
+ :nochildren:
+ :noattributegroups:
+ :nodoc:
+ :notext:
+ :onlyattrs: owner,group,mode,secontext,lens
+ :requiredattrs: owner,group,mode
+
device
^^^^^^
@@ -376,6 +391,8 @@ SEModule Tag
See also :ref:`server-plugins-generators-semodules`.
+.. _server-plugins-generators-rules-posixuser-tag:
+
POSIXUser Tag
-------------
@@ -411,6 +428,8 @@ Defaults plugin <server-plugins-structures-defaults>`.
See :ref:`client-tools-posixusers` for more information on managing
users and groups.
+.. _server-plugins-generators-rules-posixgroup-tag:
+
POSIXGroup Tag
--------------
diff --git a/doc/server/plugins/generators/tcheetah.txt b/doc/server/plugins/generators/tcheetah.txt
index ab147ce56..c79a8ced5 100644
--- a/doc/server/plugins/generators/tcheetah.txt
+++ b/doc/server/plugins/generators/tcheetah.txt
@@ -99,7 +99,7 @@ Simple Example
==============
TCheetah works similar to Cfg in that you define all literal information
-about a particular file in a directory rooted at TGenshi/path_to_file.
+about a particular file in a directory rooted at TCheetah/path_to_file.
The actual file contents are placed in a file named `template` in that
directory. Below is a simple example a file ``/foo``.
diff --git a/doc/server/plugins/index.txt b/doc/server/plugins/index.txt
index 4f2b484ac..f3d6daa73 100644
--- a/doc/server/plugins/index.txt
+++ b/doc/server/plugins/index.txt
@@ -31,7 +31,7 @@ Default Plugins
The `Bcfg2 repository`_ contains the all plugins currently distributed
with Bcfg2.
-.. _Bcfg2 repository: http://trac.mcs.anl.gov/projects/bcfg2/browser/src/lib/Server/Plugins
+.. _Bcfg2 repository: https://github.com/Bcfg2/bcfg2/tree/maint/src/lib/Bcfg2/Server/Plugins
Metadata (Grouping)
-------------------
diff --git a/doc/server/plugins/probes/index.txt b/doc/server/plugins/probes/index.txt
index 306a752b6..2e23c31d5 100644
--- a/doc/server/plugins/probes/index.txt
+++ b/doc/server/plugins/probes/index.txt
@@ -13,6 +13,9 @@ the system disk, you would want to know this information to correctly
generate an `/etc/auto.master` autofs config file for each type. Here
we will look at how to do this.
+Probes also allow dynamic group assignment for clients; see
+:ref:`server-plugins-probes-dynamic-groups`.
+
First, create a ``Probes`` directory in our toplevel repository
location::
@@ -119,6 +122,45 @@ is to add the ``/etc/auto.master`` to a Bundle:
<Path name='/etc/auto.master'/>
+.. _server-plugins-probes-dynamic-groups:
+
+Dynamic Group Assignment
+========================
+
+Probe output lines beginning with ``group:`` are used to dynamically
+assign hosts to groups. These dynamic groups need not already
+exist in ``Metadata/groups.xml``. If a dynamic group is defined in
+``Metadata/groups.xml``, clients that include this group will also get
+all included groups and bundles.
+
+Consider the following output of a probe::
+
+ group:debian-wheezy
+ group:amd64
+
+This assigns the client to the groups debian-wheezy and amd64.
+
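+Such output can come from any executable probe script placed in the
+``Probes`` directory; for instance, a minimal sketch (the specific
+commands are only illustrative)::
+
+    #!/bin/sh
+    # Probes/groups -- emit dynamic group assignments
+    echo "group:$(dpkg --print-architecture)"
+    echo "group:debian-$(lsb_release -sc)"
+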
+To prevent clients from manipulating the probe output and choosing
+unexpected groups (and receiving potentially sensitive files), you
+can use the ``allowed_groups`` option in the ``[probes]`` section of
+``bcfg2.conf`` on the server. This whitespace-separated list of
+anchored regular expressions (must match the complete group name)
+controls dynamic group assignments. Only matching groups are
+allowed. The default allows all groups.
+
+.. versionadded:: 1.3.4
+
+Example:
+
+.. code-block:: ini
+
+ [probes]
+ allowed_groups = debian-(squeeze|wheezy|sid) i386
+
+This allows the groups `debian-squeeze`, `debian-wheezy`, `debian-sid`
+and `i386`. With the probe output from above, this setting would
+disallow the group `amd64`.
+
Handling Probe Output
=====================
diff --git a/doc/server/plugins/structures/bundler/kernel.txt b/doc/server/plugins/structures/bundler/kernel.txt
index 2e3d84e93..d83679683 100644
--- a/doc/server/plugins/structures/bundler/kernel.txt
+++ b/doc/server/plugins/structures/bundler/kernel.txt
@@ -1,4 +1,5 @@
.. -*- mode: rst -*-
+.. vim: ft=rst
.. _server-plugins-structures-bundler-kernel:
@@ -21,7 +22,7 @@ some of which might be better than this one. Feel free to hack as needed.
.. code-block:: xml
- <Bundle name='kernel' version='2.0'>
+ <Bundle name='kernel'>
<Group name='sles8'>
<!-- =================== ia32 ==================== -->
<Group name='ia32'>
@@ -30,7 +31,7 @@ some of which might be better than this one. Feel free to hack as needed.
<Path name='/boot/initrd'/>
<Path name='/boot/vmlinuz.old'/>
<Path name='/boot/initrd.old'/>
- <PostInstall name='/sbin/lilo'/>
+ <BoundAction name='lilo' command='/sbin/lilo' timing='post' when='modified'/>
<!-- Current kernel -->
<Package name='linux-2.4.21-314.tg1'/>
<Package name='linux-2.4.21-314.tg1-source'/>
diff --git a/doc/unsorted/bcfg2.conf-options.txt b/doc/unsorted/bcfg2.conf-options.txt
deleted file mode 100644
index 57e26cbd2..000000000
--- a/doc/unsorted/bcfg2.conf-options.txt
+++ /dev/null
@@ -1,19 +0,0 @@
-.. -*- mode: rst -*-
-
-.. _unsorted-bcfg2.conf-options:
-
-==========
-bcfg2.conf
-==========
-
-This page documents the various options available in bcfg2.conf. The
-various sections correspond to the sections in the file itself.
-
-components
-==========
-
-logging
--------
-
-Specify an alternate path for the lockfile used by the bcfg2 client.
-Default value is ``/var/lock/bcfg2.run``
diff --git a/doc/unsorted/dynamic_groups.txt b/doc/unsorted/dynamic_groups.txt
deleted file mode 100644
index 11535dc8b..000000000
--- a/doc/unsorted/dynamic_groups.txt
+++ /dev/null
@@ -1,27 +0,0 @@
-.. -*- mode: rst -*-
-
-.. _unsorted-dynamic_groups:
-
-==============
-Dynamic Groups
-==============
-
-Bcfg2 supports the use of dynamic groups. These groups are not included
-in a client's profile group, but instead are derived from the results
-of probes executed on the client. These dynamic groups need not already
-exist in ``Metadata/groups.xml``. If a dynamic group is defined in
-``Metadata/groups.xml``, clients that include this group will also get
-all included groups and bundles.
-
-Setting up dynamic groups
-=========================
-
-In order to define a dynamic group, setup a probe that outputs the text
-based on system properties::
-
- group:groupname
-
-This output is processed by the Bcfg2 server, and results in dynamic
-group membership in groupname for the client. See the :ref:`Probes
-<server-plugins-probes-index>` page for a more thorough description
-of probes.
diff --git a/doc/unsorted/howtos.txt b/doc/unsorted/howtos.txt
index 0c5b482d9..cef64a394 100644
--- a/doc/unsorted/howtos.txt
+++ b/doc/unsorted/howtos.txt
@@ -14,5 +14,5 @@ Here are several howtos that describe different aspects of Bcfg2 deployment
* :ref:`appendix-guides-gentoo` - Issues specific to running Bcfg2 on Gentoo
* :ref:`server-plugins-probes-index` - How to use Probes to gather information from a client machine.
* :ref:`client-tools-actions` - How to use Actions
-* :ref:`unsorted-dynamic_groups` - Using dynamic groups
+* :ref:`server-plugins-probes-dynamic-groups` - Using dynamic groups
* :ref:`client-modes-paranoid` - How to run an update in paranoid mode
diff --git a/doc/unsorted/python-ssl_1.14-1_amd64.deb b/doc/unsorted/python-ssl_1.14-1_amd64.deb
deleted file mode 100644
index e6c8ad137..000000000
--- a/doc/unsorted/python-ssl_1.14-1_amd64.deb
+++ /dev/null
Binary files differ
diff --git a/doc/unsorted/python-stdeb_0.3-1_all.deb b/doc/unsorted/python-stdeb_0.3-1_all.deb
deleted file mode 100644
index 5cee96e3c..000000000
--- a/doc/unsorted/python-stdeb_0.3-1_all.deb
+++ /dev/null
Binary files differ
diff --git a/doc/unsorted/ssl.txt b/doc/unsorted/ssl.txt
deleted file mode 100644
index 91b62ca59..000000000
--- a/doc/unsorted/ssl.txt
+++ /dev/null
@@ -1,68 +0,0 @@
-.. -*- mode: rst -*-
-
-.. _unsorted-ssl:
-
-==========
-Python SSL
-==========
-
-The ssl module can be found `here <http://pypi.python.org/pypi/ssl>`_.
-
-With this change, SSL certificate based client authentication is
-supported. In order to use this, based CA-type capabilities are
-required. A central CA needs to be created, with each server and all
-clients getting a signed cert. See [wiki:Authentication] for details.
-
-Setting up keys is accomplished with three settings, each in the
-"`[communication]`" section of ``bcfg2.conf``::
-
- key = /path/to/ssl private key
- certificate = /path/to/signed cert for that key
- ca = /path/to/cacert.pem
-
-
-Python SSL Backport Packaging
-=============================
-
-Both the Bcfg2 server and client are able to use the in-tree ssl module
-included with python 2.6. The client is also able to still use M2Crypto. A
-python ssl backport exists for 2.3, 2.4, and 2.5. With this, M2Crypto
-is not needed, and tlslite is no longer included with Bcfg2 sources. See
-[wiki:Authentication] for details.
-
-To build a package of the ssl backport for .deb based distributions
-that don't ship with python 2.6, you can follow these instructions,
-which use `stdeb`_. Alternatively if you happen to have .deb packaging
-skills, it would be great to get policy-complaint .debs into the major
-deb-based distributions.
-
-.. _stdeb: http://github.com/astraw/stdeb/tree/master
-
-The following commands were used to generate :download:`this
-<python-ssl_1.14-1_amd64.deb>` debian package The ``easy_install`` command
-can be found in the `python-setuptools` package.::
-
- sudo aptitude install python-all-dev fakeroot
- sudo easy_install stdeb
- wget http://pypi.python.org/packages/source/s/ssl/ssl-1.14.tar.gz#md5=4e08aae0cd2c7388d1b4bbb7f374b14a
- tar xvfz ssl-1.14.tar.gz
- cd ssl-1.14
- stdeb_run_setup
- cd deb_dist/ssl-1.14
- dpkg-buildpackage -rfakeroot -uc -us
- sudo dpkg -i ../python-ssl_1.14-1_amd64.deb
-
-.. note:: Version numbers for the SSL module have changed.
-
-For complete Bcfg2 goodness, you'll also want to package stdeb using stdeb.
-The completed debian package can be grabbed from :download:`here
-<python-stdeb_0.3-1_all.deb>`, which was generated using the following::
-
- sudo aptitude install apt-file
- wget http://pypi.python.org/packages/source/s/stdeb/stdeb-0.3.tar.gz#md5=e692f745597dcdd9343ce133e3b910d0
- tar xvfz stdeb-0.3.tar.gz
- cd stdeb-0.3
- stdeb_run_setup
- cd deb_dist/stdeb-0.3
- dpkg-buildpackage -rfakeroot -uc -us
- sudo dpkg -i ../python-stdeb_0.3-1_all.deb
diff --git a/man/bcfg2-report-collector.8 b/man/bcfg2-report-collector.8
new file mode 100644
index 000000000..195b15ec8
--- /dev/null
+++ b/man/bcfg2-report-collector.8
@@ -0,0 +1,79 @@
+.TH "BCFG2-REPORT-COLLECTOR" "8" "July 27, 2013" "1.3" "Bcfg2"
+.SH NAME
+bcfg2-report-collector \- Reports collection daemon
+.
+.nr rst2man-indent-level 0
+.
+.de1 rstReportMargin
+\\$1 \\n[an-margin]
+level \\n[rst2man-indent-level]
+level margin: \\n[rst2man-indent\\n[rst2man-indent-level]]
+-
+\\n[rst2man-indent0]
+\\n[rst2man-indent1]
+\\n[rst2man-indent2]
+..
+.de1 INDENT
+.\" .rstReportMargin pre:
+. RS \\$1
+. nr rst2man-indent\\n[rst2man-indent-level] \\n[an-margin]
+. nr rst2man-indent-level +1
+.\" .rstReportMargin post:
+..
+.de UNINDENT
+. RE
+.\" indent \\n[an-margin]
+.\" old: \\n[rst2man-indent\\n[rst2man-indent-level]]
+.nr rst2man-indent-level -1
+.\" new: \\n[rst2man-indent\\n[rst2man-indent-level]]
+.in \\n[rst2man-indent\\n[rst2man-indent-level]]u
+..
+.\" Man page generated from reStructuredText.
+.
+.SH SYNOPSIS
+.sp
+\fBbcfg2\-report\-collector\fP [\fIoptions\fP]
+.SH DESCRIPTION
+.sp
+\fBbcfg2\-report\-collector\fP runs a daemon to collect logs from the
+LocalFilesystem \fIBcfg2 Reports\fP transport object
+and add them to the Reporting storage backend.
+.SH OPTIONS
+.INDENT 0.0
+.TP
+.BI \-C \ configfile
+Specify alternate bcfg2.conf location.
+.TP
+.BI \-D \ pidfile
+Daemonize, placing the program pid in \fIpidfile\fP.
+.TP
+.BI \-E \ encoding
+Specify the encoding of config files.
+.TP
+.BI \-Q \ path
+Specify the path to the server repository.
+.TP
+.BI \-W \ configfile
+Specify the path to the web interface
+configuration file.
+.TP
+.B \-d
+Enable debugging output.
+.TP
+.B \-h
+Print usage information.
+.TP
+.BI \-o \ path
+Set the path of the log file.
+.TP
+.B \-v
+Run in verbose mode.
+.TP
+.B \-\-version
+Print the version and exit.
+.UNINDENT
+.SH SEE ALSO
+.sp
+\fIbcfg2\-server(8)\fP, \fIbcfg2\-reports(8)\fP
+.\" Generated by docutils manpage writer.
+.
diff --git a/man/bcfg2-server.8 b/man/bcfg2-server.8
index 27f6a7b01..dcec03252 100644
--- a/man/bcfg2-server.8
+++ b/man/bcfg2-server.8
@@ -1,4 +1,4 @@
-.TH "BCFG2-SERVER" "8" "March 18, 2013" "1.3" "Bcfg2"
+.TH "BCFG2-SERVER" "8" "July 27, 2013" "1.3" "Bcfg2"
.SH NAME
bcfg2-server \- Server for client configuration specifications
.
@@ -46,8 +46,7 @@ configurations to clients based on the data in its repository.
Specify alternate bcfg2.conf location.
.TP
.BI \-D \ pidfile
-Daemonize, placing the program pid in the specified
-pidfile.
+Daemonize, placing the program pid in \fIpidfile\fP.
.TP
.BI \-E \ encoding
Specify the encoding of config files.
diff --git a/man/bcfg2.conf.5 b/man/bcfg2.conf.5
index b0db91a5b..5e64caae9 100644
--- a/man/bcfg2.conf.5
+++ b/man/bcfg2.conf.5
@@ -1,4 +1,4 @@
-.TH "BCFG2.CONF" "5" "March 18, 2013" "1.3" "Bcfg2"
+.TH "BCFG2.CONF" "5" "July 19, 2013" "1.3" "Bcfg2"
.SH NAME
bcfg2.conf \- Configuration parameters for Bcfg2
.
@@ -76,6 +76,13 @@ pseudo
.UNINDENT
.UNINDENT
.TP
+.B fam_blocking
+.
+Whether the server should block at startup until the file monitor
+backend has processed all events. This can cause a slower startup,
+but ensures that all files are recognized before the first client
+is handled.
+.TP
.B ignore_files
A comma\-separated list of globs that should be ignored by the file
monitor. Default values are:
@@ -771,6 +778,11 @@ Host for database connections. Not used for sqlite3.
.TP
.B port
Port for database connections. Not used for sqlite3.
+.TP
+.B options
+Various options for the database connection. The value is
+given as comma\-separated key=value pairs. The available keys
+depend on the database engine.
.UNINDENT
.UNINDENT
.UNINDENT
diff --git a/misc/bcfg2-selinux.spec b/misc/bcfg2-selinux.spec
index 9c5262dfd..d694783b5 100644
--- a/misc/bcfg2-selinux.spec
+++ b/misc/bcfg2-selinux.spec
@@ -7,9 +7,17 @@
%global selinux_types %(%{__awk} '/^#[[:space:]]*SELINUXTYPE=/,/^[^#]/ { if ($3 == "-") printf "%s ", $2 }' /etc/selinux/config 2>/dev/null)
%global selinux_variants %([ -z "%{selinux_types}" ] && echo mls strict targeted || echo %{selinux_types})
+# For -pre or -rc releases, remove the initial <hash><percent>
+# characters from the appropriate line below.
+#
+# Don't forget to change the Release: tag below to something like 0.1
+#%%global _rc 1
+#%%global _pre 2
+%global _pre_rc %{?_pre:.pre%{_pre}}%{?_rc:.rc%{_rc}}
+
Name: bcfg2-selinux
-Version: 1.3.1
-Release: 1
+Version: 1.3.3
+Release: 1%{?_pre_rc}%{?dist}
Summary: Bcfg2 Client and Server SELinux policy
%if 0%{?suse_version}
@@ -65,7 +73,7 @@ deployment strategies.
This package includes the Bcfg2 server and client SELinux policy.
%prep
-%setup -q -n %{name}-%{version}
+%setup -q -n %{name}-%{version}%{?_pre_rc}
%build
cd redhat/selinux
@@ -120,6 +128,12 @@ if [ $1 -eq 0 ] ; then
fi
%changelog
+* Thu Nov 07 2013 Sol Jerome <sol.jerome@gmail.com> 1.3.3-1
+- New upstream release
+
+* Mon Jul 01 2013 Sol Jerome <sol.jerome@gmail.com> 1.3.2-1
+- New upstream release
+
* Thu Mar 21 2013 Sol Jerome <sol.jerome@gmail.com> 1.3.1-1
- New upstream release
diff --git a/misc/bcfg2.spec b/misc/bcfg2.spec
index f7289f1dd..946b1d4a9 100644
--- a/misc/bcfg2.spec
+++ b/misc/bcfg2.spec
@@ -1,13 +1,37 @@
-%global __python python
-%{!?py_ver: %global py_ver %(%{__python} -c 'import sys;print(sys.version[0:3])')}
-%global pythonversion %{py_ver}
-%{!?python_sitelib: %global python_sitelib %(%{__python} -c "from distutils.sysconfig import get_python_lib; print get_python_lib()")}
+# Fedora 13+ and EL6 contain these macros already; only needed for EL5
+%if 0%{?rhel} && 0%{?rhel} <= 5
+%global python_sitelib %(%{__python} -c "from distutils.sysconfig import get_python_lib; print get_python_lib()")
+%define python_version %(%{__python} -c 'import sys;print(sys.version[0:3])')
+%endif
+
+# openSUSE macro translation
+%if 0%{?suse_version}
+%global python_version %{py_ver}
%{!?_initrddir: %global _initrddir %{_sysconfdir}/rc.d/init.d}
+# openSUSE < 11.2
+%if %{suse_version} < 1120
+%global python_sitelib %(%{__python} -c "from distutils.sysconfig import get_python_lib; print get_python_lib()")
+%endif
+%endif
+
+# For -pre or -rc releases, remove the initial <hash><percent>
+# characters from the appropriate line below.
+#
+# Don't forget to change the Release: tag below to something like 0.1
+#%%global _rc 1
+#%%global _pre 2
+%global _pre_rc %{?_pre:.pre%{_pre}}%{?_rc:.rc%{_rc}}
+
+# cherrypy 3.3 actually doesn't exist yet, but 3.2 has bugs that
+# prevent it from working:
+# https://bitbucket.org/cherrypy/cherrypy/issue/1154/assertionerror-in-recv-when-ssl-is-enabled
+%global build_cherry_py 0
+
Name: bcfg2
-Version: 1.3.1
-Release: 1
-Summary: Configuration management system
+Version: 1.3.3
+Release: 4%{?_pre_rc}%{?dist}
+Summary: A configuration management system
%if 0%{?suse_version}
# http://en.opensuse.org/openSUSE:Package_group_guidelines
@@ -17,12 +41,72 @@ Group: Applications/System
%endif
License: BSD
URL: http://bcfg2.org
-Source0: ftp://ftp.mcs.anl.gov/pub/bcfg/%{name}-%{version}.tar.gz
+Source0: ftp://ftp.mcs.anl.gov/pub/bcfg/%{name}-%{version}%{?_pre_rc}.tar.gz
+# Used in %%check
+Source1: http://www.w3.org/2001/XMLSchema.xsd
+%if %{?rhel}%{!?rhel:10} <= 5 || 0%{?suse_version}
+# EL5 and OpenSUSE require the BuildRoot tag
BuildRoot: %{_tmppath}/%{name}-%{version}-%{release}-root-%(%{__id_u} -n)
+%endif
BuildArch: noarch
+BuildRequires: python
BuildRequires: python-devel
BuildRequires: python-lxml
+BuildRequires: python-boto
+%if 0%{?suse_version}
+BuildRequires: python-M2Crypto
+BuildRequires: python-Genshi
+BuildRequires: python-gamin
+BuildRequires: python-pyinotify
+BuildRequires: python-python-daemon
+%if %{build_cherry_py}
+BuildRequires: python-CherryPy >= 3
+%endif
+%else # ! suse_version
+BuildRequires: python-daemon
+BuildRequires: python-inotify
+%if "%{_vendor}" == "redhat" && 0%{!?rhel:1} && 0%{!?fedora:1}
+# by default, el5 doesn't have the %%rhel macro, provided by this
+# package; EPEL build servers install buildsys-macros by default, but
+# explicitly requiring this may help builds in other environments
+BuildRequires: buildsys-macros
+%else # vendor != redhat || rhel defined
+%if 0%{?rhel} && 0%{?rhel} < 6
+BuildRequires: python-ssl
+%else # rhel > 5
+# EL5 lacks python-mock, so test suite is disabled
+BuildRequires: python-sqlalchemy
+BuildRequires: python-nose
+BuildRequires: mock
+BuildRequires: m2crypto
+# EPEL uses the properly-named python-django starting with EPEL7
+%if 0%{?rhel} && 0%{?rhel} > 6
+BuildRequires: python-django
+%else
+BuildRequires: Django
+%endif
+BuildRequires: python-genshi
+BuildRequires: python-cheetah
+BuildRequires: libselinux-python
+%if 0%{?rhel} != 7
+# FIXME: Not yet present in EPEL7; for %%check
+BuildRequires: pylibacl
+BuildRequires: python-pep8
+BuildRequires: pylint
+%endif
+%if %{build_cherry_py}
+BuildRequires: python-cherrypy >= 3
+%endif
+BuildRequires: python-mock
+%endif # rhel > 5
+%endif # vendor != redhat || rhel defined
+%endif # ! suse_version
+%if 0%{?fedora} && 0%{?fedora} >= 16 || 0%{?rhel} && 0%{?rhel} >= 7
+# Pick up _unitdir macro
+BuildRequires: systemd
+%endif
+
%if 0%{?mandriva_version}
# mandriva seems to behave differently than other distros and needs
@@ -40,24 +124,39 @@ BuildRequires: libsane1
# a different package name in EPEL.
%if "%{_vendor}" == "redhat" && 0%{?rhel} <= 6 && 0%{?fedora} == 0
BuildRequires: python-sphinx10
-# the python-sphinx10 package doesn't set sys.path correctly, so we
-# have to do it for them
+# python-sphinx10 doesn't set sys.path correctly; do it for them
%global pythonpath %(find %{python_sitelib} -name Sphinx*.egg)
%else
BuildRequires: python-sphinx >= 1.0
%endif
+BuildRequires: python-docutils
+
+%if 0%{?fedora} >= 16
+BuildRequires: systemd-units
+%endif
+
+%if 0%{?rhel} && 0%{?rhel} < 6
+Requires: python-ssl
+%endif
+Requires: libselinux-python
-Requires: python-lxml >= 0.9
-%if 0%{?rhel_version}
-# the debian init script needs redhat-lsb.
-# iff we switch to the redhat one, this might not be needed anymore.
-Requires: redhat-lsb
+%if 0%{?fedora} >= 16
+Requires(post): systemd-units
+Requires(preun): systemd-units
+Requires(postun): systemd-units
+%else
+Requires(post): /sbin/chkconfig
+Requires(preun): /sbin/chkconfig
+Requires(preun): /sbin/service
+Requires(postun): /sbin/service
%endif
+
%if "%{_vendor}" != "redhat"
# fedora and rhel (and possibly other distros) do not know this tag.
Recommends: cron
%endif
+
%description
Bcfg2 helps system administrators produce a consistent, reproducible,
and verifiable description of their environment, and offers
@@ -87,20 +186,14 @@ deployment strategies.
This package includes the Bcfg2 client software.
%package server
-Version: 1.3.1
Summary: Bcfg2 Server
%if 0%{?suse_version}
Group: System/Management
%else
-Group: System Tools
-%endif
-Requires: bcfg2 = %{version}
-%if "%{py_ver}" < "2.6"
-Requires: python-ssl
+Group: System Environment/Daemons
%endif
+Requires: bcfg2 = %{version}-%{release}
Requires: python-lxml >= 1.2.1
-%if "%{_vendor}" == "redhat"
-%endif
%if 0%{?suse_version}
Requires: python-pyinotify
Requires: python-python-daemon
@@ -110,8 +203,24 @@ Requires: python-daemon
%endif
Requires: /usr/sbin/sendmail
Requires: /usr/bin/openssl
+Requires: graphviz
Requires: python-nose
+%if "%{_vendor}" == "redhat"
+%if 0%{?fedora} >= 16
+Requires(post): systemd-units
+Requires(preun): systemd-units
+Requires(postun): systemd-units
+Requires(post): systemd-sysv
+%else
+Requires(post): /sbin/chkconfig
+Requires(preun): /sbin/chkconfig
+Requires(preun): /sbin/service
+Requires(postun): /sbin/service
+%endif
+%endif
+
+
%description server
Bcfg2 helps system administrators produce a consistent, reproducible,
and verifiable description of their environment, and offers
@@ -140,18 +249,18 @@ deployment strategies.
This package includes the Bcfg2 server software.
+%if %{build_cherry_py}
%package server-cherrypy
-Version: 1.3.1
Summary: Bcfg2 Server - CherryPy backend
%if 0%{?suse_version}
Group: System/Management
%else
-Group: System Tools
+Group: System Environment/Daemons
%endif
-Requires: bcfg2 = %{version}
-Requires: bcfg2-server = %{version}
+Requires: bcfg2 = %{version}-%{release}
+Requires: bcfg2-server = %{version}-%{release}
-# cherrypy 3.2.3 actually doesn't exist yet, but 3.2.2 has bugs that
+# cherrypy 3.3 actually doesn't exist yet, but 3.2 has bugs that
# prevent it from working:
# https://bitbucket.org/cherrypy/cherrypy/issue/1154/assertionerror-in-recv-when-ssl-is-enabled
Requires: python-cherrypy > 3.3
@@ -183,36 +292,75 @@ Bcfg2 can enable the construction of complex change management and
deployment strategies.
This package includes the Bcfg2 CherryPy server backend.
+%endif # build_cherry_py
+
+
+%package web
+Summary: Bcfg2 Web Reporting Interface
-%package doc
-Summary: Configuration management system documentation
%if 0%{?suse_version}
-Group: Documentation/HTML
+Group: System/Management
+Requires: python-django >= 1.2
+Requires: python-django-south >= 0.7
%else
-Group: Documentation
+Group: System Tools
+# EPEL uses the properly-named python-django starting with EPEL7
+%if 0%{?rhel} && 0%{?rhel} > 6
+Requires: python-django
+%else
+Requires: Django >= 1.2
+Requires: Django-south >= 0.7
%endif
-%if 0%{?suse_version}
-BuildRequires: python-M2Crypto
-BuildRequires: python-Genshi
-BuildRequires: python-gamin
-BuildRequires: python-pyinotify
-BuildRequires: python-python-daemon
-BuildRequires: python-CherryPy >= 3
+Requires: bcfg2-server
+%endif
+Requires: httpd
+%if "%{_vendor}" == "redhat"
+Requires: mod_wsgi
+%global apache_conf %{_sysconfdir}/httpd
%else
-BuildRequires: m2crypto
-BuildRequires: python-genshi
-BuildRequires: gamin-python
-BuildRequires: python-inotify
-BuildRequires: python-daemon
+Requires: apache2-mod_wsgi
+%global apache_conf %{_sysconfdir}/apache2
%endif
-%if "%{_vendor}" == "redhat" && 0%{?rhel} < 6 && 0%{?fedora} == 0
-BuildRequires: python-ssl
+
+%description web
+Bcfg2 helps system administrators produce a consistent, reproducible,
+and verifiable description of their environment, and offers
+visualization and reporting tools to aid in day-to-day administrative
+tasks. It is the fifth generation of configuration management tools
+developed in the Mathematics and Computer Science Division of Argonne
+National Laboratory.
+
+It is based on an operational model in which the specification can be
+used to validate and optionally change the state of clients, but in a
+feature unique to bcfg2 the client's response to the specification can
+also be used to assess the completeness of the specification. Using
+this feature, bcfg2 provides an objective measure of how good a job an
+administrator has done in specifying the configuration of client
+systems. Bcfg2 is therefore built to help administrators construct an
+accurate, comprehensive specification.
+
+Bcfg2 has been designed from the ground up to support gentle
+reconciliation between the specification and current client states. It
+is designed to gracefully cope with manual system modifications.
+
+Finally, due to the rapid pace of updates on modern networks, client
+systems are constantly changing; if required in your environment,
+Bcfg2 can enable the construction of complex change management and
+deployment strategies.
+
+This package includes the Bcfg2 reports web frontend.
+
+
+%package doc
+Summary: Documentation for Bcfg2
+%if 0%{?suse_version}
+Group: Documentation/HTML
%else
-BuildRequires: python-cherrypy >= 3
-BuildRequires: python-mock
+Group: Documentation
%endif
+
%description doc
Bcfg2 helps system administrators produce a consistent, reproducible,
and verifiable description of their environment, and offers
@@ -241,26 +389,13 @@ deployment strategies.
This package includes the Bcfg2 documentation.
-%package web
-Version: 1.3.1
-Summary: Bcfg2 Web Reporting Interface
-%if 0%{?suse_version}
-Group: System/Management
-Requires: httpd,python-django >= 1.2,python-django-south >= 0.7
-%else
-Group: System Tools
-Requires: httpd,Django >= 1.2,Django-south >= 0.7
-%endif
-Requires: bcfg2-server
-%if "%{_vendor}" == "redhat"
-Requires: mod_wsgi
-%global apache_conf %{_sysconfdir}/httpd
-%else
-Requires: apache2-mod_wsgi
-%global apache_conf %{_sysconfdir}/apache2
-%endif
-%description web
+%package examples
+Summary: Examples for Bcfg2
+Group: Documentation
+
+
+%description examples
Bcfg2 helps system administrators produce a consistent, reproducible,
and verifiable description of their environment, and offers
visualization and reporting tools to aid in day-to-day administrative
@@ -286,222 +421,470 @@ systems are constantly changing; if required in your environment,
Bcfg2 can enable the construction of complex change management and
deployment strategies.
-This package includes the Bcfg2 reports web frontend.
+This package includes the examples files for Bcfg2.
+
%prep
-%setup -q -n %{name}-%{version}
+%setup -q -n %{name}-%{version}%{?_pre_rc}
+
+# The pylint and pep8 unit tests fail on RH-derivative distros
+%if "%{_vendor}" == "redhat"
+mv testsuite/Testsrc/test_code_checks.py \
+ testsuite/Testsrc/test_code_checks.py.disable_unit_tests
+awk '
+ BEGIN {line=0}
+ /class Test(Pylint|PEP8)/ {line=FNR+1}
+ FNR==line {sub("True","False")}
+ {print $0}
+ ' testsuite/Testsrc/test_code_checks.py.disable_unit_tests \
+ > testsuite/Testsrc/test_code_checks.py
+%endif
+
+# Fixup some paths
+%{__perl} -pi -e 's@/etc/default@%{_sysconfdir}/sysconfig@g' tools/bcfg2-cron
+
+# Get rid of extraneous shebangs
+for f in `find src/lib -name \*.py`
+do
+ sed -i -e '/^#!/,1d' $f
+done
+
+sed -i "s/apache2/httpd/g" misc/apache/bcfg2.conf
+
%build
-%{__python}%{pythonversion} setup.py build
+%{__python} setup.py build
+%{?pythonpath: PYTHONPATH="%{pythonpath}"} \
+ %{__python} setup.py build_sphinx
-%{?pythonpath: export PYTHONPATH="%{pythonpath}"}
-%{__python}%{pythonversion} setup.py build_sphinx
%install
+%if 0%{?rhel} == 5 || 0%{?suse_version}
+# EL5 and OpenSUSE require the buildroot to be cleaned manually
rm -rf %{buildroot}
-%{__python}%{pythonversion} setup.py install --root=%{buildroot} --record=INSTALLED_FILES --prefix=/usr
-%{__install} -d %{buildroot}%{_bindir}
-%{__install} -d %{buildroot}%{_sbindir}
-%{__install} -d %{buildroot}%{_initrddir}
-%{__install} -d %{buildroot}%{_sysconfdir}/default
-%{__install} -d %{buildroot}%{_sysconfdir}/cron.daily
-%{__install} -d %{buildroot}%{_sysconfdir}/cron.hourly
-%{__install} -d %{buildroot}%{_prefix}/lib/bcfg2
-mkdir -p %{buildroot}%{_defaultdocdir}/bcfg2-doc-%{version}
-mkdir -p %{buildroot}%{_defaultdocdir}/bcfg2-server-%{version}
+%endif
+
+%{__python} setup.py install -O1 --skip-build --root=%{buildroot} --prefix=/usr
+install -d %{buildroot}%{_bindir}
+install -d %{buildroot}%{_sbindir}
+install -d %{buildroot}%{_initrddir}
+install -d %{buildroot}%{_sysconfdir}/cron.daily
+install -d %{buildroot}%{_sysconfdir}/cron.hourly
+install -d %{buildroot}%{_sysconfdir}/sysconfig
+install -d %{buildroot}%{_libexecdir}
+install -d %{buildroot}%{_localstatedir}/cache/%{name}
+install -d %{buildroot}%{_localstatedir}/lib/%{name}
%if 0%{?suse_version}
-%{__install} -d %{buildroot}/var/adm/fillup-templates
-%endif
-
-%{__mv} %{buildroot}%{_bindir}/bcfg2* %{buildroot}%{_sbindir}
-%{__install} -m 755 debian/bcfg2.init %{buildroot}%{_initrddir}/bcfg2
-%{__install} -m 755 debian/bcfg2-server.init %{buildroot}%{_initrddir}/bcfg2-server
-%{__install} -m 755 debian/bcfg2-report-collector.init %{buildroot}%{_initrddir}/bcfg2-report-collector
-%{__install} -m 755 debian/bcfg2.default %{buildroot}%{_sysconfdir}/default/bcfg2
-%{__install} -m 755 debian/bcfg2-server.default %{buildroot}%{_sysconfdir}/default/bcfg2-server
-%{__install} -m 755 debian/bcfg2.cron.daily %{buildroot}%{_sysconfdir}/cron.daily/bcfg2
-%{__install} -m 755 debian/bcfg2.cron.hourly %{buildroot}%{_sysconfdir}/cron.hourly/bcfg2
-%{__install} -m 755 tools/bcfg2-cron %{buildroot}%{_prefix}/lib/bcfg2/bcfg2-cron
+install -d %{buildroot}/var/adm/fillup-templates
+%endif
+
+mv %{buildroot}%{_bindir}/bcfg2* %{buildroot}%{_sbindir}
+
+%if 0%{?fedora} && 0%{?fedora} < 16 || 0%{?rhel} && 0%{?rhel} < 7
+# Install SysV init scripts for everyone but new Fedoras
+install -m 755 redhat/scripts/bcfg2.init \
+ %{buildroot}%{_initrddir}/bcfg2
+install -m 755 redhat/scripts/bcfg2-server.init \
+ %{buildroot}%{_initrddir}/bcfg2-server
+install -m 755 redhat/scripts/bcfg2-report-collector.init \
+ %{buildroot}%{_initrddir}/bcfg2-report-collector
+%endif
+install -m 755 debian/bcfg2.cron.daily \
+ %{buildroot}%{_sysconfdir}/cron.daily/bcfg2
+install -m 755 debian/bcfg2.cron.hourly \
+ %{buildroot}%{_sysconfdir}/cron.hourly/bcfg2
+install -m 755 tools/bcfg2-cron \
+ %{buildroot}%{_libexecdir}/bcfg2-cron
+
+install -m 644 debian/bcfg2.default \
+ %{buildroot}%{_sysconfdir}/sysconfig/bcfg2
+install -m 644 debian/bcfg2-server.default \
+ %{buildroot}%{_sysconfdir}/sysconfig/bcfg2-server
%if 0%{?suse_version}
-%{__install} -m 755 debian/bcfg2.default %{buildroot}/var/adm/fillup-templates/sysconfig.bcfg2
-%{__install} -m 755 debian/bcfg2-server.default %{buildroot}/var/adm/fillup-templates/sysconfig.bcfg2-server
+install -m 755 debian/bcfg2.default \
+ %{buildroot}/var/adm/fillup-templates/sysconfig.bcfg2
+install -m 755 debian/bcfg2-server.default \
+ %{buildroot}/var/adm/fillup-templates/sysconfig.bcfg2-server
ln -s %{_initrddir}/bcfg2 %{buildroot}%{_sbindir}/rcbcfg2
ln -s %{_initrddir}/bcfg2-server %{buildroot}%{_sbindir}/rcbcfg2-server
%endif
-cp -r tools/* %{buildroot}%{_defaultdocdir}/bcfg2-server-%{version}
-cp -r build/sphinx/html/* %{buildroot}%{_defaultdocdir}/bcfg2-doc-%{version}
+touch %{buildroot}%{_sysconfdir}/%{name}.{cert,conf,key}
-%{__install} -d %{buildroot}%{apache_conf}/conf.d
-sed -i "s/apache2/httpd/g" misc/apache/bcfg2.conf
-%{__install} -m 644 misc/apache/bcfg2.conf %{buildroot}%{apache_conf}/conf.d/wsgi_bcfg2.conf
+# systemd
+install -d %{buildroot}%{_unitdir}
+install -p -m 644 redhat/systemd/%{name}.service \
+ %{buildroot}%{_unitdir}/%{name}.service
+install -p -m 644 redhat/systemd/%{name}-server.service \
+ %{buildroot}%{_unitdir}/%{name}-server.service
+
+%if 0%{?rhel} != 5
+# Webserver
+install -d %{buildroot}%{apache_conf}/conf.d
+install -p -m 644 misc/apache/bcfg2.conf \
+ %{buildroot}%{apache_conf}/conf.d/wsgi_bcfg2.conf
+%else
+# remove web server files not in EL5 packages
+rm -r %{buildroot}%{_datadir}/bcfg2/reports.wsgi \
+ %{buildroot}%{_datadir}/bcfg2/site_media
+%endif
-%{__mkdir_p} %{buildroot}%{_localstatedir}/cache/%{name}
-%{__mkdir_p} %{buildroot}%{_localstatedir}/lib/%{name}
-# mandriva and RHEL 5 cannot handle %ghost without the file existing,
+# mandriva cannot handle %ghost without the file existing,
# so let's touch a bunch of empty config files
-touch %{buildroot}%{_sysconfdir}/bcfg2.conf \
- %{buildroot}%{_sysconfdir}/bcfg2-web.conf
+touch %{buildroot}%{_sysconfdir}/bcfg2.conf
+%if 0%{?rhel} == 5
+# Required for EL5
%clean
-[ "%{buildroot}" != "/" ] && %{__rm} -rf %{buildroot} || exit 2
+rm -rf %{buildroot}
+%endif
+
+
+%if 0%{?rhel} != 5
+# EL5 lacks python-mock, so test suite is disabled
+%if 0%{?rhel} != 7
+# FIXME: EL7 has some missing EPEL deps, so test suite is disabled
+%check
+# Downloads not allowed in koji; fix .xsd urls to point to local files
+sed -i "s@schema_url = .*\$@schema_url = 'file://`pwd`/`basename %{SOURCE1}`'@" \
+ testsuite/Testschema/test_schema.py
+sed "s@http://www.w3.org/2001/xml.xsd@file://$(pwd)/schemas/xml.xsd@" \
+ %{SOURCE1} > `basename %{SOURCE1}`
+%{__python} setup.py test
+%endif
+%endif
+
+
+%post
+%if 0%{?fedora} >= 18
+ %systemd_post bcfg2.service
+%else
+ if [ $1 -eq 1 ] ; then
+ # Initial installation
+ %if 0%{?suse_version}
+ %fillup_and_insserv -f bcfg2
+ %else %if 0%{?fedora} >= 16
+ /bin/systemctl daemon-reload >/dev/null 2>&1 || :
+ %else
+ /sbin/chkconfig --add bcfg2
+ %endif %endif
+ fi
+%endif
+
+%post server
+%if 0%{?fedora} >= 18
+ %systemd_post bcfg2-server.service
+%else
+ if [ $1 -eq 1 ] ; then
+ # Initial installation
+ %if 0%{?suse_version}
+ %fillup_and_insserv -f bcfg2-server
+ %else %if 0%{?fedora} >= 16
+ /bin/systemctl daemon-reload >/dev/null 2>&1 || :
+ %else
+ /sbin/chkconfig --add bcfg2-server
+ %endif %endif
+ fi
+%endif
+
+%preun
+%if 0%{?fedora} >= 18
+ %systemd_preun bcfg2.service
+%else
+ if [ $1 -eq 0 ]; then
+ # Package removal, not upgrade
+ %if 0%{?suse_version}
+ %stop_on_removal bcfg2
+ %else %if 0%{?fedora} >= 16
+ /bin/systemctl --no-reload disable bcfg2.service > /dev/null 2>&1 || :
+ /bin/systemctl stop bcfg2.service > /dev/null 2>&1 || :
+ %else
+ /sbin/service bcfg2 stop &>/dev/null || :
+ /sbin/chkconfig --del bcfg2
+ %endif %endif
+ fi
+%endif
+
+%preun server
+%if 0%{?fedora} >= 18
+ %systemd_preun bcfg2-server.service
+%else
+ if [ $1 -eq 0 ]; then
+ # Package removal, not upgrade
+ %if 0%{?suse_version}
+ %stop_on_removal bcfg2-server
+ %stop_on_removal bcfg2-report-collector
+ %else %if 0%{?fedora} >= 16
+ /bin/systemctl --no-reload disable bcfg2-server.service > /dev/null 2>&1 || :
+ /bin/systemctl stop bcfg2-server.service > /dev/null 2>&1 || :
+ %else
+ /sbin/service bcfg2-server stop &>/dev/null || :
+ /sbin/chkconfig --del bcfg2-server
+ %endif %endif
+ fi
+%endif
+
+%postun
+%if 0%{?fedora} >= 18
+ %systemd_postun bcfg2.service
+%else
+ %if 0%{?fedora} >= 16
+ /bin/systemctl daemon-reload >/dev/null 2>&1 || :
+ %endif
+ if [ $1 -ge 1 ] ; then
+ # Package upgrade, not uninstall
+ %if 0%{?suse_version}
+ %insserv_cleanup
+ %else %if 0%{?fedora} >= 16
+ /bin/systemctl try-restart bcfg2.service >/dev/null 2>&1 || :
+ %else
+ /sbin/service bcfg2 condrestart &>/dev/null || :
+ %endif %endif
+ fi
+%endif
+
+%postun server
+%if 0%{?fedora} >= 18
+ %systemd_postun bcfg2-server.service
+%else
+ %if 0%{?fedora} >= 16
+ /bin/systemctl daemon-reload >/dev/null 2>&1 || :
+ %endif
+ if [ $1 -ge 1 ] ; then
+ # Package upgrade, not uninstall
+ %if 0%{?fedora} >= 16
+ /bin/systemctl try-restart bcfg2-server.service >/dev/null 2>&1 || :
+ %else
+ /sbin/service bcfg2-server condrestart &>/dev/null || :
+ %endif
+ fi
+ %if 0%{?suse_version}
+ if [ $1 -eq 0 ]; then
+ # clean up on removal.
+ %insserv_cleanup
+ fi
+ %endif
+%endif
+
+%if 0%{?fedora} || 0%{?rhel}
+%triggerun -- bcfg2 < 1.2.1-1
+/usr/bin/systemd-sysv-convert --save bcfg2 >/dev/null 2>&1 || :
+/bin/systemctl --no-reload enable bcfg2.service >/dev/null 2>&1 || :
+/sbin/chkconfig --del bcfg2 >/dev/null 2>&1 || :
+/bin/systemctl try-restart bcfg2.service >/dev/null 2>&1 || :
+
+%triggerun server -- bcfg2-server < 1.2.1-1
+/usr/bin/systemd-sysv-convert --save bcfg2-server >/dev/null 2>&1 || :
+/bin/systemctl --no-reload enable bcfg2-server.service >/dev/null 2>&1 || :
+/sbin/chkconfig --del bcfg2-server >/dev/null 2>&1 || :
+/bin/systemctl try-restart bcfg2-server.service >/dev/null 2>&1 || :
+%endif
+
%files
+%if 0%{?rhel} == 5 || 0%{?suse_version}
+# Required for EL5 and OpenSUSE
%defattr(-,root,root,-)
+%endif
+%doc COPYRIGHT LICENSE README
+%{_mandir}/man1/bcfg2.1*
+%{_mandir}/man5/bcfg2.conf.5*
+%ghost %attr(600,root,root) %config(noreplace,missingok) %{_sysconfdir}/bcfg2.cert
+%ghost %attr(0600,root,root) %config(noreplace,missingok) %{_sysconfdir}/bcfg2.conf
+%if 0%{?fedora} >= 16 || 0%{?rhel} >= 7
+ %config(noreplace) %{_unitdir}/%{name}.service
+%else
+ %{_initrddir}/bcfg2
+%endif
+%if 0%{?fedora} || 0%{?rhel}
+%config(noreplace) %{_sysconfdir}/sysconfig/bcfg2
+%else
+%config(noreplace) %{_sysconfdir}/default/bcfg2
+%endif
+%{_sysconfdir}/cron.daily/bcfg2
+%{_sysconfdir}/cron.hourly/bcfg2
%{_sbindir}/bcfg2
+%{_libexecdir}/bcfg2-cron
+%dir %{_localstatedir}/cache/%{name}
+%{python_sitelib}/Bcfg2*.egg-info
%dir %{python_sitelib}/Bcfg2
-%{python_sitelib}/Bcfg2/Compat.py*
%{python_sitelib}/Bcfg2/__init__.py*
+%{python_sitelib}/Bcfg2/Client
+%{python_sitelib}/Bcfg2/Compat.py*
%{python_sitelib}/Bcfg2/Logger.py*
%{python_sitelib}/Bcfg2/Options.py*
%{python_sitelib}/Bcfg2/Proxy.py*
%{python_sitelib}/Bcfg2/Utils.py*
%{python_sitelib}/Bcfg2/version.py*
-%{python_sitelib}/Bcfg2/Client
-%{_mandir}/man1/bcfg2.1*
-%{_mandir}/man5/bcfg2.conf.5*
-%{_initrddir}/bcfg2
-%config(noreplace) %{_sysconfdir}/default/bcfg2
-%{_sysconfdir}/cron.hourly/bcfg2
-%{_sysconfdir}/cron.daily/bcfg2
-%{_prefix}/lib/bcfg2/bcfg2-cron
-%{_localstatedir}/cache/%{name}
-%{_localstatedir}/lib/%{name}
%if 0%{?suse_version}
%{_sbindir}/rcbcfg2
%config(noreplace) /var/adm/fillup-templates/sysconfig.bcfg2
%endif
-%ghost %config(noreplace,missingok) %attr(0600,root,root) %{_sysconfdir}/bcfg2.conf
%files server
+%if 0%{?rhel} == 5 || 0%{?suse_version}
%defattr(-,root,root,-)
-%{_initrddir}/bcfg2-server
-%{_initrddir}/bcfg2-report-collector
-%dir %{python_sitelib}/Bcfg2
+%endif
+%ghost %attr(600,root,root) %config(noreplace) %{_sysconfdir}/bcfg2.key
+%if 0%{?fedora} >= 16 || 0%{?rhel} >= 7
+ %config(noreplace) %{_unitdir}/%{name}-server.service
+%else
+ %{_initrddir}/bcfg2-server
+ %{_initrddir}/bcfg2-report-collector
+%endif
+%config(noreplace) %{_sysconfdir}/sysconfig/bcfg2-server
+%{_sbindir}/bcfg2-*
+%dir %{_localstatedir}/lib/%{name}
%{python_sitelib}/Bcfg2/Cache.py*
%{python_sitelib}/Bcfg2/Encryption.py*
%{python_sitelib}/Bcfg2/SSLServer.py*
%{python_sitelib}/Bcfg2/Statistics.py*
-%{python_sitelib}/Bcfg2/manage.py*
%{python_sitelib}/Bcfg2/settings.py*
%{python_sitelib}/Bcfg2/Server
%{python_sitelib}/Bcfg2/Reporting
-%exclude %{python_sitelib}/Bcfg2/Server/CherryPyCore.py
-
-%{python_sitelib}/*egg-info
+%{python_sitelib}/Bcfg2/manage.py*
+%exclude %{python_sitelib}/Bcfg2/Server/CherryPyCore.py*
%dir %{_datadir}/bcfg2
-%{_datadir}/bcfg2/Hostbase
%{_datadir}/bcfg2/schemas
%{_datadir}/bcfg2/xsl-transforms
-%config(noreplace) %{_sysconfdir}/default/bcfg2-server
-%{_sbindir}/bcfg2-admin
-%{_sbindir}/bcfg2-build-reports
-%{_sbindir}/bcfg2-crypt
-%{_sbindir}/bcfg2-info
-%{_sbindir}/bcfg2-lint
-%{_sbindir}/bcfg2-repo-validate
-%{_sbindir}/bcfg2-reports
-%{_sbindir}/bcfg2-report-collector
-%{_sbindir}/bcfg2-server
-%{_sbindir}/bcfg2-yum-helper
-%{_sbindir}/bcfg2-test
+%{_datadir}/bcfg2/Hostbase
%if 0%{?suse_version}
%{_sbindir}/rcbcfg2-server
%config(noreplace) /var/adm/fillup-templates/sysconfig.bcfg2-server
%endif
%{_mandir}/man5/bcfg2-lint.conf.5*
-%{_mandir}/man8/*.8*
-%dir %{_prefix}/lib/bcfg2
-%ghost %config(noreplace,missingok) %attr(0600,root,root) %{_sysconfdir}/bcfg2.conf
+%{_mandir}/man8/bcfg2*.8*
-%doc %{_defaultdocdir}/bcfg2-server-%{version}
+%doc tools/*
+%if %{build_cherry_py}
%files server-cherrypy
+%if 0%{?rhel} == 5 || 0%{?suse_version}
%defattr(-,root,root,-)
+%endif
%{python_sitelib}/Bcfg2/Server/CherryPyCore.py
+%endif
-%files doc
-%defattr(-,root,root,-)
-%doc %{_defaultdocdir}/bcfg2-doc-%{version}
-
+# bcfg2-web package is disabled on EL5, which lacks Django
+%if 0%{?rhel} != 5
%files web
+%if 0%{?suse_version}
%defattr(-,root,root,-)
+%endif
%{_datadir}/bcfg2/reports.wsgi
%{_datadir}/bcfg2/site_media
-%dir %{apache_conf}
-%dir %{apache_conf}/conf.d
%config(noreplace) %{apache_conf}/conf.d/wsgi_bcfg2.conf
-%ghost %config(noreplace,missingok) %attr(0640,root,apache) %{_sysconfdir}/bcfg2-web.conf
-
-%post server
-# enable daemon on first install only (not on update).
-if [ $1 -eq 1 ]; then
-%if 0%{?suse_version}
- %fillup_and_insserv -f bcfg2-server
-%else
- /sbin/chkconfig --add bcfg2-server
%endif
-fi
-%preun
-%if 0%{?suse_version}
-# stop on removal (not on update).
-if [ $1 -eq 0 ]; then
- %stop_on_removal bcfg2
-fi
+%files doc
+%if 0%{?rhel} == 5 || 0%{?suse_version}
+%defattr(-,root,root,-)
%endif
+%doc build/sphinx/html/*
-%preun server
-%if 0%{?suse_version}
-if [ $1 -eq 0 ]; then
- %stop_on_removal bcfg2-server
- %stop_on_removal bcfg2-report-collector
-fi
+%files examples
+%if 0%{?rhel} == 5 || 0%{?suse_version}
+%defattr(-,root,root,-)
%endif
+%doc examples/*
-%postun
-%if 0%{?suse_version}
-if [ $1 -eq 0 ]; then
- %insserv_cleanup
-fi
-%endif
-
-%postun server
-%if 0%{?suse_version}
-if [ $1 -eq 0 ]; then
- # clean up on removal.
- %insserv_cleanup
-fi
-%endif
%changelog
-* Thu Mar 21 2013 Sol Jerome <sol.jerome@gmail.com> 1.3.1-1
+* Sat Feb 1 2014 John Morris <john@zultron.com> - 1.3.3-4
+- Disable bcfg2-web package on EL5; bz #1058427
+- Disable %%check on EL7; missing EPEL deps
+- BR: systemd to pick up _unitdir macro
+
+* Mon Jan 27 2014 Sol Jerome <sol.jerome@gmail.com> - 1.3.3-4
+- Fix BuildRequires for EPEL7's Django
+- Remove unnecessary client-side lxml dependency
+- Add Django dependency for bcfg2-web (the web package *does* require
+ Django for the database)
+- Fix OS detection for RHEL7 initscripts
+
+* Sun Dec 15 2013 John Morris <john@zultron.com> - 1.3.3-3
+- Remove unneeded Django dep in 'web' package, bz #1043229
+
+* Sun Nov 24 2013 John Morris <john@zultron.com> - 1.3.3-2
+- Fix CherryPyCore.py exclude glob to include compiled files
+- Disable server-cherrypy package build to make Fedora buildsys happy
+
+* Thu Nov 07 2013 Sol Jerome <sol.jerome@gmail.com> 1.3.3-1
- New upstream release
-* Fri Mar 15 2013 Sol Jerome <sol.jerome@gmail.com> 1.3.0-0.0
-- New upstream release
+* Sun Aug 04 2013 John Morris <john@zultron.com> - 1.3.2-2
+- Reconcile divergences with Fedora specfile, as requested by upstream
+ (equally large changes made in Fedora version to reconcile with
+ this file)
+- Python macro cleanups
+- Accommodations for OpenSUSE
+- Macros for pre and rc releases
+- %%check section
+- Move BRs to top of file
+- Rearrange lines to match Fedora
+- Group: tag tweaks
+- Startup/shutdown changes
+- Separate examples package
+- Remove %%{__install} macros; RH has backed away from those
+- Add fedora systemd units, both f16 and f18 variants :P
+ - Changes to %%post* scripts
+- Rearrange %%files sections
+
+* Wed Jul 3 2013 John Morris <john@zultron.com> - 1.3.2-1
+- Update to new upstream version 1.3.2
+- Move settings.py into server package (fixes bug reported on bcfg2-dev ML)
+- Use init scripts from redhat/scripts directory
+- Fix EL5/EL6 sphinx docs
+- Require python-inotify instead of gamin-python; recommended by upstream
+- Remove obsolete bcfg2-py27-auth.patch, accepted upstream
+- Add %%check script
+ - Hack test suite to use local copies of XMLSchema.xsd and xml.xsd
+ - Many new BRs to support %%check script
+ - Disable %%check script on EL5, where there is no python-mock package
+- Cleanups to _pre/_rc macros
+- Mark EL5 relics
+- Other minor formatting
+
+* Mon Apr 08 2013 Fabian Affolter <mail@fabian-affolter.ch> - 1.3.1-1
+- Updated to new upstream version 1.3.1
+
+* Mon Mar 18 2013 Fabian Affolter <mail@fabian-affolter.ch> - 1.3.0-1
+- Updated to new upstream version 1.3.0
+
+* Wed Feb 13 2013 Fedora Release Engineering <rel-eng@lists.fedoraproject.org> - 1.3.0-0.2.pre2
+- Rebuilt for https://fedoraproject.org/wiki/Fedora_19_Mass_Rebuild
+
+* Wed Oct 31 2012 Fabian Affolter <mail@fabian-affolter.ch> - 1.3.0-0.1.pre2
+- Updated to new upstream version 1.3.0 pre2
-* Tue Jan 29 2013 Sol Jerome <sol.jerome@gmail.com> 1.3.0-0.0rc2
-- New upstream release
+* Wed Oct 17 2012 Chris St. Pierre <chris.a.st.pierre@gmail.com> 1.3.0-0.2pre1
+- Split bcfg2-selinux into its own specfile
-* Wed Jan 09 2013 Sol Jerome <sol.jerome@gmail.com> 1.3.0-0.0rc1
-- New upstream release
+* Mon Aug 27 2012 Václav Pavlín <vpavlin@redhat.com> - 1.2.3-3
+- Scriptlets replaced with new systemd macros (#850043)
-* Tue Oct 30 2012 Sol Jerome <sol.jerome@gmail.com> 1.3.0-0.0pre2
-- New upstream release
+* Wed Jul 18 2012 Fedora Release Engineering <rel-eng@lists.fedoraproject.org> - 1.2.3-2
+- Rebuilt for https://fedoraproject.org/wiki/Fedora_18_Mass_Rebuild
-* Wed Oct 17 2012 Chris St. Pierre <chris.a.st.pierre@gmail.com> 1.3.0-0.2pre1
-- Split bcfg2-selinux into its own specfile
+* Sat Jul 07 2012 Fabian Affolter <mail@fabian-affolter.ch> - 1.2.3-1
+- Fix CVE-2012-3366
+- Updated to new upstream version 1.2.3
+
+* Tue May 01 2012 Fabian Affolter <mail@fabian-affolter.ch> - 1.2.2-2
+- python-nose is needed by bcfg2-test
+
+* Fri Apr 06 2012 Fabian Affolter <mail@fabian-affolter.ch> - 1.2.2-1
+- Updated to new upstream version 1.2.2
+
+* Sun Feb 26 2012 Fabian Affolter <mail@fabian-affolter.ch> - 1.2.1-2
+- Fixed systemd files
* Fri Sep 14 2012 Chris St. Pierre <chris.a.st.pierre@gmail.com> 1.3.0-0.1pre1
- Added -selinux subpackage
-* Fri Aug 31 2012 Sol Jerome <sol.jerome@gmail.com> 1.3.0-0.0pre1
-- New upstream release
-
* Wed Aug 15 2012 Chris St. Pierre <chris.a.st.pierre@gmail.com> 1.2.3-0.1
- Added tools/ as doc for bcfg2-server subpackage
@@ -513,15 +896,212 @@ fi
- Added openSUSE compatibility.
- Various changes to satisfy rpmlint.
+* Tue Feb 07 2012 Fabian Affolter <mail@fabian-affolter.ch> - 1.2.1-1
+- Added examples package
+- Updated to new upstream version 1.2.1
+
+* Mon Jan 02 2012 Fabian Affolter <mail@fabian-affolter.ch> - 1.2.0-6
+- Added support for systemd
+- Example subpackage
+
+* Wed Sep 07 2011 Fabian Affolter <mail@fabian-affolter.ch> - 1.2.0-5
+- Updated to new upstream version 1.2.0
+
+* Wed Sep 07 2011 Fabian Affolter <mail@fabian-affolter.ch> - 1.2.0-4.1.rc1
+- Updated to new upstream version 1.2.0rc1
+
+* Wed Jun 22 2011 Fabian Affolter <mail@fabian-affolter.ch> - 1.2.0-3.1.pre3
+- Updated to new upstream version 1.2.0pre3
+
+* Wed May 04 2011 Fabian Affolter <mail@fabian-affolter.ch> - 1.2.0-2.1.pre2
+- Added bcfg2-lint stuff
+- Pooled file section entries to reduce future maintenance
+- Removed Patch
+
+* Wed May 04 2011 Fabian Affolter <mail@fabian-affolter.ch> - 1.2.0-1.1.pre2
+- Updated to new upstream version 1.2.0pre2
+
+* Sun Mar 20 2011 Fabian Affolter <mail@fabian-affolter.ch> - 1.2.0-1.1.pre1
+- Added doc subpackage
+- Updated to new upstream version 1.2.0pre1
+
+* Mon Feb 07 2011 Fedora Release Engineering <rel-eng@lists.fedoraproject.org> - 1.1.1-2.1
+- Rebuilt for https://fedoraproject.org/wiki/Fedora_15_Mass_Rebuild
+
* Thu Jan 27 2011 Chris St. Pierre <chris.a.st.pierre@gmail.com> 1.2.0pre1-0.0
- Added -doc sub-package
+* Thu Nov 18 2010 Fabian Affolter <mail@fabian-affolter.ch> - 1.1.1-2
+- Added new man page
+- Updated doc section (ChangeLog is gone)
+
+* Thu Nov 18 2010 Fabian Affolter <mail@fabian-affolter.ch> - 1.1.1-1
+- Updated to new upstream version 1.1.1
+
+* Fri Nov 5 2010 Jeffrey C. Ollie <jeff@ocjtech.us> - 1.1.0-3
+- Add patch from Gordon Messmer to fix authentication on F14+ (Python 2.7)
+
+* Mon Sep 27 2010 Jeffrey C. Ollie <jeff@ocjtech.us> - 1.1.0-2
+- Update to final version
+
+* Wed Sep 15 2010 Jeffrey C. Ollie <jeff@ocjtech.us> - 1.1.0-1.3.rc5
+- Update to 1.1.0rc5:
+
+* Tue Aug 31 2010 Jeffrey C. Ollie <jeff@ocjtech.us> - 1.1.0-1.2.rc4
+- Add new YUMng driver
+
+* Wed Jul 21 2010 David Malcolm <dmalcolm@redhat.com> - 1.1.0-1.1.rc4.1
+- Rebuilt for https://fedoraproject.org/wiki/Features/Python_2.7/MassRebuild
+
+* Tue Jul 20 2010 Fabian Affolter <mail@fabian-affolter.ch> - 1.1.0-1.1.rc4
+- Added patch to fix indention
+
+* Tue Jul 20 2010 Fabian Affolter <mail@fabian-affolter.ch> - 1.1.0-0.1.rc4
+- Updated to new upstream release candidate RC4
+
* Mon Jun 21 2010 Fabian Affolter <fabian@bernewireless.net> - 1.1.0rc3-0.1
- Changed source0 in order that it works with spectool
+* Sat Jun 19 2010 Fabian Affolter <mail@fabian-affolter.ch> - 1.1.0-0.1.rc3
+- Updated to new upstream release candidate RC3
+
+* Sun May 02 2010 Fabian Affolter <mail@fabian-affolter.ch> - 1.1.0-0.2.rc1
+- Changed define to global
+- Added graphviz for the server package
+
+* Wed Apr 28 2010 Jeffrey C. Ollie <jeff@ocjtech.us> - 1.1.0-0.1.rc1
+- Update to 1.1.0rc1
+
+* Tue Apr 13 2010 Jeffrey C. Ollie <jeff@ocjtech.us> - 1.0.1-1
+- Update to final version
+
+* Fri Nov 6 2009 Jeffrey C. Ollie <jeff@ocjtech.us> - 1.0.0-2
+- Fixup the bcfg2-server init script
+
+* Fri Nov 6 2009 Jeffrey C. Ollie <jeff@ocjtech.us> - 1.0.0-1
+- Update to 1.0.0 final
+
+* Wed Nov 4 2009 Jeffrey C. Ollie <jeff@ocjtech.us> - 1.0.0-0.5.rc2
+- Only require python-ssl on EPEL
+
+* Sat Oct 31 2009 Jeffrey C. Ollie <jeff@ocjtech.us> - 1.0.0-0.4.rc2
+- Update to 1.0.0rc2
+
+* Mon Oct 26 2009 Jeffrey C. Ollie <jeff@ocjtech.us> - 1.0.0-0.3.rc1
+- Update to 1.0rc1
+
+* Fri Oct 16 2009 Jeffrey C. Ollie <jeff@ocjtech.us> - 1.0-0.2.pre5
+- Add python-ssl requirement
+
+* Tue Aug 11 2009 Jeffrey C. Ollie <jeff@ocjtech.us> - 1.0-0.1.pre5
+- Update to 1.0pre5
+
+* Fri Jul 24 2009 Fedora Release Engineering <rel-eng@lists.fedoraproject.org> - 0.9.6-4
+- Rebuilt for https://fedoraproject.org/wiki/Fedora_12_Mass_Rebuild
+
+* Mon Feb 23 2009 Fedora Release Engineering <rel-eng@lists.fedoraproject.org> - 0.9.6-3
+- Rebuilt for https://fedoraproject.org/wiki/Fedora_11_Mass_Rebuild
+
+* Sat Nov 29 2008 Ignacio Vazquez-Abrams <ivazqueznet+rpm@gmail.com> - 0.9.6-2
+- Rebuild for Python 2.6
+
+* Tue Nov 18 2008 Jeffrey C. Ollie <jeff@ocjtech.us> - 0.9.6-1
+- Update to 0.9.6 final.
+
+* Tue Oct 14 2008 Jeffrey C. Ollie <jeff@ocjtech.us> - 0.9.6-0.8.pre3
+- Update to 0.9.6pre3
+
+* Sat Aug 9 2008 Jeffrey C. Ollie <jeff@ocjtech.us> - 0.9.6-0.2.pre2
+- Update to 0.9.6pre2
+
+* Wed May 28 2008 Jeffrey C. Ollie <jeff@ocjtech.us> - 0.9.6-0.1.pre1
+- Update to 0.9.6pre1
+
+* Fri Feb 15 2008 Jeffrey C. Ollie <jeff@ocjtech.us> - 0.9.5.7-1
+- Update to 0.9.5.7.
+
+* Fri Feb 15 2008 Jeffrey C. Ollie <jeff@ocjtech.us> - 0.9.5.7-1
+- Update to 0.9.5.7.
+
+* Fri Jan 11 2008 Jeffrey C. Ollie <jeff@ocjtech.us> - 0.9.5.5-1
+- Update to 0.9.5.5
+- More egg-info entries.
+
+* Wed Jan 9 2008 Jeffrey C. Ollie <jeff@ocjtech.us> - 0.9.5.4-1
+- Update to 0.9.5.4.
+
+* Tue Jan 8 2008 Jeffrey C. Ollie <jeff@ocjtech.us> - 0.9.5.3-1
+- Update to 0.9.5.3
+- Package egg-info files.
+
+* Mon Nov 12 2007 Jeffrey C. Ollie <jeff@ocjtech.us> - 0.9.5.2-1
+- Update to 0.9.5.2
+
+* Mon Nov 12 2007 Jeffrey C. Ollie <jeff@ocjtech.us> - 0.9.5-2
+- Fix oops.
+
+* Mon Nov 12 2007 Jeffrey C. Ollie <jeff@ocjtech.us> - 0.9.5-1
+- Update to 0.9.5 final.
+
+* Mon Nov 05 2007 Jeffrey C. Ollie <jeff@ocjtech.us> - 0.9.5-0.5.pre7
+- Commit new patches to CVS.
+
+* Mon Nov 05 2007 Jeffrey C. Ollie <jeff@ocjtech.us> - 0.9.5-0.4.pre7
+- Update to 0.9.5pre7
+
+* Wed Jun 27 2007 Jeffrey C. Ollie <jeff@ocjtech.us> - 0.9.4-4
+- Oops, apply right patch
+
+* Wed Jun 27 2007 Jeffrey C. Ollie <jeff@ocjtech.us> - 0.9.4-3
+- Add patch to fix YUMng problem
+
+* Mon Jun 25 2007 Jeffrey C. Ollie <jeff@ocjtech.us> - 0.9.4-2
+- Bump revision and rebuild
+
+* Mon Jun 25 2007 Jeffrey C. Ollie <jeff@ocjtech.us> - 0.9.4-1
+- Update to 0.9.4 final
+
+* Thu Jun 21 2007 Jeffrey C. Ollie <jeff@ocjtech.us> - 0.9.4-0.1.pre4
+- Update to 0.9.4pre4
+
+* Thu Jun 14 2007 Jeffrey C. Ollie <jeff@ocjtech.us> - 0.9.4-0.1.pre3
+- Update to 0.9.4pre3
+
+* Tue Jun 12 2007 Jeffrey C. Ollie <jeff@ocjtech.us> - 0.9.4-0.1.pre2
+- Update to 0.9.4pre2
+
+* Tue May 22 2007 Jeffrey C. Ollie <jeff@ocjtech.us> - 0.9.3-2
+- Drop requires on pyOpenSSL
+- Add requires on redhat-lsb
+- (Fixes #240871)
+
+* Mon Apr 30 2007 Jeffrey C. Ollie <jeff@ocjtech.us> - 0.9.3-1
+- Update to 0.9.3
+
+* Tue Mar 20 2007 Jeffrey C. Ollie <jeff@ocjtech.us> - 0.9.2-4
+- Server needs pyOpenSSL
+
+* Wed Feb 28 2007 Jeffrey C. Ollie <jeff@ocjtech.us> - 0.9.2-3
+- Don't forget %%dir
+
+* Wed Feb 28 2007 Jeffrey C. Ollie <jeff@ocjtech.us> - 0.9.2-2
+- Fix #230478
+
+* Mon Feb 19 2007 Jeffrey C. Ollie <jeff@ocjtech.us> - 0.9.2-1
+- Update to 0.9.2
+
+* Thu Feb 8 2007 Jeffrey C. Ollie <jeff@ocjtech.us> - 0.9.1-1.d
+- Update to 0.9.1d
+
* Fri Feb 2 2007 Mike Brady <mike.brady@devnull.net.nz> 0.9.1
- Removed use of _libdir due to Red Hat x86_64 issue.
+* Tue Jan 9 2007 Jeffrey C. Ollie <jeff@ocjtech.us> - 0.8.7.3-2
+- Merge client back into base package.
+
+* Wed Dec 27 2006 Jeffrey C. Ollie <jeff@ocjtech.us> - 0.8.7.3-1
+- Update to 0.8.7.3
+
* Fri Dec 22 2006 Jeffrey C. Ollie <jeff@ocjtech.us> - 0.8.7.1-5
- Server needs client library files too so put them in main package
diff --git a/osx/Makefile b/osx/Makefile
index f25e71927..1f3564c1c 100644
--- a/osx/Makefile
+++ b/osx/Makefile
@@ -29,9 +29,9 @@ SITELIBDIR = /Library/Python/${PYVERSION}/site-packages
# an Info.plist file for packagemaker to look at for package creation
# and substitute the version strings. Major/Minor versions can only be
 # integers (e.g. "1" and "00" for bcfg2 version 1.0.0).
-BCFGVER = 1.3.1
+BCFGVER = 1.3.3
MAJOR = 1
-MINOR = 31
+MINOR = 33
default: clean client
diff --git a/osx/macports/Portfile b/osx/macports/Portfile
index 45cf3dd2b..83c7f4075 100644
--- a/osx/macports/Portfile
+++ b/osx/macports/Portfile
@@ -5,7 +5,7 @@ PortSystem 1.0
PortGroup python26 1.0
name bcfg2
-version 1.3.1
+version 1.3.3
categories sysutils python
maintainers gmail.com:sol.jerome
license BSD
diff --git a/redhat/Makefile b/redhat/Makefile
deleted file mode 100644
index f8f779557..000000000
--- a/redhat/Makefile
+++ /dev/null
@@ -1,153 +0,0 @@
-# -- generic Makefile for building RPM-based packages out of source
-# code control systems (git, cvs, svn)
-#
-# $Id: Makefile 46 2007-10-24 09:14:12Z robin $
-
-.SUFFIXES:
-.PHONY: clean mrclean distclean prepclean all default
-.PHONY: rpm rpmdist buildrpm buildrpmdist
-.PHONY: buildtarball buildtargz
-.PHONY: builddir distdir prepbuildtarball
-.PHONY: cvs-export git-export svn-export test-export
-.PHONY: cvs-clean git-clean svn-clean test-clean
-
-SCM_TYPE := git
-SCM_PATH := ../
-#CVSROOT := $(shell cat 2>/dev/null src/CVS/Root)
-#SVN_PATH := $(shell svn info ${SCM_PATH} 2>/dev/null | awk '/^URL:/{print $$2}')
-#SVN_REV := $(shell svn info ${SVN_PATH} 2>/dev/null | awk '/^Last Changed Rev:/{print $$4}')
-
-PACKAGE := $(shell cat PACKAGE)
-VERSION := $(shell cat VERSION)
-RELEASE := $(shell cat RELEASE)
-BASE_VER := ${VERSION}-${RELEASE}
-CURRENT_PACKAGE := $(PACKAGE)-$(BASE_VER)
-TARBALL := $(PACKAGE)-$(VERSION).tar
-
-DIRNAME := $(shell echo $${PWD})
-DIRBASE := $(shell basename $${PWD})
-
-
-default: rpmdist
-
-# -- the "rpmdist" target will build out of the SCM, but will
-# use the user's default build settings (which in many cases
-# is exposed as an RPM repository)
-#
-rpmdist: buildrpmdist distclean
-
-buildrpmdist: specfile buildtargz
- @rpmbuild \
- -ta ./build/$(TARBALL).gz
-
-# -- the "rpm" target will build out of the SCM, but will leave
-# the resulting package in the relative ./build/ directory
-#
-rpm: buildrpm $(SCM_TYPE)-clean
-
-# add "debug" to a target to see the values of all these vars
-debug:
- echo SCM_TYPE: ${SCM_TYPE} && \
- echo SCM_PATH: ${SCM_PATH} && \
- echo SVN_PATH: ${SVN_PATH} && \
- echo SVN_REV : ${SVN_REV} && \
- echo PACKAGE : ${PACKAGE} && \
- echo VERSION : ${VERSION} && \
- echo RELEASE : ${RELEASE} && \
- echo BASE_VER: ${BASE_VER} && \
- echo CURRENT_PACKAGE: ${CURRENT_PACKAGE} && \
- echo TARBALL: ${TARBALL} && \
- echo DIRNAME: ${DIRNAME} && \
- echo DIRBASE: ${DIRBASE}
-
-buildrpm: specfile buildtargz
- @rpmbuild \
- --define "_rpmdir ./build/" \
- --define "_sourcedir ./build/" \
- --define "_srcrpmdir ./build/" \
- -ta ./build/$(TARBALL).gz
-
-buildtarball: prepbuildtarball
- @tar \
- --create \
- --directory ./build/ \
- --file ./build/$(TARBALL) \
- --exclude misc/bcfg2.spec \
- ${CURRENT_PACKAGE}
-
-buildtargz: buildtarball
- @gzip -c < ./build/$(TARBALL) > ./build/$(TARBALL).gz
-
-# This target copies files that are not in svn into the build tree
-prepbuildtarball: $(SCM_TYPE)-export
- @cp ${PACKAGE}.spec ./build/${CURRENT_PACKAGE}/redhat/ && \
- cp -R scripts ./build/${CURRENT_PACKAGE}/redhat/
-
-specfile: $(PACKAGE).spec
-
-# create the spec file from the .in file and put in the build tree
-$(PACKAGE).spec: PACKAGE VERSION RELEASE $(PACKAGE).spec.in
- @sed -e "s|@PACKAGE@|$(PACKAGE)|" \
- -e "s|@VERSION@|$(VERSION)|" \
- -e "s|@RELEASE@|$(RELEASE)|" \
- $(PACKAGE).spec.in > $@
-test-clean:
- @cd .. \
- && rm "$(CURRENT_PACKAGE)"
-
-test-export: builddir
- @cd .. \
- && ln -snvf $(DIRBASE) $(CURRENT_PACKAGE) \
- && tar \
- --create \
- --dereference \
- --to-stdout \
- --exclude "*.git*" \
- --exclude "*.svn*" \
- --exclude "*/CVS/*" \
- --exclude "$(CURRENT_PACKAGE)/build/*" \
- $(CURRENT_PACKAGE) \
- | tar \
- --extract \
- --directory $(CURRENT_PACKAGE)/build/ \
- --file -
-
-git-export: builddir prepclean
- @cd ../ && git archive --format=tar --prefix=$(CURRENT_PACKAGE)/ HEAD \
- | (cd redhat/build && tar xf -)
-
-git-clean:
- @:
-
-cvs-export: builddir prepclean
- @cd ./build/ \
- && echo CURRENT_PACKAGE: ${CURRENT_PACKAGE} \
- && echo CVSROOT: ${CVSROOT} \
- && CVSROOT=${CVSROOT} cvs export -r HEAD -d$(CURRENT_PACKAGE) ${PACKAGE}
-
-cvs-clean:
- @:
-
-svn-export: builddir prepclean
- @cd ./build/ \
- && svn export $(SVN_PATH) $(CURRENT_PACKAGE)
-
-svn-clean:
- @rm -f bcfg2.spec 2>/dev/null || :
-
-builddir:
- @mkdir -p ./build
-
-distdir:
- @mkdir -p ./dist
-
-prepclean:
- @rm -rf ./build/$(CURRENT_PACKAGE)*
-
-clean:
- @rm -rf ./build/* ./dist/* 2>/dev/null || :
-
-mrclean: clean
-
-distclean: clean $(SCM_TYPE)-clean
- @rmdir ./build/ ./dist/ 2>/dev/null || :
diff --git a/redhat/PACKAGE b/redhat/PACKAGE
deleted file mode 100644
index 5b418ae6b..000000000
--- a/redhat/PACKAGE
+++ /dev/null
@@ -1 +0,0 @@
-bcfg2
diff --git a/redhat/RELEASE b/redhat/RELEASE
deleted file mode 100644
index ba66466c2..000000000
--- a/redhat/RELEASE
+++ /dev/null
@@ -1 +0,0 @@
-0.0
diff --git a/redhat/VERSION b/redhat/VERSION
deleted file mode 100644
index 3a3cd8cc8..000000000
--- a/redhat/VERSION
+++ /dev/null
@@ -1 +0,0 @@
-1.3.1
diff --git a/redhat/bcfg2.spec.in b/redhat/bcfg2.spec.in
deleted file mode 100644
index b1cd0d097..000000000
--- a/redhat/bcfg2.spec.in
+++ /dev/null
@@ -1,324 +0,0 @@
-%if ! (0%{?fedora} > 12 || 0%{?rhel} > 5)
-%{!?python_sitelib: %global python_sitelib %(%{__python} -c "from distutils.sysconfig import get_python_lib; print get_python_lib()")}
-%endif
-
-Name: @PACKAGE@
-Version: @VERSION@
-Release: @RELEASE@
-
-%define __python python
-%{!?py_ver: %define py_ver %(%{__python} -c 'import sys;print(sys.version[0:3])')}
-%define pythonversion %{py_ver}
-%{!?_initrddir: %define _initrddir %{_sysconfdir}/rc.d/init.d}
-
-Summary: Configuration management system
-
-Group: Applications/System
-License: BSD
-URL: http://bcfg2.org
-Source0: ftp://ftp.mcs.anl.gov/pub/bcfg/%{name}-%{version}.tar.gz
-BuildRoot: %{_tmppath}/%{name}-%{version}-%{release}-root-%(%{__id_u} -n)
-BuildArch: noarch
-
-%if 0%{?fedora} >= 8
-BuildRequires: python-setuptools-devel
-%else
-BuildRequires: python-setuptools
-%endif
-
-# RHEL 5 and 6 ship with sphinx 0.6, but sphinx 1.0 is available with
-# a different package name in EPEL.
-%if "%{_vendor}" == "redhat" && 0%{?rhel} != 0
-BuildRequires: python-sphinx10
-# the python-sphinx10 package doesn't set sys.path correctly, so we
-# have to do it for them
-%define pythonpath %(find %{python_sitelib} -name Sphinx*.egg)
-%else
-BuildRequires: python-sphinx >= 1.0
-%endif
-
-BuildRequires: python-lxml
-
-%if "%{py_ver}" == "2.4"
-Requires: python-elementtree
-%else if "%{py_ver}" < "2.5"
-Requires: python-lxml
-%endif
-Requires: python-nose
-Requires: initscripts
-Requires(post): /sbin/chkconfig
-Requires(preun): /sbin/chkconfig
-Requires(preun): /sbin/service
-Requires(postun): /sbin/service
-
-%description
-Bcfg2 helps system administrators produce a consistent, reproducible,
-and verifiable description of their environment, and offers
-visualization and reporting tools to aid in day-to-day administrative
-tasks. It is the fifth generation of configuration management tools
-developed in the Mathematics and Computer Science Division of Argonne
-National Laboratory.
-
-It is based on an operational model in which the specification can be
-used to validate and optionally change the state of clients, but in a
-feature unique to bcfg2 the client's response to the specification can
-also be used to assess the completeness of the specification. Using
-this feature, bcfg2 provides an objective measure of how good a job an
-administrator has done in specifying the configuration of client
-systems. Bcfg2 is therefore built to help administrators construct an
-accurate, comprehensive specification.
-
-Bcfg2 has been designed from the ground up to support gentle
-reconciliation between the specification and current client states. It
-is designed to gracefully cope with manual system modifications.
-
-Finally, due to the rapid pace of updates on modern networks, client
-systems are constantly changing; if required in your environment,
-Bcfg2 can enable the construction of complex change management and
-deployment strategies.
-
-%package server
-Summary: Configuration management server
-Group: System Environment/Daemons
-Requires: bcfg2 = %{version}-%{release}
-%if "%{py_ver}" < "2.6"
-Requires: python-ssl
-%endif
-Requires: python-lxml >= 0.9
-Requires: /usr/bin/openssl
-Requires: gamin-python
-Requires: python-daemon
-Requires(post): /sbin/chkconfig
-Requires(preun): /sbin/chkconfig
-Requires(preun): /sbin/service
-Requires(postun): /sbin/service
-
-%description server
-Configuration management server
-
-%package doc
-Summary: Configuration management system documentation
-Group: Documentation
-
-%description doc
-Configuration management system documentation
-
-%prep
-%setup -q -n %{name}-%{version}-%{release}
-
-# fixup some paths
-%{__perl} -pi -e 's@/etc/default@%{_sysconfdir}/sysconfig@g' debian/bcfg2.init
-%{__perl} -pi -e 's@/etc/default@%{_sysconfdir}/sysconfig@g' debian/bcfg2-server.init
-%{__perl} -pi -e 's@/etc/default@%{_sysconfdir}/sysconfig@g' tools/bcfg2-cron
-
-# don't start servers by default
-%{__perl} -pi -e 's@chkconfig: (\d+)@chkconfig: -@' debian/bcfg2.init
-%{__perl} -pi -e 's@chkconfig: (\d+)@chkconfig: -@' debian/bcfg2-server.init
-
-# get rid of extraneous shebangs
-for f in `find src/lib/Bcfg2 -name \*.py`
-do
- %{__sed} -i -e '/^#!/,1d' $f
-done
-
-%build
-%{__python} -c 'import setuptools; execfile("setup.py")' build
-
-%{?pythonpath: export PYTHONPATH="%{pythonpath}"}
-%{__python} -c 'import setuptools; execfile("setup.py")' build_sphinx
-
-%install
-rm -rf %{buildroot}
-%{__python} -c 'import setuptools; execfile("setup.py")' install --skip-build --root %{buildroot}
-
-mkdir -p %{buildroot}%{_sbindir}
-mkdir -p %{buildroot}%{_initrddir}
-mkdir -p %{buildroot}%{_sysconfdir}/cron.daily
-mkdir -p %{buildroot}%{_sysconfdir}/cron.hourly
-mkdir -p %{buildroot}%{_sysconfdir}/sysconfig
-mkdir -p %{buildroot}%{_libexecdir}
-mkdir -p %{buildroot}%{_var}/lib/bcfg2
-mkdir -p %{buildroot}%{_var}/cache/bcfg2
-mkdir -p %{buildroot}%{_defaultdocdir}/bcfg2-doc-%{version}
-
-mv %{buildroot}%{_bindir}/bcfg2* %{buildroot}%{_sbindir}
-
-install -m 755 redhat/scripts/bcfg2.init %{buildroot}%{_initrddir}/bcfg2
-install -m 755 redhat/scripts/bcfg2-server.init %{buildroot}%{_initrddir}/bcfg2-server
-install -m 755 debian/bcfg2.cron.daily %{buildroot}%{_sysconfdir}/cron.daily/bcfg2
-install -m 755 debian/bcfg2.cron.hourly %{buildroot}%{_sysconfdir}/cron.hourly/bcfg2
-install -m 755 tools/bcfg2-cron %{buildroot}%{_libexecdir}/bcfg2-cron
-
-install -m 644 debian/bcfg2.default %{buildroot}%{_sysconfdir}/sysconfig/bcfg2
-
-mv build/sphinx/html/* %{buildroot}%{_defaultdocdir}/bcfg2-doc-%{version}
-
-touch %{buildroot}%{_sysconfdir}/bcfg2.conf
-touch %{buildroot}%{_sysconfdir}/bcfg2.key
-
-%clean
-rm -rf %{buildroot}
-
-%post
-/sbin/chkconfig --add bcfg2
-
-%preun
-if [ $1 = 0 ]; then
- /sbin/service bcfg2 stop >/dev/null 2>&1 || :
- /sbin/chkconfig --del bcfg2
-fi
-
-%postun
-if [ "$1" -ge "1" ]; then
- /sbin/service bcfg2 condrestart >/dev/null 2>&1 || :
-fi
-
-%post server
-/sbin/chkconfig --add bcfg2-server
-
-%preun server
-if [ $1 = 0 ]; then
- /sbin/service bcfg2-server stop >/dev/null 2>&1 || :
- /sbin/chkconfig --del bcfg2-server
-fi
-
-%postun server
-if [ "$1" -ge "1" ]; then
- /sbin/service bcfg2-server condrestart >/dev/null 2>&1 || :
-fi
-
-%files
-%defattr(-,root,root,-)
-%doc examples LICENSE COPYRIGHT README
-
-%ghost %attr(600,root,root) %config(noreplace) %{_sysconfdir}/bcfg2.conf
-
-%config(noreplace) %{_sysconfdir}/sysconfig/bcfg2
-%{_sysconfdir}/cron.daily/bcfg2
-%{_sysconfdir}/cron.hourly/bcfg2
-
-%{_initrddir}/bcfg2
-
-%{python_sitelib}/Bcfg2*.egg-info
-%dir %{python_sitelib}/Bcfg2
-%{python_sitelib}/Bcfg2/Compat.py*
-%{python_sitelib}/Bcfg2/__init__.py*
-%{python_sitelib}/Bcfg2/Logger.py*
-%{python_sitelib}/Bcfg2/Options.py*
-%{python_sitelib}/Bcfg2/Proxy.py*
-%{python_sitelib}/Bcfg2/version.py*
-%{python_sitelib}/Bcfg2/Client
-%{python_sitelib}/Bcfg2/Component.*
-%{python_sitelib}/Bcfg2/Logger.*
-%{python_sitelib}/Bcfg2/Options.*
-%{python_sitelib}/Bcfg2/Proxy.*
-%{python_sitelib}/Bcfg2/SSLServer.*
-%{python_sitelib}/Bcfg2/Statistics.*
-
-%{_sbindir}/bcfg2
-%{_mandir}/man1/bcfg2.1*
-%{_mandir}/man5/bcfg2.conf.5*
-
-%{_libexecdir}/bcfg2-cron
-
-%dir %{_var}/cache/bcfg2
-
-%files server
-%defattr(-,root,root,-)
-
-%ghost %attr(600,root,root) %config(noreplace) %{_sysconfdir}/bcfg2.key
-
-%{_initrddir}/bcfg2-server
-
-%dir %{python_sitelib}/Bcfg2
-%{python_sitelib}/Bcfg2/Cache.py*
-%{python_sitelib}/Bcfg2/Encryption.py*
-%{python_sitelib}/Bcfg2/SSLServer.py*
-%{python_sitelib}/Bcfg2/Statistics.py*
-%{python_sitelib}/Bcfg2/manage.py*
-%{python_sitelib}/Bcfg2/settings.py*
-%{python_sitelib}/Bcfg2/Server
-
-%{_datadir}/bcfg2
-
-%{_sbindir}/bcfg2-admin
-%{_sbindir}/bcfg2-build-reports
-%{_sbindir}/bcfg2-info
-%{_sbindir}/bcfg2-ping-sweep
-%{_sbindir}/bcfg2-lint
-%{_sbindir}/bcfg2-repo-validate
-%{_sbindir}/bcfg2-reports
-%{_sbindir}/bcfg2-server
-%{_sbindir}/bcfg2-yum-helper
-%{_sbindir}/bcfg2-test
-
-%{_mandir}/man5/bcfg2-lint.conf.5*
-%{_mandir}/man8/*.8*
-
-%dir %{_var}/lib/bcfg2
-
-%files doc
-%defattr(-,root,root,-)
-%doc %{_defaultdocdir}/bcfg2-doc-%{version}
-
-%changelog
-* Thu Mar 21 2013 Sol Jerome <sol.jerome@gmail.com> 1.3.1-1
-- New upstream release
-
-* Fri Mar 15 2013 Sol Jerome <sol.jerome@gmail.com> 1.3.0-0.0
-- New upstream release
-
-* Tue Jan 29 2013 Sol Jerome <sol.jerome@gmail.com> 1.3.0-0.0rc2
-- New upstream release
-
-* Wed Jan 09 2013 Sol Jerome <sol.jerome@gmail.com> 1.3.0-0.0rc1
-- New upstream release
-
-* Tue Oct 30 2012 Sol Jerome <sol.jerome@gmail.com> 1.3.0-0.0pre2
-- New upstream release
-
-* Fri Aug 31 2012 Sol Jerome <sol.jerome@gmail.com> 1.3.0-0.0pre1
-- New upstream release
-
-* Thu Jan 27 2011 Chris St. Pierre <stpierreca@ornl.gov> 1.2.0pre1-0.0
-- Added -doc sub-package
-
-* Wed Jun 15 2009 Sol Jerome <solj@ices.utexas.edu> 1.0pre4-0.1
-- Remove python-cheetah dependency
-
-* Tue Oct 28 2008 Robin Bowes <robin@robinbowes.com> 0.9.6-0.2
-- spec file is now created dynamically so Version & Release
- can be hard-coded so SRPM can be rebuilt without the Makefile
-
-* Thu May 08 2008 Robin Bowes <robin@robinbowes.com> 0.9.6-0.1
-- Revised spec file to build directly from svn checkout using Makefile
-- copied lots of stuff from the "official" spec file
-
-* Fri Feb 2 2007 Mike Brady <mike.brady@devnull.net.nz> 0.9.1
-- Removed use of _libdir due to Red Hat x86_64 issue.
-
-* Fri Dec 22 2006 Jeffrey C. Ollie <jeff@ocjtech.us> - 0.8.7.1-5
-- Server needs client library files too so put them in main package
-
-* Wed Dec 20 2006 Jeffrey C. Ollie <jeff@ocjtech.us> - 0.8.7.1-4
-- Yes, actually we need to require openssl
-
-* Wed Dec 20 2006 Jeffrey C. Ollie <jeff@ocjtech.us> - 0.8.7.1-3
-- Don't generate SSL cert in post script, it only needs to be done on
- the server and is handled by the bcfg2-admin tool.
-- Move the /etc/bcfg2.key file to the server package
-- Don't install a sample copy of the config file, just ghost it
-- Require gamin-python for the server package
-- Don't require openssl
-- Make the client a separate package so you don't have to have the
- client if you don't want it
-
-* Wed Dec 20 2006 Jeffrey C. Ollie <jeff@ocjtech.us> - 0.8.7.1-2
-- Add more documentation
-
-* Mon Dec 18 2006 Jeffrey C. Ollie <jeff@ocjtech.us> - 0.8.7.1-1
-- First version for Fedora Extras
-
-* Fri Sep 15 2006 Narayan Desai <desai@mcs.anl.gov> - 0.8.4-1
-- Initial log
-
diff --git a/redhat/scripts/bcfg2-report-collector.init b/redhat/scripts/bcfg2-report-collector.init
index 43e875a6b..3c112006d 100755
--- a/redhat/scripts/bcfg2-report-collector.init
+++ b/redhat/scripts/bcfg2-report-collector.init
@@ -17,7 +17,7 @@
### END INIT INFO
# Include lsb functions
-. /etc//init.d/functions
+. /etc/init.d/functions
# Commonly used stuff
DAEMON=/usr/sbin/bcfg2-report-collector
@@ -25,7 +25,7 @@ PIDFILE=/var/run/bcfg2-server/bcfg2-report-collector.pid
PARAMS="-D $PIDFILE"
# Include default startup configuration if exists
-test -f "/etc/sysconfig/bcfg2-server" && . /etc/sysconfig/bcfg2-server
+test -f "/etc/sysconfig/bcfg2-report-collector" && . /etc/sysconfig/bcfg2-report-collector
# Exit if $DAEMON doesn't exist and is not executable
test -x $DAEMON || exit 5
diff --git a/redhat/systemd/bcfg2.service b/redhat/systemd/bcfg2.service
index 572391fd0..245c80cce 100644
--- a/redhat/systemd/bcfg2.service
+++ b/redhat/systemd/bcfg2.service
@@ -3,11 +3,12 @@ Description=Bcfg2 configuration client
After=syslog.target network.target
[Service]
-Type=forking
+Type=oneshot
StandardOutput=syslog
StandardError=syslog
EnvironmentFile=-/etc/sysconfig/bcfg2
-ExecStart=/usr/sbin/bcfg2 $OPTIONS
+ExecStart=/usr/sbin/bcfg2 $BCFG2_OPTIONS
+RemainAfterExit=yes
[Install]
WantedBy=multi-user.target
diff --git a/schemas/augeas.xsd b/schemas/augeas.xsd
new file mode 100644
index 000000000..df27f91cc
--- /dev/null
+++ b/schemas/augeas.xsd
@@ -0,0 +1,229 @@
+<?xml version="1.0" encoding="utf-8"?>
+<xsd:schema xmlns:xsd="http://www.w3.org/2001/XMLSchema"
+ xmlns:py="http://genshi.edgewall.org/" xml:lang="en">
+
+ <xsd:annotation>
+ <xsd:documentation>
+ Augeas commands
+ </xsd:documentation>
+ </xsd:annotation>
+
+ <xsd:import namespace="http://genshi.edgewall.org/"
+ schemaLocation="genshi.xsd"/>
+
+ <xsd:complexType name="AugeasRemoveCommand">
+ <xsd:annotation>
+ <xsd:documentation>
+ Implementation of the Augeas ``rm`` command.
+ </xsd:documentation>
+ </xsd:annotation>
+ <xsd:attribute type="xsd:string" name="path" use="required">
+ <xsd:annotation>
+ <xsd:documentation>
+ Delete nodes (and all children) matching the given Augeas
+ path expression.
+ </xsd:documentation>
+ </xsd:annotation>
+ </xsd:attribute>
+ <xsd:attributeGroup ref="py:genshiAttrs"/>
+ </xsd:complexType>
+
+ <xsd:complexType name="AugeasMoveCommand">
+ <xsd:annotation>
+ <xsd:documentation>
+ Implementation of the Augeas ``mv`` command.
+ </xsd:documentation>
+ </xsd:annotation>
+ <xsd:attribute type="xsd:string" name="source" use="required">
+ <xsd:annotation>
+ <xsd:documentation>
+ Move the node matching this path expression. ``source``
+ must match exactly one node.
+ </xsd:documentation>
+ </xsd:annotation>
+ </xsd:attribute>
+ <xsd:attribute type="xsd:string" name="destination" use="required">
+ <xsd:annotation>
+ <xsd:documentation>
+ Move the node to this location. ``destination`` must match
+ either zero or one nodes.
+ </xsd:documentation>
+ </xsd:annotation>
+ </xsd:attribute>
+ <xsd:attributeGroup ref="py:genshiAttrs"/>
+ </xsd:complexType>
+
+ <xsd:complexType name="AugeasSetCommand">
+ <xsd:annotation>
+ <xsd:documentation>
+ Implementation of the Augeas ``set`` command.
+ </xsd:documentation>
+ </xsd:annotation>
+ <xsd:attribute type="xsd:string" name="path" use="required">
+ <xsd:annotation>
+ <xsd:documentation>
+ Path to set the value for. If the path does not exist, it
+ and all of its ancestors will be created.
+ </xsd:documentation>
+ </xsd:annotation>
+ </xsd:attribute>
+ <xsd:attribute type="xsd:string" name="value" use="required">
+ <xsd:annotation>
+ <xsd:documentation>
+ Value to set.
+ </xsd:documentation>
+ </xsd:annotation>
+ </xsd:attribute>
+ <xsd:attributeGroup ref="py:genshiAttrs"/>
+ </xsd:complexType>
+
+ <xsd:complexType name="AugeasClearCommand">
+ <xsd:annotation>
+ <xsd:documentation>
+ Implementation of the Augeas ``clear`` command.
+ </xsd:documentation>
+ </xsd:annotation>
+ <xsd:attribute type="xsd:string" name="path" use="required">
+ <xsd:annotation>
+ <xsd:documentation>
+ Path whose value will be set to ``NULL``. If the path does
+ not exist, it and all of its ancestors will be created.
+ </xsd:documentation>
+ </xsd:annotation>
+ </xsd:attribute>
+ <xsd:attributeGroup ref="py:genshiAttrs"/>
+ </xsd:complexType>
+
+ <xsd:complexType name="AugeasSetMultiCommand">
+ <xsd:annotation>
+ <xsd:documentation>
+ Set multiple node values at once.
+ </xsd:documentation>
+ </xsd:annotation>
+ <xsd:attribute type="xsd:string" name="base" use="required">
+ <xsd:annotation>
+ <xsd:documentation>
+ The base path.
+ </xsd:documentation>
+ </xsd:annotation>
+ </xsd:attribute>
+ <xsd:attribute type="xsd:string" name="sub" use="required">
+ <xsd:annotation>
+ <xsd:documentation>
+ ``sub`` will be used as an expression relative to each node
+ that matches the :xml:attribute:`AugeasSetMultiCommand:base`
+ expression.
+ </xsd:documentation>
+ </xsd:annotation>
+ </xsd:attribute>
+ <xsd:attribute type="xsd:string" name="value" use="required">
+ <xsd:annotation>
+ <xsd:documentation>
+ The value to set on all nodes that match
+ :xml:attribute:`AugeasSetMultiCommand:sub` relative to each
+ node matching :xml:attribute:`AugeasSetMultiCommand:base`.
+ </xsd:documentation>
+ </xsd:annotation>
+ </xsd:attribute>
+ <xsd:attributeGroup ref="py:genshiAttrs"/>
+ </xsd:complexType>
+
+ <xsd:simpleType name="AugeasWhenEnum">
+ <xsd:restriction base="xsd:string">
+ <xsd:enumeration value="before"/>
+ <xsd:enumeration value="after"/>
+ </xsd:restriction>
+ </xsd:simpleType>
+
+ <xsd:complexType name="AugeasInsertCommand">
+ <xsd:annotation>
+ <xsd:documentation>
+ Implementation of the Augeas ``ins`` command.
+ </xsd:documentation>
+ </xsd:annotation>
+ <xsd:attribute type="xsd:string" name="path" use="required">
+ <xsd:annotation>
+ <xsd:documentation>
+ The path to a node that will be the sibling of the new node.
+ </xsd:documentation>
+ </xsd:annotation>
+ </xsd:attribute>
+ <xsd:attribute type="xsd:string" name="label" use="required">
+ <xsd:annotation>
+ <xsd:documentation>
+ The label of the new node to be created.
+ </xsd:documentation>
+ </xsd:annotation>
+ </xsd:attribute>
+ <xsd:attribute type="AugeasWhenEnum" name="where" default="before">
+ <xsd:annotation>
+ <xsd:documentation>
+ Where to create the node: ``before`` or ``after`` the
+ sibling given in :xml:attribute:`AugeasInsertCommand:path`.
+ </xsd:documentation>
+ </xsd:annotation>
+ </xsd:attribute>
+ <xsd:attributeGroup ref="py:genshiAttrs"/>
+ </xsd:complexType>
+
+ <xsd:group name="augeasCommands">
+ <xsd:annotation>
+ <xsd:documentation>
+ All available Augeas commands.
+ </xsd:documentation>
+ </xsd:annotation>
+ <xsd:choice>
+ <xsd:element name="Initial" type="xsd:string">
+ <xsd:annotation>
+ <xsd:documentation>
+ Specify initial content for a file, which will be created
+ before Augeas commands are applied if a file doesn't
+ exist.
+ </xsd:documentation>
+ </xsd:annotation>
+ </xsd:element>
+ <xsd:element name="Remove" type="AugeasRemoveCommand">
+ <xsd:annotation>
+ <xsd:documentation>
+ Implementation of the Augeas ``rm`` command.
+ </xsd:documentation>
+ </xsd:annotation>
+ </xsd:element>
+ <xsd:element name="Move" type="AugeasMoveCommand">
+ <xsd:annotation>
+ <xsd:documentation>
+ Implementation of the Augeas ``mv`` command.
+ </xsd:documentation>
+ </xsd:annotation>
+ </xsd:element>
+ <xsd:element name="Set" type="AugeasSetCommand">
+ <xsd:annotation>
+ <xsd:documentation>
+ Implementation of the Augeas ``set`` command.
+ </xsd:documentation>
+ </xsd:annotation>
+ </xsd:element>
+ <xsd:element name="Clear" type="AugeasClearCommand">
+ <xsd:annotation>
+ <xsd:documentation>
+ Implementation of the Augeas ``clear`` command.
+ </xsd:documentation>
+ </xsd:annotation>
+ </xsd:element>
+ <xsd:element name="SetMulti" type="AugeasSetMultiCommand">
+ <xsd:annotation>
+ <xsd:documentation>
+ Set multiple node values at once.
+ </xsd:documentation>
+ </xsd:annotation>
+ </xsd:element>
+ <xsd:element name="Insert" type="AugeasInsertCommand">
+ <xsd:annotation>
+ <xsd:documentation>
+ Implementation of the Augeas ``ins`` command.
+ </xsd:documentation>
+ </xsd:annotation>
+ </xsd:element>
+ </xsd:choice>
+ </xsd:group>
+</xsd:schema>
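
For context, a Bundler entry using the commands defined in this schema might look
roughly like the sketch below. This is illustrative only and not taken from the
commit: the surrounding Path attributes (type, lens, owner, group, mode) and the
Hosts-lens node paths are assumptions, and paths are shown relative to the edited
file, which may not match the client tool's actual path handling.

  <Path type="augeas" name="/etc/hosts" lens="Hosts"
        owner="root" group="root" mode="0644">
    <!-- file is created with this content if /etc/hosts does not exist -->
    <Initial>127.0.0.1 localhost</Initial>
    <!-- point the first host entry at an assumed gateway address -->
    <Set path="1/ipaddr" value="192.168.0.1"/>
    <Set path="1/canonical" value="gateway.example.com"/>
    <!-- drop any stale alias nodes under that entry -->
    <Remove path="1/alias"/>
  </Path>
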
diff --git a/schemas/authorizedkeys.xsd b/schemas/authorizedkeys.xsd
index 848f99bae..e59c964f6 100644
--- a/schemas/authorizedkeys.xsd
+++ b/schemas/authorizedkeys.xsd
@@ -42,6 +42,43 @@
</xsd:attribute>
</xsd:complexType>
+ <xsd:complexType name="OptionContainerType">
+ <xsd:annotation>
+ <xsd:documentation>
+ An **OptionContainerType** is a tag used to provide logic.
+ Child entries of an OptionContainerType tag only apply to
+ machines that match the condition specified -- either
+ membership in a group, or a matching client name.
+ :xml:attribute:`OptionContainerType:negate` can be set to
+ negate the sense of the match.
+ </xsd:documentation>
+ </xsd:annotation>
+ <xsd:choice minOccurs="0" maxOccurs="unbounded">
+ <xsd:element name="Group" type="OptionContainerType"/>
+ <xsd:element name="Client" type="OptionContainerType"/>
+ <xsd:element name="Option" type="AuthorizedKeysOptionType"/>
+ <xsd:element name="Params" type="AuthorizedKeysParamsType"/>
+ </xsd:choice>
+ <xsd:attribute name='name' type='xsd:string'>
+ <xsd:annotation>
+ <xsd:documentation>
+ The name of the client or group to match on. Child entries
+ will only apply to this client or group (unless
+ :xml:attribute:`OptionContainerType:negate` is set).
+ </xsd:documentation>
+ </xsd:annotation>
+ </xsd:attribute>
+ <xsd:attribute name='negate' type='xsd:boolean'>
+ <xsd:annotation>
+ <xsd:documentation>
+ Negate the sense of the match, so that child entries only
+ apply to a client if it is not a member of the given group
+ or does not have the given name.
+ </xsd:documentation>
+ </xsd:annotation>
+ </xsd:attribute>
+ </xsd:complexType>
+
<xsd:complexType name="AllowType" mixed="true">
<xsd:annotation>
<xsd:documentation>
@@ -50,6 +87,9 @@
</xsd:documentation>
</xsd:annotation>
<xsd:choice minOccurs="0" maxOccurs="unbounded">
+ <xsd:element name="Group" type="OptionContainerType"/>
+ <xsd:element name="Client" type="OptionContainerType"/>
+ <xsd:element name="Option" type="AuthorizedKeysOptionType"/>
<xsd:element name="Params" type="AuthorizedKeysParamsType"/>
</xsd:choice>
<xsd:attribute name="from" type="xsd:string">
@@ -68,6 +108,15 @@
</xsd:documentation>
</xsd:annotation>
</xsd:attribute>
+ <xsd:attribute name="category" type="xsd:string">
+ <xsd:annotation>
+ <xsd:documentation>
+ Use a public key specific to the group in the given
+ category, instead of the category specified in
+ ``bcfg2.conf``.
+ </xsd:documentation>
+ </xsd:annotation>
+ </xsd:attribute>
<xsd:attribute name="host" type="xsd:string">
<xsd:annotation>
<xsd:documentation>
@@ -77,12 +126,36 @@
</xsd:attribute>
</xsd:complexType>
+ <xsd:complexType name="AuthorizedKeysOptionType">
+ <xsd:annotation>
+ <xsd:documentation>
+ Specify options for public key authentication and connection.
+ See :manpage:`sshd(8)` for details on allowable options.
+ </xsd:documentation>
+ </xsd:annotation>
+ <xsd:attribute name="name" type="xsd:string" use="required">
+ <xsd:annotation>
+ <xsd:documentation>
+ The name of the sshd option.
+ </xsd:documentation>
+ </xsd:annotation>
+ </xsd:attribute>
+ <xsd:attribute name="value" type="xsd:string">
+ <xsd:annotation>
+ <xsd:documentation>
+ The value of the sshd option. This can be omitted for
+ options that take no value.
+ </xsd:documentation>
+ </xsd:annotation>
+ </xsd:attribute>
+ </xsd:complexType>
+
<xsd:complexType name="AuthorizedKeysParamsType">
<xsd:annotation>
<xsd:documentation>
- Specify parameters for public key authentication and
- connection. See :manpage:`sshd(8)` for details on allowable
- parameters.
+ **Deprecated** way to specify options for public key
+ authentication and connection. See :manpage:`sshd(8)` for
+ details on allowable parameters.
</xsd:documentation>
</xsd:annotation>
<xsd:anyAttribute processContents="lax"/>
diff --git a/schemas/awstags.xsd b/schemas/awstags.xsd
new file mode 100644
index 000000000..72be0366f
--- /dev/null
+++ b/schemas/awstags.xsd
@@ -0,0 +1,73 @@
+<?xml version="1.0" encoding="utf-8"?>
+<xsd:schema xmlns:xsd="http://www.w3.org/2001/XMLSchema" xml:lang="en">
+
+ <xsd:annotation>
+ <xsd:documentation>
+ :ref:`AWSTags &lt;server-plugins-connectors-awstags&gt;` config
+ schema for bcfg2
+ </xsd:documentation>
+ </xsd:annotation>
+
+ <xsd:import namespace="http://www.w3.org/XML/1998/namespace"
+ schemaLocation="xml.xsd"/>
+
+ <xsd:complexType name="TagType">
+ <xsd:choice minOccurs="1" maxOccurs="unbounded">
+ <xsd:element name="Group" type="xsd:string" minOccurs="1"
+ maxOccurs="unbounded">
+ <xsd:annotation>
+ <xsd:documentation>
+ The group to assign to machines with tags that match the
+ enclosing Tag expression. More than one group can be
+ specified.
+ </xsd:documentation>
+ </xsd:annotation>
+ </xsd:element>
+ </xsd:choice>
+ <xsd:attribute name="name" type="xsd:string" use="required">
+ <xsd:annotation>
+ <xsd:documentation>
+ The name pattern to match against. This is a regular
+ expression. It is not anchored.
+ </xsd:documentation>
+ </xsd:annotation>
+ </xsd:attribute>
+ <xsd:attribute name="value" type="xsd:string">
+ <xsd:annotation>
+ <xsd:documentation>
+ The value pattern to match against. This is a regular
+ expression. It is not anchored.
+ </xsd:documentation>
+ </xsd:annotation>
+ </xsd:attribute>
+ </xsd:complexType>
+
+ <xsd:complexType name="AWSTagsType">
+ <xsd:annotation>
+ <xsd:documentation>
+ Top-level tag for ``AWSTags/config.xml``.
+ </xsd:documentation>
+ </xsd:annotation>
+ <xsd:choice minOccurs="1" maxOccurs="unbounded">
+ <xsd:element name="Tag" type="TagType">
+ <xsd:annotation>
+ <xsd:documentation>
+ Representation of a pattern that matches AWS tags. Tags can be
+ matched in one of two ways:
+
+ * If only :xml:attribute:`TagType:name` is specified, then
+ AWSTags will only look for a tag with a matching name, and
+ the value of tags is ignored.
+ * If both :xml:attribute:`TagType:name` and
+ :xml:attribute:`TagType:value` are specified, a tag must
+ have a matching name *and* a matching value.
+ </xsd:documentation>
+ </xsd:annotation>
+ </xsd:element>
+ <xsd:element name="AWSTags" type="AWSTagsType"/>
+ </xsd:choice>
+ <xsd:attribute ref="xml:base"/>
+ </xsd:complexType>
+
+ <xsd:element name="AWSTags" type="AWSTagsType"/>
+</xsd:schema>
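
Since the new schema declares no target namespace, an AWSTags/config.xml document can be validated against it directly. A minimal sketch with lxml, assuming a repository checkout where schemas/xml.xsd is resolvable next to awstags.xsd; the tag patterns and group names below are made up for illustration only:

    # Hypothetical validation sketch for an example AWSTags config
    from lxml import etree

    CONFIG = b"""
    <AWSTags>
      <Tag name="^role$" value="^webserver$">
        <Group>webserver</Group>
      </Tag>
      <Tag name="^backup$">
        <Group>backup-client</Group>
      </Tag>
    </AWSTags>
    """

    schema = etree.XMLSchema(etree.parse("schemas/awstags.xsd"))
    schema.assertValid(etree.fromstring(CONFIG))  # raises DocumentInvalid on errors
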
diff --git a/schemas/servicetype.xsd b/schemas/servicetype.xsd
index 4d5ac7c31..4c7e1b803 100644
--- a/schemas/servicetype.xsd
+++ b/schemas/servicetype.xsd
@@ -34,12 +34,21 @@
</xsd:documentation>
</xsd:annotation>
</xsd:attribute>
+ <xsd:attribute name="bootstatus" type="BootStatusEnum" default="off">
+ <xsd:annotation>
+ <xsd:documentation>
+ Whether the service should start at boot. The default value
+ corresponds to the value of the status attribute.
+ </xsd:documentation>
+ </xsd:annotation>
+ </xsd:attribute>
<xsd:attribute name="status" type="StatusEnum" default="off">
<xsd:annotation>
<xsd:documentation>
- Whether the service should start at boot. If this is set to
- "ignore", then the boot-time status of the service will not
- be checked.
+ Whether the service should be on or off when the bcfg2 client
+ is run. This attribute may have different behavior depending
+ on the characteristics of the client tool. If set to "ignore",
+ then the status of the service will not be checked.
</xsd:documentation>
</xsd:annotation>
</xsd:attribute>
diff --git a/schemas/types.xsd b/schemas/types.xsd
index 05bf674ad..836cfa38e 100644
--- a/schemas/types.xsd
+++ b/schemas/types.xsd
@@ -9,6 +9,7 @@
</xsd:annotation>
<xsd:include schemaLocation="selinux.xsd"/>
+ <xsd:include schemaLocation="augeas.xsd"/>
<xsd:import namespace="http://genshi.edgewall.org/"
schemaLocation="genshi.xsd"/>
@@ -41,6 +42,7 @@
<xsd:simpleType name='PathTypeEnum'>
<xsd:restriction base='xsd:string'>
+ <xsd:enumeration value='augeas' />
<xsd:enumeration value='device' />
<xsd:enumeration value='directory' />
<xsd:enumeration value='file' />
@@ -53,6 +55,13 @@
</xsd:restriction>
</xsd:simpleType>
+ <xsd:simpleType name='BootStatusEnum'>
+ <xsd:restriction base='xsd:string'>
+ <xsd:enumeration value='on'/>
+ <xsd:enumeration value='off'/>
+ </xsd:restriction>
+ </xsd:simpleType>
+
<xsd:simpleType name='StatusEnum'>
<xsd:restriction base='xsd:string'>
<xsd:enumeration value='on'/>
@@ -112,24 +121,25 @@
</xsd:documentation>
</xsd:annotation>
- <xsd:attribute type='ActionTimingEnum' name='timing' use='required'>
+ <xsd:attribute type='ActionTimingEnum' name='timing'>
<xsd:annotation>
<xsd:documentation>
- When the action is run.
+ When the action is run. Actions with "pre" timing are run
+ after important entries have been installed and before
+ bundle entries are installed. Actions with "post" timing
+ are run after bundle entries are installed.
</xsd:documentation>
</xsd:annotation>
</xsd:attribute>
- <xsd:attribute type='ActionWhenEnum' name='when' use='required'>
+ <xsd:attribute type='ActionWhenEnum' name='when'>
<xsd:annotation>
<xsd:documentation>
If the action is always run, or is only run when a bundle
- has been modified. Actions that run before bundle
- installation ("pre" and "both") ignore the setting of
- ``when`` and are always run regardless.
+ has been modified.
</xsd:documentation>
</xsd:annotation>
</xsd:attribute>
- <xsd:attribute type='ActionStatusEnum' name='status' use='required'>
+ <xsd:attribute type='ActionStatusEnum' name='status'>
<xsd:annotation>
<xsd:documentation>
Whether or not to check the return code of the action. If
@@ -155,8 +165,16 @@
<xsd:attribute type='xsd:string' name='command' use='required'>
<xsd:annotation>
<xsd:documentation>
- The command to run. The command is executed within a shell,
- so flow control and other shell-specific things can be used.
+ The command to run.
+ </xsd:documentation>
+ </xsd:annotation>
+ </xsd:attribute>
+ <xsd:attribute type='xsd:boolean' name='shell'>
+ <xsd:annotation>
+ <xsd:documentation>
+ Whether the command string should be executed within a shell.
+ If enabled, flow control and other shell-specific constructs
+ can be used.
</xsd:documentation>
</xsd:annotation>
</xsd:attribute>
@@ -251,6 +269,8 @@
<xsd:choice minOccurs='0' maxOccurs='unbounded'>
<xsd:element name='ACL' type='ACLType'/>
+ <xsd:group ref="augeasCommands"/>
+ <xsd:group ref="py:genshiElements"/>
</xsd:choice>
<xsd:attribute type="PathTypeEnum" name="type">
<xsd:annotation>
@@ -320,6 +340,13 @@
</xsd:documentation>
</xsd:annotation>
</xsd:attribute>
+ <xsd:attribute type="xsd:boolean" name="important">
+ <xsd:annotation>
+ <xsd:documentation>
+ Important entries are installed first during client execution.
+ </xsd:documentation>
+ </xsd:annotation>
+ </xsd:attribute>
<xsd:attribute type="xsd:boolean" name="recursive">
<xsd:annotation>
<xsd:documentation>
@@ -371,6 +398,14 @@
</xsd:documentation>
</xsd:annotation>
</xsd:attribute>
+ <xsd:attribute type="xsd:token" name="lens">
+ <xsd:annotation>
+ <xsd:documentation>
+ The Augeas lens to use when editing files in a non-standard
+ (according to Augeas) location.
+ </xsd:documentation>
+ </xsd:annotation>
+ </xsd:attribute>
<xsd:attributeGroup ref="py:genshiAttrs"/>
</xsd:complexType>
diff --git a/solaris-ips/MANIFEST.bcfg2-server.header b/solaris-ips/MANIFEST.bcfg2-server.header
index efa11181f..59929fcfa 100644
--- a/solaris-ips/MANIFEST.bcfg2-server.header
+++ b/solaris-ips/MANIFEST.bcfg2-server.header
@@ -1,5 +1,4 @@
license ../../LICENSE license=simplified_bsd
set name=description value="Configuration management server"
set name=pkg.summary value="Configuration management server"
-set name=pkg.fmri value="pkg://bcfg2/bcfg2-server@1.3.1"
-
+set name=pkg.fmri value="pkg://bcfg2/bcfg2-server@1.3.3"
diff --git a/solaris-ips/MANIFEST.bcfg2.header b/solaris-ips/MANIFEST.bcfg2.header
index 8358aafca..5f48a60a1 100644
--- a/solaris-ips/MANIFEST.bcfg2.header
+++ b/solaris-ips/MANIFEST.bcfg2.header
@@ -1,6 +1,5 @@
license ../../LICENSE license=simplified_bsd
set name=description value="Configuration management client"
set name=pkg.summary value="Configuration management client"
-set name=pkg.fmri value="pkg://bcfg2/bcfg2@1.3.1"
-
+set name=pkg.fmri value="pkg://bcfg2/bcfg2@1.3.3"
file usr/bin/bcfg2 group=bin mode=0755 owner=root path=usr/bin/bcfg2
diff --git a/solaris-ips/Makefile b/solaris-ips/Makefile
index 343150dc5..71523f48e 100644
--- a/solaris-ips/Makefile
+++ b/solaris-ips/Makefile
@@ -1,6 +1,6 @@
#!/usr/bin/gmake
-VERS=1.2.4-1
+VERS=1.3.3-1
PYVERSION := $(shell python -c "import sys; print sys.version[0:3]")
default: clean package
diff --git a/solaris-ips/pkginfo.bcfg2 b/solaris-ips/pkginfo.bcfg2
index 90c628c53..00483f961 100644
--- a/solaris-ips/pkginfo.bcfg2
+++ b/solaris-ips/pkginfo.bcfg2
@@ -1,7 +1,7 @@
PKG="SCbcfg2"
NAME="bcfg2"
ARCH="sparc"
-VERSION="1.2.4"
+VERSION="1.3.3"
CATEGORY="application"
VENDOR="Argonne National Labratory"
EMAIL="bcfg-dev@mcs.anl.gov"
diff --git a/solaris-ips/pkginfo.bcfg2-server b/solaris-ips/pkginfo.bcfg2-server
index 0e865522c..ecc5e72c1 100644
--- a/solaris-ips/pkginfo.bcfg2-server
+++ b/solaris-ips/pkginfo.bcfg2-server
@@ -1,7 +1,7 @@
PKG="SCbcfg2-server"
NAME="bcfg2-server"
ARCH="sparc"
-VERSION="1.2.4"
+VERSION="1.3.3"
CATEGORY="application"
VENDOR="Argonne National Labratory"
EMAIL="bcfg-dev@mcs.anl.gov"
diff --git a/solaris/Makefile b/solaris/Makefile
index fd2c254bb..3b367ef71 100644
--- a/solaris/Makefile
+++ b/solaris/Makefile
@@ -1,7 +1,7 @@
#!/usr/sfw/bin/gmake
PYTHON="/usr/local/bin/python"
-VERS=1.3.1-1
+VERS=1.3.3-1
PYVERSION := $(shell $(PYTHON) -c "import sys; print sys.version[0:3]")
default: clean package
diff --git a/solaris/pkginfo.bcfg2 b/solaris/pkginfo.bcfg2
index 2bf3abaf5..00483f961 100644
--- a/solaris/pkginfo.bcfg2
+++ b/solaris/pkginfo.bcfg2
@@ -1,7 +1,7 @@
PKG="SCbcfg2"
NAME="bcfg2"
ARCH="sparc"
-VERSION="1.3.1"
+VERSION="1.3.3"
CATEGORY="application"
VENDOR="Argonne National Labratory"
EMAIL="bcfg-dev@mcs.anl.gov"
diff --git a/solaris/pkginfo.bcfg2-server b/solaris/pkginfo.bcfg2-server
index 4425220c2..ecc5e72c1 100644
--- a/solaris/pkginfo.bcfg2-server
+++ b/solaris/pkginfo.bcfg2-server
@@ -1,7 +1,7 @@
PKG="SCbcfg2-server"
NAME="bcfg2-server"
ARCH="sparc"
-VERSION="1.3.1"
+VERSION="1.3.3"
CATEGORY="application"
VENDOR="Argonne National Labratory"
EMAIL="bcfg-dev@mcs.anl.gov"
diff --git a/src/lib/Bcfg2/Client/Frame.py b/src/lib/Bcfg2/Client/Frame.py
index d30708e83..ad718749e 100644
--- a/src/lib/Bcfg2/Client/Frame.py
+++ b/src/lib/Bcfg2/Client/Frame.py
@@ -1,6 +1,7 @@
""" Frame is the Client Framework that verifies and installs entries,
and generates statistics. """
+import copy
import time
import fnmatch
import logging
@@ -97,8 +98,8 @@ class Frame(object):
self.logger.warning(deprecated)
experimental = [tool.name for tool in self.tools if tool.experimental]
if experimental:
- self.logger.warning("Loaded experimental tool drivers:")
- self.logger.warning(experimental)
+ self.logger.info("Loaded experimental tool drivers:")
+ self.logger.info(experimental)
# find entries not handled by any tools
self.unhandled = [entry for struct in config
@@ -207,7 +208,15 @@ class Frame(object):
# take care of important entries first
if not self.dryrun:
- for parent in self.config.findall(".//Path/.."):
+ parent_map = dict((c, p)
+ for p in self.config.getiterator()
+ for c in p)
+ for cfile in self.config.findall(".//Path"):
+ if (cfile.get('name') not in self.__important__ or
+ cfile.get('type') != 'file' or
+ cfile not in self.whitelist):
+ continue
+ parent = parent_map[cfile]
if ((parent.tag == "Bundle" and
((self.setup['bundle'] and
parent.get("name") not in self.setup['bundle']) or
@@ -216,15 +225,9 @@ class Frame(object):
(parent.tag == "Independent" and
(self.setup['bundle'] or self.setup['skipindep']))):
continue
- for cfile in parent.findall("./Path"):
- if (cfile.get('name') not in self.__important__ or
- cfile.get('type') != 'file' or
- cfile not in self.whitelist):
- continue
- tools = [t for t in self.tools
- if t.handlesEntry(cfile) and t.canVerify(cfile)]
- if not tools:
- continue
+ tools = [t for t in self.tools
+ if t.handlesEntry(cfile) and t.canVerify(cfile)]
+ if tools:
if (self.setup['interactive'] and not
self.promptFilter("Install %s: %s? (y/N):", [cfile])):
self.whitelist.remove(cfile)
@@ -326,11 +329,13 @@ class Frame(object):
if bundle.tag != 'Bundle':
continue
bmodified = len([item for item in bundle
- if item in self.whitelist])
+ if item in self.whitelist or
+ item in self.modified])
actions = [a for a in bundle.findall('./Action')
if (a.get('timing') != 'post' and
(bmodified or a.get('when') == 'always'))]
- # now we process all "always actions"
+ # now we process all "pre" and "both" actions that either run
+ # always or whose bundle has been modified
if self.setup['interactive']:
self.promptFilter(iprompt, actions)
self.DispatchInstallCalls(actions)
@@ -520,7 +525,7 @@ class Frame(object):
container = Bcfg2.Client.XML.SubElement(stats, ename)
for item in data:
item.set('qtext', '')
- container.append(item)
+ container.append(copy.deepcopy(item))
item.text = None
timeinfo = Bcfg2.Client.XML.Element("OpStamps")
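
Two of the Frame changes work around lxml behaviour that is easy to trip over: elements only know their parent via getparent() (hence the explicit child-to-parent map, which also works with plain ElementTree), and appending an element that already has a parent moves it out of its original tree, which is presumably why the statistics code now appends a deep copy. A small standalone sketch, not Bcfg2 code:

    import copy
    from lxml import etree

    root = etree.XML("<Bundle name='b'><Path name='/etc/foo' type='file'/></Bundle>")
    path = root.find("Path")

    # child-to-parent map, built the same way as in Frame.py
    parent_map = dict((c, p) for p in root.iter() for c in p)
    print(parent_map[path] is root)      # True

    # appending the original would move it; a deep copy leaves the source tree intact
    stats = etree.Element("Statistics")
    stats.append(copy.deepcopy(path))
    print(path.getparent() is root)      # still True
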
diff --git a/src/lib/Bcfg2/Client/Tools/Action.py b/src/lib/Bcfg2/Client/Tools/Action.py
index da4412b1d..0166e4c00 100644
--- a/src/lib/Bcfg2/Client/Tools/Action.py
+++ b/src/lib/Bcfg2/Client/Tools/Action.py
@@ -32,10 +32,17 @@ class Action(Bcfg2.Client.Tools.Tool):
def RunAction(self, entry):
"""This method handles command execution and status return."""
+ shell = False
+ shell_string = ''
+ if entry.get('shell', 'false') == 'true':
+ shell = True
+ shell_string = '(in shell) '
+
if not self.setup['dryrun']:
if self.setup['interactive']:
- prompt = ('Run Action %s, %s: (y/N): ' %
- (entry.get('name'), entry.get('command')))
+ prompt = ('Run Action %s%s, %s: (y/N): ' %
+ (shell_string, entry.get('name'),
+ entry.get('command')))
# flush input buffer
while len(select.select([sys.stdin.fileno()], [], [],
0.0)[0]) > 0:
@@ -48,8 +55,9 @@ class Action(Bcfg2.Client.Tools.Tool):
self.logger.debug("Action: Deferring execution of %s due "
"to build mode" % entry.get('command'))
return False
- self.logger.debug("Running Action %s" % (entry.get('name')))
- rv = self.cmd.run(entry.get('command'))
+ self.logger.debug("Running Action %s %s" %
+ (shell_string, entry.get('name')))
+ rv = self.cmd.run(entry.get('command'), shell=shell)
self.logger.debug("Action: %s got return code %s" %
(entry.get('command'), rv.retval))
entry.set('rc', str(rv.retval))
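
The shell attribute is passed straight through to the command runner. A rough illustration with the standard library (not Bcfg2's executor) of the difference it makes:

    import subprocess

    command = "echo one two | wc -w"

    # shell="true": the string is handed to /bin/sh, so pipes, redirection
    # and other flow control work as expected (prints "2")
    subprocess.call(command, shell=True)

    # default (no shell): the command is run directly; the pipe and "wc -w"
    # are just passed to echo as literal arguments
    subprocess.call(command.split())
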
diff --git a/src/lib/Bcfg2/Client/Tools/Chkconfig.py b/src/lib/Bcfg2/Client/Tools/Chkconfig.py
index 1fce5515b..4833f3f68 100644
--- a/src/lib/Bcfg2/Client/Tools/Chkconfig.py
+++ b/src/lib/Bcfg2/Client/Tools/Chkconfig.py
@@ -19,26 +19,22 @@ class Chkconfig(Bcfg2.Client.Tools.SvcTool):
def get_svc_command(self, service, action):
return "/sbin/service %s %s" % (service.get('name'), action)
- def VerifyService(self, entry, _):
- """Verify Service status for entry."""
- entry.set('target_status', entry.get('status'))
- if entry.get('status') == 'ignore':
- return True
-
+ def verify_bootstatus(self, entry, bootstatus):
+ """Verify bootstatus for entry."""
rv = self.cmd.run("/sbin/chkconfig --list %s " % entry.get('name'))
if rv.success:
srvdata = rv.stdout.splitlines()[0].split()
else:
# service not installed
- entry.set('current_status', 'off')
+ entry.set('current_bootstatus', 'service not installed')
return False
if len(srvdata) == 2:
# This is an xinetd service
- if entry.get('status') == srvdata[1]:
+ if bootstatus == srvdata[1]:
return True
else:
- entry.set('current_status', srvdata[1])
+ entry.set('current_bootstatus', srvdata[1])
return False
try:
@@ -47,46 +43,81 @@ class Chkconfig(Bcfg2.Client.Tools.SvcTool):
except IndexError:
onlevels = []
- pstatus = self.check_service(entry)
- if entry.get('status') == 'on':
- status = (len(onlevels) > 0 and pstatus)
+ if bootstatus == 'on':
+ current_bootstatus = (len(onlevels) > 0)
else:
- status = (len(onlevels) == 0 and not pstatus)
+ current_bootstatus = (len(onlevels) == 0)
+ return current_bootstatus
+
+ def VerifyService(self, entry, _):
+ """Verify Service status for entry."""
+ entry.set('target_status', entry.get('status')) # for reporting
+ bootstatus = self.get_bootstatus(entry)
+ if bootstatus is None:
+ return True
+ current_bootstatus = self.verify_bootstatus(entry, bootstatus)
- if not status:
+ if entry.get('status') == 'ignore':
+ # 'ignore' should verify
+ current_svcstatus = True
+ svcstatus = True
+ else:
+ svcstatus = self.check_service(entry)
if entry.get('status') == 'on':
- entry.set('current_status', 'off')
- else:
- entry.set('current_status', 'on')
- return status
+ if svcstatus:
+ current_svcstatus = True
+ else:
+ current_svcstatus = False
+ elif entry.get('status') == 'off':
+ if svcstatus:
+ current_svcstatus = False
+ else:
+ current_svcstatus = True
+
+ if svcstatus:
+ entry.set('current_status', 'on')
+ else:
+ entry.set('current_status', 'off')
+
+ return current_bootstatus and current_svcstatus
def InstallService(self, entry):
"""Install Service entry."""
- rcmd = "/sbin/chkconfig %s %s"
- self.cmd.run("/sbin/chkconfig --add %s" % (entry.attrib['name']))
+ self.cmd.run("/sbin/chkconfig --add %s" % (entry.get('name')))
self.logger.info("Installing Service %s" % (entry.get('name')))
- rv = True
- if (entry.get('status') == 'off' or
- self.setup["servicemode"] == "build"):
- rv &= self.cmd.run((rcmd + " --level 0123456") %
- (entry.get('name'),
- entry.get('status'))).success
- if entry.get("current_status") == "on" and \
- self.setup["servicemode"] != "disabled":
- rv &= self.stop_service(entry).success
+ bootstatus = self.get_bootstatus(entry)
+ if bootstatus is not None:
+ if bootstatus == 'on':
+ # make sure service is enabled on boot
+ bootcmd = '/sbin/chkconfig %s %s' % \
+ (entry.get('name'), bootstatus)
+ elif bootstatus == 'off':
+ # make sure service is disabled on boot
+ bootcmd = '/sbin/chkconfig %s %s' % (entry.get('name'),
+ bootstatus)
+ bootcmdrv = self.cmd.run(bootcmd).success
+ if self.setup['servicemode'] == 'disabled':
+ # 'disabled' means we don't attempt to modify running svcs
+ return bootcmdrv
+ buildmode = self.setup['servicemode'] == 'build'
+ if (entry.get('status') == 'on' and not buildmode) and \
+ entry.get('current_status') == 'off':
+ svccmdrv = self.start_service(entry)
+ elif (entry.get('status') == 'off' or buildmode) and \
+ entry.get('current_status') == 'on':
+ svccmdrv = self.stop_service(entry)
+ else:
+ svccmdrv = True # ignore status attribute
+ return bootcmdrv and svccmdrv
else:
- rv &= self.cmd.run(rcmd % (entry.get('name'),
- entry.get('status'))).success
- if entry.get("current_status") == "off" and \
- self.setup["servicemode"] != "disabled":
- rv &= self.start_service(entry).success
- return rv
+ # when bootstatus is 'None', status == 'ignore'
+ return True
def FindExtra(self):
"""Locate extra chkconfig Services."""
allsrv = [line.split()[0]
- for line in self.cmd.run("/sbin/chkconfig",
- "--list").stdout.splitlines()
+ for line in
+ self.cmd.run("/sbin/chkconfig --list").stdout.splitlines()
if ":on" in line]
self.logger.debug('Found active services:')
self.logger.debug(allsrv)
diff --git a/src/lib/Bcfg2/Client/Tools/DebInit.py b/src/lib/Bcfg2/Client/Tools/DebInit.py
index d916b1662..b544e44d4 100644
--- a/src/lib/Bcfg2/Client/Tools/DebInit.py
+++ b/src/lib/Bcfg2/Client/Tools/DebInit.py
@@ -18,13 +18,11 @@ class DebInit(Bcfg2.Client.Tools.SvcTool):
svcre = \
re.compile(r'/etc/.*/(?P<action>[SK])(?P<sequence>\d+)(?P<name>\S+)')
- # implement entry (Verify|Install) ops
- def VerifyService(self, entry, _):
- """Verify Service status for entry."""
-
- if entry.get('status') == 'ignore':
- return True
+ def get_svc_command(self, service, action):
+ return '/usr/sbin/invoke-rc.d %s %s' % (service.get('name'), action)
+ def verify_bootstatus(self, entry, bootstatus):
+ """Verify bootstatus for entry."""
rawfiles = glob.glob("/etc/rc*.d/[SK]*%s" % (entry.get('name')))
files = []
@@ -54,9 +52,9 @@ class DebInit(Bcfg2.Client.Tools.SvcTool):
continue
if match.group('name') == entry.get('name'):
files.append(filename)
- if entry.get('status') == 'off':
+ if bootstatus == 'off':
if files:
- entry.set('current_status', 'on')
+ entry.set('current_bootstatus', 'on')
return False
else:
return True
@@ -72,12 +70,47 @@ class DebInit(Bcfg2.Client.Tools.SvcTool):
return False
return True
else:
- entry.set('current_status', 'off')
+ entry.set('current_bootstatus', 'off')
return False
+ def VerifyService(self, entry, _):
+ """Verify Service status for entry."""
+ entry.set('target_status', entry.get('status')) # for reporting
+ bootstatus = self.get_bootstatus(entry)
+ if bootstatus is None:
+ return True
+ current_bootstatus = self.verify_bootstatus(entry, bootstatus)
+
+ if entry.get('status') == 'ignore':
+ # 'ignore' should verify
+ current_svcstatus = True
+ svcstatus = True
+ else:
+ svcstatus = self.check_service(entry)
+ if entry.get('status') == 'on':
+ if svcstatus:
+ current_svcstatus = True
+ else:
+ current_svcstatus = False
+ elif entry.get('status') == 'off':
+ if svcstatus:
+ current_svcstatus = False
+ else:
+ current_svcstatus = True
+
+ if svcstatus:
+ entry.set('current_status', 'on')
+ else:
+ entry.set('current_status', 'off')
+
+ return current_bootstatus and current_svcstatus
+
def InstallService(self, entry):
- """Install Service for entry."""
+ """Install Service entry."""
self.logger.info("Installing Service %s" % (entry.get('name')))
+ bootstatus = self.get_bootstatus(entry)
+
+ # check if init script exists
try:
os.stat('/etc/init.d/%s' % entry.get('name'))
except OSError:
@@ -85,20 +118,41 @@ class DebInit(Bcfg2.Client.Tools.SvcTool):
entry.get('name'))
return False
- if entry.get('status') == 'off':
- self.cmd.run("/usr/sbin/invoke-rc.d %s stop" % (entry.get('name')))
- return self.cmd.run("/usr/sbin/update-rc.d -f %s remove" %
- entry.get('name')).success
+ if bootstatus is not None:
+ seqcmdrv = True
+ if bootstatus == 'on':
+ # make sure service is enabled on boot
+ bootcmd = '/usr/sbin/update-rc.d %s defaults' % \
+ entry.get('name')
+ if entry.get('sequence'):
+ seqcmd = '/usr/sbin/update-rc.d -f %s remove' % \
+ entry.get('name')
+ seqcmdrv = self.cmd.run(seqcmd)
+ start_sequence = int(entry.get('sequence'))
+ kill_sequence = 100 - start_sequence
+ bootcmd = '%s %d %d' % (bootcmd, start_sequence,
+ kill_sequence)
+ elif bootstatus == 'off':
+ # make sure service is disabled on boot
+ bootcmd = '/usr/sbin/update-rc.d -f %s remove' % \
+ entry.get('name')
+ bootcmdrv = self.cmd.run(bootcmd)
+ if self.setup['servicemode'] == 'disabled':
+ # 'disabled' means we don't attempt to modify running svcs
+ return bootcmdrv and seqcmdrv
+ buildmode = self.setup['servicemode'] == 'build'
+ if (entry.get('status') == 'on' and not buildmode) and \
+ entry.get('current_status') == 'off':
+ svccmdrv = self.start_service(entry)
+ elif (entry.get('status') == 'off' or buildmode) and \
+ entry.get('current_status') == 'on':
+ svccmdrv = self.stop_service(entry)
+ else:
+ svccmdrv = True # ignore status attribute
+ return bootcmdrv and svccmdrv and seqcmdrv
else:
- command = "/usr/sbin/update-rc.d %s defaults" % (entry.get('name'))
- if entry.get('sequence'):
- if not self.cmd.run("/usr/sbin/update-rc.d -f %s remove" %
- entry.get('name')).success:
- return False
- start_sequence = int(entry.get('sequence'))
- kill_sequence = 100 - start_sequence
- command = "%s %d %d" % (command, start_sequence, kill_sequence)
- return self.cmd.run(command).success
+ # when bootstatus is 'None', status == 'ignore'
+ return True
def FindExtra(self):
"""Find Extra Debian Service entries."""
@@ -116,6 +170,3 @@ class DebInit(Bcfg2.Client.Tools.SvcTool):
# Extra service removal is nonsensical
# Extra services need to be reflected in the config
return
-
- def get_svc_command(self, service, action):
- return '/usr/sbin/invoke-rc.d %s %s' % (service.get('name'), action)
diff --git a/src/lib/Bcfg2/Client/Tools/POSIX/Augeas.py b/src/lib/Bcfg2/Client/Tools/POSIX/Augeas.py
new file mode 100644
index 000000000..8506f4bc7
--- /dev/null
+++ b/src/lib/Bcfg2/Client/Tools/POSIX/Augeas.py
@@ -0,0 +1,296 @@
+""" Augeas driver """
+
+import sys
+import Bcfg2.Client.XML
+from augeas import Augeas
+from Bcfg2.Client.Tools.POSIX.base import POSIXTool
+from Bcfg2.Client.Tools.POSIX.File import POSIXFile
+
+
+class AugeasCommand(object):
+ """ Base class for all Augeas command objects """
+
+ def __init__(self, command, augeas_obj, logger):
+ self._augeas = augeas_obj
+ self.command = command
+ self.entry = self.command.getparent()
+ self.logger = logger
+
+ def get_path(self, attr="path"):
+ """ Get a fully qualified path from the name of the parent entry and
+ the path given in this command tag.
+
+ @param attr: The attribute to get the relative path from
+ @type attr: string
+ @returns: string - the fully qualified Augeas path
+
+ """
+ return "/files/%s/%s" % (self.entry.get("name").strip("/"),
+ self.command.get(attr).lstrip("/"))
+
+ def _exists(self, path):
+ """ Return True if a path exists in Augeas, False otherwise.
+
+ Note that a False return can mean many things: A file that
+ doesn't exist, a node within the file that doesn't exist, no
+ lens to parse the file, etc. """
+ return len(self._augeas.match(path)) > 0
+
+ def _verify_exists(self, path=None):
+ """ Verify that the given path exists, with friendly debug
+ logging.
+
+ @param path: The path to verify existence of. Defaults to the
+ result of
+ :func:`Bcfg2.Client.Tools.POSIX.Augeas.AugeasCommand.get_path`.
+ @type path: string
+ @returns: bool - Whether or not the path exists
+ """
+ if path is None:
+ path = self.get_path()
+ self.logger.debug("Augeas: Verifying that '%s' exists" % path)
+ return self._exists(path)
+
+ def _verify_not_exists(self, path=None):
+ """ Verify that the given path does not exist, with friendly
+ debug logging.
+
+ @param path: The path to verify existence of. Defaults to the
+ result of
+ :func:`Bcfg2.Client.Tools.POSIX.Augeas.AugeasCommand.get_path`.
+ @type path: string
+ @returns: bool - Whether or not the path does not exist.
+ (I.e., True if it does not exist, False if it does
+ exist.)
+ """
+ if path is None:
+ path = self.get_path()
+ self.logger.debug("Augeas: Verifying that '%s' does not exist" % path)
+ return not self._exists(path)
+
+ def _verify_set(self, expected, path=None):
+ """ Verify that the given path is set to the given value, with
+ friendly debug logging.
+
+ @param expected: The expected value of the node.
+ @param path: The path whose value is checked. Defaults to the
+ result of
+ :func:`Bcfg2.Client.Tools.POSIX.Augeas.AugeasCommand.get_path`.
+ @type path: string
+ @returns: bool - Whether or not the path matches the expected value.
+
+ """
+ if path is None:
+ path = self.get_path()
+ self.logger.debug("Augeas: Verifying '%s' == '%s'" % (path, expected))
+ actual = self._augeas.get(path)
+ if actual == expected:
+ return True
+ else:
+ self.logger.debug("Augeas: '%s' failed verification: '%s' != '%s'"
+ % (path, actual, expected))
+ return False
+
+ def __str__(self):
+ return Bcfg2.Client.XML.tostring(self.command)
+
+ def verify(self):
+ """ Verify that the command has been applied. """
+ raise NotImplementedError
+
+ def install(self):
+ """ Run the command. """
+ raise NotImplementedError
+
+
+class Remove(AugeasCommand):
+ """ Augeas ``rm`` command """
+ def verify(self):
+ return self._verify_not_exists()
+
+ def install(self):
+ self.logger.debug("Augeas: Removing %s" % self.get_path())
+ return self._augeas.remove(self.get_path())
+
+
+class Move(AugeasCommand):
+ """ Augeas ``move`` command """
+ def __init__(self, command, augeas_obj, logger):
+ AugeasCommand.__init__(self, command, augeas_obj, logger)
+ self.source = self.get_path("source")
+ self.dest = self.get_path("destination")
+
+ def verify(self):
+ return (self._verify_not_exists(self.source),
+ self._verify_exists(self.dest))
+
+ def install(self):
+ self.logger.debug("Augeas: Moving %s to %s" % (self.source, self.dest))
+ return self._augeas.move(self.source, self.dest)
+
+
+class Set(AugeasCommand):
+ """ Augeas ``set`` command """
+ def __init__(self, command, augeas_obj, logger):
+ AugeasCommand.__init__(self, command, augeas_obj, logger)
+ self.value = self.command.get("value")
+
+ def verify(self):
+ return self._verify_set(self.value)
+
+ def install(self):
+ self.logger.debug("Augeas: Setting %s to %s" % (self.get_path(),
+ self.value))
+ return self._augeas.set(self.get_path(), self.value)
+
+
+class Clear(Set):
+ """ Augeas ``clear`` command """
+ def __init__(self, command, augeas_obj, logger):
+ Set.__init__(self, command, augeas_obj, logger)
+ self.value = None
+
+
+class SetMulti(AugeasCommand):
+ """ Augeas ``setm`` command """
+ def __init__(self, command, augeas_obj, logger):
+ AugeasCommand.__init__(self, command, augeas_obj, logger)
+ self.sub = self.command.get("sub")
+ self.value = self.command.get("value")
+ self.base = self.get_path("base")
+
+ def verify(self):
+ return all(self._verify_set(self.value,
+ path="%s/%s" % (path, self.sub))
+ for path in self._augeas.match(self.base))
+
+ def install(self):
+ return self._augeas.setm(self.base, self.sub, self.value)
+
+
+class Insert(AugeasCommand):
+ """ Augeas ``ins`` command """
+ def __init__(self, command, augeas_obj, logger):
+ AugeasCommand.__init__(self, command, augeas_obj, logger)
+ self.label = self.command.get("label")
+ self.where = self.command.get("where", "before")
+ self.before = self.where == "before"
+
+ def verify(self):
+ return self._verify_exists("%s/../%s" % (self.get_path(), self.label))
+
+ def install(self):
+ self.logger.debug("Augeas: Inserting new %s %s %s" %
+ (self.label, self.where, self.get_path()))
+ return self._augeas.insert(self.get_path(), self.label, self.before)
+
+
+class POSIXAugeas(POSIXTool):
+ """ Handle <Path type='augeas'...> entries. See
+ :ref:`client-tools-augeas`. """
+ __req__ = ['name', 'mode', 'owner', 'group']
+
+ def __init__(self, logger, setup, config):
+ POSIXTool.__init__(self, logger, setup, config)
+ self._augeas = dict()
+ # file tool for setting initial values of files that don't
+ # exist
+ self.filetool = POSIXFile(logger, setup, config)
+
+ def get_augeas(self, entry):
+ """ Get an augeas object for the given entry. """
+ if entry.get("name") not in self._augeas:
+ aug = Augeas()
+ if entry.get("lens"):
+ self.logger.debug("Augeas: Adding %s to include path for %s" %
+ (entry.get("name"), entry.get("lens")))
+ incl = "/augeas/load/%s/incl" % entry.get("lens")
+ ilen = len(aug.match(incl))
+ if ilen == 0:
+ self.logger.error("Augeas: Lens %s does not exist" %
+ entry.get("lens"))
+ else:
+ aug.set("%s[%s]" % (incl, ilen + 1), entry.get("name"))
+ aug.load()
+ self._augeas[entry.get("name")] = aug
+ return self._augeas[entry.get("name")]
+
+ def fully_specified(self, entry):
+ return len(entry.getchildren()) != 0
+
+ def get_commands(self, entry):
+ """ Get a list of commands to verify or install.
+
+ @param entry: The entry to get commands from.
+ @type entry: lxml.etree._Element
+ @returns: list of
+ :class:`Bcfg2.Client.Tools.POSIX.Augeas.AugeasCommand`
+ objects representing the commands.
+ """
+ rv = []
+ for cmd in entry.iterchildren():
+ if cmd.tag == "Initial":
+ continue
+ if cmd.tag in globals():
+ rv.append(globals()[cmd.tag](cmd, self.get_augeas(entry),
+ self.logger))
+ else:
+ err = "Augeas: Unknown command %s in %s" % (cmd.tag,
+ entry.get("name"))
+ self.logger.error(err)
+ entry.set('qtext', "\n".join([entry.get('qtext', ''), err]))
+ return rv
+
+ def verify(self, entry, modlist):
+ rv = True
+ for cmd in self.get_commands(entry):
+ try:
+ if not cmd.verify():
+ err = "Augeas: Command has not been applied to %s: %s" % \
+ (entry.get("name"), cmd)
+ self.logger.debug(err)
+ entry.set('qtext', "\n".join([entry.get('qtext', ''),
+ err]))
+ rv = False
+ cmd.command.set("verified", "false")
+ else:
+ cmd.command.set("verified", "true")
+ except: # pylint: disable=W0702
+ err = "Augeas: Unexpected error verifying %s: %s: %s" % \
+ (entry.get("name"), cmd, sys.exc_info()[1])
+ self.logger.error(err)
+ entry.set('qtext', "\n".join([entry.get('qtext', ''), err]))
+ rv = False
+ cmd.command.set("verified", "false")
+ return POSIXTool.verify(self, entry, modlist) and rv
+
+ def install(self, entry):
+ rv = True
+ if entry.get("current_exists", "true") == "false":
+ initial = entry.find("Initial")
+ if initial is not None:
+ self.logger.debug("Augeas: Setting initial data for %s" %
+ entry.get("name"))
+ file_entry = Bcfg2.Client.XML.Element("Path",
+ **dict(entry.attrib))
+ file_entry.text = initial.text
+ self.filetool.install(file_entry)
+ # re-parse the file
+ self.get_augeas(entry).load()
+ for cmd in self.get_commands(entry):
+ try:
+ cmd.install()
+ except: # pylint: disable=W0702
+ self.logger.error(
+ "Failure running Augeas command on %s: %s: %s" %
+ (entry.get("name"), cmd, sys.exc_info()[1]))
+ rv = False
+ try:
+ self.get_augeas(entry).save()
+ except: # pylint: disable=W0702
+ self.logger.error("Failure saving Augeas changes to %s: %s" %
+ (entry.get("name"), sys.exc_info()[1]))
+ rv = False
+ return POSIXTool.install(self, entry) and rv
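
Each child tag of a <Path type='augeas'> entry is mapped onto the corresponding python-augeas call. Stripped of the Bcfg2 plumbing, a Set command amounts to roughly the following sketch; the node path and value are made-up examples:

    from augeas import Augeas

    aug = Augeas()
    # "/files/" + entry name (without the leading "/") + the command's path
    node = "/files/etc/hosts/1/canonical"

    if aug.match(node):                 # roughly what _verify_exists() checks
        print("current value: %s" % aug.get(node))

    aug.set(node, "localhost")          # what Set.install() boils down to
    aug.save()                          # POSIXAugeas.install() saves once at the end
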
diff --git a/src/lib/Bcfg2/Client/Tools/POSIX/File.py b/src/lib/Bcfg2/Client/Tools/POSIX/File.py
index 168c35c98..b1bde1057 100644
--- a/src/lib/Bcfg2/Client/Tools/POSIX/File.py
+++ b/src/lib/Bcfg2/Client/Tools/POSIX/File.py
@@ -53,6 +53,10 @@ class POSIXFile(POSIXTool):
def verify(self, entry, modlist):
ondisk = self._exists(entry)
tempdata, is_binary = self._get_data(entry)
+ if isinstance(tempdata, str) and str != unicode:
+ tempdatasize = len(tempdata)
+ else:
+ tempdatasize = len(tempdata.encode(self.setup['encoding']))
different = False
content = None
@@ -61,7 +65,7 @@ class POSIXFile(POSIXTool):
# they're clearly different
different = True
content = ""
- elif len(tempdata) != ondisk[stat.ST_SIZE]:
+ elif tempdatasize != ondisk[stat.ST_SIZE]:
# next, see if the size of the target file is different
# from the size of the desired content
different = True
@@ -72,6 +76,9 @@ class POSIXFile(POSIXTool):
# for everything else
try:
content = open(entry.get('name')).read()
+ except UnicodeDecodeError:
+ content = open(entry.get('name'),
+ encoding=self.setup['encoding']).read()
except IOError:
self.logger.error("POSIX: Failed to read %s: %s" %
(entry.get("name"), sys.exc_info()[1]))
@@ -89,7 +96,7 @@ class POSIXFile(POSIXTool):
def _write_tmpfile(self, entry):
""" Write the file data to a temp file """
- filedata, _ = self._get_data(entry)
+ filedata = self._get_data(entry)[0]
# get a temp file to write to that is in the same directory as
# the existing file in order to preserve any permissions
# protections on that directory, and also to avoid issues with
@@ -105,7 +112,11 @@ class POSIXFile(POSIXTool):
(os.path.dirname(entry.get('name')), err))
return False
try:
- os.fdopen(newfd, 'w').write(filedata)
+ if isinstance(filedata, str) and str != unicode:
+ os.fdopen(newfd, 'w').write(filedata)
+ else:
+ os.fdopen(newfd, 'wb').write(
+ filedata.encode(self.setup['encoding']))
except (OSError, IOError):
err = sys.exc_info()[1]
self.logger.error("POSIX: Failed to open temp file %s for writing "
@@ -146,8 +157,8 @@ class POSIXFile(POSIXTool):
return POSIXTool.install(self, entry) and rv
- def _get_diffs(self, entry, interactive=False, sensitive=False,
- is_binary=False, content=None):
+ def _get_diffs(self, entry, interactive=False, # pylint: disable=R0912
+ sensitive=False, is_binary=False, content=None):
""" generate the necessary diffs for entry """
if not interactive and sensitive:
return
@@ -163,6 +174,8 @@ class POSIXFile(POSIXTool):
# prompts for -I and the reports
try:
content = open(entry.get('name')).read()
+ except UnicodeDecodeError:
+ content = open(entry.get('name'), encoding='utf-8').read()
except IOError:
self.logger.error("POSIX: Failed to read %s: %s" %
(entry.get("name"), sys.exc_info()[1]))
diff --git a/src/lib/Bcfg2/Client/Tools/POSIX/__init__.py b/src/lib/Bcfg2/Client/Tools/POSIX/__init__.py
index 7708c4f72..8d64cf84d 100644
--- a/src/lib/Bcfg2/Client/Tools/POSIX/__init__.py
+++ b/src/lib/Bcfg2/Client/Tools/POSIX/__init__.py
@@ -47,8 +47,11 @@ class POSIX(Bcfg2.Client.Tools.Tool):
mname = submodule[1].rsplit('.', 1)[-1]
if mname == 'base':
continue
- module = getattr(__import__(submodule[1]).Client.Tools.POSIX,
- mname)
+ try:
+ module = getattr(__import__(submodule[1]).Client.Tools.POSIX,
+ mname)
+ except ImportError:
+ continue
hdlr = getattr(module, "POSIX" + mname)
if POSIXTool in hdlr.__mro__:
# figure out what entry type this handler handles
diff --git a/src/lib/Bcfg2/Client/Tools/POSIX/base.py b/src/lib/Bcfg2/Client/Tools/POSIX/base.py
index 16fe0acb5..3243bbf50 100644
--- a/src/lib/Bcfg2/Client/Tools/POSIX/base.py
+++ b/src/lib/Bcfg2/Client/Tools/POSIX/base.py
@@ -232,6 +232,11 @@ class POSIXTool(Bcfg2.Client.Tools.Tool):
else:
defacl = None
+ if not acls:
+ self.logger.debug("POSIX: Removed ACLs from %s" %
+ entry.get("name"))
+ return True
+
for aclkey, perms in acls.items():
atype, scope, qualifier = aclkey
if atype == "default":
@@ -390,7 +395,10 @@ class POSIXTool(Bcfg2.Client.Tools.Tool):
acl_str.append("user")
elif scope == posix1e.ACL_GROUP:
acl_str.append("group")
- acl_str.append(qualifier)
+ if qualifier is None:
+ acl_str.append('')
+ else:
+ acl_str.append(qualifier)
acl_str.append(self._acl_perm2string(perms))
return ":".join(acl_str)
@@ -525,7 +533,8 @@ class POSIXTool(Bcfg2.Client.Tools.Tool):
if entry.get("secontext") == "__default__":
try:
wanted_secontext = \
- selinux.matchpathcon(path, 0)[1].split(":")[2]
+ selinux.matchpathcon(
+ path, ondisk[stat.ST_MODE])[1].split(":")[2]
except OSError:
errors.append("%s has no default SELinux context" %
entry.get("name"))
@@ -686,7 +695,7 @@ class POSIXTool(Bcfg2.Client.Tools.Tool):
""" os.makedirs helpfully creates all parent directories for
us, but it sets permissions according to umask, which is
probably wrong. we need to find out which directories were
- created and set permissions on those
+ created and try to set permissions on those
(http://trac.mcs.anl.gov/projects/bcfg2/ticket/1125 and
http://trac.mcs.anl.gov/projects/bcfg2/ticket/1134) """
created = []
@@ -706,22 +715,17 @@ class POSIXTool(Bcfg2.Client.Tools.Tool):
(path, err))
rv = False
- # we need to make sure that we give +x to everyone who needs
- # it. E.g., if the file that's been distributed is 0600, we
- # can't make the parent directories 0600 also; that'd be
- # pretty useless. They need to be 0700.
+ # set auto-created directories to mode 755 and use best effort for
+ # permissions. If you need something else, you should specify it in
+ # your config.
tmpentry = copy.deepcopy(entry)
- newmode = int(entry.get('mode'), 8)
- for i in range(0, 3):
- if newmode & (6 * pow(8, i)):
- newmode |= 1 * pow(8, i)
- tmpentry.set('mode', oct_mode(newmode))
+ tmpentry.set('mode', '0755')
for acl in tmpentry.findall('ACL'):
acl.set('perms',
oct_mode(self._norm_acl_perms(acl.get('perms')) |
ACL_MAP['x']))
for cpath in created:
- rv &= self._set_perms(tmpentry, path=cpath)
+ self._set_perms(tmpentry, path=cpath)
return rv
diff --git a/src/lib/Bcfg2/Client/Tools/POSIXUsers.py b/src/lib/Bcfg2/Client/Tools/POSIXUsers.py
index 8226392f9..6d18cd176 100644
--- a/src/lib/Bcfg2/Client/Tools/POSIXUsers.py
+++ b/src/lib/Bcfg2/Client/Tools/POSIXUsers.py
@@ -146,7 +146,8 @@ class POSIXUsers(Bcfg2.Client.Tools.Tool):
""" Get a list of supplmentary groups that the user in the
given entry is a member of """
return [g for g in self.existing['POSIXGroup'].values()
- if entry.get("name") in g[3] and g[0] != entry.get("group")]
+ if entry.get("name") in g[3] and g[0] != entry.get("group")
+ and self._in_managed_range('POSIXGroup', g[2])]
def VerifyPOSIXUser(self, entry, _):
""" Verify a POSIXUser entry """
@@ -189,14 +190,18 @@ class POSIXUsers(Bcfg2.Client.Tools.Tool):
else:
for attr, idx in self.attr_mapping[entry.tag].items():
val = str(self.existing[entry.tag][entry.get("name")][idx])
- entry.set("current_%s" % attr, val)
+ entry.set("current_%s" %
+ attr, val.decode(self.setup['encoding']))
if attr in ["uid", "gid"]:
if entry.get(attr) is None:
# no uid/gid specified, so we let the tool
# automatically determine one -- i.e., it always
# verifies
continue
- if val != entry.get(attr):
+ entval = entry.get(attr)
+ if not isinstance(entval, str):
+ entval = entval.encode('utf-8')
+ if val != entval:
errors.append("%s for %s %s is incorrect. Current %s is "
"%s, but should be %s" %
(attr.title(), entry.tag, entry.get("name"),
@@ -249,7 +254,6 @@ class POSIXUsers(Bcfg2.Client.Tools.Tool):
if entry.get('gid'):
cmd.extend(['-g', entry.get('gid')])
elif entry.tag == 'POSIXUser':
- cmd.append('-m')
if entry.get('uid'):
cmd.extend(['-u', entry.get('uid')])
cmd.extend(['-g', entry.get('group')])
diff --git a/src/lib/Bcfg2/Client/Tools/RcUpdate.py b/src/lib/Bcfg2/Client/Tools/RcUpdate.py
index 4b78581f7..e0c913dcd 100644
--- a/src/lib/Bcfg2/Client/Tools/RcUpdate.py
+++ b/src/lib/Bcfg2/Client/Tools/RcUpdate.py
@@ -21,21 +21,38 @@ class RcUpdate(Bcfg2.Client.Tools.SvcTool):
'-s']).stdout.splitlines()
if 'started' in line]
+ def get_default_svcs(self):
+ """Return a list of services in the 'default' runlevel."""
+ return [line.split()[0]
+ for line in self.cmd.run(['/sbin/rc-update',
+ 'show']).stdout.splitlines()
+ if 'default' in line]
+
+ def verify_bootstatus(self, entry, bootstatus):
+ """Verify bootstatus for entry."""
+ # get a list of all started services
+ allsrv = self.get_default_svcs()
+ # set current_bootstatus attribute
+ if entry.get('name') in allsrv:
+ entry.set('current_bootstatus', 'on')
+ else:
+ entry.set('current_bootstatus', 'off')
+ if bootstatus == 'on':
+ return entry.get('name') in allsrv
+ else:
+ return entry.get('name') not in allsrv
+
def VerifyService(self, entry, _):
"""
Verify Service status for entry.
Assumes we run in the "default" runlevel.
"""
- if entry.get('status') == 'ignore':
+ entry.set('target_status', entry.get('status')) # for reporting
+ bootstatus = self.get_bootstatus(entry)
+ if bootstatus is None:
return True
-
- # get a list of all started services
- allsrv = self.get_enabled_svcs()
-
- # check if service is enabled
- result = self.cmd.run(["/sbin/rc-update", "show", "default"]).stdout
- is_enabled = entry.get("name") in result
+ current_bootstatus = self.verify_bootstatus(entry, bootstatus)
# check if init script exists
try:
@@ -45,39 +62,58 @@ class RcUpdate(Bcfg2.Client.Tools.SvcTool):
entry.get('name'))
return False
- # check if service is enabled
- is_running = entry.get('name') in allsrv
-
- if entry.get('status') == 'on' and not (is_enabled and is_running):
- entry.set('current_status', 'off')
- return False
-
- elif entry.get('status') == 'off' and (is_enabled or is_running):
+ if entry.get('status') == 'ignore':
+ # 'ignore' should verify
+ current_svcstatus = True
+ svcstatus = True
+ else:
+ svcstatus = self.check_service(entry)
+ if entry.get('status') == 'on':
+ if svcstatus:
+ current_svcstatus = True
+ else:
+ current_svcstatus = False
+ elif entry.get('status') == 'off':
+ if svcstatus:
+ current_svcstatus = False
+ else:
+ current_svcstatus = True
+
+ if svcstatus:
entry.set('current_status', 'on')
- return False
+ else:
+ entry.set('current_status', 'off')
- return True
+ return current_bootstatus and current_svcstatus
def InstallService(self, entry):
- """
- Install Service entry
-
- """
+ """Install Service entry."""
self.logger.info('Installing Service %s' % entry.get('name'))
- if entry.get('status') == 'on':
- if entry.get('current_status') == 'off':
- self.start_service(entry)
- # make sure it's enabled
- cmd = '/sbin/rc-update add %s default'
- return self.cmd.run(cmd % entry.get('name')).success
- elif entry.get('status') == 'off':
- if entry.get('current_status') == 'on':
- self.stop_service(entry)
- # make sure it's disabled
- cmd = '/sbin/rc-update del %s default'
- return self.cmd.run(cmd % entry.get('name')).success
-
- return False
+ bootstatus = self.get_bootstatus(entry)
+ if bootstatus is not None:
+ if bootstatus == 'on':
+ # make sure service is enabled on boot
+ bootcmd = '/sbin/rc-update add %s default'
+ elif bootstatus == 'off':
+ # make sure service is disabled on boot
+ bootcmd = '/sbin/rc-update del %s default'
+ bootcmdrv = self.cmd.run(bootcmd % entry.get('name')).success
+ if self.setup['servicemode'] == 'disabled':
+ # 'disabled' means we don't attempt to modify running svcs
+ return bootcmdrv
+ buildmode = self.setup['servicemode'] == 'build'
+ if (entry.get('status') == 'on' and not buildmode) and \
+ entry.get('current_status') == 'off':
+ svccmdrv = self.start_service(entry)
+ elif (entry.get('status') == 'off' or buildmode) and \
+ entry.get('current_status') == 'on':
+ svccmdrv = self.stop_service(entry)
+ else:
+ svccmdrv = True # ignore status attribute
+ return bootcmdrv and svccmdrv
+ else:
+ # when bootstatus is 'None', status == 'ignore'
+ return True
def FindExtra(self):
"""Locate extra rc-update services."""
diff --git a/src/lib/Bcfg2/Client/Tools/VCS.py b/src/lib/Bcfg2/Client/Tools/VCS.py
index 1ab867215..aca5dbbc7 100644
--- a/src/lib/Bcfg2/Client/Tools/VCS.py
+++ b/src/lib/Bcfg2/Client/Tools/VCS.py
@@ -1,14 +1,15 @@
"""VCS support."""
# TODO:
-# * git_write_index
# * add svn support
# * integrate properly with reports
missing = []
+import errno
import os
import shutil
import sys
+import stat
# python-dulwich git imports
try:
@@ -26,6 +27,38 @@ except ImportError:
import Bcfg2.Client.Tools
+def cleanup_mode(mode):
+ """Cleanup a mode value.
+
+ This will return a mode that can be stored in a tree object.
+
+ :param mode: Mode to clean up.
+ """
+ if stat.S_ISLNK(mode):
+ return stat.S_IFLNK
+ elif stat.S_ISDIR(mode):
+ return stat.S_IFDIR
+ elif dulwich.index.S_ISGITLINK(mode):
+ return dulwich.index.S_IFGITLINK
+ ret = stat.S_IFREG | int('644', 8)
+ ret |= (mode & int('111', 8))
+ return ret
+
+
+def index_entry_from_stat(stat_val, hex_sha, flags, mode=None):
+ """Create a new index entry from a stat value.
+
+ :param stat_val: POSIX stat_result instance
+ :param hex_sha: Hex sha of the object
+ :param flags: Index flags
+ """
+ if mode is None:
+ mode = cleanup_mode(stat_val.st_mode)
+ return (stat_val.st_ctime, stat_val.st_mtime, stat_val.st_dev,
+ stat_val.st_ino, mode, stat_val.st_uid,
+ stat_val.st_gid, stat_val.st_size, hex_sha, flags)
+
+
class VCS(Bcfg2.Client.Tools.Tool):
"""VCS support."""
__handles__ = [('Path', 'vcs')]
@@ -47,11 +80,24 @@ class VCS(Bcfg2.Client.Tools.Tool):
self.logger.info("Repository %s does not exist" %
entry.get('name'))
return False
- cur_rev = repo.head()
- if cur_rev != entry.get('revision'):
+ try:
+ expected_rev = entry.get('revision')
+ cur_rev = repo.head()
+ except:
+ return False
+
+ try:
+ client, path = dulwich.client.get_transport_and_path(entry.get('sourceurl'))
+ remote_refs = client.fetch_pack(path, (lambda x: None), None, None, None)
+ if expected_rev in remote_refs:
+ expected_rev = remote_refs[expected_rev]
+ except:
+ pass
+
+ if cur_rev != expected_rev:
self.logger.info("At revision %s need to go to revision %s" %
- (cur_rev, entry.get('revision')))
+ (cur_rev.strip(), expected_rev.strip()))
return False
return True
@@ -71,45 +117,64 @@ class VCS(Bcfg2.Client.Tools.Tool):
destname)
return False
- destr = dulwich.repo.Repo.init(destname, mkdir=True)
+ dulwich.file.ensure_dir_exists(destname)
+ destr = dulwich.repo.Repo.init(destname)
cl, host_path = dulwich.client.get_transport_and_path(entry.get('sourceurl'))
remote_refs = cl.fetch(host_path,
destr,
determine_wants=destr.object_store.determine_wants_all,
progress=sys.stdout.write)
- destr.refs['refs/heads/master'] = entry.get('revision')
- dtree = destr[entry.get('revision')].tree
- obj_store = destr.object_store
- for fname, mode, sha in obj_store.iter_tree_contents(dtree):
- fullpath = os.path.join(destname, fname)
- try:
- f = open(os.path.join(destname, fname), 'wb')
- except IOError:
- dir = os.path.split(fullpath)[0]
- os.makedirs(dir)
- f = open(os.path.join(destname, fname), 'wb')
- f.write(destr[sha].data)
- f.close()
- os.chmod(os.path.join(destname, fname), mode)
+
+ if entry.get('revision') in remote_refs:
+ destr.refs['HEAD'] = remote_refs[entry.get('revision')]
+ else:
+ destr.refs['HEAD'] = entry.get('revision')
+
+ dtree = destr['HEAD'].tree
+ index = dulwich.index.Index(destr.index_path())
+ for fname, mode, sha in destr.object_store.iter_tree_contents(dtree):
+ full_path = os.path.join(destname, fname)
+ dulwich.file.ensure_dir_exists(os.path.dirname(full_path))
+
+ if stat.S_ISLNK(mode):
+ src_path = destr[sha].as_raw_string()
+ try:
+ os.symlink(src_path, full_path)
+ except OSError:
+ e = sys.exc_info()[1]
+ if e.errno == errno.EEXIST:
+ os.unlink(full_path)
+ os.symlink(src_path, full_path)
+ else:
+ raise
+ else:
+ file = open(full_path, 'wb')
+ file.write(destr[sha].as_raw_string())
+ file.close()
+ os.chmod(full_path, mode)
+
+ st = os.lstat(full_path)
+ index[fname] = index_entry_from_stat(st, sha, 0)
+
+ index.write()
return True
- # FIXME: figure out how to write the git index properly
- #iname = "%s/.git/index" % entry.get('name')
- #f = open(iname, 'w+')
- #entries = obj_store[sha].iteritems()
- #try:
- # dulwich.index.write_index(f, entries)
- #finally:
- # f.close()
def Verifysvn(self, entry, _):
"""Verify svn repositories"""
+ headrev = pysvn.Revision(pysvn.opt_revision_kind.head)
client = pysvn.Client()
try:
cur_rev = str(client.info(entry.get('name')).revision.number)
+ server = client.info2(entry.get('sourceurl'), headrev, recurse=False)
+ if server:
+ server_rev = str(server[0][1].rev.number)
except:
self.logger.info("Repository %s does not exist" % entry.get('name'))
return False
+ if entry.get('revision') == 'latest' and cur_rev == server_rev:
+ return True
+
if cur_rev != entry.get('revision'):
self.logger.info("At revision %s need to go to revision %s" %
(cur_rev, entry.get('revision')))
diff --git a/src/lib/Bcfg2/Client/Tools/__init__.py b/src/lib/Bcfg2/Client/Tools/__init__.py
index c5a5ee4d6..703b8ff57 100644
--- a/src/lib/Bcfg2/Client/Tools/__init__.py
+++ b/src/lib/Bcfg2/Client/Tools/__init__.py
@@ -519,6 +519,22 @@ class SvcTool(Tool):
"""
return '/etc/init.d/%s %s' % (service.get('name'), action)
+ def get_bootstatus(self, service):
+ """ Return the bootstatus attribute if it exists.
+
+ :param service: The service entry
+ :type service: lxml.etree._Element
+ :returns: string or None - Value of bootstatus if it exists. If
+ bootstatus is unspecified and status is not *ignore*,
+ return value of status. If bootstatus is unspecified
+ and status is *ignore*, return None.
+ """
+ if service.get('bootstatus') is not None:
+ return service.get('bootstatus')
+ elif service.get('status') != 'ignore':
+ return service.get('status')
+ return None
+
def start_service(self, service):
""" Start a service.
@@ -578,13 +594,14 @@ class SvcTool(Tool):
if not self.handlesEntry(entry):
continue
+ estatus = entry.get('status')
restart = entry.get("restart", "true").lower()
- if (restart == "false" or
+ if (restart == "false" or estatus == 'ignore' or
(restart == "interactive" and not self.setup['interactive'])):
continue
success = False
- if entry.get('status') == 'on':
+ if estatus == 'on':
if self.setup['servicemode'] == 'build':
success = self.stop_service(entry)
elif entry.get('name') not in self.restarted:
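
The defaulting rules in get_bootstatus() are simple but easy to misread; the same logic shown on plain dicts (illustrative values only):

    def effective_bootstatus(entry):
        if entry.get('bootstatus') is not None:
            return entry.get('bootstatus')
        elif entry.get('status') != 'ignore':
            return entry.get('status')
        return None

    print(effective_bootstatus({'status': 'on'}))                       # 'on'
    print(effective_bootstatus({'status': 'off', 'bootstatus': 'on'}))  # 'on'
    print(effective_bootstatus({'status': 'ignore'}))                   # None
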
diff --git a/src/lib/Bcfg2/Client/XML.py b/src/lib/Bcfg2/Client/XML.py
index 91d4ac5c6..4ba06abae 100644
--- a/src/lib/Bcfg2/Client/XML.py
+++ b/src/lib/Bcfg2/Client/XML.py
@@ -5,9 +5,29 @@
# pylint: disable=E0611,W0611,W0613,C0103
try:
- from lxml.etree import Element, SubElement, XML, tostring
+ from lxml.etree import Element, SubElement, tostring, XMLParser
from lxml.etree import XMLSyntaxError as ParseError
+ from lxml.etree import XML as _XML
+ from Bcfg2.Compat import wraps
driver = 'lxml'
+
+ # libxml2 2.9.0+ doesn't parse 10M+ documents by default:
+ # https://mail.gnome.org/archives/commits-list/2012-August/msg00645.html
+ try:
+ _parser = XMLParser(huge_tree=True)
+ except TypeError:
+ _parser = XMLParser()
+
+ @wraps(_XML)
+ def XML(val, **kwargs):
+ """ unicode strings w/encoding declaration are not supported in
+ recent lxml.etree, so we try to read XML, and if it fails we try
+ encoding the string. """
+ kwargs.setdefault('parser', _parser)
+ try:
+ return _XML(val, **kwargs)
+ except ValueError:
+ return _XML(val.encode(), **kwargs)
except ImportError:
# lxml not available
from xml.parsers.expat import ExpatError as ParseError
diff --git a/src/lib/Bcfg2/Client/__init__.py b/src/lib/Bcfg2/Client/__init__.py
index 3bc261f2f..6d1cb9d40 100644
--- a/src/lib/Bcfg2/Client/__init__.py
+++ b/src/lib/Bcfg2/Client/__init__.py
@@ -21,9 +21,9 @@ def prompt(msg):
try:
ans = input(msg)
return ans in ['y', 'Y']
+ except UnicodeEncodeError:
+ ans = input(msg.encode('utf-8'))
+ return ans in ['y', 'Y']
except EOFError:
- # python 2.4.3 on CentOS doesn't like ^C for some reason
- return False
- except:
- print("Error while reading input: %s" % sys.exc_info()[1])
- return False
+ # handle ^C on rhel-based platforms
+ raise SystemExit(1)
diff --git a/src/lib/Bcfg2/Compat.py b/src/lib/Bcfg2/Compat.py
index d034c0777..049236e03 100644
--- a/src/lib/Bcfg2/Compat.py
+++ b/src/lib/Bcfg2/Compat.py
@@ -79,10 +79,7 @@ except NameError:
def u_str(string, encoding=None):
""" print to file compatibility """
if sys.hexversion >= 0x03000000:
- if encoding is not None:
- return string.encode(encoding)
- else:
- return string
+ return string
else:
if encoding is not None:
return unicode(string, encoding)
diff --git a/src/lib/Bcfg2/Encryption.py b/src/lib/Bcfg2/Encryption.py
index b4674d72f..69d40ea37 100755
--- a/src/lib/Bcfg2/Encryption.py
+++ b/src/lib/Bcfg2/Encryption.py
@@ -3,6 +3,7 @@ handling encryption in Bcfg2. See :ref:`server-encryption` for more
details. """
import os
+import sys
from M2Crypto import Rand
from M2Crypto.EVP import Cipher, EVPError
from Bcfg2.Compat import StringIO, md5, b64encode, b64decode
@@ -114,13 +115,21 @@ def ssl_decrypt(data, passwd, algorithm=ALGORITHM):
:returns: string - The decrypted data
"""
# base64-decode the data
- data = b64decode(data)
+ try:
+ data = b64decode(data)
+ except TypeError:
+ # we do not include the data in the error message, because one
+ # of the common causes of this is data that claims to be
+ # encrypted but is not. we don't want to include a plaintext
+ # secret in the error logs.
+ raise TypeError("Could not decode base64 data: %s" %
+ sys.exc_info()[1])
salt = data[8:16]
- # pylint: disable=E1101
+ # pylint: disable=E1101,E1121
hashes = [md5(passwd + salt).digest()]
for i in range(1, 3):
hashes.append(md5(hashes[i - 1] + passwd + salt).digest())
- # pylint: enable=E1101
+ # pylint: enable=E1101,E1121
key = hashes[0] + hashes[1]
iv = hashes[2]
@@ -146,11 +155,11 @@ def ssl_encrypt(plaintext, passwd, algorithm=ALGORITHM, salt=None):
if salt is None:
salt = Rand.rand_bytes(8)
- # pylint: disable=E1101
+ # pylint: disable=E1101,E1121
hashes = [md5(passwd + salt).digest()]
for i in range(1, 3):
hashes.append(md5(hashes[i - 1] + passwd + salt).digest())
- # pylint: enable=E1101
+ # pylint: enable=E1101,E1121
key = hashes[0] + hashes[1]
iv = hashes[2]
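
The key and IV above come from an EVP_BytesToKey-style derivation (MD5, one round) over the passphrase and the 8-byte salt in the OpenSSL header. A standalone sketch with hashlib that mirrors the loop in ssl_encrypt()/ssl_decrypt():

    import hashlib

    def derive_key_iv(passwd, salt):
        """Mirror of the key/IV derivation used above."""
        hashes = [hashlib.md5(passwd + salt).digest()]
        for i in range(1, 3):
            hashes.append(hashlib.md5(hashes[i - 1] + passwd + salt).digest())
        return hashes[0] + hashes[1], hashes[2]   # 32-byte key, 16-byte IV

    key, iv = derive_key_iv(b"secret", b"12345678")
    print(len(key), len(iv))                      # 32 16
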
diff --git a/src/lib/Bcfg2/Options.py b/src/lib/Bcfg2/Options.py
index 243c4ed2a..206c63d4f 100644
--- a/src/lib/Bcfg2/Options.py
+++ b/src/lib/Bcfg2/Options.py
@@ -312,6 +312,15 @@ def list_split(c_string):
return []
+def list_split_anchored_regex(c_string):
+ """ like list_split but split on whitespace and compile each element as
+ anchored regex """
+ try:
+ return [re.compile('^' + x + '$') for x in re.split(r'\s+', c_string)]
+ except re.error:
+ raise ValueError("Not a list of regexes", c_string)
+
+
def colon_split(c_string):
""" split an option string on colons, returning a list """
if c_string:
@@ -319,6 +328,28 @@ def colon_split(c_string):
return []
+def dict_split(c_string):
+ """ split an option string on commas, optionally surrounded by
+ whitespace and split the resulting items again on equals signs,
+ returning a dict """
+ result = dict()
+ if c_string:
+ items = re.split(r'\s*,\s*', c_string)
+ for item in items:
+ if r'=' in item:
+ key, value = item.split(r'=', 1)
+ try:
+ result[key] = get_bool(value)
+ except ValueError:
+ try:
+ result[key] = get_int(value)
+ except ValueError:
+ result[key] = value
+ else:
+ result[item] = True
+ return result
+
+
def get_bool(val):
""" given a string value of a boolean configuration option, return
an actual bool (True or False) """
@@ -619,6 +650,12 @@ SERVER_CHILDREN = \
cf=('server', 'children'),
cook=get_int,
long_arg=True)
+SERVER_PROBE_ALLOWED_GROUPS = \
+ Option('Whitespace-separated list of group names (as regex) to which '
+ 'probes can assign a client by writing "group:" to stdout.',
+ default=[re.compile('.*')],
+ cf=('probes', 'allowed_groups'),
+ cook=list_split_anchored_regex)
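
As a hedged illustration of this new option (the group names and patterns below are made up): a probe assigns groups by printing "group:<name>" lines, and only names matching one of the anchored patterns in allowed_groups are accepted.

#!/usr/bin/env python
# Hypothetical probe; assumes bcfg2.conf contains:
#   [probes]
#   allowed_groups = web-.* db-.*
# Each pattern is compiled as ^pattern$ by list_split_anchored_regex.
print("group:web-frontend")   # matches ^web-.*$, so the group is assigned
print("group:ops-adhoc")      # matches no allowed pattern, so the server
                              # should not apply it
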
# database options
DB_ENGINE = \
@@ -651,6 +688,15 @@ DB_PORT = \
default='',
cf=('database', 'port'),
deprecated_cf=('statistics', 'database_port'))
+DB_OPTIONS = \
+ Option('Database options',
+ default=dict(),
+ cf=('database', 'options'),
+ cook=dict_split)
+DB_SCHEMA = \
+ Option('Database schema',
+ default='public',
+ cf=('database', 'schema'))
# Django options
WEB_CFILE = \
@@ -1193,7 +1239,9 @@ SERVER_COMMON_OPTIONS = dict(repo=SERVER_REPOSITORY,
authentication=SERVER_AUTHENTICATION,
perflog=LOG_PERFORMANCE,
perflog_interval=PERFLOG_INTERVAL,
- children=SERVER_CHILDREN)
+ children=SERVER_CHILDREN,
+ client_timeout=CLIENT_TIMEOUT,
+ probe_allowed_groups=SERVER_PROBE_ALLOWED_GROUPS)
CRYPT_OPTIONS = dict(encrypt=ENCRYPT,
decrypt=DECRYPT,
@@ -1233,9 +1281,9 @@ DRIVER_OPTIONS = \
yum_verify_fail_action=CLIENT_YUM_VERIFY_FAIL_ACTION,
yum_verify_flags=CLIENT_YUM_VERIFY_FLAGS,
posix_uid_whitelist=CLIENT_POSIX_UID_WHITELIST,
- posix_gid_whitelist=CLIENT_POSIX_UID_WHITELIST,
+ posix_gid_whitelist=CLIENT_POSIX_GID_WHITELIST,
posix_uid_blacklist=CLIENT_POSIX_UID_BLACKLIST,
- posix_gid_blacklist=CLIENT_POSIX_UID_BLACKLIST)
+ posix_gid_blacklist=CLIENT_POSIX_GID_BLACKLIST)
CLIENT_COMMON_OPTIONS = \
dict(extra=CLIENT_EXTRA_DISPLAY,
@@ -1285,6 +1333,8 @@ DATABASE_COMMON_OPTIONS = dict(web_configfile=WEB_CFILE,
db_password=DB_PASSWORD,
db_host=DB_HOST,
db_port=DB_PORT,
+ db_options=DB_OPTIONS,
+ db_schema=DB_SCHEMA,
time_zone=DJANGO_TIME_ZONE,
django_debug=DJANGO_DEBUG,
web_prefix=DJANGO_WEB_PREFIX)
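
A minimal, self-contained sketch of how the new dict_split cook (used by the [database] options setting above) is expected to parse a value. The real code reuses Bcfg2's get_bool()/get_int() helpers, which are only crudely approximated here, and the option names in the example are illustrative PostgreSQL settings, not Bcfg2 defaults.

import re

def dict_split_sketch(c_string):
    result = {}
    if not c_string:
        return result
    for item in re.split(r'\s*,\s*', c_string):
        if '=' in item:
            key, value = item.split('=', 1)
            if value.lower() in ('true', 'false'):   # crude get_bool()
                result[key] = value.lower() == 'true'
            else:
                try:
                    result[key] = int(value)         # crude get_int()
                except ValueError:
                    result[key] = value
        else:
            result[item] = True
    return result

print(dict_split_sketch("sslmode=require, connect_timeout=10, autocommit=true"))
# {'sslmode': 'require', 'connect_timeout': 10, 'autocommit': True}
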
diff --git a/src/lib/Bcfg2/Proxy.py b/src/lib/Bcfg2/Proxy.py
index f6db66a93..34080da6b 100644
--- a/src/lib/Bcfg2/Proxy.py
+++ b/src/lib/Bcfg2/Proxy.py
@@ -104,7 +104,6 @@ class RetryMethod(xmlrpclib._Method):
err = sys.exc_info()[1]
msg = err
except:
- raise
etype, err = sys.exc_info()[:2]
msg = "Unknown failure: %s (%s)" % (err, etype.__name__)
if msg:
diff --git a/src/lib/Bcfg2/Reporting/Collector.py b/src/lib/Bcfg2/Reporting/Collector.py
index df82248d0..52700f917 100644
--- a/src/lib/Bcfg2/Reporting/Collector.py
+++ b/src/lib/Bcfg2/Reporting/Collector.py
@@ -20,10 +20,38 @@ from Bcfg2.Reporting.Transport.DirectStore import DirectStore
from Bcfg2.Reporting.Storage import load_storage_from_config, \
StorageError, StorageImportError
+
class ReportingError(Exception):
"""Generic reporting exception"""
pass
+
+class ReportingStoreThread(threading.Thread):
+ """Thread for calling the storage backend"""
+ def __init__(self, interaction, storage, group=None, target=None,
+ name=None, args=(), kwargs=None):
+ """Initialize the thread with a reference to the interaction
+ as well as the storage engine to use"""
+ threading.Thread.__init__(self, group, target, name, args,
+ kwargs or dict())
+ self.interaction = interaction
+ self.storage = storage
+ self.logger = logging.getLogger('bcfg2-report-collector')
+
+ def run(self):
+ """Call the database storage procedure (aka import)"""
+ try:
+ start = time.time()
+ self.storage.import_interaction(self.interaction)
+ self.logger.info("Imported interaction for %s in %ss" %
+ (self.interaction.get('hostname', '<unknown>'),
+ time.time() - start))
+ except:
+ #TODO requeue?
+ self.logger.error("Unhandled exception in import thread %s" %
+ traceback.format_exc().splitlines()[-1])
+
+
class ReportingCollector(object):
"""The collecting process for reports"""
@@ -35,6 +63,8 @@ class ReportingCollector(object):
self.encoding = setup['encoding']
self.terminate = None
self.context = None
+ self.children = []
+ self.cleanup_threshold = 25
if setup['debug']:
level = logging.DEBUG
@@ -77,12 +107,12 @@ class ReportingCollector(object):
(self.storage.__class__.__name__,
traceback.format_exc().splitlines()[-1]))
-
def run(self):
"""Startup the processing and go!"""
self.terminate = threading.Event()
atexit.register(self.shutdown)
- self.context = daemon.DaemonContext()
+ self.context = daemon.DaemonContext(detach_process=True)
+ iter = 0
if self.setup['daemon']:
self.logger.debug("Daemonizing")
@@ -103,15 +133,16 @@ class ReportingCollector(object):
interaction = self.transport.fetch()
if not interaction:
continue
- try:
- start = time.time()
- self.storage.import_interaction(interaction)
- self.logger.info("Imported interaction for %s in %ss" %
- (interaction.get('hostname', '<unknown>'),
- time.time() - start))
- except:
- #TODO requeue?
- raise
+
+ store_thread = ReportingStoreThread(interaction, self.storage)
+ store_thread.start()
+ self.children.append(store_thread)
+
+ iter += 1
+ if iter >= self.cleanup_threshold:
+ self.reap_children()
+ iter = 0
+
except (SystemExit, KeyboardInterrupt):
self.logger.info("Shutting down")
self.shutdown()
@@ -125,7 +156,22 @@ class ReportingCollector(object):
# this will be missing if called from bcfg2-admin
self.terminate.set()
if self.transport:
- self.transport.shutdown()
+ try:
+ self.transport.shutdown()
+ except OSError:
+ pass
if self.storage:
self.storage.shutdown()
+ def reap_children(self):
+ """Join any non-live threads"""
+ newlist = []
+
+ self.logger.debug("Starting reap_children")
+ for child in self.children:
+ if child.isAlive():
+ newlist.append(child)
+ else:
+ child.join()
+ self.logger.debug("Joined child thread %s" % child.getName())
+ self.children = newlist
diff --git a/src/lib/Bcfg2/Reporting/Compat.py b/src/lib/Bcfg2/Reporting/Compat.py
new file mode 100644
index 000000000..9113fdb91
--- /dev/null
+++ b/src/lib/Bcfg2/Reporting/Compat.py
@@ -0,0 +1,16 @@
+""" Compatibility imports for Django. """
+
+from django import VERSION
+from django.db import transaction
+
+# Django 1.6 deprecated commit_on_success() and introduced atomic() with
+# similar semantics.
+if VERSION[0] == 1 and VERSION[1] < 6:
+ transaction.atomic = transaction.commit_on_success
+
+try:
+ # Django < 1.6
+ from django.conf.urls.defaults import url, patterns
+except ImportError:
+ # Django >= 1.6
+ from django.conf.urls import url, patterns
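
A brief usage sketch of the shim above (the function and its body are illustrative only): consumers import transaction from the compat module and apply the same decorator regardless of Django version, as the DjangoORM change below does.

from Bcfg2.Reporting.Compat import transaction

@transaction.atomic   # commit_on_success on Django < 1.6, native atomic() on 1.6+
def store_interaction(data):
    # ORM writes would happen here, inside a single transaction
    return data
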
diff --git a/src/lib/Bcfg2/Reporting/Storage/DjangoORM.py b/src/lib/Bcfg2/Reporting/Storage/DjangoORM.py
index aea5e9d4b..ef1e92103 100644
--- a/src/lib/Bcfg2/Reporting/Storage/DjangoORM.py
+++ b/src/lib/Bcfg2/Reporting/Storage/DjangoORM.py
@@ -18,12 +18,12 @@ from django.core import management
from django.core.exceptions import ObjectDoesNotExist, MultipleObjectsReturned
from django.db.models import FieldDoesNotExist
from django.core.cache import cache
-from django.db import transaction
#Used by GetCurrentEntry
import difflib
from Bcfg2.Compat import b64decode
from Bcfg2.Reporting.models import *
+from Bcfg2.Reporting.Compat import transaction
class DjangoORM(StorageBase):
@@ -256,7 +256,7 @@ class DjangoORM(StorageBase):
entry.tag)
return None
- @transaction.commit_on_success
+ @transaction.atomic
def _import_interaction(self, interaction):
"""Real import function"""
hostname = interaction['hostname']
diff --git a/src/lib/Bcfg2/Reporting/models.py b/src/lib/Bcfg2/Reporting/models.py
index 598e1c6ec..fc9523067 100644
--- a/src/lib/Bcfg2/Reporting/models.py
+++ b/src/lib/Bcfg2/Reporting/models.py
@@ -88,7 +88,7 @@ class InteractionManager(models.Manager):
Returns the ids of most recent interactions for clients as of a date.
Arguments:
- maxdate -- datetime object. Most recent date to pull. (dafault None)
+ maxdate -- datetime object. Most recent date to pull. (default None)
"""
from django.db import connection
diff --git a/src/lib/Bcfg2/Reporting/templates/base.html b/src/lib/Bcfg2/Reporting/templates/base.html
index c73339911..7edf3a949 100644
--- a/src/lib/Bcfg2/Reporting/templates/base.html
+++ b/src/lib/Bcfg2/Reporting/templates/base.html
@@ -1,4 +1,8 @@
{% load bcfg2_tags %}
+{% comment %}
+This is needed for Django versions less than 1.5
+{% endcomment %}
+{% load url from future %}
<?xml version="1.0"?>
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">
@@ -25,8 +29,9 @@
<div id="header">
<a href="http://bcfg2.org"><img src='{% to_media_url bcfg2_logo.png %}'
- height='115' width='300' alt='Bcfg2' style='float:left; height: 115px' /></a>
- </div>
+ height='115' width='300' alt='Bcfg2'
+ style='float:left; height: 115px' /></a>
+ </div>
<div id="document">
<div id="content"><div id="contentwrapper">
@@ -46,26 +51,26 @@
<li>Overview</li>
</ul>
<ul class='menu-level2'>
- <li><a href="{% url reports_summary %}">Summary</a></li>
- <li><a href="{% url reports_history %}">Recent Interactions</a></li>
- <li><a href="{% url reports_timing %}">Timing</a></li>
+ <li><a href="{% url "reports_summary" %}">Summary</a></li>
+ <li><a href="{% url "reports_history" %}">Recent Interactions</a></li>
+ <li><a href="{% url "reports_timing" %}">Timing</a></li>
</ul>
<ul class='menu-level1'>
<li>Clients</li>
</ul>
<ul class='menu-level2'>
- <li><a href="{% url reports_grid_view %}">Grid View</a></li>
- <li><a href="{% url reports_detailed_list %}">Detailed List</a></li>
- <li><a href="{% url reports_client_manage %}">Manage</a></li>
+ <li><a href="{% url "reports_grid_view" %}">Grid View</a></li>
+ <li><a href="{% url "reports_detailed_list" %}">Detailed List</a></li>
+ <li><a href="{% url "reports_client_manage" %}">Manage</a></li>
</ul>
<ul class='menu-level1'>
<li>Entries Configured</li>
</ul>
<ul class='menu-level2'>
- <li><a href="{% url reports_common_problems %}">Common problems</a></li>
- <li><a href="{% url reports_item_list "bad" %}">Bad</a></li>
- <li><a href="{% url reports_item_list "modified" %}">Modified</a></li>
- <li><a href="{% url reports_item_list "extra" %}">Extra</a></li>
+ <li><a href="{% url "reports_common_problems" %}">Common problems</a></li>
+ <li><a href="{% url "reports_item_list" "bad" %}">Bad</a></li>
+ <li><a href="{% url "reports_item_list" "modified" %}">Modified</a></li>
+ <li><a href="{% url "reports_item_list" "extra" %}">Extra</a></li>
</ul>
{% comment %}
TODO
@@ -88,7 +93,7 @@
<div style='clear:both'></div>
</div><!-- document -->
<div id="footer">
- <span>Bcfg2 Version 1.3.1</span>
+ <span>Bcfg2 Version 1.3.3</span>
</div>
<div id="calendar_div" style='position:absolute; visibility:hidden; background-color:white; layer-background-color:white;'></div>
diff --git a/src/lib/Bcfg2/Reporting/templates/clients/detail.html b/src/lib/Bcfg2/Reporting/templates/clients/detail.html
index 4608ce6f1..e890589a7 100644
--- a/src/lib/Bcfg2/Reporting/templates/clients/detail.html
+++ b/src/lib/Bcfg2/Reporting/templates/clients/detail.html
@@ -1,24 +1,28 @@
{% extends "base.html" %}
{% load bcfg2_tags %}
+{% comment %}
+This is needed for Django versions less than 1.5
+{% endcomment %}
+{% load url from future %}
{% block title %}Bcfg2 - Client {{client.name}}{% endblock %}
{% block extra_header_info %}
<style type="text/css">
.node_data {
- border: 1px solid #98DBCC;
- margin: 10px;
- padding-left: 18px;
+ border: 1px solid #98DBCC;
+ margin: 10px;
+ padding-left: 18px;
}
.node_data td {
- padding: 1px 20px 1px 2px;
+ padding: 1px 20px 1px 2px;
}
span.history_links {
- font-size: 90%;
- margin-left: 50px;
+ font-size: 90%;
+ margin-left: 50px;
}
span.history_links a {
- font-size: 90%;
+ font-size: 90%;
}
</style>
{% endblock %}
@@ -30,12 +34,12 @@ span.history_links a {
{% block content %}
<div class='detail_header'>
<h2>{{client.name}}</h2>
- <a href='{% url reports_client_manage %}#{{ client.name }}'>[manage]</a>
- <span class='history_links'><a href="{% url reports_client_history client.name %}">View History</a> | Jump to&nbsp;
+ <a href='{% url "reports_client_manage" %}#{{ client.name }}'>[manage]</a>
+ <span class='history_links'><a href="{% url "reports_client_history" client.name %}">View History</a> | Jump to&nbsp;
<select id="quick" name="quick" onchange="javascript:pageJump('quick');">
<option value="" selected="selected">--- Time ---</option>
{% for i in client.interactions.all|slice:":25" %}
- <option value="{% url reports_client_detail_pk hostname=client.name, pk=i.id %}">{{i.timestamp|date:"c"}}</option>
+ <option value="{% url "reports_client_detail_pk" hostname=client.name pk=i.id %}">{{i.timestamp|date:"c"}}</option>
{% endfor %}
</select></span>
</div>
@@ -110,7 +114,7 @@ span.history_links a {
{% for entry in entry_list %}
<tr class='{% cycle listview,listview_alt %}'>
<td class='entry_list_type'>{{entry.entry_type}}</td>
- <td><a href="{% url reports_item entry.class_name entry.pk interaction.pk %}">
+ <td><a href="{% url "reports_item" entry.class_name entry.pk interaction.pk %}">
{{entry.name}}</a></td>
</tr>
{% endfor %}
@@ -129,7 +133,7 @@ span.history_links a {
{% for failure in interaction.failures.all %}
<tr class='{% cycle listview,listview_alt %}'>
<td class='entry_list_type'>{{failure.entry_type}}</td>
- <td><a href="{% url reports_item failure.class_name failure.pk interaction.pk %}">
+ <td><a href="{% url "reports_item" failure.class_name failure.pk interaction.pk %}">
{{failure.name}}</a></td>
</tr>
{% endfor %}
@@ -140,11 +144,11 @@ span.history_links a {
{% if entry_list %}
<div class="entry_list recent_history_wrapper">
<div class="entry_list_head" style="border-bottom: 2px solid #98DBCC;">
- <h4 style="display: inline"><a href="{% url reports_client_history client.name %}">Recent Interactions</a></h4>
+ <h4 style="display: inline"><a href="{% url "reports_client_history" client.name %}">Recent Interactions</a></h4>
</div>
<div class='recent_history_box'>
{% include "widgets/interaction_list.inc" %}
- <div style='padding-left: 5px'><a href="{% url reports_client_history client.name %}">more...</a></div>
+ <div style='padding-left: 5px'><a href="{% url "reports_client_history" client.name %}">more...</a></div>
</div>
</div>
{% endif %}
diff --git a/src/lib/Bcfg2/Reporting/templates/clients/detailed-list.html b/src/lib/Bcfg2/Reporting/templates/clients/detailed-list.html
index fd9a545ce..33c78a5f0 100644
--- a/src/lib/Bcfg2/Reporting/templates/clients/detailed-list.html
+++ b/src/lib/Bcfg2/Reporting/templates/clients/detailed-list.html
@@ -1,5 +1,9 @@
{% extends "base-timeview.html" %}
{% load bcfg2_tags %}
+{% comment %}
+This is needed for Django versions less than 1.5
+{% endcomment %}
+{% load url from future %}
{% block title %}Bcfg2 - Detailed Client Listing{% endblock %}
{% block pagebanner %}Clients - Detailed View{% endblock %}
@@ -21,7 +25,7 @@
</tr>
{% for entry in entry_list %}
<tr class='{% cycle listview,listview_alt %}'>
- <td class='left_column'><a href='{% url Bcfg2.Reporting.views.client_detail hostname=entry.client.name, pk=entry.id %}'>{{ entry.client.name }}</a></td>
+ <td class='left_column'><a href='{% url "Bcfg2.Reporting.views.client_detail" hostname=entry.client.name pk=entry.id %}'>{{ entry.client.name }}</a></td>
<td class='right_column' style='width:75px'><a href='{% add_url_filter state=entry.state %}'
class='{{entry|determine_client_state}}'>{{ entry.state }}</a></td>
<td class='right_column_narrow'>{{ entry.good_count }}</td>
diff --git a/src/lib/Bcfg2/Reporting/templates/clients/index.html b/src/lib/Bcfg2/Reporting/templates/clients/index.html
index d9c415c20..eba83670b 100644
--- a/src/lib/Bcfg2/Reporting/templates/clients/index.html
+++ b/src/lib/Bcfg2/Reporting/templates/clients/index.html
@@ -1,5 +1,9 @@
{% extends "base-timeview.html" %}
{% load bcfg2_tags %}
+{% comment %}
+This is needed for Django versions less than 1.5
+{% endcomment %}
+{% load url from future %}
{% block extra_header_info %}
{% endblock%}
@@ -17,9 +21,9 @@
<td class='{{ inter|determine_client_state }}'>
<a href="{% spaceless %}
{% if not timestamp %}
- {% url reports_client_detail inter.client.name %}
+ {% url "reports_client_detail" inter.client.name %}
{% else %}
- {% url reports_client_detail_pk inter.client.name,inter.id %}
+ {% url "reports_client_detail_pk" inter.client.name inter.id %}
{% endif %}
{% endspaceless %}">{{ inter.client.name }}</a>
</td>
diff --git a/src/lib/Bcfg2/Reporting/templates/clients/manage.html b/src/lib/Bcfg2/Reporting/templates/clients/manage.html
index 443ec8ccb..03918aad7 100644
--- a/src/lib/Bcfg2/Reporting/templates/clients/manage.html
+++ b/src/lib/Bcfg2/Reporting/templates/clients/manage.html
@@ -1,4 +1,8 @@
{% extends "base.html" %}
+{% comment %}
+This is needed for Django versions less than 1.5
+{% endcomment %}
+{% load url from future %}
{% block extra_header_info %}
{% endblock%}
@@ -24,10 +28,10 @@
<td><span id="{{ client.name }}"> </span>
<span id="ttag-{{ client.name }}"> </span>
<span id="s-ttag-{{ client.name }}"> </span>
- <a href="{% url reports_client_detail client.name %}">{{ client.name }}</a></td>
+ <a href='{% url "reports_client_detail" client.name %}'>{{ client.name }}</a></td>
<td>{% firstof client.expiration 'Active' %}</td>
<td>
- <form method="post" action="{% url reports_client_manage %}">
+ <form method="post" action='{% url "reports_client_manage" %}'>
<div> {# here for no reason other than to validate #}
<input type="hidden" name="client_name" value="{{ client.name }}" />
<input type="hidden" name="client_action" value="{% if client.expiration %}unexpire{% else %}expire{% endif %}" />
diff --git a/src/lib/Bcfg2/Reporting/templates/config_items/common.html b/src/lib/Bcfg2/Reporting/templates/config_items/common.html
index 57191ec39..91f37d7dc 100644
--- a/src/lib/Bcfg2/Reporting/templates/config_items/common.html
+++ b/src/lib/Bcfg2/Reporting/templates/config_items/common.html
@@ -1,5 +1,6 @@
{% extends "base-timeview.html" %}
{% load bcfg2_tags %}
+{% load url from future %}
{% block title %}Bcfg2 - Common Problems{% endblock %}
@@ -29,9 +30,9 @@
{% for item in type_list %}
<tr class='{% cycle listview,listview_alt %}'>
<td>{{ item.ENTRY_TYPE }}</td>
- <td><a href="{% url reports_entry item.class_name, item.pk %}">{{ item.name }}</a></td>
+ <td><a href='{% url "reports_entry" item.class_name item.pk %}'>{{ item.name }}</a></td>
<td>{{ item.num_entries }}</td>
- <td><a href="{% url reports_item item.ENTRY_TYPE, item.pk %}">{{ item.short_list|join:"," }}</a></td>
+ <td><a href='{% url "reports_item" item.ENTRY_TYPE item.pk %}'>{{ item.short_list|join:"," }}</a></td>
</tr>
{% endfor %}
</table>
diff --git a/src/lib/Bcfg2/Reporting/templates/config_items/entry_status.html b/src/lib/Bcfg2/Reporting/templates/config_items/entry_status.html
index e940889ab..e3befb0eb 100644
--- a/src/lib/Bcfg2/Reporting/templates/config_items/entry_status.html
+++ b/src/lib/Bcfg2/Reporting/templates/config_items/entry_status.html
@@ -1,5 +1,9 @@
{% extends "base-timeview.html" %}
{% load bcfg2_tags %}
+{% comment %}
+This is needed for Django versions less than 1.5
+{% endcomment %}
+{% load url from future %}
{% block title %}Bcfg2 - Entry Status{% endblock %}
@@ -17,10 +21,10 @@
{% for item, inters in items %}
{% for inter in inters %}
<tr class='{% cycle listview,listview_alt %}'>
- <td><a href='{% url reports_client_detail hostname=inter.client.name %}'>{{inter.client.name}}</a></td>
- <td><a href='{% url reports_client_detail_pk hostname=inter.client.name, pk=inter.pk %}'>{{inter.timestamp|date:"Y-m-d\&\n\b\s\p\;H:i"|safe}}</a></td>
+ <td><a href='{% url "reports_client_detail" hostname=inter.client.name %}'>{{inter.client.name}}</a></td>
+ <td><a href='{% url "reports_client_detail_pk" hostname=inter.client.name pk=inter.pk %}'>{{inter.timestamp|date:"Y-m-d\&\n\b\s\p\;H:i"|safe}}</a></td>
<td>{{ item.get_state_display }}</td>
- <td style='white-space: nowrap'><a href="{% url reports_item entry_type=item.class_name pk=item.pk %}">({{item.pk}}) {{item.short_list|join:","}}</a></td>
+ <td style='white-space: nowrap'><a href='{% url "reports_item" entry_type=item.class_name pk=item.pk %}'>({{item.pk}}) {{item.short_list|join:","}}</a></td>
</tr>
{% endfor %}
{% endfor %}
diff --git a/src/lib/Bcfg2/Reporting/templates/config_items/item.html b/src/lib/Bcfg2/Reporting/templates/config_items/item.html
index 259414399..b03d48045 100644
--- a/src/lib/Bcfg2/Reporting/templates/config_items/item.html
+++ b/src/lib/Bcfg2/Reporting/templates/config_items/item.html
@@ -1,6 +1,10 @@
{% extends "base.html" %}
{% load split %}
{% load syntax_coloring %}
+{% comment %}
+This is needed for Django versions less than 1.5
+{% endcomment %}
+{% load url from future %}
{% block title %}Bcfg2 - Element Details{% endblock %}
@@ -9,20 +13,20 @@
{% block extra_header_info %}
<style type="text/css">
#table_list_header {
- font-size: 100%;
+ font-size: 100%;
}
table.entry_list {
- width: auto;
+ width: auto;
}
div.information_wrapper {
- margin: 15px;
+ margin: 15px;
}
div.diff_wrapper {
- overflow: auto;
+ overflow: auto;
}
div.entry_list h3 {
- font-size: 90%;
- padding: 5px;
+ font-size: 90%;
+ padding: 5px;
}
</style>
{% endblock%}
@@ -131,9 +135,9 @@ div.entry_list h3 {
{% if associated_list %}
<table class="entry_list" cellpadding="3">
{% for inter in associated_list %}
- <tr><td><a href="{% url reports_client_detail inter.client.name %}"
+ <tr><td><a href='{% url "reports_client_detail" inter.client.name %}'
>{{inter.client.name}}</a></td>
- <td><a href="{% url reports_client_detail_pk hostname=inter.client.name,pk=inter.id %}"
+ <td><a href='{% url "reports_client_detail_pk" hostname=inter.client.name pk=inter.id %}'
>{{inter.timestamp}}</a></td>
</tr>
{% endfor %}
diff --git a/src/lib/Bcfg2/Reporting/templates/config_items/listing.html b/src/lib/Bcfg2/Reporting/templates/config_items/listing.html
index 864392754..0e4812e85 100644
--- a/src/lib/Bcfg2/Reporting/templates/config_items/listing.html
+++ b/src/lib/Bcfg2/Reporting/templates/config_items/listing.html
@@ -1,5 +1,9 @@
{% extends "base-timeview.html" %}
{% load bcfg2_tags %}
+{% comment %}
+This is needed for Django versions less than 1.5
+{% endcomment %}
+{% load url from future %}
{% block title %}Bcfg2 - Element Listing{% endblock %}
@@ -21,9 +25,9 @@
<tr style='text-align: left' ><th>Name</th><th>Count</th><th>Reason</th></tr>
{% for entry in type_data %}
<tr class='{% cycle listview,listview_alt %}'>
- <td><a href="{% url reports_entry entry.class_name entry.pk %}">{{entry.name}}</a></td>
+ <td><a href='{% url "reports_entry" entry.class_name entry.pk %}'>{{entry.name}}</a></td>
<td>{{entry.num_entries}}</td>
- <td><a href="{% url reports_item entry.class_name entry.pk %}">{{entry.short_list|join:","}}</a></td>
+ <td><a href='{% url "reports_item" entry.class_name entry.pk %}'>{{entry.short_list|join:","}}</a></td>
</tr>
{% endfor %}
</table>
diff --git a/src/lib/Bcfg2/Reporting/templates/displays/summary.html b/src/lib/Bcfg2/Reporting/templates/displays/summary.html
index b9847cf96..ffafd52e0 100644
--- a/src/lib/Bcfg2/Reporting/templates/displays/summary.html
+++ b/src/lib/Bcfg2/Reporting/templates/displays/summary.html
@@ -1,5 +1,9 @@
{% extends "base-timeview.html" %}
{% load bcfg2_tags %}
+{% comment %}
+This is needed for Django versions less than 1.5
+{% endcomment %}
+{% load url from future %}
{% block title %}Bcfg2 - Client Summary{% endblock %}
{% block pagebanner %}Clients - Summary{% endblock %}
@@ -30,7 +34,7 @@ hide_tables[{{ forloop.counter0 }}] = "table_{{ summary.name }}";
<table id='table_{{ summary.name }}' class='entry_list'>
{% for node in summary.nodes|sort_interactions_by_name %}
<tr class='{% cycle listview,listview_alt %}'>
- <td><a href="{% url reports_client_detail_pk hostname=node.client.name,pk=node.id %}">{{ node.client.name }}</a></td>
+ <td><a href='{% url "reports_client_detail_pk" hostname=node.client.name pk=node.id %}'>{{ node.client.name }}</a></td>
</tr>
{% endfor %}
</table>
diff --git a/src/lib/Bcfg2/Reporting/templates/displays/timing.html b/src/lib/Bcfg2/Reporting/templates/displays/timing.html
index ff775ded5..8ac5e49bb 100644
--- a/src/lib/Bcfg2/Reporting/templates/displays/timing.html
+++ b/src/lib/Bcfg2/Reporting/templates/displays/timing.html
@@ -1,5 +1,9 @@
{% extends "base-timeview.html" %}
{% load bcfg2_tags %}
+{% comment %}
+This is needed for Django versions less than 1.5
+{% endcomment %}
+{% load url from future %}
{% block title %}Bcfg2 - Performance Metrics{% endblock %}
{% block pagebanner %}Performance Metrics{% endblock %}
@@ -12,7 +16,7 @@
<div class='client_list_box'>
{% if metrics %}
<table cellpadding="3">
- <tr id='table_list_header' class='listview'>
+ <tr id='table_list_header' class='listview'>
<td>Name</td>
<td>Parse</td>
<td>Probe</td>
@@ -21,15 +25,15 @@
<td>Config</td>
<td>Total</td>
</tr>
- {% for metric in metrics|dictsort:"name" %}
+ {% for metric in metrics|dictsort:"name" %}
<tr class='{% cycle listview,listview_alt %}'>
<td><a style='font-size: 100%'
- href="{% url reports_client_detail hostname=metric.name %}">{{ metric.name }}</a></td>
+ href='{% url "reports_client_detail" hostname=metric.name %}'>{{ metric.name }}</a></td>
{% for mitem in metric|build_metric_list %}
<td>{{ mitem }}</td>
{% endfor %}
- </tr>
- {% endfor %}
+ </tr>
+ {% endfor %}
</table>
{% else %}
<p>No metric data available</p>
diff --git a/src/lib/Bcfg2/Reporting/templatetags/bcfg2_tags.py b/src/lib/Bcfg2/Reporting/templatetags/bcfg2_tags.py
index f5f2e7528..489682f30 100644
--- a/src/lib/Bcfg2/Reporting/templatetags/bcfg2_tags.py
+++ b/src/lib/Bcfg2/Reporting/templatetags/bcfg2_tags.py
@@ -5,9 +5,8 @@ from django import template
from django.conf import settings
from django.core.urlresolvers import resolve, reverse, \
Resolver404, NoReverseMatch
-from django.template.loader import get_template, \
- get_template_from_string,TemplateDoesNotExist
-from django.utils.encoding import smart_unicode, smart_str
+from django.template.loader import get_template_from_string
+from django.utils.encoding import smart_str
from django.utils.safestring import mark_safe
from datetime import datetime, timedelta
from Bcfg2.Reporting.utils import filter_list
@@ -133,19 +132,22 @@ def filter_navigator(context):
del myargs[filter]
filters.append((filter,
reverse(view, args=args, kwargs=myargs) + qs))
- filters.sort(lambda x, y: cmp(x[0], y[0]))
+ filters.sort(key=lambda x: x[0])
myargs = kwargs.copy()
- selected=True
+ selected = True
if 'group' in myargs:
del myargs['group']
- selected=False
- groups = [('---', reverse(view, args=args, kwargs=myargs) + qs, selected)]
+ selected = False
+ groups = [('---',
+ reverse(view, args=args, kwargs=myargs) + qs,
+ selected)]
for group in Group.objects.values('name'):
myargs['group'] = group['name']
- groups.append((group['name'], reverse(view, args=args, kwargs=myargs) + qs,
- group['name'] == kwargs.get('group', '')))
-
+ groups.append((group['name'],
+ reverse(view, args=args, kwargs=myargs) + qs,
+ group['name'] == kwargs.get('group', '')))
+
return {'filters': filters, 'groups': groups}
except (Resolver404, NoReverseMatch, ValueError, KeyError):
pass
@@ -205,7 +207,7 @@ def sort_interactions_by_name(value):
Sort an interaction list by client name
"""
inters = list(value)
- inters.sort(lambda a, b: cmp(a.client.name, b.client.name))
+ inters.sort(key=lambda a: a.client.name)
return inters
@@ -223,7 +225,7 @@ class AddUrlFilter(template.Node):
filter_value = self.filter_value.resolve(context, True)
if filter_value:
filter_name = smart_str(self.filter_name)
- filter_value = smart_unicode(filter_value)
+ filter_value = smart_str(filter_value)
kwargs[filter_name] = filter_value
# These two don't make sense
if filter_name == 'server' and 'hostname' in kwargs:
@@ -306,6 +308,7 @@ def to_media_url(parser, token):
return MediaTag(filter_value)
+
@register.filter
def determine_client_state(entry):
"""
@@ -338,10 +341,11 @@ def do_qs(parser, token):
try:
tag, name, value = token.split_contents()
except ValueError:
- raise template.TemplateSyntaxError, "%r tag requires exactly two arguments" \
- % token.contents.split()[0]
+ raise template.TemplateSyntaxError("%r tag requires exactly two arguments"
+ % token.contents.split()[0])
return QsNode(name, value)
+
class QsNode(template.Node):
def __init__(self, name, value):
self.name = template.Variable(name)
@@ -359,7 +363,7 @@ class QsNode(template.Node):
return ''
except KeyError:
if settings.TEMPLATE_DEBUG:
- raise Exception, "'qs' tag requires context['request']"
+ raise Exception("'qs' tag requires context['request']")
return ''
except:
return ''
@@ -380,6 +384,7 @@ def sort_link(parser, token):
return SortLinkNode(sort_key, text)
+
class SortLinkNode(template.Node):
__TMPL__ = "{% load bcfg2_tags %}<a href='{% qs 'sort' key %}'>{{ text }}</a>"
@@ -420,4 +425,3 @@ class SortLinkNode(template.Node):
raise
raise
return ''
-
diff --git a/src/lib/Bcfg2/Reporting/templatetags/syntax_coloring.py b/src/lib/Bcfg2/Reporting/templatetags/syntax_coloring.py
index 2712d6395..22700689f 100644
--- a/src/lib/Bcfg2/Reporting/templatetags/syntax_coloring.py
+++ b/src/lib/Bcfg2/Reporting/templatetags/syntax_coloring.py
@@ -1,11 +1,8 @@
-import sys
from django import template
-from django.utils.encoding import smart_unicode
+from django.utils.encoding import smart_str
from django.utils.html import conditional_escape
from django.utils.safestring import mark_safe
-from Bcfg2.Compat import u_str
-
register = template.Library()
# pylint: disable=E0611
@@ -33,9 +30,9 @@ def syntaxhilight(value, arg="diff", autoescape=None):
if colorize:
try:
- output = u_str('<style type="text/css">') \
- + smart_unicode(HtmlFormatter().get_style_defs('.highlight')) \
- + u_str('</style>')
+ output = smart_str('<style type="text/css">') \
+ + smart_str(HtmlFormatter().get_style_defs('.highlight')) \
+ + smart_str('</style>')
lexer = get_lexer_by_name(arg)
output += highlight(value, lexer, HtmlFormatter())
@@ -43,6 +40,7 @@ def syntaxhilight(value, arg="diff", autoescape=None):
except:
return value
else:
- return mark_safe(u_str('<div class="note-box">Tip: Install pygments '
- 'for highlighting</div><pre>%s</pre>') % value)
+ return mark_safe(smart_str(
+ '<div class="note-box">Tip: Install pygments '
+ 'for highlighting</div><pre>%s</pre>') % value)
syntaxhilight.needs_autoescape = True
diff --git a/src/lib/Bcfg2/Reporting/urls.py b/src/lib/Bcfg2/Reporting/urls.py
index 8330fef7b..3a40cb932 100644
--- a/src/lib/Bcfg2/Reporting/urls.py
+++ b/src/lib/Bcfg2/Reporting/urls.py
@@ -1,4 +1,4 @@
-from django.conf.urls.defaults import *
+from Bcfg2.Reporting.Compat import url, patterns # django compat imports
from django.core.urlresolvers import reverse, NoReverseMatch
from django.http import HttpResponsePermanentRedirect
from Bcfg2.Reporting.utils import filteredUrls, paginatedUrls, timeviewUrls
diff --git a/src/lib/Bcfg2/Reporting/utils.py b/src/lib/Bcfg2/Reporting/utils.py
index 619501d8b..0d394fcd8 100755
--- a/src/lib/Bcfg2/Reporting/utils.py
+++ b/src/lib/Bcfg2/Reporting/utils.py
@@ -1,5 +1,4 @@
"""Helper functions for reports"""
-from django.conf.urls.defaults import *
import re
"""List of filters provided by filteredUrls"""
diff --git a/src/lib/Bcfg2/Reporting/views.py b/src/lib/Bcfg2/Reporting/views.py
index 6cba7bf8c..c7c2a503f 100644
--- a/src/lib/Bcfg2/Reporting/views.py
+++ b/src/lib/Bcfg2/Reporting/views.py
@@ -338,6 +338,8 @@ def client_detail(request, hostname=None, pk=None):
for label in etypes.values():
edict[label] = []
for ekind in inter.entry_types:
+ if ekind == 'failures':
+ continue
for ent in getattr(inter, ekind).all():
edict[etypes[ent.state]].append(ent)
context['entry_types'] = edict
diff --git a/src/lib/Bcfg2/SSLServer.py b/src/lib/Bcfg2/SSLServer.py
index 316c2f86c..ab7e56f33 100644
--- a/src/lib/Bcfg2/SSLServer.py
+++ b/src/lib/Bcfg2/SSLServer.py
@@ -5,7 +5,6 @@ better. """
import os
import sys
import socket
-import select
import signal
import logging
import ssl
@@ -183,7 +182,6 @@ class XMLRPCRequestHandler(SimpleXMLRPCServer.SimpleXMLRPCRequestHandler):
Adds support for HTTP authentication.
"""
-
logger = logging.getLogger("Bcfg2.SSLServer.XMLRPCRequestHandler")
def authenticate(self):
@@ -228,22 +226,22 @@ class XMLRPCRequestHandler(SimpleXMLRPCServer.SimpleXMLRPCRequestHandler):
return False
return True
- ### need to override do_POST here
def do_POST(self):
try:
max_chunk_size = 10 * 1024 * 1024
size_remaining = int(self.headers["content-length"])
L = []
while size_remaining:
- try:
- select.select([self.rfile.fileno()], [], [], 3)
- except select.error:
- print("got select timeout")
- raise
chunk_size = min(size_remaining, max_chunk_size)
- L.append(self.rfile.read(chunk_size).decode('utf-8'))
+ chunk = self.rfile.read(chunk_size).decode('utf-8')
+ if not chunk:
+ break
+ L.append(chunk)
size_remaining -= len(L[-1])
data = ''.join(L)
+ if data is None:
+ return # response has been sent
+
response = self.server._marshaled_dispatch(self.client_address,
data)
if sys.hexversion >= 0x03000000:
@@ -251,6 +249,7 @@ class XMLRPCRequestHandler(SimpleXMLRPCServer.SimpleXMLRPCRequestHandler):
except: # pylint: disable=W0702
try:
self.send_response(500)
+ self.send_header("Content-length", "0")
self.end_headers()
except:
(etype, msg) = sys.exc_info()[:2]
@@ -306,14 +305,11 @@ class XMLRPCRequestHandler(SimpleXMLRPCServer.SimpleXMLRPCRequestHandler):
def finish(self):
# shut down the connection
- if not self.wfile.closed:
- try:
- self.wfile.flush()
- self.wfile.close()
- except socket.error:
- err = sys.exc_info()[1]
- self.logger.warning("Error closing connection: %s" % err)
- self.rfile.close()
+ try:
+ SimpleXMLRPCServer.SimpleXMLRPCRequestHandler.finish(self)
+ except socket.error:
+ err = sys.exc_info()[1]
+ self.logger.warning("Error closing connection: %s" % err)
class XMLRPCServer(SocketServer.ThreadingMixIn, SSLServer,
@@ -431,8 +427,6 @@ class XMLRPCServer(SocketServer.ThreadingMixIn, SSLServer,
self.handle_request()
except socket.timeout:
pass
- except select.error:
- pass
except:
self.logger.error("Got unexpected error in handle_request",
exc_info=1)
diff --git a/src/lib/Bcfg2/Server/Admin/Client.py b/src/lib/Bcfg2/Server/Admin/Client.py
index 570e993ed..325b7ae6e 100644
--- a/src/lib/Bcfg2/Server/Admin/Client.py
+++ b/src/lib/Bcfg2/Server/Admin/Client.py
@@ -5,9 +5,22 @@ import Bcfg2.Server.Admin
from Bcfg2.Server.Plugin import MetadataConsistencyError
+def get_attribs(args):
+ """ Get a list of attributes to set on a client when adding/updating it """
+ attr_d = {}
+ for i in args[2:]:
+ attr, val = i.split('=', 1)
+ if attr not in ['profile', 'uuid', 'password', 'floating', 'secure',
+ 'address', 'auth']:
+ print("Attribute %s unknown" % attr)
+ raise SystemExit(1)
+ attr_d[attr] = val
+ return attr_d
+
+
class Client(Bcfg2.Server.Admin.MetadataCore):
""" Create, delete, or list client entries """
- __usage__ = "[options] [add|del|list] [attr=val]"
+ __usage__ = "[options] [add|del|update|list] [attr=val]"
__plugin_whitelist__ = ["Metadata"]
def __call__(self, args):
@@ -16,21 +29,22 @@ class Client(Bcfg2.Server.Admin.MetadataCore):
"Usage: %s" % self.__usage__)
if args[0] == 'add':
try:
- self.metadata.add_client(args[1])
+ self.metadata.add_client(args[1], get_attribs(args))
except MetadataConsistencyError:
- err = sys.exc_info()[1]
- print("Error in adding client: %s" % err)
- raise SystemExit(1)
+ self.errExit("Error adding client: %s" % sys.exc_info()[1])
+ elif args[0] in ['update', 'up']:
+ try:
+ self.metadata.update_client(args[1], get_attribs(args))
+ except MetadataConsistencyError:
+ self.errExit("Error updating client: %s" % sys.exc_info()[1])
elif args[0] in ['delete', 'remove', 'del', 'rm']:
try:
self.metadata.remove_client(args[1])
except MetadataConsistencyError:
- err = sys.exc_info()[1]
- print("Error in deleting client: %s" % err)
- raise SystemExit(1)
+ self.errExit("Error deleting client: %s" %
+ sys.exc_info()[1])
elif args[0] in ['list', 'ls']:
for client in self.metadata.list_clients():
print(client)
else:
- print("No command specified")
- raise SystemExit(1)
+ self.errExit("No command specified")
diff --git a/src/lib/Bcfg2/Server/Admin/Compare.py b/src/lib/Bcfg2/Server/Admin/Compare.py
index c56dd0a8f..d7285284a 100644
--- a/src/lib/Bcfg2/Server/Admin/Compare.py
+++ b/src/lib/Bcfg2/Server/Admin/Compare.py
@@ -115,7 +115,6 @@ class Compare(Bcfg2.Server.Admin.Mode):
return identical
def __call__(self, args):
- Bcfg2.Server.Admin.Mode.__call__(self, args)
if len(args) == 0:
self.errExit("No argument specified.\n"
"Please see bcfg2-admin compare help for usage.")
@@ -145,5 +144,4 @@ class Compare(Bcfg2.Server.Admin.Mode):
(old, new) = args
return self.compareSpecifications(new, old)
except IndexError:
- print(self.__call__.__doc__)
- raise SystemExit(1)
+ self.errExit(self.__call__.__doc__)
diff --git a/src/lib/Bcfg2/Server/Admin/Init.py b/src/lib/Bcfg2/Server/Admin/Init.py
index 6175d8ed0..153d7bea6 100644
--- a/src/lib/Bcfg2/Server/Admin/Init.py
+++ b/src/lib/Bcfg2/Server/Admin/Init.py
@@ -20,6 +20,8 @@ from Bcfg2.Compat import input # pylint: disable=W0622
CONFIG = '''[server]
repository = %s
plugins = %s
+# Uncomment the following to listen on all interfaces
+#listen_all = true
[statistics]
sendmailpath = %s
@@ -31,7 +33,7 @@ sendmailpath = %s
# 'postgresql', 'mysql', 'mysql_old', 'sqlite3' or 'ado_mssql'.
#name =
# Or path to database file if using sqlite3.
-#<repository>/bcfg2.sqlite is default path if left empty
+#<repository>/etc/bcfg2.sqlite is default path if left empty
#user =
# Not used with sqlite3.
#password =
@@ -78,7 +80,7 @@ CLIENTS = '''<Clients version="3.0">
'''
# Mapping of operating system names to groups
-OS_LIST = [('Red Hat/Fedora/RHEL/RHAS/Centos', 'redhat'),
+OS_LIST = [('Red Hat/Fedora/RHEL/RHAS/CentOS', 'redhat'),
('SUSE/SLES', 'suse'),
('Mandrake', 'mandrake'),
('Debian', 'debian'),
@@ -234,8 +236,9 @@ class Init(Bcfg2.Server.Admin.Mode):
def _prompt_server(self):
"""Ask for the server name."""
- newserver = safe_input("Input the server location [%s]: " %
- self.data['server_uri'])
+ newserver = safe_input(
+ "Input the server location (the server listens on a single "
+ "interface by default) [%s]: " % self.data['server_uri'])
if newserver != '':
self.data['server_uri'] = newserver
diff --git a/src/lib/Bcfg2/Server/Admin/Minestruct.py b/src/lib/Bcfg2/Server/Admin/Minestruct.py
index 93e42305c..37ca74894 100644
--- a/src/lib/Bcfg2/Server/Admin/Minestruct.py
+++ b/src/lib/Bcfg2/Server/Admin/Minestruct.py
@@ -20,9 +20,8 @@ class Minestruct(Bcfg2.Server.Admin.StructureMode):
"Please see bcfg2-admin minestruct help for usage.")
try:
(opts, args) = getopt.getopt(args, 'f:g:h')
- except:
- self.log.error(self.__doc__)
- raise SystemExit(1)
+ except getopt.GetoptError:
+ self.errExit(self.__doc__)
client = args[0]
output = sys.stdout
@@ -33,8 +32,7 @@ class Minestruct(Bcfg2.Server.Admin.StructureMode):
try:
output = open(optarg, 'w')
except IOError:
- self.log.error("Failed to open file: %s" % (optarg))
- raise SystemExit(1)
+ self.errExit("Failed to open file: %s" % (optarg))
elif opt == '-g':
groups = optarg.split(':')
@@ -43,10 +41,9 @@ class Minestruct(Bcfg2.Server.Admin.StructureMode):
for source in self.bcore.plugins_by_type(PullSource):
for item in source.GetExtra(client):
extra.add(item)
- except:
- self.log.error("Failed to find extra entry info for client %s" %
- client)
- raise SystemExit(1)
+ except: # pylint: disable=W0702
+ self.errExit("Failed to find extra entry info for client %s" %
+ client)
root = lxml.etree.Element("Base")
self.log.info("Found %d extra entries" % (len(extra)))
add_point = root
diff --git a/src/lib/Bcfg2/Server/Admin/Pull.py b/src/lib/Bcfg2/Server/Admin/Pull.py
index 8001425df..459fcec65 100644
--- a/src/lib/Bcfg2/Server/Admin/Pull.py
+++ b/src/lib/Bcfg2/Server/Admin/Pull.py
@@ -32,9 +32,8 @@ class Pull(Bcfg2.Server.Admin.MetadataCore):
use_stdin = False
try:
opts, gargs = getopt.getopt(args, 'vfIs')
- except:
- print(self.__doc__)
- raise SystemExit(1)
+ except getopt.GetoptError:
+ self.errExit(self.__doc__)
for opt in opts:
if opt[0] == '-v':
self.log = True
diff --git a/src/lib/Bcfg2/Server/Admin/Reports.py b/src/lib/Bcfg2/Server/Admin/Reports.py
index 6e313e84b..eb97123f7 100644
--- a/src/lib/Bcfg2/Server/Admin/Reports.py
+++ b/src/lib/Bcfg2/Server/Admin/Reports.py
@@ -19,11 +19,11 @@ sys.path.pop()
# Set DJANGO_SETTINGS_MODULE appropriately.
os.environ['DJANGO_SETTINGS_MODULE'] = '%s.settings' % project_name
-from django.db import transaction
from Bcfg2.Reporting.models import Client, Interaction, \
Performance, Bundle, Group, FailureEntry, PathEntry, \
PackageEntry, ServiceEntry, ActionEntry
+from Bcfg2.Reporting.Compat import transaction
def printStats(fn):
@@ -79,8 +79,7 @@ class Reports(Bcfg2.Server.Admin.Mode):
def __call__(self, args):
if len(args) == 0 or args[0] == '-h':
- print(self.__usage__)
- raise SystemExit(0)
+ self.errExit(self.__usage__)
# FIXME - dry run
@@ -101,9 +100,7 @@ class Reports(Bcfg2.Server.Admin.Mode):
management.call_command("syncdb", verbosity=vrb)
management.call_command("migrate", verbosity=vrb)
except:
- print("Update failed: %s" %
- traceback.format_exc().splitlines()[-1])
- raise SystemExit(1)
+ self.errExit("Update failed: %s" % sys.exc_info()[1])
elif args[0] == 'purge':
expired = False
client = None
@@ -124,24 +121,22 @@ class Reports(Bcfg2.Server.Admin.Mode):
maxdate = datetime.datetime.now() - \
datetime.timedelta(days=int(args[i + 1]))
except:
- self.log.error("Invalid number of days: %s" %
- args[i + 1])
- raise SystemExit(-1)
+ self.errExit("Invalid number of days: %s" %
+ args[i + 1])
i = i + 1
elif args[i] == '--expired':
expired = True
i = i + 1
if expired:
if state:
- self.log.error("--state is not valid with --expired")
- raise SystemExit(-1)
+ self.errExit("--state is not valid with --expired")
self.purge_expired(maxdate)
else:
self.purge(client, maxdate, state)
else:
- print("Unknown command: %s" % args[0])
+ self.errExit("Unknown command: %s" % args[0])
- @transaction.commit_on_success
+ @transaction.atomic
def scrub(self):
''' Perform a thorough scrub and cleanup of the database '''
@@ -155,8 +150,7 @@ class Reports(Bcfg2.Server.Admin.Mode):
(start_count - cls.objects.count(), cls.__class__.__name__))
except:
print("Failed to prune %s: %s" %
- (cls.__class__.__name__,
- traceback.format_exc().splitlines()[-1]))
+ (cls.__class__.__name__, sys.exc_info()[1]))
def django_command_proxy(self, command):
'''Call a django command'''
@@ -180,8 +174,7 @@ class Reports(Bcfg2.Server.Admin.Mode):
cobj = Client.objects.get(name=client)
ipurge = ipurge.filter(client=cobj)
except Client.DoesNotExist:
- self.log.error("Client %s not in database" % client)
- raise SystemExit(-1)
+ self.errExit("Client %s not in database" % client)
self.log.debug("Filtering by client: %s" % client)
if maxdate:
diff --git a/src/lib/Bcfg2/Server/Admin/Snapshots.py b/src/lib/Bcfg2/Server/Admin/Snapshots.py
index c2d279391..fcb240352 100644
--- a/src/lib/Bcfg2/Server/Admin/Snapshots.py
+++ b/src/lib/Bcfg2/Server/Admin/Snapshots.py
@@ -27,7 +27,6 @@ class Snapshots(Bcfg2.Server.Admin.Mode):
self.cfile = self.configfile
def __call__(self, args):
- Bcfg2.Server.Admin.Mode.__call__(self, args)
if len(args) == 0 or args[0] == '-h':
print(self.__usage__)
raise SystemExit(0)
diff --git a/src/lib/Bcfg2/Server/Admin/Syncdb.py b/src/lib/Bcfg2/Server/Admin/Syncdb.py
index 4ba840b86..eb417966d 100644
--- a/src/lib/Bcfg2/Server/Admin/Syncdb.py
+++ b/src/lib/Bcfg2/Server/Admin/Syncdb.py
@@ -3,6 +3,7 @@ import Bcfg2.settings
import Bcfg2.Options
import Bcfg2.Server.Admin
import Bcfg2.Server.models
+from django.core.exceptions import ImproperlyConfigured
from django.core.management import setup_environ, call_command
@@ -22,10 +23,7 @@ class Syncdb(Bcfg2.Server.Admin.Mode):
call_command("syncdb", interactive=False, verbosity=0)
self._database_available = True
except ImproperlyConfigured:
- err = sys.exc_info()[1]
- self.log.error("Django configuration problem: %s" % err)
- raise SystemExit(1)
+ self.errExit("Django configuration problem: %s" %
+ sys.exc_info()[1])
except:
- err = sys.exc_info()[1]
- self.log.error("Database update failed: %s" % err)
- raise SystemExit(1)
+ self.errExit("Database update failed: %s" % sys.exc_info()[1])
diff --git a/src/lib/Bcfg2/Server/Admin/Viz.py b/src/lib/Bcfg2/Server/Admin/Viz.py
index 1d9d25f16..2cbd7eaf6 100644
--- a/src/lib/Bcfg2/Server/Admin/Viz.py
+++ b/src/lib/Bcfg2/Server/Admin/Viz.py
@@ -102,6 +102,7 @@ class Viz(Bcfg2.Server.Admin.MetadataCore):
dotpipe.stdin.write('\tcolor="lightblue";\n')
dotpipe.stdin.write('\tBundle [ shape="septagon" ];\n')
dotpipe.stdin.write('\tGroup [shape="ellipse"];\n')
+ dotpipe.stdin.write('\t"Group Category" [shape="trapezium"];\n')
dotpipe.stdin.write('\tProfile [style="bold", shape="ellipse"];\n')
dotpipe.stdin.write('\tHblock [label="Host1|Host2|Host3", '
'shape="record"];\n')
diff --git a/src/lib/Bcfg2/Server/Admin/Xcmd.py b/src/lib/Bcfg2/Server/Admin/Xcmd.py
index be556bed4..036129a1b 100644
--- a/src/lib/Bcfg2/Server/Admin/Xcmd.py
+++ b/src/lib/Bcfg2/Server/Admin/Xcmd.py
@@ -4,7 +4,6 @@ import sys
import Bcfg2.Options
import Bcfg2.Proxy
import Bcfg2.Server.Admin
-from Bcfg2.Compat import xmlrpclib
class Xcmd(Bcfg2.Server.Admin.Mode):
@@ -31,27 +30,15 @@ class Xcmd(Bcfg2.Server.Admin.Mode):
ca=setup['ca'],
timeout=setup['timeout'])
if len(setup['args']) == 0:
- print("Usage: xcmd <xmlrpc method> <optional arguments>")
- return
+ self.errExit("Usage: xcmd <xmlrpc method> <optional arguments>")
cmd = setup['args'][0]
args = ()
if len(setup['args']) > 1:
args = tuple(setup['args'][1:])
try:
data = getattr(proxy, cmd)(*args)
- except xmlrpclib.Fault:
- flt = sys.exc_info()[1]
- if flt.faultCode == 7:
- print("Unknown method %s" % cmd)
- return
- elif flt.faultCode == 20:
- return
- else:
- raise
except Bcfg2.Proxy.ProxyError:
- err = sys.exc_info()[1]
- print("Proxy Error: %s" % err)
- return
+ self.errExit("Proxy Error: %s" % sys.exc_info()[1])
if data is not None:
print(data)
diff --git a/src/lib/Bcfg2/Server/BuiltinCore.py b/src/lib/Bcfg2/Server/BuiltinCore.py
index e69a92b64..93da767c7 100644
--- a/src/lib/Bcfg2/Server/BuiltinCore.py
+++ b/src/lib/Bcfg2/Server/BuiltinCore.py
@@ -31,7 +31,8 @@ class Core(BaseCore):
daemon_args = dict(uid=self.setup['daemon_uid'],
gid=self.setup['daemon_gid'],
- umask=int(self.setup['umask'], 8))
+ umask=int(self.setup['umask'], 8),
+ detach_process=True)
if self.setup['daemon']:
daemon_args['pidfile'] = TimeoutPIDLockFile(self.setup['daemon'],
acquire_timeout=5)
@@ -109,7 +110,6 @@ class Core(BaseCore):
keyfile=self.setup['key'],
certfile=self.setup['cert'],
register=False,
- timeout=1,
ca=self.setup['ca'],
protocol=self.setup['protocol'])
except: # pylint: disable=W0702
diff --git a/src/lib/Bcfg2/Server/Core.py b/src/lib/Bcfg2/Server/Core.py
index c246860c1..c2cf6b7a4 100644
--- a/src/lib/Bcfg2/Server/Core.py
+++ b/src/lib/Bcfg2/Server/Core.py
@@ -2,14 +2,14 @@
implementations inherit from. """
import os
-import sys
-import time
+import pwd
import atexit
-import select
-import signal
import logging
-import inspect
+import select
+import sys
import threading
+import time
+import inspect
import lxml.etree
import Bcfg2.settings
import Bcfg2.Server
@@ -200,6 +200,10 @@ class BaseCore(object):
# load plugins
Bcfg2.settings.read_config(repo=self.datastore)
+ # mapping of group name => plugin name to record where groups
+ # that are created by Connector plugins came from
+ self._dynamic_groups = dict()
+
#: Whether or not it's possible to use the Django database
#: backend for plugins that have that capability
self._database_available = False
@@ -224,11 +228,11 @@ class BaseCore(object):
verbosity=0)
self._database_available = True
except ImproperlyConfigured:
- err = sys.exc_info()[1]
- self.logger.error("Django configuration problem: %s" % err)
+ self.logger.error("Django configuration problem: %s" %
+ sys.exc_info()[1])
except:
- err = sys.exc_info()[1]
- self.logger.error("Database update failed: %s" % err)
+ self.logger.error("Database update failed: %s" %
+ sys.exc_info()[1])
if do_chown and self._database_available:
try:
@@ -243,14 +247,6 @@ class BaseCore(object):
#: The CA that signed the server cert
self.ca = setup['ca']
- def hdlr(sig, frame): # pylint: disable=W0613
- """ Handle SIGINT/Ctrl-C by shutting down the core and exiting
- properly. """
- self.shutdown()
- os._exit(1) # pylint: disable=W0212
-
- signal.signal(signal.SIGINT, hdlr)
-
#: The FAM :class:`threading.Thread`,
#: :func:`_file_monitor_thread`
self.fam_thread = \
@@ -271,6 +267,20 @@ class BaseCore(object):
#: metadata
self.metadata_cache = Cache()
+ def expire_caches_by_type(self, base_cls, key=None):
+ """ Expire caches for all
+ :class:`Bcfg2.Server.Plugin.interfaces.Caching` plugins that
+ are instances of ``base_cls``.
+
+ :param base_cls: The base plugin interface class to match (see
+ :mod:`Bcfg2.Server.Plugin.interfaces`)
+ :type base_cls: type
+ :param key: The cache key to expire
+ """
+ for plugin in self.plugins_by_type(base_cls):
+ if isinstance(plugin, Bcfg2.Server.Plugin.Caching):
+ plugin.expire_cache(key)
+
def plugins_by_type(self, base_cls):
""" Return a list of loaded plugins that match the passed type.
@@ -297,11 +307,12 @@ class BaseCore(object):
self.logger.debug("Performance logging thread starting")
while not self.terminate.isSet():
self.terminate.wait(self.setup['perflog_interval'])
- for name, stats in self.get_statistics(None).items():
- self.logger.info("Performance statistics: "
- "%s min=%.06f, max=%.06f, average=%.06f, "
- "count=%d" % ((name, ) + stats))
- self.logger.debug("Performance logging thread terminated")
+ if not self.terminate.isSet():
+ for name, stats in self.get_statistics(None).items():
+ self.logger.info("Performance statistics: "
+ "%s min=%.06f, max=%.06f, average=%.06f, "
+ "count=%d" % ((name, ) + stats))
+ self.logger.info("Performance logging thread terminated")
def _file_monitor_thread(self):
""" The thread that runs the
@@ -318,11 +329,12 @@ class BaseCore(object):
else:
if not self.fam.pending():
terminate.wait(15)
+ if self.fam.pending():
+ self._update_vcs_revision()
self.fam.handle_event_set(self.lock)
except:
continue
- self._update_vcs_revision()
- self.logger.debug("File monitor thread terminated")
+ self.logger.info("File monitor thread terminated")
@track_statistics()
def _update_vcs_revision(self):
@@ -438,14 +450,14 @@ class BaseCore(object):
def shutdown(self):
""" Perform plugin and FAM shutdown tasks. """
- self.logger.debug("Shutting down core...")
+ self.logger.info("Shutting down core...")
if not self.terminate.isSet():
self.terminate.set()
self.fam.shutdown()
- self.logger.debug("FAM shut down")
+ self.logger.info("FAM shut down")
for plugin in list(self.plugins.values()):
plugin.shutdown()
- self.logger.debug("All plugins shut down")
+ self.logger.info("All plugins shut down")
@property
def metadata_cache_mode(self):
@@ -636,10 +648,10 @@ class BaseCore(object):
del entry.attrib['realname']
return ret
except:
- entry.set('name', oldname)
self.logger.error("Failed binding entry %s:%s with altsrc %s" %
- (entry.tag, entry.get('name'),
- entry.get('altsrc')))
+ (entry.tag, entry.get('realname'),
+ entry.get('name')))
+ entry.set('name', oldname)
self.logger.error("Falling back to %s:%s" %
(entry.tag, entry.get('name')))
@@ -734,7 +746,27 @@ class BaseCore(object):
if event.code2str() == 'deleted':
return
self.setup.reparse()
- self.metadata_cache.expire()
+ self.expire_caches_by_type(Bcfg2.Server.Plugin.Metadata)
+
+ def block_for_fam_events(self, handle_events=False):
+ """ Block until all fam events have been handleed, optionally
+ handling events as well. (Setting ``handle_events=True`` is
+ useful for local server cores that don't spawn an event
+ handling thread.)"""
+ slept = 0
+ log_interval = 3
+ if handle_events:
+ self.fam.handle_events_in_interval(1)
+ slept += 1
+ if self.setup['fam_blocking']:
+ time.sleep(1)
+ slept += 1
+ while self.fam.pending() != 0:
+ time.sleep(1)
+ slept += 1
+ if slept % log_interval == 0:
+ self.logger.debug("Sleeping to handle FAM events...")
+ self.logger.debug("Slept %s seconds while handling FAM events" % slept)
def run(self):
""" Run the server core. This calls :func:`_daemonize`,
@@ -758,6 +790,11 @@ class BaseCore(object):
os.chmod(piddir, 493) # 0775
if not self._daemonize():
return False
+
+ # rewrite $HOME. pulp stores its auth creds in ~/.pulp, so
+ # this is necessary to make that work when privileges are
+ # dropped
+ os.environ['HOME'] = pwd.getpwuid(self.setup['daemon_uid'])[5]
else:
os.umask(int(self.setup['umask'], 8))
@@ -780,12 +817,9 @@ class BaseCore(object):
self.shutdown()
raise
- if self.setup['fam_blocking']:
- time.sleep(1)
- while self.fam.pending() != 0:
- time.sleep(1)
-
- self.set_debug(None, self.debug_flag)
+ if self.debug_flag:
+ self.set_debug(None, self.debug_flag)
+ self.block_for_fam_events()
self._block()
def _daemonize(self):
@@ -844,17 +878,52 @@ class BaseCore(object):
imd = self.metadata_cache.get(client_name, None)
if not imd:
self.logger.debug("Building metadata for %s" % client_name)
- imd = self.metadata.get_initial_metadata(client_name)
+ try:
+ imd = self.metadata.get_initial_metadata(client_name)
+ except MetadataConsistencyError:
+ self.critical_error(
+ "Client metadata resolution error for %s: %s" %
+ (client_name, sys.exc_info()[1]))
connectors = self.plugins_by_type(Connector)
for conn in connectors:
- grps = conn.get_additional_groups(imd)
- self.metadata.merge_additional_groups(imd, grps)
+ groups = conn.get_additional_groups(imd)
+ groupnames = []
+ for group in groups:
+ if hasattr(group, "name"):
+ groupname = group.name
+ if groupname in self._dynamic_groups:
+ if self._dynamic_groups[groupname] == conn.name:
+ self.metadata.groups[groupname] = group
+ else:
+ self.logger.warning(
+ "Refusing to clobber dynamic group %s "
+ "defined by %s" %
+ (groupname,
+ self._dynamic_groups[groupname]))
+ elif groupname in self.metadata.groups:
+ # not recorded as a dynamic group, but
+ # present in metadata.groups -- i.e., a
+ # static group
+ self.logger.warning(
+ "Refusing to clobber predefined group %s" %
+ groupname)
+ else:
+ self.metadata.groups[groupname] = group
+ self._dynamic_groups[groupname] = conn.name
+ groupnames.append(groupname)
+ else:
+ groupnames.append(group)
+
+ self.metadata.merge_additional_groups(imd, groupnames)
for conn in connectors:
data = conn.get_additional_data(imd)
self.metadata.merge_additional_data(imd, conn.name, data)
imd.query.by_name = self.build_metadata
if self.metadata_cache_mode in ['cautious', 'aggressive']:
self.metadata_cache[client_name] = imd
+ else:
+ self.logger.debug("Using cached metadata object for %s" %
+ client_name)
return imd
def process_statistics(self, client_name, statistics):
@@ -882,6 +951,7 @@ class BaseCore(object):
state.get('state')))
self.client_run_hook("end_statistics", meta)
+ @track_statistics()
def resolve_client(self, address, cleanup_cache=False, metadata=True):
""" Given a client address, get the client hostname and
optionally metadata.
@@ -934,15 +1004,19 @@ class BaseCore(object):
raise xmlrpclib.Fault(xmlrpclib.APPLICATION_ERROR,
"Critical failure: %s" % message)
+ def _get_rmi_objects(self):
+ """ Get a dict (name: object) of all objects that may have RMI
+ calls. Currently, that includes all plugins and the FAM. """
+ rv = {self.fam.__class__.__name__: self.fam}
+ rv.update(self.plugins)
+ return rv
+
def _get_rmi(self):
""" Get a list of RMI calls exposed by plugins """
rmi = dict()
- for pname, pinst in list(self.plugins.items()):
+ for pname, pinst in self._get_rmi_objects().items():
for mname in pinst.__rmi__:
rmi["%s.%s" % (pname, mname)] = getattr(pinst, mname)
- famname = self.fam.__class__.__name__
- for mname in self.fam.__rmi__:
- rmi["%s.%s" % (famname, mname)] = getattr(self.fam, mname)
return rmi
def _resolve_exposed_method(self, method_name):
@@ -1033,6 +1107,7 @@ class BaseCore(object):
for plugin in self.plugins_by_type(Probing):
for probe in plugin.GetProbes(metadata):
resp.append(probe)
+ self.logger.debug("Sending probe list to %s" % client)
return lxml.etree.tostring(resp,
xml_declaration=False).decode('UTF-8')
except:
@@ -1058,7 +1133,7 @@ class BaseCore(object):
# that's created for RecvProbeData doesn't get cached.
# I.e., the next metadata object that's built, after probe
# data is processed, is cached.
- self.metadata_cache.expire(client)
+ self.expire_caches_by_type(Bcfg2.Server.Plugin.Metadata)
try:
xpdata = lxml.etree.XML(probedata.encode('utf-8'),
parser=Bcfg2.Server.XMLParser)
@@ -1255,9 +1330,14 @@ class BaseCore(object):
self.logger.info("Core: debug = %s" % debug)
levels = self._loglevels[self.debug_flag]
for handler in logging.root.handlers:
- level = levels.get(handler.name, levels['default'])
- self.logger.debug("Setting %s log handler to %s" %
- (handler.name, logging.getLevelName(level)))
+ try:
+ level = levels.get(handler.name, levels['default'])
+ self.logger.debug("Setting %s log handler to %s" %
+ (handler.name, logging.getLevelName(level)))
+ except AttributeError:
+ level = levels['default']
+ self.logger.debug("Setting unknown log handler %s to %s" %
+ (handler, logging.getLevelName(level)))
handler.setLevel(level)
return self.debug_flag
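
The dynamic-group merging added to build_metadata above follows a simple precedence rule: a connector may define (and later update) a named group, but it may not clobber a group owned by another connector or a statically defined group. A minimal standalone sketch of that rule, using plain dicts and hypothetical names rather than Bcfg2's MetadataGroup objects:

    # Minimal sketch of the "refuse to clobber" precedence rule above.
    # `groups` stands in for self.metadata.groups, `owners` for self._dynamic_groups.
    def merge_dynamic_group(groups, owners, name, definition, connector):
        if name in owners:
            if owners[name] == connector:
                groups[name] = definition      # a connector may update its own group
            else:
                print("Refusing to clobber dynamic group %s defined by %s"
                      % (name, owners[name]))
        elif name in groups:
            print("Refusing to clobber predefined group %s" % name)
        else:
            groups[name] = definition
            owners[name] = connector

    groups = {"web": {"public": True}}         # statically defined group
    owners = {}
    merge_dynamic_group(groups, owners, "aws", {"public": False}, "AWSTags")
    merge_dynamic_group(groups, owners, "web", {"public": False}, "AWSTags")  # warns
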
diff --git a/src/lib/Bcfg2/Server/FileMonitor/__init__.py b/src/lib/Bcfg2/Server/FileMonitor/__init__.py
index e430e3160..52c3906fa 100644
--- a/src/lib/Bcfg2/Server/FileMonitor/__init__.py
+++ b/src/lib/Bcfg2/Server/FileMonitor/__init__.py
@@ -237,7 +237,8 @@ class FileMonitor(Debuggable):
except: # pylint: disable=W0702
err = sys.exc_info()[1]
LOGGER.error("Error in handling of event %s for %s: %s" %
- (event.code2str(), event.filename, err))
+ (event.code2str(), event.filename, err),
+ exc_info=1)
def handle_event_set(self, lock=None):
""" Handle all pending events.
diff --git a/src/lib/Bcfg2/Server/Hostbase/hostbase/urls.py b/src/lib/Bcfg2/Server/Hostbase/hostbase/urls.py
index 0ee204abe..a03d2c919 100644
--- a/src/lib/Bcfg2/Server/Hostbase/hostbase/urls.py
+++ b/src/lib/Bcfg2/Server/Hostbase/hostbase/urls.py
@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
-from django.conf.urls.defaults import *
+from Bcfg2.Reporting.Compat.django_urls import *
from django.contrib.auth.decorators import login_required
from django.core.urlresolvers import reverse
from django.views.generic.create_update import create_object, update_object, delete_object
diff --git a/src/lib/Bcfg2/Server/Hostbase/urls.py b/src/lib/Bcfg2/Server/Hostbase/urls.py
index 01fe97d4f..4a0c33f98 100644
--- a/src/lib/Bcfg2/Server/Hostbase/urls.py
+++ b/src/lib/Bcfg2/Server/Hostbase/urls.py
@@ -1,4 +1,4 @@
-from django.conf.urls.defaults import *
+from Bcfg2.Reporting.Compat.django_urls import *
from django.conf import settings
from django.views.generic.simple import direct_to_template
from django.contrib import admin
diff --git a/src/lib/Bcfg2/Server/Lint/Comments.py b/src/lib/Bcfg2/Server/Lint/Comments.py
index 7c3b2d9cc..f028e225e 100644
--- a/src/lib/Bcfg2/Server/Lint/Comments.py
+++ b/src/lib/Bcfg2/Server/Lint/Comments.py
@@ -143,10 +143,11 @@ class Comments(Bcfg2.Server.Lint.ServerPlugin):
self.check_xml(os.path.join(self.metadata.data, "groups.xml"),
self.metadata.groups_xml.data,
"metadata")
- if self.has_all_xincludes("clients.xml"):
- self.check_xml(os.path.join(self.metadata.data, "clients.xml"),
- self.metadata.clients_xml.data,
- "metadata")
+ if hasattr(self.metadata, "clients_xml"):
+ if self.has_all_xincludes("clients.xml"):
+ self.check_xml(os.path.join(self.metadata.data, "clients.xml"),
+ self.metadata.clients_xml.data,
+ "metadata")
def check_cfg(self):
""" Check Cfg files and ``info.xml`` files for required
diff --git a/src/lib/Bcfg2/Server/Lint/Genshi.py b/src/lib/Bcfg2/Server/Lint/Genshi.py
index 7edeb8a49..1ecb6da42 100755
--- a/src/lib/Bcfg2/Server/Lint/Genshi.py
+++ b/src/lib/Bcfg2/Server/Lint/Genshi.py
@@ -37,6 +37,12 @@ class Genshi(Bcfg2.Server.Lint.ServerPlugin):
err = sys.exc_info()[1]
self.LintError("genshi-syntax-error",
"Genshi syntax error: %s" % err)
+ except:
+ etype, err = sys.exc_info()[:2]
+ self.LintError(
+ "genshi-syntax-error",
+ "Unexpected Genshi error on %s: %s: %s" %
+ (entry.name, etype.__name__, err))
def check_tgenshi(self):
""" Check templates in TGenshi for syntax errors. """
diff --git a/src/lib/Bcfg2/Server/Lint/RequiredAttrs.py b/src/lib/Bcfg2/Server/Lint/RequiredAttrs.py
index 6ffdd33a0..e49779a10 100644
--- a/src/lib/Bcfg2/Server/Lint/RequiredAttrs.py
+++ b/src/lib/Bcfg2/Server/Lint/RequiredAttrs.py
@@ -76,7 +76,7 @@ class RequiredAttrs(Bcfg2.Server.Lint.ServerPlugin):
permissions=dict(name=is_filename, owner=is_username,
group=is_username, mode=is_octal_mode),
vcs=dict(vcstype=lambda v: (v != 'Path' and
- hasattr(Bcfg2.Client.Tools.VCS,
+ hasattr(Bcfg2.Client.Tools.VCS.VCS,
"Install%s" % v)),
revision=None, sourceurl=None)),
Service={"__any__": dict(name=None),
diff --git a/src/lib/Bcfg2/Server/Lint/TemplateAbuse.py b/src/lib/Bcfg2/Server/Lint/TemplateAbuse.py
new file mode 100644
index 000000000..fca9d14a9
--- /dev/null
+++ b/src/lib/Bcfg2/Server/Lint/TemplateAbuse.py
@@ -0,0 +1,75 @@
+""" Check for templated scripts or executables. """
+
+import os
+import stat
+import Bcfg2.Server.Lint
+from Bcfg2.Compat import any # pylint: disable=W0622
+from Bcfg2.Server.Plugin import DEFAULT_FILE_METADATA
+from Bcfg2.Server.Plugins.Cfg.CfgInfoXML import CfgInfoXML
+from Bcfg2.Server.Plugins.Cfg.CfgGenshiGenerator import CfgGenshiGenerator
+from Bcfg2.Server.Plugins.Cfg.CfgCheetahGenerator import CfgCheetahGenerator
+from Bcfg2.Server.Plugins.Cfg.CfgEncryptedGenshiGenerator import \
+ CfgEncryptedGenshiGenerator
+from Bcfg2.Server.Plugins.Cfg.CfgEncryptedCheetahGenerator import \
+ CfgEncryptedCheetahGenerator
+
+
+class TemplateAbuse(Bcfg2.Server.Lint.ServerPlugin):
+ """ Check for templated scripts or executables. """
+ templates = [CfgGenshiGenerator, CfgCheetahGenerator,
+ CfgEncryptedGenshiGenerator, CfgEncryptedCheetahGenerator]
+ extensions = [".pl", ".py", ".sh", ".rb"]
+
+ def Run(self):
+ if 'Cfg' in self.core.plugins:
+ for entryset in self.core.plugins['Cfg'].entries.values():
+ for entry in entryset.entries.values():
+ if (self.HandlesFile(entry.name) and
+ any(isinstance(entry, t) for t in self.templates)):
+ self.check_template(entryset, entry)
+
+ @classmethod
+ def Errors(cls):
+ return {"templated-script": "warning",
+ "templated-executable": "warning"}
+
+ def check_template(self, entryset, entry):
+ """ Check a template to see if it's a script or an executable. """
+ # first, check for a known script extension
+ ext = os.path.splitext(entryset.path)[1]
+ if ext in self.extensions:
+ self.LintError("templated-script",
+ "Templated script found: %s\n"
+ "File has a known script extension: %s\n"
+ "Template a config file for the script instead" %
+ (entry.name, ext))
+ return
+
+ # next, check for a shebang line
+ firstline = open(entry.name).readline()
+ if firstline.startswith("#!"):
+ self.LintError("templated-script",
+ "Templated script found: %s\n"
+ "File starts with a shebang: %s\n"
+ "Template a config file for the script instead" %
+ (entry.name, firstline))
+ return
+
+ # finally, check for executable permissions in info.xml
+ for info_entry in entryset.entries.values():
+ if isinstance(info_entry, CfgInfoXML):
+ for pinfo in info_entry.infoxml.pnode.data.xpath("//FileInfo"):
+ try:
+ mode = int(pinfo.get("mode",
+ DEFAULT_FILE_METADATA['mode']), 8)
+ except ValueError:
+ # LintError will be produced by RequiredAttrs plugin
+ self.logger.warning("Non-octal mode: %s" %
+ pinfo.get("mode"))
+ continue
+ if mode & stat.S_IXUSR != 0:
+ self.LintError(
+ "templated-executable",
+ "Templated executable found: %s\n"
+ "Template a config file for the executable instead"
+ % entry.name)
+ return
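
For reference, the executable check above parses the info.xml mode attribute as octal and tests the owner-execute bit; a quick standalone illustration with hypothetical mode strings:

    # Parse an octal mode string and test the owner-execute bit, as the
    # info.xml check above does.
    import stat

    for mode_str in ("0644", "0755"):
        mode = int(mode_str, 8)
        print(mode_str, "executable" if mode & stat.S_IXUSR else "not executable")
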
diff --git a/src/lib/Bcfg2/Server/Lint/Validate.py b/src/lib/Bcfg2/Server/Lint/Validate.py
index 09f3f3d25..3efcc890d 100644
--- a/src/lib/Bcfg2/Server/Lint/Validate.py
+++ b/src/lib/Bcfg2/Server/Lint/Validate.py
@@ -47,6 +47,7 @@ class Validate(Bcfg2.Server.Lint.ServerlessPlugin):
"Decisions/*.xml": "decisions.xsd",
"Packages/sources.xml": "packages.xsd",
"GroupPatterns/config.xml": "grouppatterns.xsd",
+ "AWSTags/config.xml": "awstags.xsd",
"NagiosGen/config.xml": "nagiosgen.xsd",
"FileProbes/config.xml": "fileprobes.xsd",
"SSLCA/**/cert.xml": "sslca-cert.xsd",
@@ -83,6 +84,7 @@ class Validate(Bcfg2.Server.Lint.ServerlessPlugin):
"xml-failed-to-parse": "error",
"xml-failed-to-read": "error",
"xml-failed-to-verify": "error",
+ "xinclude-does-not-exist": "error",
"input-output-error": "error"}
def check_properties(self):
@@ -106,9 +108,17 @@ class Validate(Bcfg2.Server.Lint.ServerlessPlugin):
:type filename: string
:returns: lxml.etree._ElementTree - the parsed data"""
try:
- return lxml.etree.parse(filename)
- except SyntaxError:
- lint = Popen(["xmllint", filename], stdout=PIPE, stderr=STDOUT)
+ xdata = lxml.etree.parse(filename)
+ if self.files is None:
+ self._expand_wildcard_xincludes(xdata)
+ xdata.xinclude()
+ return xdata
+ except (lxml.etree.XIncludeError, SyntaxError):
+ cmd = ["xmllint", "--noout"]
+ if self.files is None:
+ cmd.append("--xinclude")
+ cmd.append(filename)
+ lint = Popen(cmd, stdout=PIPE, stderr=STDOUT)
self.LintError("xml-failed-to-parse",
"%s fails to parse:\n%s" % (filename,
lint.communicate()[0]))
@@ -119,6 +129,33 @@ class Validate(Bcfg2.Server.Lint.ServerlessPlugin):
"Failed to open file %s" % filename)
return False
+ def _expand_wildcard_xincludes(self, xdata):
+ """ a lightweight version of
+ :func:`Bcfg2.Server.Plugin.helpers.XMLFileBacked._follow_xincludes` """
+ xinclude = '%sinclude' % Bcfg2.Server.XI_NAMESPACE
+ for el in xdata.findall('//' + xinclude):
+ name = el.get("href")
+ if name.startswith("/"):
+ fpath = name
+ else:
+ fpath = os.path.join(os.path.dirname(xdata.docinfo.URL), name)
+
+ # expand globs in xinclude, a bcfg2-specific extension
+ extras = glob.glob(fpath)
+ if not extras:
+ msg = "%s: %s does not exist, skipping: %s" % \
+ (xdata.docinfo.URL, name, self.RenderXML(el))
+ if el.findall('./%sfallback' % Bcfg2.Server.XI_NAMESPACE):
+ self.logger.debug(msg)
+ else:
+ self.LintError("xinclude-does-not-exist", msg)
+
+ parent = el.getparent()
+ parent.remove(el)
+ for extra in extras:
+ if extra != xdata.docinfo.URL:
+ lxml.etree.SubElement(parent, xinclude, href=extra)
+
def validate(self, filename, schemafile, schema=None):
""" Validate a file against the given schema.
@@ -140,6 +177,8 @@ class Validate(Bcfg2.Server.Lint.ServerlessPlugin):
if not schema:
return False
datafile = self.parse(filename)
+ if not datafile:
+ return False
if not schema.validate(datafile):
cmd = ["xmllint"]
if self.files is None:
diff --git a/src/lib/Bcfg2/Server/Lint/ValidateJSON.py b/src/lib/Bcfg2/Server/Lint/ValidateJSON.py
new file mode 100644
index 000000000..1f55962eb
--- /dev/null
+++ b/src/lib/Bcfg2/Server/Lint/ValidateJSON.py
@@ -0,0 +1,72 @@
+"""Ensure that all JSON files in the Bcfg2 repository are
+valid. Currently, the only plugins that uses JSON are Ohai and
+Properties."""
+
+import os
+import sys
+import glob
+import fnmatch
+import Bcfg2.Server.Lint
+
+try:
+ import json
+ # py2.4 json library is structured differently
+ json.loads # pylint: disable=W0104
+except (ImportError, AttributeError):
+ import simplejson as json
+
+
+class ValidateJSON(Bcfg2.Server.Lint.ServerlessPlugin):
+ """Ensure that all JSON files in the Bcfg2 repository are
+ valid. Currently, the only plugins that uses JSON are Ohai and
+ Properties. """
+
+ def __init__(self, *args, **kwargs):
+ Bcfg2.Server.Lint.ServerlessPlugin.__init__(self, *args, **kwargs)
+
+ #: A list of file globs that give the path to JSON files. The
+ #: globs are extended :mod:`fnmatch` globs that also support
+ #: ``**``, which matches any number of any characters,
+ #: including forward slashes.
+ self.globs = ["Properties/*.json", "Ohai/*.json"]
+ self.files = self.get_files()
+
+ def Run(self):
+ for path in self.files:
+ self.logger.debug("Validating JSON in %s" % path)
+ try:
+ json.load(open(path))
+ except ValueError:
+ self.LintError("json-failed-to-parse",
+ "%s does not contain valid JSON: %s" %
+ (path, sys.exc_info()[1]))
+
+ @classmethod
+ def Errors(cls):
+ return {"json-failed-to-parse": "error"}
+
+ def get_files(self):
+ """Return a list of all JSON files to validate, based on
+ :attr:`Bcfg2.Server.Lint.ValidateJSON.ValidateJSON.globs`. """
+ if self.files is not None:
+ listfiles = lambda p: fnmatch.filter(self.files,
+ os.path.join('*', p))
+ else:
+ listfiles = lambda p: glob.glob(os.path.join(self.config['repo'],
+ p))
+
+ rv = []
+ for path in self.globs:
+ if '/**/' in path:
+ if self.files is not None:
+ rv.extend(listfiles(path))
+ else: # self.files is None
+ fpath, fname = path.split('/**/')
+ for root, _, files in \
+ os.walk(os.path.join(self.config['repo'],
+ fpath)):
+ rv.extend([os.path.join(root, f)
+ for f in files if f == fname])
+ else:
+ rv.extend(listfiles(path))
+ return rv
diff --git a/src/lib/Bcfg2/Server/MultiprocessingCore.py b/src/lib/Bcfg2/Server/MultiprocessingCore.py
index 81fba7092..6d41bbcbb 100644
--- a/src/lib/Bcfg2/Server/MultiprocessingCore.py
+++ b/src/lib/Bcfg2/Server/MultiprocessingCore.py
@@ -2,14 +2,114 @@
:mod:`Bcfg2.Server.BuiltinCore` that uses the Python
:mod:`multiprocessing` library to offload work to multiple child
processes. As such, it requires Python 2.6+.
+
+The parent communicates with the children over
+:class:`multiprocessing.Queue` objects via a
+:class:`Bcfg2.Server.MultiprocessingCore.RPCQueue` object.
+
+A method being called via the RPCQueue must be exposed by the child by
+decorating it with :func:`Bcfg2.Server.Core.exposed`.
"""
+import time
import threading
import lxml.etree
import multiprocessing
-from Bcfg2.Compat import Queue
+import Bcfg2.Server.Plugin
+from itertools import cycle
+from Bcfg2.Cache import Cache
+from Bcfg2.Compat import Empty, wraps
from Bcfg2.Server.Core import BaseCore, exposed
from Bcfg2.Server.BuiltinCore import Core as BuiltinCore
+from multiprocessing.connection import Listener, Client
+
+
+class DispatchingCache(Cache, Bcfg2.Server.Plugin.Debuggable):
+ """ Implementation of :class:`Bcfg2.Cache.Cache` that propagates
+ cache expiration events to child nodes. """
+
+ #: The method to send over the pipe to expire the cache
+ method = "expire_metadata_cache"
+
+ def __init__(self, *args, **kwargs):
+ self.rpc_q = kwargs.pop("queue")
+ Bcfg2.Server.Plugin.Debuggable.__init__(self)
+ Cache.__init__(self, *args, **kwargs)
+
+ def expire(self, key=None):
+ self.rpc_q.publish(self.method, args=[key])
+ Cache.expire(self, key=key)
+
+
+class RPCQueue(Bcfg2.Server.Plugin.Debuggable):
+ """ An implementation of a :class:`multiprocessing.Queue` designed
+ for several additional use patterns:
+
+ * Random-access reads, based on a key that identifies the data;
+ * Publish-subscribe, where a datum is sent to all subscribers.
+
+ The subscribers can deal with this as a normal Queue with no
+ special handling.
+ """
+ poll_wait = 3.0
+
+ def __init__(self):
+ Bcfg2.Server.Plugin.Debuggable.__init__(self)
+ self._terminate = threading.Event()
+ self._queues = dict()
+ self._listeners = []
+
+ def add_subscriber(self, name):
+ """ Add a subscriber to the queue. This returns the
+ :class:`multiprocessing.Queue` object that the subscriber
+ should read from. """
+ self._queues[name] = multiprocessing.Queue()
+ return self._queues[name]
+
+ def publish(self, method, args=None, kwargs=None):
+ """ Publish an RPC call to the queue for consumption by all
+ subscribers. """
+ for queue in self._queues.values():
+ queue.put((None, (method, args or [], kwargs or dict())))
+
+ def rpc(self, dest, method, args=None, kwargs=None):
+ """ Make an RPC call to the named subscriber, expecting a
+ response. This opens a
+ :class:`multiprocessing.connection.Listener` and passes the
+ Listener address to the child as part of the RPC call, so that
+ the child can connect to the Listener to submit its results.
+ """
+ listener = Listener()
+ self.logger.debug("Created new RPC listener at %s" % listener.address)
+ self._listeners.append(listener)
+ try:
+ self._queues[dest].put((listener.address,
+ (method, args or [], kwargs or dict())))
+ conn = listener.accept()
+ try:
+ while not self._terminate.is_set():
+ if conn.poll(self.poll_wait):
+ return conn.recv()
+ finally:
+ conn.close()
+ finally:
+ listener.close()
+ self._listeners.remove(listener)
+
+ def close(self):
+ """ Close queues and connections. """
+ self._terminate.set()
+ self.logger.debug("Closing RPC queues")
+ for name, queue in self._queues.items():
+ self.logger.debug("Closing RPC queue to %s" % name)
+ queue.close()
+
+ # close any listeners that are waiting for connections
+ self.logger.debug("Closing RPC connections")
+ for listener in self._listeners:
+ self.logger.debug("Closing RPC connection at %s" %
+ listener.address)
+ listener.close()
class DualEvent(object):
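
The RPCQueue above supports two delivery modes: publish() drops a payload on every subscriber's queue with no return address, while rpc() targets a single named subscriber and includes a Listener address for the reply. A self-contained sketch of the publish half, with a hypothetical TinyQueue class standing in for RPCQueue:

    # Sketch of the publish/subscribe half of RPCQueue: one Queue per
    # subscriber, publish() fans a call out to all of them, and an address
    # of None means "no response expected".
    import multiprocessing

    class TinyQueue(object):
        def __init__(self):
            self.queues = dict()

        def add_subscriber(self, name):
            self.queues[name] = multiprocessing.Queue()
            return self.queues[name]

        def publish(self, method, args=None):
            for queue in self.queues.values():
                queue.put((None, (method, args or [])))

    if __name__ == "__main__":
        rpc = TinyQueue()
        child_q = rpc.add_subscriber("Child-0")
        rpc.publish("expire_metadata_cache", args=["client1.example.com"])
        print(child_q.get())
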
@@ -60,68 +160,153 @@ class ChildCore(BaseCore):
those, though, if the pipe communication "protocol" were made more
robust. """
- #: How long to wait while polling for new clients to build. This
- #: doesn't affect the speed with which a client is built, but
+ #: How long to wait while polling for new RPC commands. This
+ #: doesn't affect the speed with which a command is processed, but
#: setting it too high will result in longer shutdown times, since
#: we only check for the termination event from the main process
#: every ``poll_wait`` seconds.
- poll_wait = 5.0
+ poll_wait = 3.0
- def __init__(self, setup, pipe, terminate):
+ def __init__(self, name, setup, rpc_q, terminate):
"""
+ :param name: The name of this child
+ :type name: string
:param setup: A Bcfg2 options dict
:type setup: Bcfg2.Options.OptionParser
- :param pipe: The pipe to which client hostnames are added for
- ChildCore objects to build configurations, and to
- which client configurations are added after
- having been built by ChildCore objects.
- :type pipe: multiprocessing.Pipe
+ :param rpc_q: The queue the child will read from for RPC
+ commands from the parent process. Results are
+ returned over a separate
+ :class:`multiprocessing.connection.Client` connection.
+ :type rpc_q: multiprocessing.Queue
:param terminate: An event that flags ChildCore objects to shut
themselves down.
:type terminate: multiprocessing.Event
"""
BaseCore.__init__(self, setup)
- #: The pipe to which client hostnames are added for ChildCore
- #: objects to build configurations, and to which client
- #: configurations are added after having been built by
- #: ChildCore objects.
- self.pipe = pipe
+ #: The name of this child
+ self.name = name
#: The :class:`multiprocessing.Event` that will be monitored
#: to determine when this child should shut down.
self.terminate = terminate
- def _daemonize(self):
- return True
+ #: The queue used for RPC communication
+ self.rpc_q = rpc_q
+
+ # override this setting so that the child doesn't try to write
+ # the pidfile
+ self.setup['daemon'] = False
+
+ # ensure that the child doesn't start a perflog thread
+ self.perflog_thread = None
+
+ self._rmi = dict()
def _run(self):
return True
+ def _daemonize(self):
+ return True
+
+ def _dispatch(self, address, data):
+ """ Method dispatcher used for commands received from
+ the RPC queue. """
+ if address is not None:
+ # if the key is None, then no response is expected. we
+ # make the return connection before dispatching the actual
+ # RPC call so that the parent is blocking for a connection
+ # as briefly as possible
+ self.logger.debug("Connecting to parent via %s" % address)
+ client = Client(address)
+ method, args, kwargs = data
+ func = None
+ rv = None
+ if "." in method:
+ if method in self._rmi:
+ func = self._rmi[method]
+ else:
+ self.logger.error("%s: Method %s does not exist" % (self.name,
+ method))
+ elif not hasattr(self, method):
+ self.logger.error("%s: Method %s does not exist" % (self.name,
+ method))
+ else: # method is not a plugin RMI, and exists
+ func = getattr(self, method)
+ if not func.exposed:
+ self.logger.error("%s: Method %s is not exposed" % (self.name,
+ method))
+ func = None
+ if func is not None:
+ self.logger.debug("%s: Calling RPC method %s" % (self.name,
+ method))
+ rv = func(*args, **kwargs)
+ if address is not None:
+ # if the key is None, then no response is expected
+ self.logger.debug("Returning data to parent via %s" % address)
+ client.send(rv)
+
def _block(self):
- while not self.terminate.isSet():
+ self._rmi = self._get_rmi()
+ while not self.terminate.is_set():
try:
- if self.pipe.poll(self.poll_wait):
- if not self.metadata.use_database:
- # handle FAM events, in case (for instance) the
- # client has just been added to clients.xml, or a
- # profile has just been asserted. but really, you
- # should be using the metadata database if you're
- # using this core.
- self.fam.handle_events_in_interval(0.1)
- client = self.pipe.recv()
- self.logger.debug("Building configuration for %s" % client)
- config = \
- lxml.etree.tostring(self.BuildConfiguration(client))
- self.logger.debug("Returning configuration for %s to main "
- "process" % client)
- self.pipe.send(config)
- self.logger.debug("Returned configuration for %s to main "
- "process" % client)
+ address, data = self.rpc_q.get(timeout=self.poll_wait)
+ threadname = "-".join(str(i) for i in data)
+ rpc_thread = threading.Thread(name=threadname,
+ target=self._dispatch,
+ args=[address, data])
+ rpc_thread.start()
+ except Empty:
+ pass
except KeyboardInterrupt:
break
self.shutdown()
+ def shutdown(self):
+ BaseCore.shutdown(self)
+ self.logger.info("%s: Closing RPC command queue" % self.name)
+ self.rpc_q.close()
+
+ while len(threading.enumerate()) > 1:
+ threads = [t for t in threading.enumerate()
+ if t != threading.current_thread()]
+ self.logger.info("%s: Waiting for %d thread(s): %s" %
+ (self.name, len(threads),
+ [t.name for t in threads]))
+ time.sleep(1)
+ self.logger.info("%s: All threads stopped" % self.name)
+
+ def _get_rmi(self):
+ rmi = dict()
+ for pname, pinst in self._get_rmi_objects().items():
+ for crmi in pinst.__child_rmi__:
+ if isinstance(crmi, tuple):
+ mname = crmi[1]
+ else:
+ mname = crmi
+ rmi["%s.%s" % (pname, mname)] = getattr(pinst, mname)
+ return rmi
+
+ @exposed
+ def expire_metadata_cache(self, client=None):
+ """ Expire the metadata cache for a client """
+ self.metadata_cache.expire(client)
+
+ @exposed
+ def RecvProbeData(self, address, _):
+ """ Expire the probe cache for a client """
+ self.expire_caches_by_type(Bcfg2.Server.Plugin.Probing,
+ key=self.resolve_client(address,
+ metadata=False)[0])
+
+ @exposed
+ def GetConfig(self, client):
+ """ Render the configuration for a client """
+ self.logger.debug("%s: Building configuration for %s" %
+ (self.name, client))
+ return lxml.etree.tostring(self.BuildConfiguration(client))
+
class Core(BuiltinCore):
""" A multiprocessing core that delegates building the actual
@@ -140,65 +325,163 @@ class Core(BuiltinCore):
if setup['children'] is None:
setup['children'] = multiprocessing.cpu_count()
- #: A dict of child name -> one end of the
- #: :class:`multiprocessing.Pipe` object used to communicate
- #: with that child. (The child is given the other end of the
- #: Pipe.)
- self.pipes = dict()
-
- #: A queue that keeps track of which children are available to
- #: render a configuration. A child is popped from the queue
- #: when it starts to render a config, then it's pushed back on
- #: when it's done. This lets us use a blocking call to
- #: :func:`Queue.Queue.get` when waiting for an available
- #: child.
- self.available_children = Queue(maxsize=self.setup['children'])
-
- # sigh. multiprocessing was added in py2.6, which is when the
- # camelCase methods for threading objects were deprecated in
- # favor of the Pythonic under_score methods. So
- # multiprocessing.Event *only* has is_set(), while
- # threading.Event has *both* isSet() and is_set(). In order
- # to make the core work with Python 2.4+, and with both
- # multiprocessing and threading Event objects, we just
- # monkeypatch self.terminate to have isSet().
+ #: The flag that indicates when to stop child threads and
+ #: processes
self.terminate = DualEvent(threading_event=self.terminate)
+ #: A :class:`Bcfg2.Server.MultiprocessingCore.RPCQueue` object
+ #: used to send or publish commands to children.
+ self.rpc_q = RPCQueue()
+
+ self.metadata_cache = DispatchingCache(queue=self.rpc_q)
+
+ #: A list of children that will be cycled through
+ self._all_children = []
+
+ #: An iterator that each child will be taken from in sequence,
+ #: to provide a round-robin distribution of render requests
+ self.children = None
+
def _run(self):
for cnum in range(self.setup['children']):
name = "Child-%s" % cnum
- (mainpipe, childpipe) = multiprocessing.Pipe()
- self.pipes[name] = mainpipe
+
self.logger.debug("Starting child %s" % name)
- childcore = ChildCore(self.setup, childpipe, self.terminate)
+ child_q = self.rpc_q.add_subscriber(name)
+ childcore = ChildCore(name, self.setup, child_q, self.terminate)
child = multiprocessing.Process(target=childcore.run, name=name)
child.start()
self.logger.debug("Child %s started with PID %s" % (name,
child.pid))
- self.available_children.put(name)
+ self._all_children.append(name)
+ self.logger.debug("Started %s children: %s" % (len(self._all_children),
+ self._all_children))
+ self.children = cycle(self._all_children)
return BuiltinCore._run(self)
def shutdown(self):
BuiltinCore.shutdown(self)
- for child in multiprocessing.active_children():
- self.logger.debug("Shutting down child %s" % child.name)
- child.join(self.shutdown_timeout)
- if child.is_alive():
+ self.logger.info("Closing RPC command queues")
+ self.rpc_q.close()
+
+ def term_children():
+ """ Terminate all remaining multiprocessing children. """
+ for child in multiprocessing.active_children():
self.logger.error("Waited %s seconds to shut down %s, "
"terminating" % (self.shutdown_timeout,
child.name))
child.terminate()
- else:
- self.logger.debug("Child %s shut down" % child.name)
- self.logger.debug("All children shut down")
+
+ timer = threading.Timer(self.shutdown_timeout, term_children)
+ timer.start()
+ while len(multiprocessing.active_children()):
+ self.logger.info("Waiting for %s child(ren): %s" %
+ (len(multiprocessing.active_children()),
+ [c.name
+ for c in multiprocessing.active_children()]))
+ time.sleep(1)
+ timer.cancel()
+ self.logger.info("All children shut down")
+
+ while len(threading.enumerate()) > 1:
+ threads = [t for t in threading.enumerate()
+ if t != threading.current_thread()]
+ self.logger.info("Waiting for %s thread(s): %s" %
+ (len(threads), [t.name for t in threads]))
+ time.sleep(1)
+ self.logger.info("Shutdown complete")
+
+ def _get_rmi(self):
+ child_rmi = dict()
+ for pname, pinst in self._get_rmi_objects().items():
+ for crmi in pinst.__child_rmi__:
+ if isinstance(crmi, tuple):
+ parentname, childname = crmi
+ else:
+ parentname = childname = crmi
+ child_rmi["%s.%s" % (pname, parentname)] = \
+ "%s.%s" % (pname, childname)
+
+ rmi = BuiltinCore._get_rmi(self)
+ for method in rmi.keys():
+ if method in child_rmi:
+ rmi[method] = self._child_rmi_wrapper(method,
+ rmi[method],
+ child_rmi[method])
+ return rmi
+
+ def _child_rmi_wrapper(self, method, parent_rmi, child_rmi):
+ """ Returns a callable that dispatches a call to the given
+ child RMI to child processes, and calls the parent RMI locally
+ (i.e., in the parent process). """
+ @wraps(parent_rmi)
+ def inner(*args, **kwargs):
+ """ Function that dispatches an RMI call to child
+ processes and to the (original) parent function. """
+ self.logger.debug("Dispatching RMI call to %s to children: %s" %
+ (method, child_rmi))
+ self.rpc_q.publish(child_rmi, args=args, kwargs=kwargs)
+ return parent_rmi(*args, **kwargs)
+
+ return inner
+
+ @exposed
+ def set_debug(self, address, debug):
+ self.rpc_q.set_debug(debug)
+ self.rpc_q.publish("set_debug", args=[address, debug])
+ self.metadata_cache.set_debug(debug)
+ return BuiltinCore.set_debug(self, address, debug)
+
+ @exposed
+ def RecvProbeData(self, address, probedata):
+ rv = BuiltinCore.RecvProbeData(self, address, probedata)
+ # we don't want the children to actually process probe data,
+ # so we don't send the data, just the fact that we got some.
+ self.rpc_q.publish("RecvProbeData", args=[address, None])
+ return rv
@exposed
def GetConfig(self, address):
client = self.resolve_client(address)[0]
- childname = self.available_children.get()
- self.logger.debug("Building configuration on child %s" % childname)
- pipe = self.pipes[childname]
- pipe.send(client)
- config = pipe.recv()
- self.available_children.put_nowait(childname)
- return config
+ childname = self.children.next()
+ self.logger.debug("Building configuration for %s on %s" % (client,
+ childname))
+ return self.rpc_q.rpc(childname, "GetConfig", args=[client])
+
+ @exposed
+ def get_statistics(self, address):
+ stats = dict()
+
+ def _aggregate_statistics(newstats, prefix=None):
+ """ Aggregate a set of statistics from a child or parent
+ server core. This adds the statistics to the overall
+ statistics dict (optionally prepending a prefix, such as
+ "Child-1", to uniquely identify this set of statistics),
+ and aggregates it with the set of running totals that are
+ kept from all cores. """
+ for statname, vals in newstats.items():
+ if statname.startswith("ChildCore:"):
+ statname = statname[5:]
+ if prefix:
+ prettyname = "%s:%s" % (prefix, statname)
+ else:
+ prettyname = statname
+ stats[prettyname] = vals
+ totalname = "Total:%s" % statname
+ if totalname not in stats:
+ stats[totalname] = vals
+ else:
+ newmin = min(stats[totalname][0], vals[0])
+ newmax = max(stats[totalname][1], vals[1])
+ newcount = stats[totalname][3] + vals[3]
+ newmean = ((stats[totalname][2] * stats[totalname][3]) +
+ (vals[2] * vals[3])) / newcount
+ stats[totalname] = (newmin, newmax, newmean, newcount)
+
+ stats = dict()
+ for childname in self._all_children:
+ _aggregate_statistics(
+ self.rpc_q.rpc(childname, "get_statistics", args=[address]),
+ prefix=childname)
+ _aggregate_statistics(BuiltinCore.get_statistics(self, address))
+ return stats
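
The statistics returned by each core are (min, max, mean, count) tuples, and the "Total:" entries above merge them with a count-weighted mean. A worked standalone example of that merge, using a hypothetical merge_stat helper:

    # Merge two (min, max, mean, count) tuples the way _aggregate_statistics
    # does: min of mins, max of maxes, count-weighted mean, summed count.
    def merge_stat(total, new):
        newmin = min(total[0], new[0])
        newmax = max(total[1], new[1])
        newcount = total[3] + new[3]
        newmean = ((total[2] * total[3]) + (new[2] * new[3])) / float(newcount)
        return (newmin, newmax, newmean, newcount)

    parent = (0.1, 2.0, 0.5, 10)    # 10 samples averaging 0.5s
    child = (0.2, 4.0, 1.0, 30)     # 30 samples averaging 1.0s
    print(merge_stat(parent, child))  # (0.1, 4.0, 0.875, 40)
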
diff --git a/src/lib/Bcfg2/Server/Plugin/base.py b/src/lib/Bcfg2/Server/Plugin/base.py
index ecd970b54..03feceb6f 100644
--- a/src/lib/Bcfg2/Server/Plugin/base.py
+++ b/src/lib/Bcfg2/Server/Plugin/base.py
@@ -12,6 +12,10 @@ class Debuggable(object):
#: List of names of methods to be exposed as XML-RPC functions
__rmi__ = ['toggle_debug', 'set_debug']
+ #: How exposed XML-RPC functions should be dispatched to child
+ #: processes.
+ __child_rmi__ = __rmi__[:]
+
def __init__(self, name=None):
"""
:param name: The name of the logger object to get. If none is
@@ -34,9 +38,6 @@ class Debuggable(object):
:returns: bool - The new value of the debug flag
"""
self.debug_flag = debug
- self.debug_log("%s: debug = %s" % (self.__class__.__name__,
- self.debug_flag),
- flag=True)
return debug
def toggle_debug(self):
@@ -87,9 +88,27 @@ class Plugin(Debuggable):
#: alphabetically by their name.
sort_order = 500
+ #: Whether or not to automatically create a data directory for
+ #: this plugin
+ create = True
+
#: List of names of methods to be exposed as XML-RPC functions
__rmi__ = Debuggable.__rmi__
+ #: How exposed XML-RPC functions should be dispatched to child
+ #: processes, if :mod:`Bcfg2.Server.MultiprocessingCore` is in
+ #: use. Items in ``__child_rmi__`` can be either strings (in
+ #: which case the same function is called on child processes as
+ #: on the parent) or 2-tuples, in which case the first element is
+ #: the name of the RPC function called on the parent process, and
+ #: the second element is the name of the function to call on
+ #: child processes. Functions that are not listed will not be
+ #: dispatched to child processes, i.e., they will only be called
+ #: on the parent. A function must be listed in ``__rmi__`` in
+ #: order to be exposed; functions listed in ``__child_rmi__``
+ #: but not ``__rmi__`` will be ignored.
+ __child_rmi__ = Debuggable.__child_rmi__
+
def __init__(self, core, datastore):
"""
:param core: The Bcfg2.Server.Core initializing the plugin
@@ -107,7 +126,7 @@ class Plugin(Debuggable):
self.Entries = {}
self.core = core
self.data = os.path.join(datastore, self.name)
- if not os.path.exists(self.data):
+ if self.create and not os.path.exists(self.data):
self.logger.warning("%s: %s does not exist, creating" %
(self.name, self.data))
os.makedirs(self.data)
@@ -132,6 +151,8 @@ class Plugin(Debuggable):
self.running = False
def set_debug(self, debug):
+ self.debug_log("%s: debug = %s" % (self.name, self.debug_flag),
+ flag=True)
for entry in self.Entries.values():
if isinstance(entry, Debuggable):
entry.set_debug(debug)
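
As the new ``__child_rmi__`` docstring describes, entries may be plain method names or (parent, child) 2-tuples. A hedged sketch of how a dispatch table is derived from them, using a hypothetical plugin class and mirroring the logic in Core._get_rmi / ChildCore._get_rmi above:

    # Resolve __child_rmi__ entries into a parent-name -> child-name mapping.
    class MyPlugin(object):
        __rmi__ = ['expire_cache']
        __child_rmi__ = [('expire_cache', 'expire_metadata_cache')]

    def child_dispatch_table(plugin_name, plugin):
        table = dict()
        for crmi in plugin.__child_rmi__:
            if isinstance(crmi, tuple):
                parentname, childname = crmi
            else:
                parentname = childname = crmi
            table["%s.%s" % (plugin_name, parentname)] = \
                "%s.%s" % (plugin_name, childname)
        return table

    print(child_dispatch_table("MyPlugin", MyPlugin))
    # {'MyPlugin.expire_cache': 'MyPlugin.expire_metadata_cache'}
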
diff --git a/src/lib/Bcfg2/Server/Plugin/helpers.py b/src/lib/Bcfg2/Server/Plugin/helpers.py
index 81dc1d736..3e7d68cd8 100644
--- a/src/lib/Bcfg2/Server/Plugin/helpers.py
+++ b/src/lib/Bcfg2/Server/Plugin/helpers.py
@@ -16,7 +16,7 @@ from Bcfg2.Compat import CmpMixin, wraps
from Bcfg2.Server.Plugin.base import Debuggable, Plugin
from Bcfg2.Server.Plugin.interfaces import Generator
from Bcfg2.Server.Plugin.exceptions import SpecificityError, \
- PluginExecutionError
+ PluginExecutionError, PluginInitError
try:
import django # pylint: disable=W0611
@@ -131,6 +131,19 @@ class DatabaseBacked(Plugin):
#: conform to the possible values that function can handle.
option = "use_database"
+ def __init__(self, core, datastore):
+ Plugin.__init__(self, core, datastore)
+ use_db = self.core.setup.cfp.getboolean(self.section,
+ self.option,
+ default=False)
+ if use_db and not HAS_DJANGO:
+ raise PluginInitError("%s.%s is True but Django not found" %
+ (self.section, self.option))
+ elif use_db and not self.core.database_available:
+ raise PluginInitError("%s.%s is True but the database is "
+ "unavailable due to prior errors" %
+ (self.section, self.option))
+
def _section(self):
""" The section to look in for :attr:`DatabaseBacked.option`
"""
@@ -146,10 +159,7 @@ class DatabaseBacked(Plugin):
default=False)
if use_db and HAS_DJANGO and self.core.database_available:
return True
- elif not use_db:
- return False
else:
- self.logger.error("%s is true but django not found" % self.option)
return False
@property
@@ -555,16 +565,12 @@ class XMLFileBacked(FileBacked):
xdata = self.xdata.getroottree()
else:
xdata = lxml.etree.parse(fname)
- included = [el for el in xdata.findall('//' + xinclude)]
- for el in included:
+ for el in xdata.findall('//' + xinclude):
name = el.get("href")
if name.startswith("/"):
fpath = name
else:
- if fname:
- rel = fname
- else:
- rel = self.name
+ rel = fname or self.name
fpath = os.path.join(os.path.dirname(rel), name)
# expand globs in xinclude, a bcfg2-specific extension
@@ -579,12 +585,13 @@ class XMLFileBacked(FileBacked):
parent = el.getparent()
parent.remove(el)
for extra in extras:
- if extra != self.name and extra not in self.extras:
- self.extras.append(extra)
+ if extra != self.name:
lxml.etree.SubElement(parent, xinclude, href=extra)
- self._follow_xincludes(fname=extra)
- if extra not in self.extra_monitors:
- self.add_monitor(extra)
+ if extra not in self.extras:
+ self.extras.append(extra)
+ self._follow_xincludes(fname=extra)
+ if extra not in self.extra_monitors:
+ self.add_monitor(extra)
def Index(self):
self.xdata = lxml.etree.XML(self.data, base_url=self.name,
@@ -606,15 +613,16 @@ class XMLFileBacked(FileBacked):
def add_monitor(self, fpath):
""" Add a FAM monitor to a file that has been XIncluded. This
- is only done if the constructor got both a ``fam`` object and
- ``should_monitor`` set to True.
+ is only done if the constructor got a ``fam`` object,
+ regardless of whether ``should_monitor`` is set to True (i.e.,
+ whether or not the base file is monitored).
:param fpath: The full path to the file to monitor
:type fpath: string
:returns: None
"""
self.extra_monitors.append(fpath)
- if self.fam and self.should_monitor:
+ if self.fam:
self.fam.AddMonitor(fpath, self)
def __iter__(self):
@@ -832,15 +840,10 @@ class XMLSrc(XMLFileBacked):
def HandleEvent(self, _=None):
"""Read file upon update."""
- try:
- data = open(self.name).read()
- except IOError:
- msg = "Failed to read file %s: %s" % (self.name, sys.exc_info()[1])
- self.logger.error(msg)
- raise PluginExecutionError(msg)
self.items = {}
try:
- xdata = lxml.etree.XML(data, parser=Bcfg2.Server.XMLParser)
+ xdata = lxml.etree.parse(self.name,
+ parser=Bcfg2.Server.XMLParser).getroot()
except lxml.etree.XMLSyntaxError:
msg = "Failed to parse file %s: %s" % (self.name,
sys.exc_info()[1])
@@ -857,8 +860,6 @@ class XMLSrc(XMLFileBacked):
self.logger.error(msg)
raise PluginExecutionError(msg)
- del xdata, data
-
def Cache(self, metadata):
"""Build a package dict for a given host."""
if self.cache is None or self.cache[0] != metadata:
diff --git a/src/lib/Bcfg2/Server/Plugin/interfaces.py b/src/lib/Bcfg2/Server/Plugin/interfaces.py
index 0fd711be9..33f6d338c 100644
--- a/src/lib/Bcfg2/Server/Plugin/interfaces.py
+++ b/src/lib/Bcfg2/Server/Plugin/interfaces.py
@@ -220,10 +220,32 @@ class Connector(object):
def get_additional_groups(self, metadata): # pylint: disable=W0613
""" Return a list of additional groups for the given client.
+ Each group can be either the name of a group (a string), or a
+ :class:`Bcfg2.Server.Plugins.Metadata.MetadataGroup` object
+ that defines other data besides just the name. Note that you
+ cannot return a
+ :class:`Bcfg2.Server.Plugins.Metadata.MetadataGroup` object
+ that clobbers a group defined by another plugin; the original
+ group will be used instead. For instance, assume the
+ following in ``Metadata/groups.xml``:
+
+ .. code-block:: xml
+
+ <Groups>
+ ...
+ <Group name="foo" public="false"/>
+ </Groups>
+
+ You could not subsequently return a
+ :class:`Bcfg2.Server.Plugins.Metadata.MetadataGroup` object
+ with ``public=True``; a warning would be issued, and the
+ original (non-public) ``foo`` group would be used.
:param metadata: The client metadata
:type metadata: Bcfg2.Server.Plugins.Metadata.ClientMetadata
- :return: list of strings
+ :return: list of strings or
+ :class:`Bcfg2.Server.Plugins.Metadata.MetadataGroup`
+ objects.
"""
return list()
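
A hedged sketch of a get_additional_groups() implementation that returns both plain names and richer group objects; SimpleGroup and MyConnector are hypothetical stand-ins for Bcfg2.Server.Plugins.Metadata.MetadataGroup and a real Connector plugin:

    # Return a mix of plain group names and richer group objects, as the
    # interface above allows.
    from collections import namedtuple

    SimpleGroup = namedtuple("SimpleGroup", ["name", "public", "category"])
    Meta = namedtuple("Meta", ["hostname"])

    class MyConnector(object):
        def get_additional_groups(self, metadata):
            groups = ["monitored"]                         # plain group name
            if metadata.hostname.startswith("db"):
                groups.append(SimpleGroup("database", False, "role"))
            return groups

    print(MyConnector().get_additional_groups(Meta("db01.example.com")))
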
@@ -286,6 +308,8 @@ class Statistics(Plugin):
you should avoid using Statistics and use
:class:`ThreadedStatistics` instead."""
+ create = False
+
def process_statistics(self, client, xdata):
""" Process the given XML statistics data for the specified
client.
@@ -526,6 +550,8 @@ class GoalValidator(object):
class Version(Plugin):
""" Version plugins interact with various version control systems. """
+ create = False
+
#: The path to the VCS metadata file or directory, relative to the
#: base of the Bcfg2 repository. E.g., for Subversion this would
#: be ".svn"
@@ -594,3 +620,22 @@ class ClientRunHooks(object):
:returns: None
"""
pass
+
+
+class Caching(object):
+ """ A plugin that caches more than just the data received from the
+ FAM. This presents a unified interface to clear the cache. """
+
+ def expire_cache(self, key=None):
+ """ Expire the cache associated with the given key.
+
+ :param key: The key to expire the cache for. Because cache
+ implementations vary tremendously between plugins,
+ this could be any number of things, but generally
+ a hostname. It also may or may not be possible to
+ expire the cache for a single host; this interface
+ does not require any guarantee about that.
+ :type key: varies
+ :returns: None
+ """
+ raise NotImplementedError
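
A minimal sketch of a plugin honoring the Caching interface above, using a plain dict as the cache; the plugin class is hypothetical (AWSTags below uses Bcfg2.Cache.Cache instead):

    # Implement expire_cache() over a plain dict keyed by hostname.
    class MyCachingConnector(object):
        def __init__(self):
            self._cache = dict()

        def expire_cache(self, key=None):
            if key is None:
                self._cache.clear()
            else:
                self._cache.pop(key, None)

    plugin = MyCachingConnector()
    plugin._cache["client1.example.com"] = {"role": "web"}
    plugin.expire_cache("client1.example.com")
    print(plugin._cache)   # {}
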
diff --git a/src/lib/Bcfg2/Server/Plugins/AWSTags.py b/src/lib/Bcfg2/Server/Plugins/AWSTags.py
new file mode 100644
index 000000000..147f37fbf
--- /dev/null
+++ b/src/lib/Bcfg2/Server/Plugins/AWSTags.py
@@ -0,0 +1,217 @@
+""" Query tags from AWS via boto, optionally setting group membership """
+
+import os
+import re
+import sys
+import Bcfg2.Server.Lint
+import Bcfg2.Server.Plugin
+from boto import connect_ec2
+from Bcfg2.Cache import Cache
+from Bcfg2.Compat import ConfigParser
+
+
+class NoInstanceFound(Exception):
+ """ Raised when there's no AWS instance for a given hostname """
+
+
+class AWSTagPattern(object):
+ """ Handler for a single Tag entry """
+
+ def __init__(self, name, value, groups):
+ self.name = re.compile(name)
+ if value is not None:
+ self.value = re.compile(value)
+ else:
+ self.value = value
+ self.groups = groups
+
+ def get_groups(self, tags):
+ """ Get groups that apply to the given tag set """
+ for key, value in tags.items():
+ name_match = self.name.search(key)
+ if name_match:
+ if self.value is not None:
+ value_match = self.value.search(value)
+ if value_match:
+ return self._munge_groups(value_match)
+ else:
+ return self._munge_groups(name_match)
+ break
+ return []
+
+ def _munge_groups(self, match):
+ """ Replace backreferences (``$1``, ``$2``) in Group tags with
+ their values in the regex. """
+ rv = []
+ sub = match.groups()
+ for group in self.groups:
+ newg = group
+ for idx in range(len(sub)):
+ newg = newg.replace('$%s' % (idx + 1), sub[idx])
+ rv.append(newg)
+ return rv
+
+ def __str__(self):
+ if self.value:
+ return "%s: %s=%s: %s" % (self.__class__.__name__, self.name,
+ self.value, self.groups)
+ else:
+ return "%s: %s: %s" % (self.__class__.__name__, self.name,
+ self.groups)
+
+
+class PatternFile(Bcfg2.Server.Plugin.XMLFileBacked):
+ """ representation of AWSTags config.xml """
+ __identifier__ = None
+ create = 'AWSTags'
+
+ def __init__(self, filename, core=None):
+ try:
+ fam = core.fam
+ except AttributeError:
+ fam = None
+ Bcfg2.Server.Plugin.XMLFileBacked.__init__(self, filename, fam=fam,
+ should_monitor=True)
+ self.core = core
+ self.tags = []
+
+ def Index(self):
+ Bcfg2.Server.Plugin.XMLFileBacked.Index(self)
+ if (self.core and
+ self.core.metadata_cache_mode in ['cautious', 'aggressive']):
+ self.core.metadata_cache.expire()
+ self.tags = []
+ for entry in self.xdata.xpath('//Tag'):
+ try:
+ groups = [g.text for g in entry.findall('Group')]
+ self.tags.append(AWSTagPattern(entry.get("name"),
+ entry.get("value"),
+ groups))
+ except: # pylint: disable=W0702
+ self.logger.error("AWSTags: Failed to initialize pattern %s: "
+ "%s" % (entry.get("name"),
+ sys.exc_info()[1]))
+
+ def get_groups(self, hostname, tags):
+ """ return a list of groups that should be added to the given
+ client based on patterns that match the hostname """
+ ret = []
+ for pattern in self.tags:
+ try:
+ ret.extend(pattern.get_groups(tags))
+ except: # pylint: disable=W0702
+ self.logger.error("AWSTags: Failed to process pattern %s for "
+ "%s" % (pattern, hostname),
+ exc_info=1)
+ return ret
+
+
+class AWSTags(Bcfg2.Server.Plugin.Plugin,
+ Bcfg2.Server.Plugin.Caching,
+ Bcfg2.Server.Plugin.ClientRunHooks,
+ Bcfg2.Server.Plugin.Connector):
+ """ Query tags from AWS via boto, optionally setting group membership """
+ __rmi__ = Bcfg2.Server.Plugin.Plugin.__rmi__ + ['expire_cache']
+
+ def __init__(self, core, datastore):
+ Bcfg2.Server.Plugin.Plugin.__init__(self, core, datastore)
+ Bcfg2.Server.Plugin.Caching.__init__(self)
+ Bcfg2.Server.Plugin.ClientRunHooks.__init__(self)
+ Bcfg2.Server.Plugin.Connector.__init__(self)
+ try:
+ key_id = self.core.setup.cfp.get("awstags", "access_key_id")
+ secret_key = self.core.setup.cfp.get("awstags",
+ "secret_access_key")
+ except (ConfigParser.NoSectionError, ConfigParser.NoOptionError):
+ err = sys.exc_info()[1]
+ raise Bcfg2.Server.Plugin.PluginInitError(
+ "AWSTags is not configured in bcfg2.conf: %s" % err)
+ self.debug_log("%s: Connecting to EC2" % self.name)
+ self._ec2 = connect_ec2(aws_access_key_id=key_id,
+ aws_secret_access_key=secret_key)
+ self._tagcache = Cache()
+ try:
+ self._keep_cache = self.core.setup.cfp.getboolean("awstags",
+ "cache")
+ except (ConfigParser.NoSectionError, ConfigParser.NoOptionError):
+ self._keep_cache = True
+
+ self.config = PatternFile(os.path.join(self.data, 'config.xml'),
+ core=core)
+
+ def _load_instance(self, hostname):
+ """ Load an instance from EC2 whose private DNS name matches
+ the given hostname """
+ self.debug_log("AWSTags: Loading instance with private-dns-name=%s" %
+ hostname)
+ filters = {'private-dns-name': hostname}
+ reservations = self._ec2.get_all_instances(filters=filters)
+ if reservations:
+ res = reservations[0]
+ if res.instances:
+ return res.instances[0]
+ raise NoInstanceFound(
+ "AWSTags: No instance found with private-dns-name=%s" %
+ hostname)
+
+ def _get_tags_from_ec2(self, hostname):
+ """ Get tags for the given host from EC2. This does not use
+ the local caching layer. """
+ self.debug_log("AWSTags: Getting tags for %s from AWS" %
+ hostname)
+ try:
+ return self._load_instance(hostname).tags
+ except NoInstanceFound:
+ self.debug_log(sys.exc_info()[1])
+ return dict()
+
+ def get_tags(self, metadata):
+ """ Get tags for the given host. This caches the tags locally
+ if 'cache' in the ``[awstags]`` section of ``bcfg2.conf`` is
+ true. """
+ if not self._keep_cache:
+ return self._get_tags_from_ec2(metadata.hostname)
+
+ if metadata.hostname not in self._tagcache:
+ self._tagcache[metadata.hostname] = \
+ self._get_tags_from_ec2(metadata.hostname)
+ return self._tagcache[metadata.hostname]
+
+ def expire_cache(self, key=None):
+ self._tagcache.expire(key=key)
+
+ def start_client_run(self, metadata):
+ self.expire_cache(key=metadata.hostname)
+
+ def get_additional_data(self, metadata):
+ return self.get_tags(metadata)
+
+ def get_additional_groups(self, metadata):
+ return self.config.get_groups(metadata.hostname,
+ self.get_tags(metadata))
+
+
+class AWSTagsLint(Bcfg2.Server.Lint.ServerPlugin):
+ """ ``bcfg2-lint`` plugin to check all given :ref:`AWSTags
+ <server-plugins-connectors-awstags>` patterns for validity. """
+
+ def Run(self):
+ cfg = self.core.plugins['AWSTags'].config
+ for entry in cfg.xdata.xpath('//Tag'):
+ self.check(entry, "name")
+ if entry.get("value"):
+ self.check(entry, "value")
+
+ @classmethod
+ def Errors(cls):
+ return {"pattern-fails-to-initialize": "error"}
+
+ def check(self, entry, attr):
+ """ Check a single attribute (``name`` or ``value``) of a
+ single entry for validity. """
+ try:
+ re.compile(entry.get(attr))
+ except re.error:
+ self.LintError("pattern-fails-to-initialize",
+ "'%s' regex could not be compiled: %s\n %s" %
+ (attr, sys.exc_info()[1], entry.get("name")))
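
The $1/$2 backreference substitution performed by AWSTagPattern._munge_groups can be seen in a quick standalone example; the tag, patterns, and group template are hypothetical:

    # Match a tag name and value, then substitute $1 in the group template
    # with the first captured group, as _munge_groups does.
    import re

    name_pattern = re.compile(r'^environment$')
    value_pattern = re.compile(r'^(prod|staging|dev)$')
    group_templates = ['env-$1']

    tags = {"environment": "prod"}
    for key, value in tags.items():
        if name_pattern.search(key):
            match = value_pattern.search(value)
            if match:
                subs = match.groups()
                for template in group_templates:
                    for idx in range(len(subs)):
                        template = template.replace('$%s' % (idx + 1), subs[idx])
                    print(template)   # env-prod
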
diff --git a/src/lib/Bcfg2/Server/Plugins/Bundler.py b/src/lib/Bcfg2/Server/Plugins/Bundler.py
index eef176cca..58f8f4430 100644
--- a/src/lib/Bcfg2/Server/Plugins/Bundler.py
+++ b/src/lib/Bcfg2/Server/Plugins/Bundler.py
@@ -38,9 +38,9 @@ if HAS_GENSHI:
Bcfg2.Server.Plugin.StructFile):
""" Representation of a Genshi-templated bundle XML file """
- def __init__(self, name, specific, encoding):
+ def __init__(self, name, specific, encoding, fam=None):
TemplateFile.__init__(self, name, specific, encoding)
- Bcfg2.Server.Plugin.StructFile.__init__(self, name)
+ Bcfg2.Server.Plugin.StructFile.__init__(self, name, fam=fam)
self.logger = logging.getLogger(name)
def get_xml_value(self, metadata):
@@ -53,9 +53,9 @@ if HAS_GENSHI:
stream = self.template.generate(
metadata=metadata,
repo=SETUP['repo']).filter(removecomment)
- data = lxml.etree.XML(stream.render('xml',
- strip_whitespace=False),
- parser=Bcfg2.Server.XMLParser)
+ data = lxml.etree.XML(
+ stream.render('xml', strip_whitespace=False).encode(),
+ parser=Bcfg2.Server.XMLParser)
bundlename = os.path.splitext(os.path.basename(self.name))[0]
bundle = lxml.etree.Element('Bundle', name=bundlename)
for item in self.Match(metadata, data):
@@ -106,13 +106,14 @@ class Bundler(Bcfg2.Server.Plugin.Plugin,
nsmap['py'] == 'http://genshi.edgewall.org/')):
if HAS_GENSHI:
spec = Bcfg2.Server.Plugin.Specificity()
- return BundleTemplateFile(name, spec, self.encoding)
+ return BundleTemplateFile(name, spec, self.encoding,
+ fam=self.core.fam)
else:
raise Bcfg2.Server.Plugin.PluginExecutionError("Genshi not "
"available: %s"
% name)
else:
- return BundleFile(name, self.fam)
+ return BundleFile(name, fam=self.fam)
def BuildStructures(self, metadata):
"""Build all structures for client (metadata)."""
diff --git a/src/lib/Bcfg2/Server/Plugins/Cfg/CfgAuthorizedKeysGenerator.py b/src/lib/Bcfg2/Server/Plugins/Cfg/CfgAuthorizedKeysGenerator.py
index 824d01023..41d5588e4 100644
--- a/src/lib/Bcfg2/Server/Plugins/Cfg/CfgAuthorizedKeysGenerator.py
+++ b/src/lib/Bcfg2/Server/Plugins/Cfg/CfgAuthorizedKeysGenerator.py
@@ -50,27 +50,36 @@ class CfgAuthorizedKeysGenerator(CfgGenerator, StructFile):
spec = self.XMLMatch(metadata)
rv = []
for allow in spec.findall("Allow"):
- params = ''
+ options = []
if allow.find("Params") is not None:
- params = ",".join("=".join(p)
- for p in allow.find("Params").attrib.items())
+ self.logger.warning("Use of <Params> in authorized_keys.xml "
+ "is deprecated; use <Option> instead")
+ options.extend("=".join(p)
+ for p in allow.find("Params").attrib.items())
+
+ for opt in allow.findall("Option"):
+ if opt.get("value"):
+ options.append("%s=%s" % (opt.get("name"),
+ opt.get("value")))
+ else:
+ options.append(opt.get("name"))
pubkey_name = allow.get("from")
if pubkey_name:
host = allow.get("host")
group = allow.get("group")
+ category = allow.get("category", self.category)
if host:
key_md = self.core.build_metadata(host)
elif group:
key_md = ClientMetadata("dummy", group, [group], [],
set(), set(), dict(), None,
None, None, None)
- elif (self.category and
- not metadata.group_in_category(self.category)):
+ elif category and not metadata.group_in_category(category):
self.logger.warning("Cfg: %s ignoring Allow from %s: "
"No group in category %s" %
(metadata.hostname, pubkey_name,
- self.category))
+ category))
continue
else:
key_md = metadata
@@ -96,6 +105,6 @@ class CfgAuthorizedKeysGenerator(CfgGenerator, StructFile):
(metadata.hostname,
lxml.etree.tostring(allow)))
continue
- rv.append(" ".join([params, pubkey]).strip())
+ rv.append(" ".join([",".join(options), pubkey]).strip())
return "\n".join(rv)
get_data.__doc__ = CfgGenerator.get_data.__doc__
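
The switch from <Params> to <Option> above changes how the option prefix of each authorized_keys line is assembled. A short sketch of that assembly with a hypothetical Allow block (quoting of option values, where sshd requires it, is out of scope here):

    # Build the comma-joined option prefix from <Option> elements, as the
    # generator above does, then prepend it to the public key.
    import lxml.etree

    allow = lxml.etree.XML(
        '<Allow from="keys/user.pub">'
        '<Option name="no-agent-forwarding"/>'
        '<Option name="command" value="/usr/bin/rsync"/>'
        '</Allow>')

    options = []
    for opt in allow.findall("Option"):
        if opt.get("value"):
            options.append("%s=%s" % (opt.get("name"), opt.get("value")))
        else:
            options.append(opt.get("name"))

    pubkey = "ssh-rsa AAAA... user@example.com"
    print(" ".join([",".join(options), pubkey]).strip())
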
diff --git a/src/lib/Bcfg2/Server/Plugins/Cfg/CfgEncryptedGenerator.py b/src/lib/Bcfg2/Server/Plugins/Cfg/CfgEncryptedGenerator.py
index 3b4703ddb..cf7eae75b 100644
--- a/src/lib/Bcfg2/Server/Plugins/Cfg/CfgEncryptedGenerator.py
+++ b/src/lib/Bcfg2/Server/Plugins/Cfg/CfgEncryptedGenerator.py
@@ -1,8 +1,9 @@
""" CfgEncryptedGenerator lets you encrypt your plaintext
:ref:`server-plugins-generators-cfg` files on the server. """
+import Bcfg2.Server.Plugins.Cfg
from Bcfg2.Server.Plugin import PluginExecutionError
-from Bcfg2.Server.Plugins.Cfg import CfgGenerator, SETUP
+from Bcfg2.Server.Plugins.Cfg import CfgGenerator
try:
from Bcfg2.Encryption import bruteforce_decrypt, EVPError, \
get_algorithm
@@ -34,8 +35,10 @@ class CfgEncryptedGenerator(CfgGenerator):
return
# todo: let the user specify a passphrase by name
try:
- self.data = bruteforce_decrypt(self.data, setup=SETUP,
- algorithm=get_algorithm(SETUP))
+ self.data = bruteforce_decrypt(
+ self.data,
+ setup=Bcfg2.Server.Plugins.Cfg.SETUP,
+ algorithm=get_algorithm(Bcfg2.Server.Plugins.Cfg.SETUP))
except EVPError:
raise PluginExecutionError("Failed to decrypt %s" % self.name)
handle_event.__doc__ = CfgGenerator.handle_event.__doc__
diff --git a/src/lib/Bcfg2/Server/Plugins/Cfg/CfgPrivateKeyCreator.py b/src/lib/Bcfg2/Server/Plugins/Cfg/CfgPrivateKeyCreator.py
index c7b62f352..e890fdecb 100644
--- a/src/lib/Bcfg2/Server/Plugins/Cfg/CfgPrivateKeyCreator.py
+++ b/src/lib/Bcfg2/Server/Plugins/Cfg/CfgPrivateKeyCreator.py
@@ -159,7 +159,7 @@ class CfgPrivateKeyCreator(CfgCreator, StructFile):
return specificity
# pylint: disable=W0221
- def create_data(self, entry, metadata, return_pair=False):
+ def create_data(self, entry, metadata):
""" Create data for the given entry on the given client
:param entry: The abstract entry to create data for. This
@@ -167,15 +167,7 @@ class CfgPrivateKeyCreator(CfgCreator, StructFile):
:type entry: lxml.etree._Element
:param metadata: The client metadata to create data for
:type metadata: Bcfg2.Server.Plugins.Metadata.ClientMetadata
- :param return_pair: Return a tuple of ``(public key, private
- key)`` instead of just the private key.
- This is used by
- :class:`Bcfg2.Server.Plugins.Cfg.CfgPublicKeyCreator.CfgPublicKeyCreator`
- to create public keys as requested.
- :type return_pair: bool
:returns: string - The private key data
- :returns: tuple - Tuple of ``(public key, private key)``, if
- ``return_pair`` is set to True
"""
spec = self.XMLMatch(metadata)
specificity = self.get_specificity(metadata, spec)
@@ -201,11 +193,7 @@ class CfgPrivateKeyCreator(CfgCreator, StructFile):
specificity['ext'] = '.crypt'
self.write_data(privkey, **specificity)
-
- if return_pair:
- return (pubkey, privkey)
- else:
- return privkey
+ return privkey
finally:
shutil.rmtree(os.path.dirname(filename))
# pylint: enable=W0221
@@ -230,7 +218,7 @@ class CfgPrivateKeyCreator(CfgCreator, StructFile):
if strict:
raise PluginExecutionError(msg)
else:
- self.logger.warning(msg)
+ self.logger.info(msg)
Index.__doc__ = StructFile.Index.__doc__
def _decrypt(self, element):
diff --git a/src/lib/Bcfg2/Server/Plugins/Cfg/CfgPublicKeyCreator.py b/src/lib/Bcfg2/Server/Plugins/Cfg/CfgPublicKeyCreator.py
index 6be438462..4bd8690ed 100644
--- a/src/lib/Bcfg2/Server/Plugins/Cfg/CfgPublicKeyCreator.py
+++ b/src/lib/Bcfg2/Server/Plugins/Cfg/CfgPublicKeyCreator.py
@@ -2,7 +2,11 @@
:class:`Bcfg2.Server.Plugins.Cfg.CfgPrivateKeyCreator.CfgPrivateKeyCreator`
to create SSH keys on the fly. """
+import os
+import sys
+import tempfile
import lxml.etree
+from Bcfg2.Utils import Executor
from Bcfg2.Server.Plugin import StructFile, PluginExecutionError
from Bcfg2.Server.Plugins.Cfg import CfgCreator, CfgCreationError, CFG
@@ -27,7 +31,8 @@ class CfgPublicKeyCreator(CfgCreator, StructFile):
CfgCreator.__init__(self, fname)
StructFile.__init__(self, fname)
self.cfg = CFG
- __init__.__doc__ = CfgCreator.__init__.__doc__
+ self.core = CFG.core
+ self.cmd = Executor()
def create_data(self, entry, metadata):
if entry.get("name").endswith(".pub"):
@@ -37,25 +42,51 @@ class CfgPublicKeyCreator(CfgCreator, StructFile):
"%s: Filename does not end in .pub" %
entry.get("name"))
- if privkey not in self.cfg.entries:
- raise CfgCreationError("Cfg: Could not find Cfg entry for %s "
- "(private key for %s)" % (privkey,
- self.name))
- eset = self.cfg.entries[privkey]
+ privkey_entry = lxml.etree.Element("Path", name=privkey)
try:
+ self.core.Bind(privkey_entry, metadata)
+ except PluginExecutionError:
+ raise CfgCreationError("Cfg: Could not bind %s (private key for "
+ "%s): %s" % (privkey, self.name,
+ sys.exc_info()[1]))
+
+ try:
+ eset = self.cfg.entries[privkey]
creator = eset.best_matching(metadata,
eset.get_handlers(metadata,
CfgCreator))
+ except KeyError:
+ raise CfgCreationError("Cfg: No private key defined for %s (%s)" %
+ (self.name, privkey))
except PluginExecutionError:
raise CfgCreationError("Cfg: No privkey.xml defined for %s "
"(private key for %s)" % (privkey,
self.name))
- privkey_entry = lxml.etree.Element("Path", name=privkey)
- pubkey = creator.create_data(privkey_entry, metadata,
- return_pair=True)[0]
- return pubkey
- create_data.__doc__ = CfgCreator.create_data.__doc__
+ specificity = creator.get_specificity(metadata)
+ fname = self.get_filename(**specificity)
+
+ # if the private key didn't exist, then creating it may have
+ # created the private key, too. check for it first.
+ if os.path.exists(fname):
+ return open(fname).read()
+ else:
+ # generate public key from private key
+ fd, privfile = tempfile.mkstemp()
+ try:
+ os.fdopen(fd, 'w').write(privkey_entry.text)
+ cmd = ["ssh-keygen", "-y", "-f", privfile]
+ self.debug_log("Cfg: Extracting SSH public key from %s: %s" %
+ (privkey, " ".join(cmd)))
+ result = self.cmd.run(cmd)
+ if not result.success:
+ raise CfgCreationError("Cfg: Failed to extract public key "
+ "from %s: %s" % (privkey,
+ result.error))
+ self.write_data(result.stdout, **specificity)
+ return result.stdout
+ finally:
+ os.unlink(privfile)
def handle_event(self, event):
CfgCreator.handle_event(self, event)
diff --git a/src/lib/Bcfg2/Server/Plugins/Cfg/__init__.py b/src/lib/Bcfg2/Server/Plugins/Cfg/__init__.py
index c6ac9d8dc..c6e2d0acb 100644
--- a/src/lib/Bcfg2/Server/Plugins/Cfg/__init__.py
+++ b/src/lib/Bcfg2/Server/Plugins/Cfg/__init__.py
@@ -10,6 +10,7 @@ import lxml.etree
import Bcfg2.Options
import Bcfg2.Server.Plugin
import Bcfg2.Server.Lint
+from fnmatch import fnmatch
from Bcfg2.Server.Plugin import PluginExecutionError
# pylint: disable=W0622
from Bcfg2.Compat import u_str, unicode, b64encode, walk_packages, \
@@ -35,6 +36,24 @@ SETUP = None
#: facility for passing it otherwise.
CFG = None
+_HANDLERS = []
+
+
+def handlers():
+ """ A list of Cfg handler classes. Loading the handlers must
+ be done at run-time, not at compile-time, or it causes a
+ circular import and Bad Things Happen."""
+ if not _HANDLERS:
+ for submodule in walk_packages(path=__path__, prefix=__name__ + "."):
+ mname = submodule[1].rsplit('.', 1)[-1]
+ module = getattr(__import__(submodule[1]).Server.Plugins.Cfg,
+ mname)
+ hdlr = getattr(module, mname)
+ if issubclass(hdlr, CfgBaseFileMatcher):
+ _HANDLERS.append(hdlr)
+ _HANDLERS.sort(key=operator.attrgetter("__priority__"))
+ return _HANDLERS
+
class CfgBaseFileMatcher(Bcfg2.Server.Plugin.SpecificData,
Bcfg2.Server.Plugin.Debuggable):
@@ -82,6 +101,8 @@ class CfgBaseFileMatcher(Bcfg2.Server.Plugin.SpecificData,
experimental = False
def __init__(self, name, specific, encoding):
+ if not self.__specific__ and not specific:
+ specific = Bcfg2.Server.Plugin.Specificity(all=True)
Bcfg2.Server.Plugin.SpecificData.__init__(self, name, specific,
encoding)
Bcfg2.Server.Plugin.Debuggable.__init__(self)
@@ -459,7 +480,6 @@ class CfgEntrySet(Bcfg2.Server.Plugin.EntrySet,
entry_type, encoding)
Bcfg2.Server.Plugin.Debuggable.__init__(self)
self.specific = None
- self._handlers = None
__init__.__doc__ = Bcfg2.Server.Plugin.EntrySet.__doc__
def set_debug(self, debug):
@@ -468,24 +488,6 @@ class CfgEntrySet(Bcfg2.Server.Plugin.EntrySet,
entry.set_debug(debug)
return rv
- @property
- def handlers(self):
- """ A list of Cfg handler classes. Loading the handlers must
- be done at run-time, not at compile-time, or it causes a
- circular import and Bad Things Happen."""
- if self._handlers is None:
- self._handlers = []
- for submodule in walk_packages(path=__path__,
- prefix=__name__ + "."):
- mname = submodule[1].rsplit('.', 1)[-1]
- module = getattr(__import__(submodule[1]).Server.Plugins.Cfg,
- mname)
- hdlr = getattr(module, mname)
- if CfgBaseFileMatcher in hdlr.__mro__:
- self._handlers.append(hdlr)
- self._handlers.sort(key=operator.attrgetter("__priority__"))
- return self._handlers
-
def handle_event(self, event):
""" Dispatch a FAM event to :func:`entry_init` or the
appropriate child handler object.
@@ -502,7 +504,7 @@ class CfgEntrySet(Bcfg2.Server.Plugin.EntrySet,
# process a bogus changed event like a created
return
- for hdlr in self.handlers:
+ for hdlr in handlers():
if hdlr.handles(event, basename=self.path):
if action == 'changed':
# warn about a bogus 'changed' event, but
@@ -582,10 +584,18 @@ class CfgEntrySet(Bcfg2.Server.Plugin.EntrySet,
def bind_entry(self, entry, metadata):
self.bind_info_to_entry(entry, metadata)
- data = self._generate_data(entry, metadata)
-
- for fltr in self.get_handlers(metadata, CfgFilter):
- data = fltr.modify_data(entry, metadata, data)
+ data, generator = self._generate_data(entry, metadata)
+
+ if generator is not None:
+ # data created by a CfgCreator gets no filters applied
+ for fltr in self.get_handlers(metadata, CfgFilter):
+ if fltr.specific <= generator.specific:
+ # only apply filters that are as specific or more
+ # specific than the generator used for this entry.
+ # Note that specificity comparison is backwards in
+ # this sense, since it's designed to sort from
+ # most specific to least specific.
+ data = fltr.modify_data(entry, metadata, data)
if SETUP['validate']:
try:
@@ -694,7 +704,9 @@ class CfgEntrySet(Bcfg2.Server.Plugin.EntrySet,
:type entry: lxml.etree._Element
:param metadata: The client metadata to generate data for
:type metadata: Bcfg2.Server.Plugins.Metadata.ClientMetadata
- :returns: string - the data for the entry
+ :returns: tuple of (string, generator) - the data for the
+ entry and the generator used to generate it (or
+ None, if data was created)
"""
try:
generator = self.best_matching(metadata,
@@ -703,7 +715,7 @@ class CfgEntrySet(Bcfg2.Server.Plugin.EntrySet,
except PluginExecutionError:
# if no creators or generators exist, _create_data()
# raises an appropriate exception
- return self._create_data(entry, metadata)
+ return (self._create_data(entry, metadata), None)
if entry.get('mode').lower() == 'inherit':
# use on-disk permissions
@@ -713,7 +725,7 @@ class CfgEntrySet(Bcfg2.Server.Plugin.EntrySet,
entry.set('mode',
oct_mode(stat.S_IMODE(os.stat(fname).st_mode)))
try:
- return generator.get_data(entry, metadata)
+ return (generator.get_data(entry, metadata), generator)
except:
msg = "Cfg: Error rendering %s: %s" % (entry.get("name"),
sys.exc_info()[1])
@@ -888,12 +900,17 @@ class CfgLint(Bcfg2.Server.Lint.ServerPlugin):
for basename, entry in list(self.core.plugins['Cfg'].entries.items()):
self.check_delta(basename, entry)
self.check_pubkey(basename, entry)
+ self.check_missing_files()
+ self.check_conflicting_handlers()
@classmethod
def Errors(cls):
return {"cat-file-used": "warning",
"diff-file-used": "warning",
- "no-pubkey-xml": "warning"}
+ "no-pubkey-xml": "warning",
+ "unknown-cfg-files": "error",
+ "extra-cfg-files": "error",
+ "multiple-global-handlers": "error"}
def check_delta(self, basename, entry):
""" check that no .cat or .diff files are in use """
@@ -927,3 +944,74 @@ class CfgLint(Bcfg2.Server.Lint.ServerPlugin):
self.LintError("no-pubkey-xml",
"%s has no corresponding pubkey.xml at %s" %
(basename, pubkey))
+
+ def _list_path_components(self, path):
+ """ Get a list of all components of a path. E.g.,
+ ``self._list_path_components("/foo/bar/foobaz")`` would return
+ ``["foo", "bar", "foo", "baz"]``. The list is not guaranteed
+ ``["foo", "bar", "foobaz"]``. The list is not guaranteed
+ rv = []
+ remaining, component = os.path.split(path)
+ while component != '':
+ rv.append(component)
+ remaining, component = os.path.split(remaining)
+ return rv
+
+ def check_conflicting_handlers(self):
+ """ Check that a single entryset doesn't have multiple
+ non-specific (i.e., 'all') handlers. """
+ cfg = self.core.plugins['Cfg']
+ for eset in cfg.entries.values():
+ alls = [e for e in eset.entries.values()
+ if (e.specific.all and
+ issubclass(e.__class__, CfgGenerator))]
+ if len(alls) > 1:
+ self.LintError("multiple-global-handlers",
+ "%s has multiple global handlers: %s" %
+ (eset.path, ", ".join(os.path.basename(e.name)
+ for e in alls)))
+
+ def check_missing_files(self):
+ """ check that all files on the filesystem are known to Cfg """
+ cfg = self.core.plugins['Cfg']
+
+ # first, collect ignore patterns from handlers
+ ignore = set()
+ for hdlr in handlers():
+ ignore.update(hdlr.__ignore__)
+
+ # next, get a list of all non-ignored files on the filesystem
+ all_files = set()
+ for root, _, files in os.walk(cfg.data):
+ for fname in files:
+ fpath = os.path.join(root, fname)
+ # check against the handler ignore patterns and the
+ # global FAM ignore list
+ if (not any(fname.endswith("." + i) for i in ignore) and
+ not any(fnmatch(fpath, p)
+ for p in self.config['ignore']) and
+ not any(fnmatch(c, p)
+ for p in self.config['ignore']
+ for c in self._list_path_components(fpath))):
+ all_files.add(fpath)
+
+ # next, get a list of all files known to Cfg
+ cfg_files = set()
+ for root, eset in cfg.entries.items():
+ cfg_files.update(os.path.join(cfg.data, root.lstrip("/"), fname)
+ for fname in eset.entries.keys())
+
+ # finally, compare the two
+ unknown_files = all_files - cfg_files
+ extra_files = cfg_files - all_files
+ if unknown_files:
+ self.LintError(
+ "unknown-cfg-files",
+ "Files on the filesystem could not be understood by Cfg: %s" %
+ "; ".join(unknown_files))
+ if extra_files:
+ self.LintError(
+ "extra-cfg-files",
+ "Cfg has entries for files that do not exist on the "
+ "filesystem: %s\nThis is probably a bug." %
+ "; ".join(extra_files))
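The check_missing_files lint check above reduces to a set comparison between the files under the Cfg data directory and the entries Cfg actually knows about. A reduced sketch of that comparison, with the handler and FAM ignore patterns left out:

import os

def compare_cfg_trees(data_root, known_files):
    """Return (unknown, extra): files on disk that Cfg has no entry
    for, and Cfg entries with no file behind them."""
    on_disk = set()
    for root, _, files in os.walk(data_root):
        on_disk.update(os.path.join(root, fname) for fname in files)
    known = set(known_files)
    return on_disk - known, known - on_disk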
diff --git a/src/lib/Bcfg2/Server/Plugins/GroupLogic.py b/src/lib/Bcfg2/Server/Plugins/GroupLogic.py
index 810b273af..d74c16e8b 100644
--- a/src/lib/Bcfg2/Server/Plugins/GroupLogic.py
+++ b/src/lib/Bcfg2/Server/Plugins/GroupLogic.py
@@ -3,7 +3,9 @@ template to dynamically set additional groups for clients. """
import os
import lxml.etree
+from threading import local
import Bcfg2.Server.Plugin
+from Bcfg2.Server.Plugins.Metadata import MetadataGroup
try:
from Bcfg2.Server.Plugins.Bundler import BundleTemplateFile
except ImportError:
@@ -35,13 +37,40 @@ class GroupLogic(Bcfg2.Server.Plugin.Plugin,
""" GroupLogic is a connector plugin that lets you use an XML
Genshi template to dynamically set additional groups for
clients. """
+ # perform grouplogic later than other Connector plugins, so it can
+ # use groups set by them
+ sort_order = 1000
def __init__(self, core, datastore):
Bcfg2.Server.Plugin.Plugin.__init__(self, core, datastore)
Bcfg2.Server.Plugin.Connector.__init__(self)
self.config = GroupLogicConfig(os.path.join(self.data, "groups.xml"),
core.fam)
+ self._local = local()
def get_additional_groups(self, metadata):
- return [el.get("name")
- for el in self.config.get_xml_value(metadata).findall("Group")]
+ if not hasattr(self._local, "building"):
+ # building is a thread-local set that tracks which
+ # machines GroupLogic is getting additional groups for.
+ # If get_additional_groups() is called twice for a
+ # machine before the first call has completed, the second
+ # call returns an empty list. This is for infinite
+ # recursion protection; without this check, it'd be
+ # impossible to use things like metadata.query.in_group()
+ # in GroupLogic, since that requires building all
+ # metadata, which requires running
+ # GroupLogic.get_additional_groups() for all hosts, which
+ # requires building all metadata...
+ self._local.building = set()
+ if metadata.hostname in self._local.building:
+ return []
+ self._local.building.add(metadata.hostname)
+ rv = []
+ for el in self.config.get_xml_value(metadata).findall("Group"):
+ if el.get("category"):
+ rv.append(MetadataGroup(el.get("name"),
+ category=el.get("category")))
+ else:
+ rv.append(el.get("name"))
+ self._local.building.discard(metadata.hostname)
+ return rv
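The thread-local ``building`` set added above is a generic re-entrancy guard. A simplified sketch of the same pattern outside of GroupLogic (the additional_groups/compute names are placeholders):

from threading import local

_state = local()

def additional_groups(hostname, compute):
    """Call compute(hostname), but short-circuit to [] if this thread
    is already computing groups for the same host."""
    if not hasattr(_state, 'building'):
        _state.building = set()
    if hostname in _state.building:
        return []
    _state.building.add(hostname)
    try:
        return compute(hostname)
    finally:
        _state.building.discard(hostname)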
diff --git a/src/lib/Bcfg2/Server/Plugins/Guppy.py b/src/lib/Bcfg2/Server/Plugins/Guppy.py
index 4f2601f15..3c9b8a459 100644
--- a/src/lib/Bcfg2/Server/Plugins/Guppy.py
+++ b/src/lib/Bcfg2/Server/Plugins/Guppy.py
@@ -37,6 +37,7 @@ class Guppy(Bcfg2.Server.Plugin.Plugin):
experimental = True
__rmi__ = Bcfg2.Server.Plugin.Plugin.__rmi__ + ['Enable', 'Disable']
+ __child_rmi__ = __rmi__[:]
def __init__(self, core, datastore):
Bcfg2.Server.Plugin.Plugin.__init__(self, core, datastore)
diff --git a/src/lib/Bcfg2/Server/Plugins/Metadata.py b/src/lib/Bcfg2/Server/Plugins/Metadata.py
index 3b8361c76..f734c98d0 100644
--- a/src/lib/Bcfg2/Server/Plugins/Metadata.py
+++ b/src/lib/Bcfg2/Server/Plugins/Metadata.py
@@ -16,7 +16,9 @@ import Bcfg2.Server.Lint
import Bcfg2.Server.Plugin
import Bcfg2.Server.FileMonitor
from Bcfg2.Utils import locked
-from Bcfg2.Compat import MutableMapping, all, wraps # pylint: disable=W0622
+# pylint: disable=W0622
+from Bcfg2.Compat import MutableMapping, all, any, wraps
+# pylint: enable=W0622
from Bcfg2.version import Bcfg2VersionInfo
try:
@@ -40,6 +42,8 @@ if HAS_DJANGO:
""" dict-like object to make it easier to access client bcfg2
versions from the database """
+ create = False
+
def __getitem__(self, key):
try:
return MetadataClientModel.objects.get(hostname=key).version
@@ -217,6 +221,7 @@ class XMLMetadataConfig(Bcfg2.Server.Plugin.XMLFileBacked):
sys.exc_info()[1])
self.logger.error(msg)
raise Bcfg2.Server.Plugin.MetadataRuntimeError(msg)
+ self.load_xml()
def find_xml_for_xpath(self, xpath):
"""Find and load xml file containing the xpath query"""
@@ -485,6 +490,7 @@ class MetadataGroup(tuple): # pylint: disable=E0012,R0924
class Metadata(Bcfg2.Server.Plugin.Metadata,
+ Bcfg2.Server.Plugin.Caching,
Bcfg2.Server.Plugin.ClientRunHooks,
Bcfg2.Server.Plugin.DatabaseBacked):
"""This class contains data for bcfg2 server metadata."""
@@ -493,6 +499,7 @@ class Metadata(Bcfg2.Server.Plugin.Metadata,
def __init__(self, core, datastore, watch_clients=True):
Bcfg2.Server.Plugin.Metadata.__init__(self)
+ Bcfg2.Server.Plugin.Caching.__init__(self)
Bcfg2.Server.Plugin.ClientRunHooks.__init__(self)
Bcfg2.Server.Plugin.DatabaseBacked.__init__(self, core, datastore)
self.watch_clients = watch_clients
@@ -526,21 +533,24 @@ class Metadata(Bcfg2.Server.Plugin.Metadata,
self.raliases = {}
# mapping of groupname -> MetadataGroup object
self.groups = {}
- # mappings of predicate -> MetadataGroup object
+ # mappings of groupname -> [predicates]
self.group_membership = dict()
self.negated_groups = dict()
+ # list of group names in document order
+ self.ordered_groups = []
# mapping of hostname -> version string
if self._use_db:
self.versions = ClientVersions(core, datastore)
else:
self.versions = dict()
+
self.uuid = {}
self.session_cache = {}
self.default = None
self.pdirty = False
self.password = core.setup['password']
self.query = MetadataQuery(core.build_metadata,
- lambda: list(self.clients),
+ self.list_clients,
self.get_client_names_by_groups,
self.get_client_names_by_profiles,
self.get_all_group_names,
@@ -648,6 +658,11 @@ class Metadata(Bcfg2.Server.Plugin.Metadata,
if attribs is None:
attribs = dict()
if self._use_db:
+ if attribs:
+ msg = "Metadata does not support setting client attributes " +\
+ "with use_database enabled"
+ self.logger.error(msg)
+ raise Bcfg2.Server.Plugin.PluginExecutionError(msg)
try:
client = MetadataClientModel.objects.get(hostname=client_name)
except MetadataClientModel.DoesNotExist:
@@ -670,14 +685,15 @@ class Metadata(Bcfg2.Server.Plugin.Metadata,
""" Generic method to modify XML data (group, client, etc.) """
node = self._search_xdata(tag, name, config.xdata, alias=alias)
if node is None:
- self.logger.error("%s \"%s\" does not exist" % (tag, name))
- raise Bcfg2.Server.Plugin.MetadataConsistencyError
+ msg = "%s \"%s\" does not exist" % (tag, name)
+ self.logger.error(msg)
+ raise Bcfg2.Server.Plugin.MetadataConsistencyError(msg)
xdict = config.find_xml_for_xpath('.//%s[@name="%s"]' %
(tag, node.get('name')))
if not xdict:
- self.logger.error("Unexpected error finding %s \"%s\"" %
- (tag, name))
- raise Bcfg2.Server.Plugin.MetadataConsistencyError
+ msg = 'Unexpected error finding %s "%s"' % (tag, name)
+ self.logger.error(msg)
+ raise Bcfg2.Server.Plugin.MetadataConsistencyError(msg)
for key, val in list(attribs.items()):
xdict['xquery'][0].set(key, val)
config.write_xml(xdict['filename'], xdict['xmltree'])
@@ -747,7 +763,7 @@ class Metadata(Bcfg2.Server.Plugin.Metadata,
return self._remove_xdata(self.groups_xml, "Bundle", bundle_name)
def remove_client(self, client_name):
- """Remove a bundle."""
+ """Remove a client."""
if self._use_db:
try:
client = MetadataClientModel.objects.get(hostname=client_name)
@@ -828,51 +844,34 @@ class Metadata(Bcfg2.Server.Plugin.Metadata,
if self._use_db:
self.clients = self.list_clients()
+ def _get_condition(self, element):
+ """ Return a predicate that returns True if a client meets
+ the condition specified in the given Group or Client
+ element """
+ negate = element.get('negate', 'false').lower() == 'true'
+ pname = element.get("name")
+ if element.tag == 'Group':
+ return lambda c, g, _: negate != (pname in g)
+ elif element.tag == 'Client':
+ return lambda c, g, _: negate != (pname == c)
+
+ def _get_category_condition(self, grpname):
+ """ get a predicate that returns False if a client is already
+ a member of a group in the given group's category, True
+ otherwise"""
+ return lambda client, _, categories: \
+ bool(self._check_category(client, grpname, categories))
+
+ def _aggregate_conditions(self, conditions):
+ """ aggregate all conditions on a given group declaration
+ into a single predicate """
+ return lambda client, groups, cats: \
+ all(cond(client, groups, cats) for cond in conditions)
+
def _handle_groups_xml_event(self, _): # pylint: disable=R0912
""" re-read groups.xml on any event on it """
self.groups = {}
- # these three functions must be separate functions in order to
- # ensure that the scope is right for the closures they return
- def get_condition(element):
- """ Return a predicate that returns True if a client meets
- the condition specified in the given Group or Client
- element """
- negate = element.get('negate', 'false').lower() == 'true'
- pname = element.get("name")
- if element.tag == 'Group':
- return lambda c, g, _: negate != (pname in g)
- elif element.tag == 'Client':
- return lambda c, g, _: negate != (pname == c)
-
- def get_category_condition(category, gname):
- """ get a predicate that returns False if a client is
- already a member of a group in the given category, True
- otherwise """
- def in_cat(client, groups, categories): # pylint: disable=W0613
- """ return True if the client is already a member of a
- group in the category given in the enclosing function,
- False otherwise """
- if category in categories:
- if (gname not in self.groups or
- client not in self.groups[gname].warned):
- self.logger.warning("%s: Group %s suppressed by "
- "category %s; %s already a member "
- "of %s" %
- (self.name, gname, category,
- client, categories[category]))
- if gname in self.groups:
- self.groups[gname].warned.append(client)
- return False
- return True
- return in_cat
-
- def aggregate_conditions(conditions):
- """ aggregate all conditions on a given group declaration
- into a single predicate """
- return lambda client, groups, cats: \
- all(cond(client, groups, cats) for cond in conditions)
-
# first, we get a list of all of the groups declared in the
# file. we do this in two stages because the old way of
# parsing groups.xml didn't support nested groups; in the old
@@ -898,6 +897,7 @@ class Metadata(Bcfg2.Server.Plugin.Metadata,
self.group_membership = dict()
self.negated_groups = dict()
+ self.ordered_groups = []
# confusing loop condition; the XPath query asks for all
# elements under a Group tag under a Groups tag; that is
@@ -908,37 +908,47 @@ class Metadata(Bcfg2.Server.Plugin.Metadata,
# XPath. We do the same thing for Client tags.
for el in self.groups_xml.xdata.xpath("//Groups/Group//*") + \
self.groups_xml.xdata.xpath("//Groups/Client//*"):
- if ((el.tag != 'Group' and el.tag != 'Client') or
- el.getchildren()):
+ if (el.tag != 'Group' and el.tag != 'Client') or el.getchildren():
continue
conditions = []
for parent in el.iterancestors():
- cond = get_condition(parent)
+ cond = self._get_condition(parent)
if cond:
conditions.append(cond)
gname = el.get("name")
if el.get("negate", "false").lower() == "true":
- self.negated_groups[aggregate_conditions(conditions)] = \
- self.groups[gname]
+ self.negated_groups.setdefault(gname, [])
+ self.negated_groups[gname].append(
+ self._aggregate_conditions(conditions))
else:
if self.groups[gname].category:
- conditions.append(
- get_category_condition(self.groups[gname].category,
- gname))
+ conditions.append(self._get_category_condition(gname))
- self.group_membership[aggregate_conditions(conditions)] = \
- self.groups[gname]
+ if gname not in self.ordered_groups:
+ self.ordered_groups.append(gname)
+ self.group_membership.setdefault(gname, [])
+ self.group_membership[gname].append(
+ self._aggregate_conditions(conditions))
self.states['groups.xml'] = True
+ def expire_cache(self, key=None):
+ self.core.metadata_cache.expire(key)
+
def HandleEvent(self, event):
"""Handle update events for data files."""
for handles, event_handler in self.handlers.items():
if handles(event):
# clear the entire cache when we get an event for any
# metadata file
- self.core.metadata_cache.expire()
+ self.expire_cache()
+
+ # clear out the list of category suppressions that
+ # have been warned about, since this may change when
+ # clients.xml or groups.xml changes.
+ for group in self.groups.values():
+ group.warned = []
event_handler(event)
if False not in list(self.states.values()) and self.debug_flag:
@@ -976,17 +986,21 @@ class Metadata(Bcfg2.Server.Plugin.Metadata,
self.logger.error(msg)
raise Bcfg2.Server.Plugin.PluginExecutionError(msg)
- profiles = [g for g in self.clientgroups[client]
- if g in self.groups and self.groups[g].is_profile]
- self.logger.info("Changing %s profile from %s to %s" %
- (client, profiles, profile))
- self.update_client(client, dict(profile=profile))
- if client in self.clientgroups:
- for prof in profiles:
- self.clientgroups[client].remove(prof)
- self.clientgroups[client].append(profile)
+ metadata = self.core.build_metadata(client)
+ if metadata.profile != profile:
+ self.logger.info("Changing %s profile from %s to %s" %
+ (client, metadata.profile, profile))
+ self.update_client(client, dict(profile=profile))
+ if client in self.clientgroups:
+ if metadata.profile in self.clientgroups[client]:
+ self.clientgroups[client].remove(metadata.profile)
+ self.clientgroups[client].append(profile)
+ else:
+ self.clientgroups[client] = [profile]
else:
- self.clientgroups[client] = [profile]
+ self.logger.debug(
+ "Ignoring %s request to change profile from %s to %s"
+ % (client, metadata.profile, profile))
else:
self.logger.info("Creating new client: %s, profile %s" %
(client, profile))
@@ -1002,8 +1016,8 @@ class Metadata(Bcfg2.Server.Plugin.Metadata,
self.add_client(client, dict(profile=profile))
self.clients.append(client)
self.clientgroups[client] = [profile]
- if not self._use_db:
- self.clients_xml.write()
+ if not self._use_db:
+ self.clients_xml.write()
def set_version(self, client, version):
"""Set version for provided client."""
@@ -1053,11 +1067,12 @@ class Metadata(Bcfg2.Server.Plugin.Metadata,
raise Bcfg2.Server.Plugin.MetadataConsistencyError(err)
return self.addresses[address][0]
try:
- cname = socket.gethostbyaddr(address)[0].lower()
+ cname = socket.getnameinfo(addresspair,
+ socket.NI_NAMEREQD)[0].lower()
if cname in self.aliases:
return self.aliases[cname]
return cname
- except socket.herror:
+ except (socket.gaierror, socket.herror):
err = "Address resolution error for %s: %s" % (address,
sys.exc_info()[1])
self.logger.error(err)
@@ -1072,22 +1087,77 @@ class Metadata(Bcfg2.Server.Plugin.Metadata,
categories = dict()
while numgroups != len(groups):
numgroups = len(groups)
- for predicate, group in self.group_membership.items():
- if group.name in groups:
+ newgroups = set()
+ removegroups = set()
+ for grpname in self.ordered_groups:
+ if grpname in groups:
continue
- if predicate(client, groups, categories):
- groups.add(group.name)
- if group.category:
- categories[group.category] = group.name
- for predicate, group in self.negated_groups.items():
- if group.name not in groups:
+ if any(p(client, groups, categories)
+ for p in self.group_membership[grpname]):
+ newgroups.add(grpname)
+ if (grpname in self.groups and
+ self.groups[grpname].category):
+ categories[self.groups[grpname].category] = grpname
+ groups.update(newgroups)
+ for grpname, predicates in self.negated_groups.items():
+ if grpname not in groups:
continue
- if predicate(client, groups, categories):
- groups.remove(group.name)
- if group.category:
- del categories[group.category]
+ if any(p(client, groups, categories) for p in predicates):
+ removegroups.add(grpname)
+ if (grpname in self.groups and
+ self.groups[grpname].category):
+ del categories[self.groups[grpname].category]
+ groups.difference_update(removegroups)
return (groups, categories)
+ def _check_category(self, client, grpname, categories):
+ """ Determine if the given client is already a member of a
+ group in the same category as the named group.
+
+ The return value is one of three possibilities:
+
+ * If the client is already a member of a group in the same
+ category, then False is returned (i.e., the category check
+ failed);
+ * If the group is not in any category, then True is returned;
+ * If the client is not a member of a group in the category,
+ then the name of the category is returned. This makes it
+ easy to add the category to the ClientMetadata object (or
+ other category list).
+
+ If a pure boolean value is required, you can do
+ ``bool(self._check_category(...))``.
+ """
+ if grpname not in self.groups:
+ return True
+ category = self.groups[grpname].category
+ if not category:
+ return True
+ if category in categories:
+ if client not in self.groups[grpname].warned:
+ self.logger.warning("%s: Group %s suppressed by category %s; "
+ "%s already a member of %s" %
+ (self.name, grpname, category,
+ client, categories[category]))
+ self.groups[grpname].warned.append(client)
+ return False
+ return category
+
+ def _check_and_add_category(self, client, grpname, categories):
+ """ If the client is not a member of a group in the same
+ category as the named group, then the category is added to
+ ``categories``.
+ :func:`Bcfg2.Server.Plugins.Metadata._check_category` is used
+ to determine if the category can be added.
+
+ If the category check failed, returns False; otherwise,
+ returns True. """
+ rv = self._check_category(client, grpname, categories)
+ if rv and rv is not True:
+ categories[rv] = grpname
+ return True
+ return rv
+
def get_initial_metadata(self, client): # pylint: disable=R0914,R0912
"""Return the metadata for a given client."""
if False in list(self.states.values()):
@@ -1109,39 +1179,37 @@ class Metadata(Bcfg2.Server.Plugin.Metadata,
Handles setting categories and category suppression.
Returns the new profile for the client (which might be
unchanged). """
- groups.add(grpname)
if grpname in self.groups:
- group = self.groups[grpname]
- category = group.category
- if category:
- if category in categories:
- self.logger.warning("%s: Group %s suppressed by "
- "category %s; %s already a member "
- "of %s" %
- (self.name, grpname, category,
- client, categories[category]))
- return
- categories[category] = grpname
- if not profile and group.is_profile:
+ if not self._check_and_add_category(client, grpname,
+ categories):
+ return profile
+ groups.add(grpname)
+ if not profile and self.groups[grpname].is_profile:
return grpname
else:
return profile
+ else:
+ groups.add(grpname)
+ return profile
if client not in self.clients:
pgroup = None
if client in self.clientgroups:
pgroup = self.clientgroups[client][0]
+ self.debug_log("%s: Adding new client with profile %s" %
+ (self.name, pgroup))
elif self.default:
pgroup = self.default
+ self.debug_log("%s: Adding new client with default profile %s"
+ % (self.name, pgroup))
if pgroup:
self.set_profile(client, pgroup, (None, None),
require_public=False)
profile = _add_group(pgroup)
else:
- msg = "Cannot add new client %s; no default group set" % client
- self.logger.error(msg)
- raise Bcfg2.Server.Plugin.MetadataConsistencyError(msg)
+ raise Bcfg2.Server.Plugin.MetadataConsistencyError(
+ "Cannot add new client %s; no default group set" % client)
for cgroup in self.clientgroups.get(client, []):
if cgroup in groups:
@@ -1150,6 +1218,9 @@ class Metadata(Bcfg2.Server.Plugin.Metadata,
self.groups[cgroup] = MetadataGroup(cgroup)
profile = _add_group(cgroup)
+ # we do this before setting the default because there may be
+ # groups set in <Client> tags in groups.xml that we want to
+ # apply
groups, categories = self._merge_groups(client, groups,
categories=categories)
@@ -1198,8 +1269,8 @@ class Metadata(Bcfg2.Server.Plugin.Metadata,
""" return a list of all group names """
all_groups = set()
all_groups.update(self.groups.keys())
- all_groups.update([g.name for g in self.group_membership.values()])
- all_groups.update([g.name for g in self.negated_groups.values()])
+ all_groups.update(self.group_membership.keys())
+ all_groups.update(self.negated_groups.keys())
for grp in self.clientgroups.values():
all_groups.update(grp)
return all_groups
@@ -1212,7 +1283,7 @@ class Metadata(Bcfg2.Server.Plugin.Metadata,
def get_client_names_by_profiles(self, profiles):
""" return a list of names of clients in the given profile groups """
rv = []
- for client in list(self.clients):
+ for client in self.list_clients():
mdata = self.core.build_metadata(client)
if mdata.profile in profiles:
rv.append(client)
@@ -1220,34 +1291,33 @@ class Metadata(Bcfg2.Server.Plugin.Metadata,
def get_client_names_by_groups(self, groups):
""" return a list of names of clients in the given groups """
- mdata = [self.core.build_metadata(client) for client in self.clients]
- return [md.hostname for md in mdata if md.groups.issuperset(groups)]
+ rv = []
+ for client in self.list_clients():
+ mdata = self.core.build_metadata(client)
+ if mdata.groups.issuperset(groups):
+ rv.append(client)
+ return rv
def get_client_names_by_bundles(self, bundles):
""" given a list of bundles, return a list of names of clients
that use those bundles """
- mdata = [self.core.build_metadata(client) for client in self.clients]
- return [md.hostname for md in mdata if md.bundles.issuperset(bundles)]
+ rv = []
+ for client in self.list_clients():
+ mdata = self.core.build_metadata(client)
+ if mdata.bundles.issuperset(bundles):
+ rv.append(client)
+ return rv
def merge_additional_groups(self, imd, groups):
for group in groups:
if group in imd.groups:
continue
- if group in self.groups and self.groups[group].category:
- category = self.groups[group].category
- if self.groups[group].category in imd.categories:
- self.logger.warning("%s: Group %s suppressed by category "
- "%s; %s already a member of %s" %
- (self.name, group, category,
- imd.hostname,
- imd.categories[category]))
- continue
- imd.categories[category] = group
+ if not self._check_and_add_category(imd.hostname, group,
+ imd.categories):
+ continue
imd.groups.add(group)
- self._merge_groups(imd.hostname, imd.groups,
- categories=imd.categories)
-
+ self._merge_groups(imd.hostname, imd.groups, categories=imd.categories)
for group in imd.groups:
if group in self.groups:
imd.bundles.update(self.groups[group].bundles)
@@ -1395,7 +1465,7 @@ class Metadata(Bcfg2.Server.Plugin.Metadata,
viz_str.extend(self._viz_groups(egroups, bundles, clientmeta))
if key:
for category in categories:
- viz_str.append('"%s" [label="%s", shape="record", '
+ viz_str.append('"%s" [label="%s", shape="trapezium", '
'style="filled", fillcolor="%s"];' %
(category, category, categories[category]))
return "\n".join("\t" + s for s in viz_str)
@@ -1409,8 +1479,8 @@ class Metadata(Bcfg2.Server.Plugin.Metadata,
instances = {}
rv = []
- for client in list(self.clients):
- if include_client(client):
+ for client in list(self.list_clients()):
+ if not include_client(client):
continue
if client in self.clientgroups:
grps = self.clientgroups[client]
@@ -1438,9 +1508,10 @@ class Metadata(Bcfg2.Server.Plugin.Metadata,
the graph"""
return not clientmeta or bundle in clientmeta.bundles
- bundles = list(set(bund.get('name'))
- for bund in self.groups_xml.xdata.findall('.//Bundle')
- if include_bundle(bund.get('name')))
+ bundles = \
+ list(set(bund.get('name')
+ for bund in self.groups_xml.xdata.findall('.//Bundle')
+ if include_bundle(bund.get('name'))))
bundles.sort()
return ['"bundle-%s" [ label="%s", shape="septagon"];' % (bundle,
bundle)
@@ -1586,15 +1657,36 @@ class MetadataLint(Bcfg2.Server.Lint.ServerPlugin):
"client")
def duplicate_groups(self):
- """ Check for groups that are defined more than once. We
- count a group tag as a definition if it a) has profile or
- public set; or b) has any children."""
- allgroups = [
- g
- for g in self.metadata.groups_xml.xdata.xpath("//Groups/Group") +
- self.metadata.groups_xml.xdata.xpath("//Groups/Group//Group")
- if g.get("profile") or g.get("public") or g.getchildren()]
- self.duplicate_entries(allgroups, "group")
+ """ Check for groups that are defined more than once. There are two
+ ways this can happen:
+
+ 1. The group is listed twice with contradictory options.
+ 2. The group is listed with no options *first*, and then with
+ options later.
+
+ In this context, 'first' refers to the order in which groups
+ are parsed; see the loop condition below and
+ _handle_groups_xml_event above for details. """
+ groups = dict()
+ duplicates = dict()
+ for grp in self.metadata.groups_xml.xdata.xpath("//Groups/Group") + \
+ self.metadata.groups_xml.xdata.xpath("//Groups/Group//Group"):
+ grpname = grp.get("name")
+ if grpname in duplicates:
+ duplicates[grpname].append(grp)
+ elif set(grp.attrib.keys()).difference(['negate', 'name']):
+ # group has options
+ if grpname in groups:
+ duplicates[grpname] = [grp, groups[grpname]]
+ else:
+ groups[grpname] = grp
+ else: # group has no options
+ groups[grpname] = grp
+ for grpname, grps in duplicates.items():
+ self.LintError("duplicate-group",
+ "Group %s is defined multiple times:\n%s" %
+ (grpname,
+ "\n".join(self.RenderXML(g) for g in grps)))
def duplicate_entries(self, allentries, etype):
""" Generic duplicate entry finder.
diff --git a/src/lib/Bcfg2/Server/Plugins/NagiosGen.py b/src/lib/Bcfg2/Server/Plugins/NagiosGen.py
index 466665382..8f1d03586 100644
--- a/src/lib/Bcfg2/Server/Plugins/NagiosGen.py
+++ b/src/lib/Bcfg2/Server/Plugins/NagiosGen.py
@@ -24,9 +24,9 @@ class NagiosGen(Bcfg2.Server.Plugin.Plugin,
'config.xml'),
core.fam, should_monitor=True,
create=self.name)
- self.Entries = {'Path':
- {'/etc/nagiosgen.status': self.createhostconfig,
- '/etc/nagios/nagiosgen.cfg': self.createserverconfig}}
+ self.Entries = {
+ 'Path': {'/etc/nagiosgen.status': self.createhostconfig,
+ '/etc/nagios/nagiosgen.cfg': self.createserverconfig}}
self.client_attrib = {'encoding': 'ascii',
'owner': 'root',
diff --git a/src/lib/Bcfg2/Server/Plugins/Ohai.py b/src/lib/Bcfg2/Server/Plugins/Ohai.py
index 1ec3cbd60..0853ea993 100644
--- a/src/lib/Bcfg2/Server/Plugins/Ohai.py
+++ b/src/lib/Bcfg2/Server/Plugins/Ohai.py
@@ -10,7 +10,9 @@ import Bcfg2.Server.Plugin
try:
import json
-except ImportError:
+ # py2.4 json library is structured differently
+ json.loads # pylint: disable=W0104
+except (ImportError, AttributeError):
import simplejson as json
PROBECODE = """#!/bin/sh
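The two-step import above (repeated in Ohai, Yum, Probes, and Properties) exists because the ``json`` package that was available for Python 2.4 is a different library with no ``loads`` function; probing the attribute forces a fallback to simplejson. The pattern in isolation:

try:
    import json
    json.loads  # the old py2.4 "json" package lacks loads(); probe for it
except (ImportError, AttributeError):
    import simplejson as json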
diff --git a/src/lib/Bcfg2/Server/Plugins/POSIXCompat.py b/src/lib/Bcfg2/Server/Plugins/POSIXCompat.py
index 1736becc7..71128d64c 100644
--- a/src/lib/Bcfg2/Server/Plugins/POSIXCompat.py
+++ b/src/lib/Bcfg2/Server/Plugins/POSIXCompat.py
@@ -9,6 +9,8 @@ class POSIXCompat(Bcfg2.Server.Plugin.Plugin,
Bcfg2.Server.Plugin.GoalValidator):
"""POSIXCompat is a goal validator plugin for POSIX entries."""
+ create = False
+
def __init__(self, core, datastore):
Bcfg2.Server.Plugin.Plugin.__init__(self, core, datastore)
Bcfg2.Server.Plugin.GoalValidator.__init__(self)
diff --git a/src/lib/Bcfg2/Server/Plugins/Packages/Collection.py b/src/lib/Bcfg2/Server/Plugins/Packages/Collection.py
index b25cb0fc4..39c51f351 100644
--- a/src/lib/Bcfg2/Server/Plugins/Packages/Collection.py
+++ b/src/lib/Bcfg2/Server/Plugins/Packages/Collection.py
@@ -614,6 +614,10 @@ class Collection(list, Bcfg2.Server.Plugin.Debuggable):
self.filter_unknown(unknown)
return packages, unknown
+ def __repr__(self):
+ return "%s(%s)" % (self.__class__.__name__,
+ list.__repr__(self))
+
def get_collection_class(source_type):
""" Given a source type, determine the class of Collection object
diff --git a/src/lib/Bcfg2/Server/Plugins/Packages/PackagesSources.py b/src/lib/Bcfg2/Server/Plugins/Packages/PackagesSources.py
index 332f0bbab..c47e18201 100644
--- a/src/lib/Bcfg2/Server/Plugins/Packages/PackagesSources.py
+++ b/src/lib/Bcfg2/Server/Plugins/Packages/PackagesSources.py
@@ -88,13 +88,12 @@ class PackagesSources(Bcfg2.Server.Plugin.StructFile,
:type event: Bcfg2.Server.FileMonitor.Event
:returns: None
"""
- Bcfg2.Server.Plugin.StructFile.HandleEvent(self, event=event)
if event and event.filename != self.name:
for fpath in self.extras:
if fpath == os.path.abspath(event.filename):
self.parsed.add(fpath)
break
-
+ Bcfg2.Server.Plugin.StructFile.HandleEvent(self, event=event)
if self.loaded:
self.logger.info("Reloading Packages plugin")
self.pkg_obj.Reload()
@@ -111,10 +110,11 @@ class PackagesSources(Bcfg2.Server.Plugin.StructFile,
def Index(self):
Bcfg2.Server.Plugin.StructFile.Index(self)
self.entries = []
- for xsource in self.xdata.findall('.//Source'):
- source = self.source_from_xml(xsource)
- if source is not None:
- self.entries.append(source)
+ if self.loaded:
+ for xsource in self.xdata.findall('.//Source'):
+ source = self.source_from_xml(xsource)
+ if source is not None:
+ self.entries.append(source)
Index.__doc__ = Bcfg2.Server.Plugin.StructFile.Index.__doc__ + """
``Index`` is responsible for calling :func:`source_from_xml`
diff --git a/src/lib/Bcfg2/Server/Plugins/Packages/Yum.py b/src/lib/Bcfg2/Server/Plugins/Packages/Yum.py
index bb7caab0d..f038ec9c0 100644
--- a/src/lib/Bcfg2/Server/Plugins/Packages/Yum.py
+++ b/src/lib/Bcfg2/Server/Plugins/Packages/Yum.py
@@ -53,13 +53,15 @@ The Yum Backend
import os
import re
import sys
+import time
import copy
import errno
import socket
import logging
import lxml.etree
-from subprocess import Popen, PIPE
import Bcfg2.Server.Plugin
+from lockfile import FileLock
+from Bcfg2.Utils import Executor
# pylint: disable=W0622
from Bcfg2.Compat import StringIO, cPickle, HTTPError, URLError, \
ConfigParser, any
@@ -85,7 +87,9 @@ try:
import yum
try:
import json
- except ImportError:
+ # py2.4 json library is structured differently
+ json.loads # pylint: disable=W0104
+ except (ImportError, AttributeError):
import simplejson as json
HAS_YUM = True
except ImportError:
@@ -102,9 +106,6 @@ FL = '{http://linux.duke.edu/metadata/filelists}'
PULPSERVER = None
PULPCONFIG = None
-#: The path to bcfg2-yum-helper
-HELPER = None
-
def _setup_pulp(setup):
""" Connect to a Pulp server and pass authentication credentials.
@@ -263,6 +264,8 @@ class YumCollection(Collection):
.. private-include: _add_gpg_instances, _get_pulp_consumer
"""
+ _helper = None
+
#: Options that are included in the [packages:yum] section of the
#: config but that should not be included in the temporary
#: yum.conf we write out
@@ -277,18 +280,25 @@ class YumCollection(Collection):
debug=debug)
self.keypath = os.path.join(self.cachepath, "keys")
+ #: A :class:`Bcfg2.Utils.Executor` object to use to run
+ #: external commands
+ self.cmd = Executor()
+
if self.use_yum:
#: Define a unique cache file for this collection to use
#: for cached yum metadata
self.cachefile = os.path.join(self.cachepath,
"cache-%s" % self.cachekey)
- if not os.path.exists(self.cachefile):
- os.mkdir(self.cachefile)
#: The path to the server-side config file used when
#: resolving packages with the Python yum libraries
self.cfgfile = os.path.join(self.cachefile, "yum.conf")
- self.write_config()
+
+ if not os.path.exists(self.cachefile):
+ self.debug_log("Creating common cache %s" % self.cachefile)
+ os.mkdir(self.cachefile)
+ if not self.disableMetaData:
+ self.setup_data()
else:
self.cachefile = None
@@ -309,7 +319,28 @@ class YumCollection(Collection):
self.logger.error("Could not create Pulp consumer "
"cert directory at %s: %s" %
(certdir, err))
- self.pulp_cert_set = PulpCertificateSet(certdir, self.fam)
+ self.__class__.pulp_cert_set = PulpCertificateSet(certdir,
+ self.fam)
+
+ @property
+ def disableMetaData(self):
+ """ Report whether or not metadata processing is disabled.
+ This duplicates code in Packages/__init__.py, and can probably
+ be removed in Bcfg2 1.4 when we have a module-level setup
+ object. """
+ if self.setup is None:
+ return True
+ try:
+ return not self.setup.cfp.getboolean("packages", "resolver")
+ except (ConfigParser.NoSectionError, ConfigParser.NoOptionError):
+ return False
+ except ValueError:
+ # for historical reasons we also accept "enabled" and
+ # "disabled"
+ return self.setup.cfp.get(
+ "packages",
+ "metadata",
+ default="enabled").lower() == "disabled"
@property
def __package_groups__(self):
@@ -323,20 +354,21 @@ class YumCollection(Collection):
a call to it; I wish there was a way to do this without
forking, but apparently not); finally we check in /usr/sbin,
the default location. """
- global HELPER
- if not HELPER:
+ if not self._helper:
+ # pylint: disable=W0212
try:
- HELPER = self.setup.cfp.get("packages:yum", "helper")
+ self.__class__._helper = self.setup.cfp.get("packages:yum",
+ "helper")
except (ConfigParser.NoOptionError, ConfigParser.NoSectionError):
# first see if bcfg2-yum-helper is in PATH
try:
self.debug_log("Checking for bcfg2-yum-helper in $PATH")
- Popen(['bcfg2-yum-helper'],
- stdin=PIPE, stdout=PIPE, stderr=PIPE).wait()
- HELPER = 'bcfg2-yum-helper'
+ self.cmd.run(['bcfg2-yum-helper'])
+ self.__class__._helper = 'bcfg2-yum-helper'
except OSError:
- HELPER = "/usr/sbin/bcfg2-yum-helper"
- return HELPER
+ self.__class__._helper = "/usr/sbin/bcfg2-yum-helper"
+ # pylint: enable=W0212
+ return self._helper
@property
def use_yum(self):
@@ -374,6 +406,7 @@ class YumCollection(Collection):
# the rpmdb is so hopelessly intertwined with yum that we
# have to totally reinvent the dependency resolver.
mainopts = dict(cachedir='/',
+ persistdir='/',
installroot=self.cachefile,
keepcache="0",
debuglevel="0",
@@ -675,7 +708,10 @@ class YumCollection(Collection):
gdicts.append(dict(group=group, type=ptype))
if self.use_yum:
- return self.call_helper("get_groups", inputdata=gdicts)
+ try:
+ return self.call_helper("get_groups", inputdata=gdicts)
+ except ValueError:
+ return dict()
else:
pkgs = dict()
for gdict in gdicts:
@@ -837,13 +873,25 @@ class YumCollection(Collection):
if not self.use_yum:
return Collection.complete(self, packagelist)
+ lock = FileLock(os.path.join(self.cachefile, "lock"))
+ slept = 0
+ while lock.is_locked():
+ if slept > 30:
+ self.logger.warning("Packages: Timeout waiting for yum cache "
+ "to release its lock")
+ return set(), set()
+ self.logger.debug("Packages: Yum cache is locked, waiting...")
+ time.sleep(3)
+ slept += 3
+
if packagelist:
- result = \
- self.call_helper("complete",
- dict(packages=list(packagelist),
- groups=list(self.get_relevant_groups())))
- if not result:
- # some sort of error, reported by call_helper()
+ try:
+ result = self.call_helper(
+ "complete",
+ dict(packages=list(packagelist),
+ groups=list(self.get_relevant_groups())))
+ except ValueError:
+ # error reported by call_helper()
return set(), packagelist
# json doesn't understand sets or tuples, so we get back a
# lists of lists (packages) and a list of unicode strings
@@ -874,38 +922,41 @@ class YumCollection(Collection):
``bcfg2-yum-helper`` command.
"""
cmd = [self.helper, "-c", self.cfgfile]
- verbose = self.debug_flag or self.setup['verbose']
- if verbose:
+ if self.setup['verbose']:
+ cmd.append("-v")
+ if self.debug_flag:
+ if not self.setup['verbose']:
+ # ensure that running in debug gets -vv, even if
+ # verbose is not enabled
+ cmd.append("-v")
cmd.append("-v")
cmd.append(command)
- self.debug_log("Packages: running %s" % " ".join(cmd), flag=verbose)
- try:
- helper = Popen(cmd, stdin=PIPE, stdout=PIPE, stderr=PIPE)
- except OSError:
- err = sys.exc_info()[1]
- self.logger.error("Packages: Failed to execute %s: %s" %
- (" ".join(cmd), err))
- return None
+ self.debug_log("Packages: running %s" % " ".join(cmd))
if inputdata:
- idata = json.dumps(inputdata)
- (stdout, stderr) = helper.communicate(idata)
- else:
- (stdout, stderr) = helper.communicate()
- rv = helper.wait()
- if rv:
- self.logger.error("Packages: error running bcfg2-yum-helper "
- "(returned %d): %s" % (rv, stderr))
+ result = self.cmd.run(cmd, timeout=self.setup['client_timeout'],
+ inputdata=json.dumps(inputdata))
else:
+ result = self.cmd.run(cmd, timeout=self.setup['client_timeout'])
+ if not result.success:
+ self.logger.error("Packages: error running bcfg2-yum-helper: %s" %
+ result.error)
+ elif result.stderr:
self.debug_log("Packages: debug info from bcfg2-yum-helper: %s" %
- stderr, flag=verbose)
+ result.stderr)
+
try:
- return json.loads(stdout)
+ return json.loads(result.stdout)
except ValueError:
- err = sys.exc_info()[1]
- self.logger.error("Packages: error reading bcfg2-yum-helper "
- "output: %s" % err)
- return None
+ if result.stdout:
+ err = sys.exc_info()[1]
+ self.logger.error("Packages: Error reading bcfg2-yum-helper "
+ "output: %s" % err)
+ self.logger.error("Packages: bcfg2-yum-helper output: %s" %
+ result.stdout)
+ else:
+ self.logger.error("Packages: No bcfg2-yum-helper output")
+ raise
def setup_data(self, force_update=False):
""" Do any collection-level data setup tasks. This is called
@@ -917,8 +968,7 @@ class YumCollection(Collection):
If using the yum Python libraries, this cleans up cached yum
metadata, regenerates the server-side yum config (in order to
catch any new sources that have been added to this server),
- and then cleans up cached yum metadata again, in case the new
- config has any preexisting cache.
+ then regenerates the yum cache.
:param force_update: Ignore all local cache and setup data
from its original upstream sources (i.e.,
@@ -929,15 +979,22 @@ class YumCollection(Collection):
return Collection.setup_data(self, force_update)
if force_update:
- # we call this twice: one to clean up data from the old
- # config, and once to clean up data from the new config
- self.call_helper("clean")
+ # clean up data from the old config
+ try:
+ self.call_helper("clean")
+ except ValueError:
+ # error reported by call_helper
+ pass
- os.unlink(self.cfgfile)
+ if os.path.exists(self.cfgfile):
+ os.unlink(self.cfgfile)
self.write_config()
- if force_update:
- self.call_helper("clean")
+ try:
+ self.call_helper("makecache")
+ except ValueError:
+ # error reported by call_helper
+ pass
class YumSource(Source):
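call_helper above now drives bcfg2-yum-helper through Bcfg2.Utils.Executor, passing its input as JSON on stdin and decoding the JSON document the helper prints on stdout. A stripped-down sketch of that request/response round trip using only the standard library (the helper path is the default named in the hunk; the config-file and verbosity flags are omitted and error handling is reduced to one exception):

import json
import subprocess

def run_helper(command, inputdata=None,
               helper='/usr/sbin/bcfg2-yum-helper'):
    """Run a helper command; send inputdata as JSON on stdin and
    return the decoded JSON it writes to stdout."""
    proc = subprocess.Popen([helper, command],
                            stdin=subprocess.PIPE,
                            stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE,
                            universal_newlines=True)
    stdin = json.dumps(inputdata) if inputdata is not None else None
    stdout, stderr = proc.communicate(stdin)
    if proc.returncode != 0:
        raise RuntimeError("bcfg2-yum-helper failed: %s" % stderr)
    return json.loads(stdout)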
diff --git a/src/lib/Bcfg2/Server/Plugins/Packages/__init__.py b/src/lib/Bcfg2/Server/Plugins/Packages/__init__.py
index f82b8a392..479138ef1 100644
--- a/src/lib/Bcfg2/Server/Plugins/Packages/__init__.py
+++ b/src/lib/Bcfg2/Server/Plugins/Packages/__init__.py
@@ -9,7 +9,8 @@ import shutil
import lxml.etree
import Bcfg2.Logger
import Bcfg2.Server.Plugin
-from Bcfg2.Compat import ConfigParser, urlopen, HTTPError, URLError
+from Bcfg2.Compat import ConfigParser, urlopen, HTTPError, URLError, \
+ MutableMapping
from Bcfg2.Server.Plugins.Packages.Collection import Collection, \
get_collection_class
from Bcfg2.Server.Plugins.Packages.PackagesSources import PackagesSources
@@ -22,7 +23,54 @@ APT_CONFIG_DEFAULT = \
"/etc/apt/sources.list.d/bcfg2-packages-generated-sources.list"
+class OnDemandDict(MutableMapping):
+ """ This maps a set of keys to a set of value-getting functions;
+ the values are populated on-the-fly by the functions as the values
+ are needed (and not before). This is used by
+ :func:`Bcfg2.Server.Plugins.Packages.Packages.get_additional_data`;
+ see the docstring for that function for details on why.
+
+ Unlike a dict, you should not specify values for for the righthand
+ side of this mapping, but functions that get values. E.g.:
+ Unlike a dict, you should not specify values for the righthand
+ .. code-block:: python
+
+ d = OnDemandDict(foo=load_foo,
+ bar=lambda: "bar");
+ """
+
+ def __init__(self, **getters):
+ self._values = dict()
+ self._getters = dict(**getters)
+
+ def __getitem__(self, key):
+ if key not in self._values:
+ self._values[key] = self._getters[key]()
+ return self._values[key]
+
+ def __setitem__(self, key, getter):
+ self._getters[key] = getter
+
+ def __delitem__(self, key):
+ del self._values[key]
+ del self._getters[key]
+
+ def __len__(self):
+ return len(self._getters)
+
+ def __iter__(self):
+ return iter(self._getters.keys())
+
+ def __repr__(self):
+ rv = dict(self._values)
+ for key in self._getters.keys():
+ if key not in rv:
+ rv[key] = 'unknown'
+ return str(rv)
+
+
class Packages(Bcfg2.Server.Plugin.Plugin,
+ Bcfg2.Server.Plugin.Caching,
Bcfg2.Server.Plugin.StructureValidator,
Bcfg2.Server.Plugin.Generator,
Bcfg2.Server.Plugin.Connector,
@@ -45,8 +93,12 @@ class Packages(Bcfg2.Server.Plugin.Plugin,
#: and :func:`Reload`
__rmi__ = Bcfg2.Server.Plugin.Plugin.__rmi__ + ['Refresh', 'Reload']
+ __child_rmi__ = Bcfg2.Server.Plugin.Plugin.__child_rmi__ + \
+ [('Refresh', 'expire_cache'), ('Reload', 'expire_cache')]
+
def __init__(self, core, datastore):
Bcfg2.Server.Plugin.Plugin.__init__(self, core, datastore)
+ Bcfg2.Server.Plugin.Caching.__init__(self)
Bcfg2.Server.Plugin.StructureValidator.__init__(self)
Bcfg2.Server.Plugin.Generator.__init__(self)
Bcfg2.Server.Plugin.Connector.__init__(self)
@@ -110,8 +162,21 @@ class Packages(Bcfg2.Server.Plugin.Plugin,
#: object when one is requested, so each entry is very
#: short-lived -- it's purged at the end of each client run.
self.clients = dict()
- # pylint: enable=C0301
+ #: groupcache caches group lookups. It maps Collections (via
+ #: :attr:`Bcfg2.Server.Plugins.Packages.Collection.Collection.cachekey`)
+ #: to sets of package groups, and thence to the packages
+ #: indicated by those groups.
+ self.groupcache = dict()
+
+ #: pkgcache caches complete package sets. It maps Collections
+ #: (via
+ #: :attr:`Bcfg2.Server.Plugins.Packages.Collection.Collection.cachekey`)
+ #: to sets of initial packages, and thence to the final
+ #: (complete) package selections resolved from the initial
+ #: packages
+ self.pkgcache = dict()
+ # pylint: enable=C0301
__init__.__doc__ = Bcfg2.Server.Plugin.Plugin.__init__.__doc__
def set_debug(self, debug):
@@ -355,14 +420,24 @@ class Packages(Bcfg2.Server.Plugin.Plugin,
for el in to_remove:
el.getparent().remove(el)
- gpkgs = collection.get_groups(groups)
- for pkgs in gpkgs.values():
+ groups.sort()
+ # check for this set of groups in the group cache
+ gkey = hash(tuple(groups))
+ if gkey not in self.groupcache[collection.cachekey]:
+ self.groupcache[collection.cachekey][gkey] = \
+ collection.get_groups(groups)
+ for pkgs in self.groupcache[collection.cachekey][gkey].values():
base.update(pkgs)
# essential pkgs are those marked as such by the distribution
base.update(collection.get_essential())
- packages, unknown = collection.complete(base)
+ # check for this set of packages in the package cache
+ pkey = hash(tuple(base))
+ if pkey not in self.pkgcache[collection.cachekey]:
+ self.pkgcache[collection.cachekey][pkey] = \
+ collection.complete(base)
+ packages, unknown = self.pkgcache[collection.cachekey][pkey]
if unknown:
self.logger.info("Packages: Got %d unknown entries" % len(unknown))
self.logger.info("Packages: %s" % list(unknown))
@@ -388,6 +463,9 @@ class Packages(Bcfg2.Server.Plugin.Plugin,
self._load_config()
return True
+ def expire_cache(self, _=None):
+ self.Reload()
+
def _load_config(self, force_update=False):
"""
Load the configuration data and setup sources
@@ -415,9 +493,11 @@ class Packages(Bcfg2.Server.Plugin.Plugin,
if not self.disableMetaData:
collection.setup_data(force_update)
- # clear Collection caches
+ # clear Collection and package caches
self.clients = dict()
self.collections = dict()
+ self.groupcache = dict()
+ self.pkgcache = dict()
for source in self.sources.entries:
cachefiles.add(source.cachefile)
@@ -493,8 +573,12 @@ class Packages(Bcfg2.Server.Plugin.Plugin,
if not self.sources.loaded:
# if sources.xml has not received a FAM event yet, defer;
# instantiate a dummy Collection object
- return Collection(metadata, [], self.cachepath, self.data,
- self.core.fam)
+ collection = Collection(metadata, [], self.cachepath, self.data,
+ self.core.fam)
+ ckey = collection.cachekey
+ self.groupcache.setdefault(ckey, dict())
+ self.pkgcache.setdefault(ckey, dict())
+ return collection
if metadata.hostname in self.clients:
return self.collections[self.clients[metadata.hostname]]
@@ -510,7 +594,8 @@ class Packages(Bcfg2.Server.Plugin.Plugin,
if len(sclasses) > 1:
self.logger.warning("Packages: Multiple source types found for "
"%s: %s" %
- ",".join([s.__name__ for s in sclasses]))
+ (metadata.hostname,
+ ",".join([s.__name__ for s in sclasses])))
cclass = Collection
elif len(sclasses) == 0:
self.logger.error("Packages: No sources found for %s" %
@@ -530,24 +615,47 @@ class Packages(Bcfg2.Server.Plugin.Plugin,
if cclass != Collection:
self.clients[metadata.hostname] = ckey
self.collections[ckey] = collection
+ self.groupcache.setdefault(ckey, dict())
+ self.pkgcache.setdefault(ckey, dict())
return collection
def get_additional_data(self, metadata):
""" Return additional data for the given client. This will be
- a dict containing a single key, ``sources``, whose value is a
- list of data returned from
- :func:`Bcfg2.Server.Plugins.Packages.Collection.Collection.get_additional_data`,
- namely, a list of
- :attr:`Bcfg2.Server.Plugins.Packages.Source.Source.url_map`
- data.
+ an :class:`Bcfg2.Server.Plugins.Packages.OnDemandDict`
+ containing two keys:
+
+ * ``sources``, whose value is a list of data returned from
+ :func:`Bcfg2.Server.Plugins.Packages.Collection.Collection.get_additional_data`,
+ namely, a list of
+ :attr:`Bcfg2.Server.Plugins.Packages.Source.Source.url_map`
+ data; and
+ * ``get_config``, whose value is the
+ :func:`Bcfg2.Server.Plugins.Packages.Packages.get_config`
+ function, which can be used to get the Packages config for
+ other systems.
+
+ This uses an OnDemandDict instead of just a normal dict
+ because loading a source collection can be a fairly
+ time-consuming process, particularly for the first time. As a
+ result, when all metadata objects are built at once (such as
+ after the server is restarted, or far more frequently if
+ Metadata caching is disabled), this function would be a major
+ bottleneck if we tried to build all collections at the same
+ time. Instead, they're merely built on-demand.
:param metadata: The client metadata
:type metadata: Bcfg2.Server.Plugins.Metadata.ClientMetadata
:return: dict of lists of ``url_map`` data
"""
- collection = self.get_collection(metadata)
- return dict(sources=collection.get_additional_data(),
- get_config=self.get_config)
+ def get_sources():
+ """ getter for the 'sources' key of the OnDemandDict
+ returned by this function. This delays calling
+ get_collection() until it's absolutely necessary. """
+ return self.get_collection(metadata).get_additional_data()
+
+ return OnDemandDict(
+ sources=get_sources,
+ get_config=lambda: self.get_config)
def end_client_run(self, metadata):
""" Hook to clear the cache for this client in
diff --git a/src/lib/Bcfg2/Server/Plugins/Probes.py b/src/lib/Bcfg2/Server/Plugins/Probes.py
index f8baddb4b..48be1ac26 100644
--- a/src/lib/Bcfg2/Server/Plugins/Probes.py
+++ b/src/lib/Bcfg2/Server/Plugins/Probes.py
@@ -9,9 +9,11 @@ import operator
import lxml.etree
import Bcfg2.Server
import Bcfg2.Server.Plugin
+from Bcfg2.Compat import unicode # pylint: disable=W0622
try:
from django.db import models
+ from django.core.exceptions import MultipleObjectsReturned
HAS_DJANGO = True
class ProbesDataModel(models.Model,
@@ -32,8 +34,10 @@ except ImportError:
try:
import json
+ # py2.4 json library is structured differently
+ json.loads # pylint: disable=W0104
HAS_JSON = True
-except ImportError:
+except (ImportError, AttributeError):
try:
import simplejson as json
HAS_JSON = True
@@ -63,7 +67,10 @@ class ProbeData(str): # pylint: disable=E0012,R0924
.json, and .yaml properties to provide convenient ways to use
ProbeData objects as XML, JSON, or YAML data """
def __new__(cls, data):
- return str.__new__(cls, data)
+ if isinstance(data, unicode):
+ return str.__new__(cls, data.encode('utf-8'))
+ else:
+ return str.__new__(cls, data)
def __init__(self, data): # pylint: disable=W0613
str.__init__(self)
@@ -180,14 +187,16 @@ class ProbeSet(Bcfg2.Server.Plugin.EntrySet):
class Probes(Bcfg2.Server.Plugin.Probing,
+ Bcfg2.Server.Plugin.Caching,
Bcfg2.Server.Plugin.Connector,
Bcfg2.Server.Plugin.DatabaseBacked):
""" A plugin to gather information from a client machine """
__author__ = 'bcfg-dev@mcs.anl.gov'
def __init__(self, core, datastore):
- Bcfg2.Server.Plugin.Connector.__init__(self)
Bcfg2.Server.Plugin.Probing.__init__(self)
+ Bcfg2.Server.Plugin.Caching.__init__(self)
+ Bcfg2.Server.Plugin.Connector.__init__(self)
Bcfg2.Server.Plugin.DatabaseBacked.__init__(self, core, datastore)
try:
@@ -197,6 +206,7 @@ class Probes(Bcfg2.Server.Plugin.Probing,
err = sys.exc_info()[1]
raise Bcfg2.Server.Plugin.PluginInitError(err)
+ self.allowed_cgroups = core.setup['probe_allowed_groups']
self.probedata = dict()
self.cgroups = dict()
self.load_data()
@@ -222,9 +232,15 @@ class Probes(Bcfg2.Server.Plugin.Probing,
lxml.etree.SubElement(top, 'Client', name=client,
timestamp=str(int(probedata.timestamp)))
for probe in sorted(probedata):
- lxml.etree.SubElement(
- ctag, 'Probe', name=probe,
- value=self.probedata[client][probe])
+ try:
+ lxml.etree.SubElement(
+ ctag, 'Probe', name=probe,
+ value=str(
+ self.probedata[client][probe]).decode('utf-8'))
+ except AttributeError:
+ lxml.etree.SubElement(
+ ctag, 'Probe', name=probe,
+ value=str(self.probedata[client][probe]))
for group in sorted(self.cgroups[client]):
lxml.etree.SubElement(ctag, "Group", name=group)
try:
@@ -239,35 +255,50 @@ class Probes(Bcfg2.Server.Plugin.Probing,
def _write_data_db(self, client):
""" Write received probe data to the database """
for probe, data in self.probedata[client.hostname].items():
- pdata = \
- ProbesDataModel.objects.get_or_create(hostname=client.hostname,
- probe=probe)[0]
+ try:
+ pdata = ProbesDataModel.objects.get_or_create(
+ hostname=client.hostname,
+ probe=probe)[0]
+ except MultipleObjectsReturned:
+ ProbesDataModel.objects.filter(hostname=client.hostname,
+ probe=probe).delete()
+ ProbesDataModel.objects.get_or_create(
+ hostname=client.hostname,
+ probe=probe)
if pdata.data != data:
pdata.data = data
pdata.save()
ProbesDataModel.objects.filter(
hostname=client.hostname).exclude(
- probe__in=self.probedata[client.hostname]).delete()
+ probe__in=self.probedata[client.hostname]).delete()
for group in self.cgroups[client.hostname]:
try:
- ProbesGroupsModel.objects.get(hostname=client.hostname,
- group=group)
- except ProbesGroupsModel.DoesNotExist:
- grp = ProbesGroupsModel(hostname=client.hostname,
- group=group)
- grp.save()
+ ProbesGroupsModel.objects.get_or_create(
+ hostname=client.hostname,
+ group=group)
+ except MultipleObjectsReturned:
+ ProbesGroupsModel.objects.filter(hostname=client.hostname,
+ group=group).delete()
+ ProbesGroupsModel.objects.get_or_create(
+ hostname=client.hostname,
+ group=group)
ProbesGroupsModel.objects.filter(
hostname=client.hostname).exclude(
- group__in=self.cgroups[client.hostname]).delete()
+ group__in=self.cgroups[client.hostname]).delete()
- def load_data(self):
+ def expire_cache(self, key=None):
+ self.load_data(client=key)
+
+ def load_data(self, client=None):
""" Load probe data from the appropriate backend (probed.xml
or the database) """
if self._use_db:
- return self._load_data_db()
+ return self._load_data_db(client=client)
else:
+ # the XML backend doesn't support loading data for single
+ # clients, so it reloads all data
return self._load_data_xml()
def _load_data_xml(self):
@@ -292,20 +323,36 @@ class Probes(Bcfg2.Server.Plugin.Probing,
elif pdata.tag == 'Group':
self.cgroups[client.get('name')].append(pdata.get('name'))
- def _load_data_db(self):
+ if self.core.metadata_cache_mode in ['cautious', 'aggressive']:
+ self.core.expire_caches_by_type(Bcfg2.Server.Plugin.Metadata)
+
+ def _load_data_db(self, client=None):
""" Load probe data from the database """
- self.probedata = {}
- self.cgroups = {}
- for pdata in ProbesDataModel.objects.all():
+ if client is None:
+ self.probedata = {}
+ self.cgroups = {}
+ probedata = ProbesDataModel.objects.all()
+ groupdata = ProbesGroupsModel.objects.all()
+ else:
+ self.probedata.pop(client, None)
+ self.cgroups.pop(client, None)
+ probedata = ProbesDataModel.objects.filter(hostname=client)
+ groupdata = ProbesGroupsModel.objects.filter(hostname=client)
+
+ for pdata in probedata:
if pdata.hostname not in self.probedata:
self.probedata[pdata.hostname] = ClientProbeDataSet(
timestamp=time.mktime(pdata.timestamp.timetuple()))
self.probedata[pdata.hostname][pdata.probe] = ProbeData(pdata.data)
- for pgroup in ProbesGroupsModel.objects.all():
+ for pgroup in groupdata:
if pgroup.hostname not in self.cgroups:
self.cgroups[pgroup.hostname] = []
self.cgroups[pgroup.hostname].append(pgroup.group)
+ if self.core.metadata_cache_mode in ['cautious', 'aggressive']:
+ self.core.expire_caches_by_type(Bcfg2.Server.Plugin.Metadata,
+ key=client)
+
@Bcfg2.Server.Plugin.track_statistics()
def GetProbes(self, meta):
return self.probes.get_probe_data(meta)
@@ -347,11 +394,22 @@ class Probes(Bcfg2.Server.Plugin.Probing,
if line.split(':')[0] == 'group':
newgroup = line.split(':')[1].strip()
if newgroup not in cgroups:
- cgroups.append(newgroup)
+ if self._group_allowed(newgroup):
+ cgroups.append(newgroup)
+ else:
+ self.logger.info(
+ "Disallowed group assignment %s from %s" %
+ (newgroup, client.hostname))
dlines.remove(line)
dobj = ProbeData("\n".join(dlines))
cprobedata[data.get('name')] = dobj
+ def _group_allowed(self, group):
+ """ Determine if the named group can be set as a probe group
+ by checking the regexes listed in the [probes] groups_allowed
+ setting """
+ return any(r.match(group) for r in self.allowed_cgroups)
+
def get_additional_groups(self, meta):
return self.cgroups.get(meta.hostname, list())
get_additional_groups.__doc__ = \
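
A minimal sketch of the group whitelisting that Probes now performs; the
patterns below are illustrative assumptions, and only the
any(r.match(...)) test mirrors the plugin code above:

    import re

    # hypothetical patterns; Bcfg2 reads and pre-compiles these from
    # bcfg2.conf before they reach the plugin
    allowed_cgroups = [re.compile(p) for p in [r"probe-.*", r"www\d+"]]

    def group_allowed(group):
        """ Accept a probe-assigned group only if it matches one of
        the allowed patterns, as Probes._group_allowed() does. """
        return any(r.match(group) for r in allowed_cgroups)

    print(group_allowed("probe-dns"))    # True
    print(group_allowed("untrusted"))    # False
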
diff --git a/src/lib/Bcfg2/Server/Plugins/Properties.py b/src/lib/Bcfg2/Server/Plugins/Properties.py
index e97f66675..8c6cf799a 100644
--- a/src/lib/Bcfg2/Server/Plugins/Properties.py
+++ b/src/lib/Bcfg2/Server/Plugins/Properties.py
@@ -17,8 +17,10 @@ except ImportError:
try:
import json
+ # py2.4 json library is structured differently
+ json.loads # pylint: disable=W0104
HAS_JSON = True
-except ImportError:
+except (ImportError, AttributeError):
try:
import simplejson as json
HAS_JSON = True
@@ -223,7 +225,7 @@ class XMLPropertyFile(Bcfg2.Server.Plugin.StructFile, PropertyFile):
if strict:
raise PluginExecutionError(msg)
else:
- LOGGER.warning(msg)
+ LOGGER.info(msg)
Index.__doc__ = Bcfg2.Server.Plugin.StructFile.Index.__doc__
def _decrypt(self, element):
diff --git a/src/lib/Bcfg2/Server/Plugins/PuppetENC.py b/src/lib/Bcfg2/Server/Plugins/PuppetENC.py
index 801e7006d..072f3f7e7 100644
--- a/src/lib/Bcfg2/Server/Plugins/PuppetENC.py
+++ b/src/lib/Bcfg2/Server/Plugins/PuppetENC.py
@@ -127,7 +127,7 @@ class PuppetENC(Bcfg2.Server.Plugin.Plugin,
self.logger.warning("PuppetENC is incompatible with aggressive "
"client metadata caching, try 'cautious' or "
"'initial' instead")
- self.core.cache.expire()
+ self.core.expire_caches_by_type(Bcfg2.Server.Plugin.Metadata)
def end_statistics(self, metadata):
self.end_client_run(self, metadata)
diff --git a/src/lib/Bcfg2/Server/Plugins/SSHbase.py b/src/lib/Bcfg2/Server/Plugins/SSHbase.py
index d8b3104b7..2deea5f07 100644
--- a/src/lib/Bcfg2/Server/Plugins/SSHbase.py
+++ b/src/lib/Bcfg2/Server/Plugins/SSHbase.py
@@ -92,6 +92,7 @@ class KnownHostsEntrySet(Bcfg2.Server.Plugin.EntrySet):
class SSHbase(Bcfg2.Server.Plugin.Plugin,
+ Bcfg2.Server.Plugin.Caching,
Bcfg2.Server.Plugin.Generator,
Bcfg2.Server.Plugin.PullTarget):
"""
@@ -125,6 +126,7 @@ class SSHbase(Bcfg2.Server.Plugin.Plugin,
def __init__(self, core, datastore):
Bcfg2.Server.Plugin.Plugin.__init__(self, core, datastore)
+ Bcfg2.Server.Plugin.Caching.__init__(self)
Bcfg2.Server.Plugin.Generator.__init__(self)
Bcfg2.Server.Plugin.PullTarget.__init__(self)
self.ipcache = {}
@@ -149,6 +151,9 @@ class SSHbase(Bcfg2.Server.Plugin.Plugin,
HostKeyEntrySet(keypattern, self.data)
self.Entries['Path']["/etc/ssh/" + keypattern] = self.build_hk
+ def expire_cache(self, key=None):
+ self.__skn = False
+
def get_skn(self):
"""Build memory cache of the ssh known hosts file."""
if not self.__skn:
diff --git a/src/lib/Bcfg2/Server/Plugins/ServiceCompat.py b/src/lib/Bcfg2/Server/Plugins/ServiceCompat.py
index c3a2221f6..41e6bf8b5 100644
--- a/src/lib/Bcfg2/Server/Plugins/ServiceCompat.py
+++ b/src/lib/Bcfg2/Server/Plugins/ServiceCompat.py
@@ -6,7 +6,9 @@ import Bcfg2.Server.Plugin
class ServiceCompat(Bcfg2.Server.Plugin.Plugin,
Bcfg2.Server.Plugin.GoalValidator):
""" Use old-style service modes for older clients """
- name = 'ServiceCompat'
+
+ create = False
+
__author__ = 'bcfg-dev@mcs.anl.gov'
mode_map = {('true', 'true'): 'default',
('interactive', 'true'): 'interactive_only',
diff --git a/src/lib/Bcfg2/Server/Plugins/Svn.py b/src/lib/Bcfg2/Server/Plugins/Svn.py
index 51f44c52d..240fd7f89 100644
--- a/src/lib/Bcfg2/Server/Plugins/Svn.py
+++ b/src/lib/Bcfg2/Server/Plugins/Svn.py
@@ -59,9 +59,48 @@ class Svn(Bcfg2.Server.Plugin.Version):
self.client.callback_conflict_resolver = \
self.get_conflict_resolver(choice)
+ try:
+ if self.core.setup.cfp.get(
+ "svn",
+ "always_trust").lower() == "true":
+ self.client.callback_ssl_server_trust_prompt = \
+ self.ssl_server_trust_prompt
+ except (ConfigParser.NoSectionError, ConfigParser.NoOptionError):
+ self.logger.debug("Svn: Using subversion cache for SSL "
+ "certificate trust")
+
+ try:
+ if (self.core.setup.cfp.get("svn", "user") and
+ self.core.setup.cfp.get("svn", "password")):
+ self.client.callback_get_login = \
+ self.get_login
+ except (ConfigParser.NoSectionError, ConfigParser.NoOptionError):
+ self.logger.info("Svn: Using subversion cache for "
+                             "password-based authentication")
+
self.logger.debug("Svn: Initialized svn plugin with SVN directory %s" %
self.vcs_path)
+ # pylint: disable=W0613
+ def get_login(self, realm, username, may_save):
+ """ PySvn callback to get credentials for HTTP basic authentication """
+ self.logger.debug("Svn: Logging in with username: %s" %
+ self.core.setup.cfp.get("svn", "user"))
+ return True, \
+ self.core.setup.cfp.get("svn", "user"), \
+ self.core.setup.cfp.get("svn", "password"), \
+ False
+ # pylint: enable=W0613
+
+ def ssl_server_trust_prompt(self, trust_dict):
+ """ PySvn callback to always trust SSL certificates from SVN server """
+ self.logger.debug("Svn: Trusting SSL certificate from %s, "
+ "issued by %s for realm %s" %
+ (trust_dict['hostname'],
+ trust_dict['issuer_dname'],
+ trust_dict['realm']))
+ return True, trust_dict['failures'], False
+
def get_conflict_resolver(self, choice):
""" Get a PySvn conflict resolution callback """
def callback(conflict_description):
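
The new pysvn callbacks are only registered when the corresponding
options exist in the [svn] section of bcfg2.conf. A hedged example of
such a section; the option names come from the code above, the values
are placeholders:

    [svn]
    always_trust = true
    user = bcfg2-svn
    password = example-secret
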
diff --git a/src/lib/Bcfg2/Server/Plugins/TemplateHelper.py b/src/lib/Bcfg2/Server/Plugins/TemplateHelper.py
index fcd73bae2..db7370f01 100644
--- a/src/lib/Bcfg2/Server/Plugins/TemplateHelper.py
+++ b/src/lib/Bcfg2/Server/Plugins/TemplateHelper.py
@@ -115,7 +115,7 @@ class TemplateHelperLint(Bcfg2.Server.Lint.ServerPlugin):
def Run(self):
for helper in self.core.plugins['TemplateHelper'].entries.values():
- if self.HandlesFile(helper):
+ if self.HandlesFile(helper.name):
self.check_helper(helper.name)
def check_helper(self, helper):
diff --git a/src/lib/Bcfg2/Server/Reports/reports/models.py b/src/lib/Bcfg2/Server/Reports/reports/models.py
index 73adaaaaf..c43c3cee7 100644
--- a/src/lib/Bcfg2/Server/Reports/reports/models.py
+++ b/src/lib/Bcfg2/Server/Reports/reports/models.py
@@ -9,11 +9,13 @@ except ImproperlyConfigured:
print("Reports: unable to import django models: %s" % e)
sys.exit(1)
-from django.db import connection, transaction
+from django.db import connection
from django.db.models import Q
from datetime import datetime, timedelta
from time import strptime
+from Bcfg2.Reporting.Compat import transaction
+
KIND_CHOICES = (
#These are the kinds of config elements
('Package', 'Package'),
@@ -288,7 +290,7 @@ class Reason(models.Model):
return rv
@staticmethod
- @transaction.commit_on_success
+ @transaction.atomic
def prune_orphans():
'''Prune oprhaned rows... no good way to use the ORM'''
cursor = connection.cursor()
@@ -305,7 +307,7 @@ class Entries(models.Model):
return self.name
@staticmethod
- @transaction.commit_on_success
+ @transaction.atomic
def prune_orphans():
'''Prune oprhaned rows... no good way to use the ORM'''
cursor = connection.cursor()
@@ -334,7 +336,7 @@ class Performance(models.Model):
return self.metric
@staticmethod
- @transaction.commit_on_success
+ @transaction.atomic
def prune_orphans():
'''Prune oprhaned rows... no good way to use the ORM'''
cursor = connection.cursor()
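
The Bcfg2.Reporting.Compat import keeps the decorator name stable across
Django versions. A rough sketch of the kind of shim this implies; the
real module may differ:

    # hypothetical compatibility shim, for illustration only
    from django.db import transaction

    if not hasattr(transaction, 'atomic'):
        # pre-1.6 Django has no transaction.atomic; expose the old
        # commit_on_success decorator under the new name
        transaction.atomic = transaction.commit_on_success
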
diff --git a/src/lib/Bcfg2/Server/models.py b/src/lib/Bcfg2/Server/models.py
index 1f64111e7..7e2f5b09d 100644
--- a/src/lib/Bcfg2/Server/models.py
+++ b/src/lib/Bcfg2/Server/models.py
@@ -57,7 +57,7 @@ def load_models(plugins=None, cfile='/etc/bcfg2.conf', quiet=True):
# the second attempt.
LOGGER.error("Failed to load plugin %s: %s" % (plugin,
err))
- continue
+ continue
for sym in dir(mod):
obj = getattr(mod, sym)
if hasattr(obj, "__bases__") and models.Model in obj.__bases__:
diff --git a/src/lib/Bcfg2/Utils.py b/src/lib/Bcfg2/Utils.py
index 1c2dceed2..ab1276178 100644
--- a/src/lib/Bcfg2/Utils.py
+++ b/src/lib/Bcfg2/Utils.py
@@ -81,9 +81,6 @@ class PackedDigitRange(object): # pylint: disable=E0012,R0924
def __str__(self):
return "[%s]" % self.str
- def __len__(self):
- return sum(r[1] - r[0] + 1 for r in self.ranges) + len(self.ints)
-
def locked(fd):
""" Acquire a lock on a file.
@@ -219,7 +216,9 @@ class Executor(object):
"""
if isinstance(command, str):
cmdstr = command
- command = shlex.split(cmdstr)
+
+ if not shell:
+ command = shlex.split(cmdstr)
else:
cmdstr = " ".join(command)
self.logger.debug("Running: %s" % cmdstr)
@@ -245,9 +244,9 @@ class Executor(object):
# py3k fixes
if not isinstance(stdout, str):
- stdout = stdout.decode('utf-8')
+ stdout = stdout.decode('utf-8') # pylint: disable=E1103
if not isinstance(stderr, str):
- stderr = stderr.decode('utf-8')
+ stderr = stderr.decode('utf-8') # pylint: disable=E1103
for line in stdout.splitlines(): # pylint: disable=E1103
self.logger.debug('< %s' % line)
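
The shlex change matters because a command run with shell=True must
reach the shell as a single string, while the no-shell path needs an
argv list. A standalone illustration with assumed commands:

    import shlex
    import subprocess

    # with a shell, the string is handed over intact, so pipes and
    # quoting are interpreted by the shell
    subprocess.call("echo 'hello world' | wc -w", shell=True)

    # without a shell, the string is split into an argv list first
    subprocess.call(shlex.split("echo 'hello world'"))
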
diff --git a/src/lib/Bcfg2/settings.py b/src/lib/Bcfg2/settings.py
index 9393830a8..82a3bdb2a 100644
--- a/src/lib/Bcfg2/settings.py
+++ b/src/lib/Bcfg2/settings.py
@@ -26,12 +26,16 @@ DATABASE_USER = None
DATABASE_PASSWORD = None
DATABASE_HOST = None
DATABASE_PORT = None
+DATABASE_OPTIONS = None
+DATABASE_SCHEMA = None
TIME_ZONE = None
DEBUG = False
TEMPLATE_DEBUG = DEBUG
+ALLOWED_HOSTS = ['*']
+
MEDIA_URL = '/site_media/'
@@ -56,8 +60,8 @@ def read_config(cfile=DEFAULT_CONFIG, repo=None, quiet=False):
""" read the config file and set django settings based on it """
# pylint: disable=W0602,W0603
global DATABASE_ENGINE, DATABASE_NAME, DATABASE_USER, DATABASE_PASSWORD, \
- DATABASE_HOST, DATABASE_PORT, DEBUG, TEMPLATE_DEBUG, TIME_ZONE, \
- MEDIA_URL
+ DATABASE_HOST, DATABASE_PORT, DATABASE_OPTIONS, DATABASE_SCHEMA, \
+ DEBUG, TEMPLATE_DEBUG, TIME_ZONE, MEDIA_URL
# pylint: enable=W0602,W0603
if not os.path.exists(cfile) and os.path.exists(DEFAULT_CONFIG):
@@ -84,7 +88,9 @@ def read_config(cfile=DEFAULT_CONFIG, repo=None, quiet=False):
USER=setup['db_user'],
PASSWORD=setup['db_password'],
HOST=setup['db_host'],
- PORT=setup['db_port'])
+ PORT=setup['db_port'],
+ OPTIONS=setup['db_options'],
+ SCHEMA=setup['db_schema'])
if HAS_DJANGO and django.VERSION[0] == 1 and django.VERSION[1] < 2:
DATABASE_ENGINE = setup['db_engine']
@@ -93,6 +99,8 @@ def read_config(cfile=DEFAULT_CONFIG, repo=None, quiet=False):
DATABASE_PASSWORD = DATABASES['default']['PASSWORD']
DATABASE_HOST = DATABASES['default']['HOST']
DATABASE_PORT = DATABASES['default']['PORT']
+ DATABASE_OPTIONS = DATABASES['default']['OPTIONS']
+ DATABASE_SCHEMA = DATABASES['default']['SCHEMA']
# dropping the version check. This was added in 1.1.2
TIME_ZONE = setup['time_zone']
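
For reference, a sketch of the Django DATABASES entry that read_config()
now assembles, with placeholder values; OPTIONS is passed through to the
database backend, and SCHEMA appears to be a Bcfg2-specific key rather
than a stock Django setting:

    DATABASES = dict(
        default=dict(
            ENGINE='django.db.backends.sqlite3',    # from db_engine
            NAME='/var/lib/bcfg2/etc/bcfg2.sqlite', # from db_name
            USER='',                                # from db_user
            PASSWORD='',                            # from db_password
            HOST='',                                # from db_host
            PORT='',                                # from db_port
            OPTIONS={},                             # new: from db_options
            SCHEMA='',                              # new: from db_schema
        ))
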
diff --git a/src/lib/Bcfg2/version.py b/src/lib/Bcfg2/version.py
index 12fc584fe..35d4cfa0a 100644
--- a/src/lib/Bcfg2/version.py
+++ b/src/lib/Bcfg2/version.py
@@ -2,7 +2,7 @@
import re
-__version__ = "1.3.1"
+__version__ = "1.3.3"
class Bcfg2VersionInfo(tuple): # pylint: disable=E0012,R0924
diff --git a/src/sbin/bcfg2-admin b/src/sbin/bcfg2-admin
index 31e49c00b..14d188342 100755
--- a/src/sbin/bcfg2-admin
+++ b/src/sbin/bcfg2-admin
@@ -83,7 +83,7 @@ def main():
raise SystemExit(1)
mode = mode_cls(setup)
try:
- mode(setup['args'][1:])
+ return mode(setup['args'][1:])
finally:
mode.shutdown()
else:
@@ -93,6 +93,6 @@ def main():
if __name__ == '__main__':
try:
- main()
+ sys.exit(main())
except KeyboardInterrupt:
raise SystemExit(1)
diff --git a/src/sbin/bcfg2-crypt b/src/sbin/bcfg2-crypt
index aad89882f..5641732cd 100755
--- a/src/sbin/bcfg2-crypt
+++ b/src/sbin/bcfg2-crypt
@@ -18,291 +18,199 @@ except ImportError:
raise SystemExit(1)
-class EncryptionChunkingError(Exception):
- """ error raised when Encryptor cannot break a file up into chunks
- to be encrypted, or cannot reassemble the chunks """
- pass
-
-
-class Encryptor(object):
- """ Generic encryptor for all files """
-
- def __init__(self, setup):
+def print_xml(element, keep_text=False):
+    """ Render an XML element for error output. This appends the
+    line number and removes children for nicer display.
+
+ :param element: The element to render
+ :type element: lxml.etree._Element
+ :param keep_text: Do not discard text content from the element for
+ display
+ :type keep_text: boolean
+ """
+ xml = None
+ if len(element) or element.text:
+ el = copy.copy(element)
+ if el.text and not keep_text:
+ el.text = '...'
+ for child in el.iterchildren():
+ el.remove(child)
+ xml = lxml.etree.tostring(
+ el,
+ xml_declaration=False).decode("UTF-8").strip()
+ else:
+ xml = lxml.etree.tostring(
+ element,
+ xml_declaration=False).decode("UTF-8").strip()
+ return "%s (line %s)" % (xml, element.sourceline)
+
+
+class PassphraseError(Exception):
+ """ Exception raised when there's a problem determining the
+ passphrase to encrypt or decrypt with """
+
+
+class DecryptError(Exception):
+ """ Exception raised when decryption fails. """
+
+
+class EncryptError(Exception):
+ """ Exception raised when encryption fails. """
+
+
+class CryptoTool(object):
+ """ Generic decryption/encryption interface base object """
+ def __init__(self, filename, setup):
self.setup = setup
- self.passphrase = None
- self.pname = None
self.logger = logging.getLogger(self.__class__.__name__)
+ self.passphrases = Bcfg2.Encryption.get_passphrases(self.setup)
- def get_encrypted_filename(self, plaintext_filename):
- """ get the name of the file encrypted data should be written to """
- return plaintext_filename
-
- def get_plaintext_filename(self, encrypted_filename):
- """ get the name of the file decrypted data should be written to """
- return encrypted_filename
-
- def chunk(self, data):
- """ generator to break the file up into smaller chunks that
- will each be individually encrypted or decrypted """
- yield data
-
- def unchunk(self, data, original): # pylint: disable=W0613
- """ given chunks of a file, reassemble then into the whole file """
+ self.filename = filename
try:
- return data[0]
- except IndexError:
- raise EncryptionChunkingError("No data to unchunk")
+ self.data = open(self.filename).read()
+ except IOError:
+ err = sys.exc_info()[1]
+ self.logger.error("Error reading %s, skipping: %s" % (filename,
+ err))
+ return False
- def set_passphrase(self):
- """ set the passphrase for the current file """
+ self.pname, self.passphrase = self._get_passphrase()
+
+ def _get_passphrase(self):
+ """ get the passphrase for the current file """
if (not self.setup.cfp.has_section(Bcfg2.Encryption.CFG_SECTION) or
len(Bcfg2.Encryption.get_passphrases(self.setup)) == 0):
- self.logger.error("No passphrases available in %s" %
- self.setup['configfile'])
- return False
-
- if self.passphrase:
- self.logger.debug("Using previously determined passphrase %s" %
- self.pname)
- return True
+ raise PassphraseError("No passphrases available in %s" %
+ self.setup['configfile'])
+ pname = None
if self.setup['passphrase']:
- self.pname = self.setup['passphrase']
+ pname = self.setup['passphrase']
- if self.pname:
+ if pname:
if self.setup.cfp.has_option(Bcfg2.Encryption.CFG_SECTION,
- self.pname):
- self.passphrase = \
- self.setup.cfp.get(Bcfg2.Encryption.CFG_SECTION,
- self.pname)
+ pname):
+ passphrase = self.setup.cfp.get(Bcfg2.Encryption.CFG_SECTION,
+ pname)
self.logger.debug("Using passphrase %s specified on command "
- "line" % self.pname)
- return True
+ "line" % pname)
+ return (pname, passphrase)
else:
- self.logger.error("Could not find passphrase %s in %s" %
- (self.pname, self.setup['configfile']))
- return False
+ raise PassphraseError("Could not find passphrase %s in %s" %
+ (pname, self.setup['configfile']))
else:
pnames = Bcfg2.Encryption.get_passphrases(self.setup)
if len(pnames) == 1:
- self.pname = pnames.keys()[0]
- self.passphrase = pnames[self.pname]
- self.logger.info("Using passphrase %s" % self.pname)
- return True
+ pname = pnames.keys()[0]
+ passphrase = pnames[pname]
+ self.logger.info("Using passphrase %s" % pname)
+ return (pname, passphrase)
elif len(pnames) > 1:
- self.logger.warning("Multiple passphrases found in %s, "
- "specify one on the command line with -p" %
- self.setup['configfile'])
- self.logger.info("No passphrase could be determined")
- return False
-
- def encrypt(self, fname):
- """ encrypt the given file, returning the encrypted data """
- try:
- plaintext = open(fname).read()
- except IOError:
- err = sys.exc_info()[1]
- self.logger.error("Error reading %s, skipping: %s" % (fname, err))
- return False
+ return (None, None)
+ raise PassphraseError("No passphrase could be determined")
- if not self.set_passphrase():
- return False
+ def get_destination_filename(self, original_filename):
+ """ Get the filename where data should be written """
+ return original_filename
- crypted = []
+ def write(self, data):
+ """ write data to disk """
+ new_fname = self.get_destination_filename(self.filename)
try:
- for chunk in self.chunk(plaintext):
- try:
- passphrase, pname = self.get_passphrase(chunk)
- except TypeError:
- return False
-
- crypted.append(self._encrypt(chunk, passphrase, name=pname))
- except EncryptionChunkingError:
- err = sys.exc_info()[1]
- self.logger.error("Error getting data to encrypt from %s: %s" %
- (fname, err))
- return False
- return self.unchunk(crypted, plaintext)
-
- # pylint: disable=W0613
- def _encrypt(self, plaintext, passphrase, name=None):
- """ encrypt a single chunk of a file """
- return Bcfg2.Encryption.ssl_encrypt(
- plaintext, passphrase,
- Bcfg2.Encryption.get_algorithm(self.setup))
- # pylint: enable=W0613
-
- def decrypt(self, fname):
- """ decrypt the given file, returning the plaintext data """
- try:
- crypted = open(fname).read()
+ self._write(new_fname, data)
+ self.logger.info("Wrote data to %s" % new_fname)
+ return True
except IOError:
err = sys.exc_info()[1]
- self.logger.error("Error reading %s, skipping: %s" % (fname, err))
+ self.logger.error("Error writing data from %s to %s: %s" %
+ (self.filename, new_fname, err))
return False
- self.set_passphrase()
+ def _write(self, filename, data):
+ """ Perform the actual write of data. This is separate from
+ :func:`CryptoTool.write` so it can be easily
+ overridden. """
+ open(filename, "wb").write(data)
- plaintext = []
- try:
- for chunk in self.chunk(crypted):
- try:
- passphrase, pname = self.get_passphrase(chunk)
- try:
- plaintext.append(self._decrypt(chunk, passphrase))
- except Bcfg2.Encryption.EVPError:
- self.logger.info("Could not decrypt %s with the "
- "specified passphrase" % fname)
- continue
- except:
- err = sys.exc_info()[1]
- self.logger.error("Error decrypting %s: %s" %
- (fname, err))
- continue
- except TypeError:
- pchunk = None
- passphrases = Bcfg2.Encryption.get_passphrases(self.setup)
- for pname, passphrase in passphrases.items():
- self.logger.debug("Trying passphrase %s" % pname)
- try:
- pchunk = self._decrypt(chunk, passphrase)
- break
- except Bcfg2.Encryption.EVPError:
- pass
- except:
- err = sys.exc_info()[1]
- self.logger.error("Error decrypting %s: %s" %
- (fname, err))
- if pchunk is not None:
- plaintext.append(pchunk)
- else:
- self.logger.error("Could not decrypt %s with any "
- "passphrase in %s" %
- (fname, self.setup['configfile']))
- continue
- except EncryptionChunkingError:
- err = sys.exc_info()[1]
- self.logger.error("Error getting encrypted data from %s: %s" %
- (fname, err))
- return False
- try:
- return self.unchunk(plaintext, crypted)
- except EncryptionChunkingError:
- err = sys.exc_info()[1]
- self.logger.error("Error assembling plaintext data from %s: %s" %
- (fname, err))
- return False
+class Decryptor(CryptoTool):
+ """ Decryptor interface """
+ def decrypt(self):
+ """ decrypt the file, returning the encrypted data """
+ raise NotImplementedError
- def _decrypt(self, crypted, passphrase):
- """ decrypt a single chunk """
- return Bcfg2.Encryption.ssl_decrypt(
- crypted, passphrase,
- Bcfg2.Encryption.get_algorithm(self.setup))
- def write_encrypted(self, fname, data=None):
- """ write encrypted data to disk """
- if data is None:
- data = self.decrypt(fname)
- new_fname = self.get_encrypted_filename(fname)
- try:
- open(new_fname, "wb").write(data)
- self.logger.info("Wrote encrypted data to %s" % new_fname)
- return True
- except IOError:
- err = sys.exc_info()[1]
- self.logger.error("Error writing encrypted data from %s to %s: %s"
- % (fname, new_fname, err))
- return False
- except EncryptionChunkingError:
- err = sys.exc_info()[1]
- self.logger.error("Error assembling encrypted data from %s: %s" %
- (fname, err))
- return False
+class Encryptor(CryptoTool):
+ """ encryptor interface """
+ def encrypt(self):
+ """ encrypt the file, returning the encrypted data """
+ raise NotImplementedError
- def write_decrypted(self, fname, data=None):
- """ write decrypted data to disk """
- if data is None:
- data = self.decrypt(fname)
- new_fname = self.get_plaintext_filename(fname)
- try:
- open(new_fname, "wb").write(data)
- self.logger.info("Wrote decrypted data to %s" % new_fname)
- return True
- except IOError:
- err = sys.exc_info()[1]
- self.logger.error("Error writing encrypted data from %s to %s: %s"
- % (fname, new_fname, err))
- return False
- def get_passphrase(self, chunk):
- """ get the passphrase for a chunk of a file """
- pname = self._get_passphrase(chunk)
- if not self.pname:
- if not pname:
- self.logger.info("No passphrase given on command line or "
- "found in file")
- return False
- elif self.setup.cfp.has_option(Bcfg2.Encryption.CFG_SECTION,
- pname):
- passphrase = self.setup.cfp.get(Bcfg2.Encryption.CFG_SECTION,
- pname)
- else:
- self.logger.error("Could not find passphrase %s in %s" %
- (pname, self.setup['configfile']))
- return False
- else:
- pname = self.pname
- passphrase = self.passphrase
- if self.pname != pname:
- self.logger.warning("Passphrase given on command line (%s) "
- "differs from passphrase embedded in "
- "file (%s), using command-line option" %
- (self.pname, pname))
- return (passphrase, pname)
+class CfgEncryptor(Encryptor):
+ """ encryptor class for Cfg files """
- def _get_passphrase(self, chunk): # pylint: disable=W0613
- """ get the passphrase for a chunk of a file """
- return None
+ def __init__(self, filename, setup):
+ Encryptor.__init__(self, filename, setup)
+ if self.passphrase is None:
+ raise PassphraseError("Multiple passphrases found in %s, "
+ "specify one on the command line with -p" %
+ self.setup['configfile'])
+ def encrypt(self):
+ return Bcfg2.Encryption.ssl_encrypt(
+ self.data, self.passphrase,
+ Bcfg2.Encryption.get_algorithm(self.setup))
-class CfgEncryptor(Encryptor):
- """ encryptor class for Cfg files """
+ def get_destination_filename(self, original_filename):
+ return original_filename + ".crypt"
- def get_encrypted_filename(self, plaintext_filename):
- return plaintext_filename + ".crypt"
- def get_plaintext_filename(self, encrypted_filename):
- if encrypted_filename.endswith(".crypt"):
- return encrypted_filename[:-6]
+class CfgDecryptor(Decryptor):
+ """ Decrypt Cfg files """
+
+ def decrypt(self):
+ """ decrypt the given file, returning the plaintext data """
+ if self.passphrase:
+ try:
+ return Bcfg2.Encryption.ssl_decrypt(
+ self.data, self.passphrase,
+ Bcfg2.Encryption.get_algorithm(self.setup))
+ except Bcfg2.Encryption.EVPError:
+ raise DecryptError("Could not decrypt %s with the "
+ "specified passphrase" % self.filename)
+ except:
+ raise DecryptError("Error decrypting %s: %s" %
+ (self.filename, sys.exc_info()[1]))
+ else: # no passphrase given, brute force
+ try:
+ return Bcfg2.Encryption.bruteforce_decrypt(
+ self.data, passphrases=self.passphrases.values(),
+ algorithm=Bcfg2.Encryption.get_algorithm(self.setup))
+ except Bcfg2.Encryption.EVPError:
+ raise DecryptError("Could not decrypt %s with any passphrase" %
+ self.filename)
+
+ def get_destination_filename(self, original_filename):
+ if original_filename.endswith(".crypt"):
+ return original_filename[:-6]
else:
- return Encryptor.get_plaintext_filename(self, encrypted_filename)
+            return Decryptor.get_destination_filename(self, original_filename)
-class PropertiesEncryptor(Encryptor):
- """ encryptor class for Properties files """
+class PropertiesCryptoMixin(object):
+ """ Mixin to provide some common methods for Properties crypto """
+ default_xpath = '//*'
- def _encrypt(self, plaintext, passphrase, name=None):
- # plaintext is an lxml.etree._Element
- if name is None:
- name = "true"
- if plaintext.text and plaintext.text.strip():
- plaintext.text = Bcfg2.Encryption.ssl_encrypt(
- plaintext.text,
- passphrase,
- Bcfg2.Encryption.get_algorithm(self.setup)).strip()
- plaintext.set("encrypted", name)
- return plaintext
-
- def chunk(self, data):
- xdata = lxml.etree.XML(data, parser=XMLParser)
+ def _get_elements(self, xdata):
+ """ Get the list of elements to encrypt or decrypt """
if self.setup['xpath']:
elements = xdata.xpath(self.setup['xpath'])
if not elements:
- raise EncryptionChunkingError("XPath expression %s matched no "
- "elements" % self.setup['xpath'])
+ self.logger.warning("XPath expression %s matched no "
+ "elements" % self.setup['xpath'])
else:
- elements = xdata.xpath('//*[@encrypted]')
+ elements = xdata.xpath(self.default_xpath)
if not elements:
elements = list(xdata.getiterator(tag=lxml.etree.Element))
@@ -329,50 +237,96 @@ class PropertiesEncryptor(Encryptor):
ans = input("Encrypt this element? [y/N] ")
if not ans.lower().startswith("y"):
elements.remove(element)
+ return elements
+
+ def _get_element_passphrase(self, element):
+ """ Get the passphrase to use to encrypt or decrypt a given
+ element """
+ pname = element.get("encrypted")
+ if pname in self.passphrases:
+ passphrase = self.passphrases[pname]
+ elif self.passphrase:
+ if pname:
+ self.logger.warning("Passphrase %s not found in %s, "
+ "using passphrase given on command line"
+ % (pname, self.setup['configfile']))
+ passphrase = self.passphrase
+ pname = self.pname
+ else:
+ raise PassphraseError("Multiple passphrases found in %s, "
+ "specify one on the command line with -p" %
+ self.setup['configfile'])
+ return (pname, passphrase)
+
+ def _write(self, filename, data):
+ """ Write the data """
+ data.getroottree().write(filename,
+ xml_declaration=False,
+ pretty_print=True)
- # this is not a good use of a generator, but we need to
- # generate the full list of elements in order to ensure that
- # some exist before we know what to return
- for elt in elements:
- yield elt
-
- def unchunk(self, data, original):
- # Properties elements are modified in-place, so we don't
- # actually need to unchunk anything
- xdata = data[0]
- # find root element
- while xdata.getparent() is not None:
- xdata = xdata.getparent()
- return lxml.etree.tostring(xdata,
- xml_declaration=False,
- pretty_print=True).decode('UTF-8')
-
- def _get_passphrase(self, chunk):
- pname = chunk.get("encrypted")
- if pname and pname.lower() != "true":
- return pname
- return None
-
- def _decrypt(self, crypted, passphrase):
- # crypted is in lxml.etree._Element
- if not crypted.text or not crypted.text.strip():
- self.logger.warning("Skipping empty element %s" % crypted.tag)
- return crypted
- decrypted = Bcfg2.Encryption.ssl_decrypt(
- crypted.text,
- passphrase,
- Bcfg2.Encryption.get_algorithm(self.setup)).strip()
- try:
- crypted.text = decrypted.encode('ascii', 'xmlcharrefreplace')
- except UnicodeDecodeError:
- # we managed to decrypt the value, but it contains content
- # that can't even be encoded into xml entities. what
- # probably happened here is that we coincidentally could
- # decrypt a value encrypted with a different key, and
- # wound up with gibberish.
- self.logger.warning("Decrypted %s to gibberish, skipping" %
- crypted.tag)
- return crypted
+
+class PropertiesEncryptor(Encryptor, PropertiesCryptoMixin):
+ """ encryptor class for Properties files """
+
+ def encrypt(self):
+ xdata = lxml.etree.XML(self.data, parser=XMLParser)
+ for elt in self._get_elements(xdata):
+ try:
+ pname, passphrase = self._get_element_passphrase(elt)
+ except PassphraseError:
+ raise EncryptError(str(sys.exc_info()[1]))
+ self.logger.debug("Encrypting %s" % print_xml(elt))
+ elt.text = Bcfg2.Encryption.ssl_encrypt(
+ elt.text, passphrase,
+ Bcfg2.Encryption.get_algorithm(self.setup)).strip()
+ elt.set("encrypted", pname)
+ return xdata
+
+ def _write(self, filename, data):
+ PropertiesCryptoMixin._write(self, filename, data)
+
+
+class PropertiesDecryptor(Decryptor, PropertiesCryptoMixin):
+ """ decryptor class for Properties files """
+ default_xpath = '//*[@encrypted]'
+
+ def decrypt(self):
+ decrypted_any = False
+ xdata = lxml.etree.XML(self.data, parser=XMLParser)
+ for elt in self._get_elements(xdata):
+ try:
+ pname, passphrase = self._get_element_passphrase(elt)
+ except PassphraseError:
+ raise DecryptError(str(sys.exc_info()[1]))
+ self.logger.debug("Decrypting %s" % print_xml(elt))
+ try:
+ decrypted = Bcfg2.Encryption.ssl_decrypt(
+ elt.text, passphrase,
+ Bcfg2.Encryption.get_algorithm(self.setup)).strip()
+ decrypted_any = True
+ except (Bcfg2.Encryption.EVPError, TypeError):
+ self.logger.error("Could not decrypt %s, skipping" %
+ print_xml(elt))
+ continue
+ try:
+ elt.text = decrypted.encode('ascii', 'xmlcharrefreplace')
+ elt.set("encrypted", pname)
+ except UnicodeDecodeError:
+ # we managed to decrypt the value, but it contains
+ # content that can't even be encoded into xml
+ # entities. what probably happened here is that we
+ # coincidentally could decrypt a value encrypted with
+ # a different key, and wound up with gibberish.
+ self.logger.warning("Decrypted %s to gibberish, skipping" %
+ elt.tag)
+ if decrypted_any:
+ return xdata
+ else:
+ raise DecryptError("Failed to decrypt any data in %s" %
+ self.filename)
+
+ def _write(self, filename, data):
+ PropertiesCryptoMixin._write(self, filename, data)
def main(): # pylint: disable=R0912,R0915
@@ -422,9 +376,6 @@ def main(): # pylint: disable=R0912,R0915
logger.error("--remove cannot be used with --properties, ignoring")
setup['remove'] = Bcfg2.Options.CRYPT_REMOVE.default
- props_crypt = PropertiesEncryptor(setup)
- cfg_crypt = CfgEncryptor(setup)
-
for fname in setup['args']:
if not os.path.exists(fname):
logger.error("%s does not exist, skipping" % fname)
@@ -454,10 +405,10 @@ def main(): # pylint: disable=R0912,R0915
props = False
if props:
- encryptor = props_crypt
if setup['remove']:
logger.info("Cannot use --remove with Properties file %s, "
"ignoring for this file" % fname)
+ tools = (PropertiesEncryptor, PropertiesDecryptor)
else:
if setup['xpath']:
logger.info("Cannot use xpath with Cfg file %s, ignoring "
@@ -465,32 +416,52 @@ def main(): # pylint: disable=R0912,R0915
if setup['interactive']:
logger.info("Cannot use interactive mode with Cfg file %s, "
"ignoring -I for this file" % fname)
- encryptor = cfg_crypt
+ tools = (CfgEncryptor, CfgDecryptor)
data = None
+ mode = None
if setup['encrypt']:
- xform = encryptor.encrypt
- write = encryptor.write_encrypted
+ try:
+ tool = tools[0](fname, setup)
+ except PassphraseError:
+ logger.error(str(sys.exc_info()[1]))
+ return 2
+ mode = "encrypt"
elif setup['decrypt']:
- xform = encryptor.decrypt
- write = encryptor.write_decrypted
+ try:
+ tool = tools[1](fname, setup)
+ except PassphraseError:
+ logger.error(str(sys.exc_info()[1]))
+ return 2
+ mode = "decrypt"
else:
logger.info("Neither --encrypt nor --decrypt specified, "
"determining mode")
- data = encryptor.decrypt(fname)
- if data:
- write = encryptor.write_decrypted
- else:
+ try:
+ tool = tools[1](fname, setup)
+ except PassphraseError:
+ logger.error(str(sys.exc_info()[1]))
+ return 2
+
+ try:
+ data = tool.decrypt()
+ mode = "decrypt"
+ except DecryptError:
logger.info("Failed to decrypt %s, trying encryption" % fname)
- data = None
- xform = encryptor.encrypt
- write = encryptor.write_encrypted
+ try:
+ tool = tools[0](fname, setup)
+ except PassphraseError:
+ logger.error(str(sys.exc_info()[1]))
+ return 2
+ mode = "encrypt"
if data is None:
- data = xform(fname)
- if not data:
- logger.error("Failed to %s %s, skipping" % (xform.__name__, fname))
- continue
+ try:
+ data = getattr(tool, mode)()
+ except (EncryptError, DecryptError):
+ logger.error("Failed to %s %s, skipping: %s" %
+ (mode, fname, sys.exc_info()[1]))
+ continue
if setup['crypt_stdout']:
if len(setup['args']) > 1:
print("----- %s -----" % fname)
@@ -498,10 +469,10 @@ def main(): # pylint: disable=R0912,R0915
if len(setup['args']) > 1:
print("")
else:
- write(fname, data=data)
+ tool.write(data)
if (setup['remove'] and
- encryptor.get_encrypted_filename(fname) != fname):
+ tool.get_destination_filename(fname) != fname):
try:
os.unlink(fname)
except IOError:
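
Both tool hierarchies still delegate the actual cryptography to
Bcfg2.Encryption. A rough round-trip sketch of the helpers they call;
the passphrase and algorithm values here are made up, and
get_algorithm(setup) normally supplies the latter:

    import Bcfg2.Encryption

    passphrase = "example-passphrase"
    algorithm = "aes_256_cbc"   # assumed default algorithm

    crypted = Bcfg2.Encryption.ssl_encrypt("hello world", passphrase,
                                           algorithm)
    print(Bcfg2.Encryption.ssl_decrypt(crypted, passphrase, algorithm))
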
diff --git a/src/sbin/bcfg2-info b/src/sbin/bcfg2-info
index 6aafd24d1..6008f8896 100755
--- a/src/sbin/bcfg2-info
+++ b/src/sbin/bcfg2-info
@@ -231,10 +231,14 @@ class InfoCore(cmd.Cmd, Bcfg2.Server.Core.BaseCore):
print("Refusing to write files outside of /tmp without -f "
"option")
return
- lxml.etree.ElementTree(self.BuildConfiguration(client)).write(
- ofile,
- encoding='UTF-8', xml_declaration=True,
- pretty_print=True)
+ try:
+ lxml.etree.ElementTree(self.BuildConfiguration(client)).write(
+ ofile,
+ encoding='UTF-8', xml_declaration=True,
+ pretty_print=True)
+ except IOError:
+ err = sys.exc_info()[1]
+ print("Failed to write File %s: %s" % (ofile, err))
else:
print(self._get_usage(self.do_build))
@@ -433,7 +437,7 @@ Bcfg2 client itself.""")
pname, client = alist
automatch = self.setup.cfp.getboolean("properties", "automatch",
default=False)
- pfile = self.plugins['Properties'].store.entries[pname]
+ pfile = self.plugins['Properties'].entries[pname]
if (not force and
not automatch and
pfile.xdata.get("automatch", "false").lower() != "true"):
@@ -469,7 +473,6 @@ Bcfg2 client itself.""")
('Path Bcfg2 repository', self.setup['repo']),
('Plugins', self.setup['plugins']),
('Password', self.setup['password']),
- ('Server Metadata Connector', self.setup['mconnect']),
('Filemonitor', self.setup['filemonitor']),
('Server address', self.setup['location']),
('Path to key', self.setup['key']),
@@ -479,6 +482,17 @@ Bcfg2 client itself.""")
('Logging', self.setup['logging'])]
print_tabular(output)
+ def do_expirecache(self, args):
+        """ expirecache [<hostname> [<hostname> ...]] - Expire the
+ metadata cache """
+ alist = args.split()
+ if len(alist):
+ for client in self._get_client_list(alist):
+ self.expire_caches_by_type(Bcfg2.Server.Plugin.Metadata,
+ key=client)
+ else:
+ self.expire_caches_by_type(Bcfg2.Server.Plugin.Metadata)
+
def do_probes(self, args):
""" probes [-p] <hostname> - Get probe list for the given
host, in XML (the default) or human-readable pretty (with -p)
@@ -714,7 +728,7 @@ Bcfg2 client itself.""")
def run(self, args): # pylint: disable=W0221
try:
self.load_plugins()
- self.fam.handle_events_in_interval(1)
+ self.block_for_fam_events(handle_events=True)
if args:
self.onecmd(" ".join(args))
else:
@@ -755,7 +769,8 @@ USAGE = build_usage()
def main():
optinfo = dict(profile=Bcfg2.Options.CORE_PROFILE,
interactive=Bcfg2.Options.INTERACTIVE,
- interpreter=Bcfg2.Options.INTERPRETER)
+ interpreter=Bcfg2.Options.INTERPRETER,
+ command_timeout=Bcfg2.Options.CLIENT_COMMAND_TIMEOUT)
optinfo.update(Bcfg2.Options.INFO_COMMON_OPTIONS)
setup = Bcfg2.Options.OptionParser(optinfo)
setup.hm = "\n".join([" bcfg2-info [options] [command <command args>]",
diff --git a/src/sbin/bcfg2-lint b/src/sbin/bcfg2-lint
index 9a98eaaaa..9ceb1dd04 100755
--- a/src/sbin/bcfg2-lint
+++ b/src/sbin/bcfg2-lint
@@ -3,6 +3,7 @@
"""This tool examines your Bcfg2 specifications for errors."""
import sys
+import time
import inspect
import logging
import Bcfg2.Logger
@@ -52,7 +53,11 @@ def run_plugin(plugin, plugin_name, setup=None, errorhandler=None,
args.append(setup)
# python 2.5 doesn't support mixing *magic and keyword arguments
- return plugin(*args, **dict(files=files, errorhandler=errorhandler)).Run()
+ start = time.time()
+ rv = plugin(*args, **dict(files=files, errorhandler=errorhandler)).Run()
+ LOGGER.debug(" Ran %s in %0.2f seconds" % (plugin_name,
+ time.time() - start))
+ return rv
def get_errorhandler(setup):
@@ -68,7 +73,7 @@ def load_server(setup):
""" load server """
core = Bcfg2.Server.Core.BaseCore(setup)
core.load_plugins()
- core.fam.handle_events_in_interval(0.1)
+ core.block_for_fam_events(handle_events=True)
return core
diff --git a/src/sbin/bcfg2-reports b/src/sbin/bcfg2-reports
index bb45e0009..b0c170b1b 100755
--- a/src/sbin/bcfg2-reports
+++ b/src/sbin/bcfg2-reports
@@ -53,15 +53,15 @@ def print_fields(fields, client, fmt, extra=None):
else:
fdata.append("dirty")
elif field == 'total':
- fdata.append(client.current_interaction.totalcount)
+ fdata.append(client.current_interaction.total_count)
elif field == 'good':
- fdata.append(client.current_interaction.goodcount)
+ fdata.append(client.current_interaction.good_count)
elif field == 'modified':
- fdata.append(client.current_interaction.modified_entry_count())
+ fdata.append(client.current_interaction.modified_count)
elif field == 'extra':
- fdata.append(client.current_interaction.extra_entry_count())
+ fdata.append(client.current_interaction.extra_count)
elif field == 'bad':
- fdata.append((client.current_interaction.badcount()))
+ fdata.append(client.current_interaction.bad_count)
else:
try:
fdata.append(getattr(client, field))
diff --git a/src/sbin/bcfg2-test b/src/sbin/bcfg2-test
index c33143a04..7c38a65d8 100755
--- a/src/sbin/bcfg2-test
+++ b/src/sbin/bcfg2-test
@@ -5,6 +5,7 @@ without failures"""
import os
import sys
+import signal
import fnmatch
import logging
import Bcfg2.Logger
@@ -156,7 +157,7 @@ def get_core(setup):
""" Get a server core, with events handled """
core = Bcfg2.Server.Core.BaseCore(setup)
core.load_plugins()
- core.fam.handle_events_in_interval(0.1)
+ core.block_for_fam_events(handle_events=True)
return core
@@ -190,9 +191,23 @@ def run_child(setup, clients, queue):
core.shutdown()
+def get_sigint_handler(core):
+ """ Get a function that handles SIGINT/Ctrl-C by shutting down the
+ core and exiting properly."""
+
+ def hdlr(sig, frame): # pylint: disable=W0613
+ """ Handle SIGINT/Ctrl-C by shutting down the core and exiting
+ properly. """
+ core.shutdown()
+ os._exit(1) # pylint: disable=W0212
+
+ return hdlr
+
+
def parse_args():
""" Parse command line arguments. """
optinfo = dict(Bcfg2.Options.TEST_COMMON_OPTIONS)
+
optinfo.update(Bcfg2.Options.CLI_COMMON_OPTIONS)
optinfo.update(Bcfg2.Options.SERVER_COMMON_OPTIONS)
setup = Bcfg2.Options.OptionParser(optinfo)
@@ -246,6 +261,7 @@ def main():
setup = parse_args()
logger = logging.getLogger(sys.argv[0])
core = get_core(setup)
+ signal.signal(signal.SIGINT, get_sigint_handler(core))
if setup['args']:
clients = setup['args']
@@ -282,8 +298,8 @@ def main():
for client in clients:
yield ClientTest(core, client, ignore)
- TestProgram(argv=sys.argv[:1] + core.setup['noseopts'],
- suite=LazySuite(generate_tests), exit=False)
+ result = TestProgram(argv=sys.argv[:1] + core.setup['noseopts'],
+ suite=LazySuite(generate_tests), exit=False)
# block until all children have completed -- should be
# immediate since we've already gotten all the results we
@@ -292,7 +308,10 @@ def main():
child.join()
core.shutdown()
- os._exit(0) # pylint: disable=W0212
+ if result.success:
+ os._exit(0) # pylint: disable=W0212
+ else:
+ os._exit(1) # pylint: disable=W0212
if __name__ == "__main__":
diff --git a/src/sbin/bcfg2-yum-helper b/src/sbin/bcfg2-yum-helper
index 7dbdad16b..227d977de 100755
--- a/src/sbin/bcfg2-yum-helper
+++ b/src/sbin/bcfg2-yum-helper
@@ -10,10 +10,14 @@ import sys
import yum
import logging
import Bcfg2.Logger
+from Bcfg2.Compat import wraps
+from lockfile import FileLock, LockTimeout
from optparse import OptionParser
try:
import json
-except ImportError:
+ # py2.4 json library is structured differently
+ json.loads # pylint: disable=W0104
+except (ImportError, AttributeError):
import simplejson as json
@@ -42,8 +46,8 @@ def pkgtup_to_string(package):
return ''.join(str(e) for e in rv)
-class DepSolver(object):
- """ Yum dependency solver """
+class YumHelper(object):
+ """ Yum helper base object """
def __init__(self, cfgfile, verbose=1):
self.cfgfile = cfgfile
@@ -57,6 +61,16 @@ class DepSolver(object):
self.yumbase._getConfig(cfgfile, debuglevel=verbose)
# pylint: enable=E1121,W0212
self.logger = logging.getLogger(self.__class__.__name__)
+
+
+class DepSolver(YumHelper):
+ """ Yum dependency solver. This is used for operations that only
+ read from the yum cache, and thus operates in cacheonly mode. """
+
+ def __init__(self, cfgfile, verbose=1):
+ YumHelper.__init__(self, cfgfile, verbose=verbose)
+ # internally, yum uses an integer, not a boolean, for conf.cache
+ self.yumbase.conf.cache = 1
self._groups = None
def get_groups(self):
@@ -181,6 +195,45 @@ class DepSolver(object):
packages.add(txmbr.pkgtup)
return list(packages), list(unknown)
+
+def acquire_lock(func):
+    """ decorator for CacheManager methods that acquires and releases
+    a lock while the method runs """
+ @wraps(func)
+ def inner(self, *args, **kwargs):
+ """ Get and release a lock while running the function this
+ wraps. """
+ self.logger.debug("Acquiring lock at %s" % self.lockfile)
+ while not self.lock.i_am_locking():
+ try:
+ self.lock.acquire(timeout=60) # wait up to 60 seconds
+ except LockTimeout:
+ self.lock.break_lock()
+ self.lock.acquire()
+ try:
+ func(self, *args, **kwargs)
+ finally:
+ self.lock.release()
+ self.logger.debug("Released lock at %s" % self.lockfile)
+
+ return inner
+
+
+class CacheManager(YumHelper):
+ """ Yum cache manager. Unlike :class:`DepSolver`, this can write
+ to the yum cache, and so is used for operations that muck with the
+ cache. (Technically, :func:`CacheManager.clean_cache` could be in
+ either DepSolver or CacheManager, but for consistency I've put it
+ here.) """
+
+ def __init__(self, cfgfile, verbose=1):
+ YumHelper.__init__(self, cfgfile, verbose=verbose)
+ self.lockfile = \
+ os.path.join(os.path.dirname(self.yumbase.conf.config_file_path),
+ "lock")
+ self.lock = FileLock(self.lockfile)
+
+ @acquire_lock
def clean_cache(self):
""" clean the yum cache """
for mdtype in ["Headers", "Packages", "Sqlite", "Metadata",
@@ -193,6 +246,27 @@ class DepSolver(object):
if not msg.startswith("0 "):
self.logger.info(msg)
+ @acquire_lock
+ def populate_cache(self):
+ """ populate the yum cache """
+ for repo in self.yumbase.repos.findRepos('*'):
+ repo.metadata_expire = 0
+ repo.mdpolicy = "group:all"
+ self.yumbase.doRepoSetup()
+ self.yumbase.repos.doSetup()
+ for repo in self.yumbase.repos.listEnabled():
+ # this populates the cache as a side effect
+ repo.repoXML # pylint: disable=W0104
+ try:
+ repo.getGroups()
+ except yum.Errors.RepoMDError:
+ pass # this repo has no groups
+ self.yumbase.repos.populateSack(mdtype='metadata', cacheonly=1)
+ self.yumbase.repos.populateSack(mdtype='filelists', cacheonly=1)
+ self.yumbase.repos.populateSack(mdtype='otherdata', cacheonly=1)
+ # this does something with the groups cache as a side effect
+ self.yumbase.comps # pylint: disable=W0104
+
def main():
parser = OptionParser()
@@ -221,29 +295,70 @@ def main():
logger.error("Config file %s not found" % options.config)
return 1
- depsolver = DepSolver(options.config, options.verbose)
+ # pylint: disable=W0702
+ rv = 0
if cmd == "clean":
- depsolver.clean_cache()
- print(json.dumps(True))
+ cachemgr = CacheManager(options.config, options.verbose)
+ try:
+ cachemgr.clean_cache()
+ print(json.dumps(True))
+ except:
+ logger.error("Unexpected error cleaning cache: %s" %
+ sys.exc_info()[1], exc_info=1)
+ print(json.dumps(False))
+ rv = 2
+ elif cmd == "makecache":
+ cachemgr = CacheManager(options.config, options.verbose)
+ try:
+ # this code copied from yumcommands.py
+ cachemgr.populate_cache()
+ print(json.dumps(True))
+ except yum.Errors.YumBaseError:
+ logger.error("Unexpected error creating cache: %s" %
+ sys.exc_info()[1], exc_info=1)
+ print(json.dumps(False))
elif cmd == "complete":
- data = json.loads(sys.stdin.read())
- depsolver.groups = data['groups']
- (packages, unknown) = depsolver.complete([pkg_to_tuple(p)
- for p in data['packages']])
- print(json.dumps(dict(packages=list(packages),
- unknown=list(unknown))))
+ depsolver = DepSolver(options.config, options.verbose)
+ try:
+ data = json.loads(sys.stdin.read())
+ except:
+ logger.error("Unexpected error decoding JSON input: %s" %
+ sys.exc_info()[1])
+            return 2
+ try:
+ depsolver.groups = data['groups']
+ (packages, unknown) = depsolver.complete(
+ [pkg_to_tuple(p) for p in data['packages']])
+ print(json.dumps(dict(packages=list(packages),
+ unknown=list(unknown))))
+ except:
+ logger.error("Unexpected error completing package set: %s" %
+ sys.exc_info()[1], exc_info=1)
+ print(json.dumps(dict(packages=[], unknown=data['packages'])))
+ rv = 2
elif cmd == "get_groups":
- data = json.loads(sys.stdin.read())
- rv = dict()
- for gdata in data:
- if "type" in gdata:
- packages = depsolver.get_group(gdata['group'],
- ptype=gdata['type'])
- else:
- packages = depsolver.get_group(gdata['group'])
- rv[gdata['group']] = list(packages)
- print(json.dumps(rv))
-
+ depsolver = DepSolver(options.config, options.verbose)
+ try:
+ data = json.loads(sys.stdin.read())
+ rv = dict()
+ for gdata in data:
+ if "type" in gdata:
+ packages = depsolver.get_group(gdata['group'],
+ ptype=gdata['type'])
+ else:
+ packages = depsolver.get_group(gdata['group'])
+ rv[gdata['group']] = list(packages)
+ print(json.dumps(rv))
+ except:
+ logger.error("Unexpected error getting groups: %s" %
+ sys.exc_info()[1], exc_info=1)
+ print(json.dumps(dict()))
+ rv = 2
+ else:
+ logger.error("Unknown command %s" % cmd)
+ print(json.dumps(None))
+ rv = 2
+ return rv
if __name__ == '__main__':
sys.exit(main())
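
The acquire_lock decorator follows a common lockfile pattern; an
isolated sketch of it, with a hypothetical lock path:

    from lockfile import FileLock, LockTimeout

    lock = FileLock("/tmp/bcfg2-yum-helper.lock")   # hypothetical path
    while not lock.i_am_locking():
        try:
            lock.acquire(timeout=60)   # wait up to 60 seconds
        except LockTimeout:
            lock.break_lock()          # assume the old holder is gone
            lock.acquire()
    try:
        pass                           # cache work would happen here
    finally:
        lock.release()
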
diff --git a/testsuite/Testschema/test_schema.py b/testsuite/Testschema/test_schema.py
index ddfe4775f..cd9b74cdf 100644
--- a/testsuite/Testschema/test_schema.py
+++ b/testsuite/Testschema/test_schema.py
@@ -41,7 +41,7 @@ class TestSchemas(Bcfg2TestCase):
xmllint = Popen(['xmllint', '--xinclude', '--noout', '--schema',
self.schema_url] + schemas,
stdout=PIPE, stderr=STDOUT)
- print(xmllint.communicate()[0])
+ print(xmllint.communicate()[0].decode())
self.assertEqual(xmllint.wait(), 0)
def test_duplicates(self):
diff --git a/testsuite/Testsrc/Testlib/TestClient/TestTools/TestPOSIX/TestAugeas.py b/testsuite/Testsrc/Testlib/TestClient/TestTools/TestPOSIX/TestAugeas.py
new file mode 100644
index 000000000..b8534f5a8
--- /dev/null
+++ b/testsuite/Testsrc/Testlib/TestClient/TestTools/TestPOSIX/TestAugeas.py
@@ -0,0 +1,247 @@
+# -*- coding: utf-8 -*-
+import os
+import sys
+import copy
+import lxml.etree
+import tempfile
+from mock import Mock, MagicMock, patch
+try:
+ from Bcfg2.Client.Tools.POSIX.Augeas import *
+ HAS_AUGEAS = True
+except ImportError:
+ POSIXAugeas = None
+ HAS_AUGEAS = False
+
+# add all parent testsuite directories to sys.path to allow (most)
+# relative imports in python 2.4
+path = os.path.dirname(__file__)
+while path != "/":
+ if os.path.basename(path).lower().startswith("test"):
+ sys.path.append(path)
+ if os.path.basename(path) == "testsuite":
+ break
+ path = os.path.dirname(path)
+from TestPOSIX.Testbase import TestPOSIXTool
+from common import *
+
+
+test_data = """<Test>
+ <Empty/>
+ <Text>content with spaces</Text>
+ <Attrs foo="foo" bar="bar"/>
+ <Children identical="false">
+ <Foo/>
+ <Bar attr="attr"/>
+ </Children>
+ <Children identical="true">
+ <Thing>one</Thing>
+ <Thing>two</Thing>
+ </Children>
+ <Children multi="true">
+ <Thing>same</Thing>
+ <Thing>same</Thing>
+ <Thing>same</Thing>
+ <Thing>same</Thing>
+ </Children>
+</Test>
+"""
+
+test_xdata = lxml.etree.XML(test_data)
+
+if can_skip or HAS_AUGEAS:
+ class TestPOSIXAugeas(TestPOSIXTool):
+ test_obj = POSIXAugeas
+
+ applied_commands = dict(
+ insert=lxml.etree.Element(
+ "Insert", label="Thing",
+ path='Test/Children[#attribute/identical = "true"]/Thing'),
+ set=lxml.etree.Element("Set", path="Test/Text/#text",
+ value="content with spaces"),
+ move=lxml.etree.Element(
+ "Move", source="Test/Foo",
+ destination='Test/Children[#attribute/identical = "false"]/Foo'),
+ remove=lxml.etree.Element("Remove", path="Test/Bar"),
+ clear=lxml.etree.Element("Clear", path="Test/Empty/#text"),
+ setm=lxml.etree.Element(
+ "SetMulti", sub="#text", value="same",
+ base='Test/Children[#attribute/multi = "true"]/Thing'))
+
+ @skipUnless(HAS_AUGEAS, "Python Augeas libraries not found")
+ def setUp(self):
+ fd, self.tmpfile = tempfile.mkstemp()
+ os.fdopen(fd, 'w').write(test_data)
+
+ def tearDown(self):
+ tmpfile = getattr(self, "tmpfile", None)
+ if tmpfile and os.path.exists(tmpfile):
+ os.unlink(tmpfile)
+
+ def test_fully_specified(self):
+ ptool = self.get_obj()
+
+ entry = lxml.etree.Element("Path", name="/test", type="augeas")
+ self.assertFalse(ptool.fully_specified(entry))
+
+ lxml.etree.SubElement(entry, "Set", path="/test", value="test")
+ self.assertTrue(ptool.fully_specified(entry))
+
+ def test_install(self):
+ # this is tested adequately by the other tests
+ pass
+
+ def test_verify(self):
+ # this is tested adequately by the other tests
+ pass
+
+ @patch("Bcfg2.Client.Tools.POSIX.Augeas.POSIXTool.verify")
+ def _verify(self, commands, mock_verify):
+ ptool = self.get_obj()
+ mock_verify.return_value = True
+
+ entry = lxml.etree.Element("Path", name=self.tmpfile,
+ type="augeas", lens="Xml")
+ entry.extend(commands)
+
+ modlist = []
+ self.assertTrue(ptool.verify(entry, modlist))
+ mock_verify.assert_called_with(ptool, entry, modlist)
+ self.assertXMLEqual(lxml.etree.parse(self.tmpfile).getroot(),
+ test_xdata)
+
+ def test_verify_insert(self):
+ """ Test successfully verifying an Insert command """
+ self._verify([self.applied_commands['insert']])
+
+ def test_verify_set(self):
+ """ Test successfully verifying a Set command """
+ self._verify([self.applied_commands['set']])
+
+ def test_verify_move(self):
+ """ Test successfully verifying a Move command """
+ self._verify([self.applied_commands['move']])
+
+ def test_verify_remove(self):
+ """ Test successfully verifying a Remove command """
+ self._verify([self.applied_commands['remove']])
+
+ def test_verify_clear(self):
+ """ Test successfully verifying a Clear command """
+ self._verify([self.applied_commands['clear']])
+
+ def test_verify_set_multi(self):
+ """ Test successfully verifying a SetMulti command """
+ self._verify([self.applied_commands['setm']])
+
+ def test_verify_all(self):
+ """ Test successfully verifying multiple commands """
+ self._verify(self.applied_commands.values())
+
+ @patch("Bcfg2.Client.Tools.POSIX.Augeas.POSIXTool.install")
+ def _install(self, commands, expected, mock_install, **attrs):
+ ptool = self.get_obj()
+ mock_install.return_value = True
+
+ entry = lxml.etree.Element("Path", name=self.tmpfile,
+ type="augeas", lens="Xml")
+ for key, val in attrs.items():
+ entry.set(key, val)
+ entry.extend(commands)
+
+ self.assertTrue(ptool.install(entry))
+ mock_install.assert_called_with(ptool, entry)
+ self.assertXMLEqual(lxml.etree.parse(self.tmpfile).getroot(),
+ expected)
+
+ def test_install_set_existing(self):
+ """ Test setting the value of an existing node """
+ expected = copy.deepcopy(test_xdata)
+ expected.find("Text").text = "Changed content"
+ self._install([lxml.etree.Element("Set", path="Test/Text/#text",
+ value="Changed content")],
+ expected)
+
+ def test_install_set_new(self):
+            """ Test setting the value of a new node """
+ expected = copy.deepcopy(test_xdata)
+ newtext = lxml.etree.SubElement(expected, "NewText")
+ newtext.text = "new content"
+ self._install([lxml.etree.Element("Set", path="Test/NewText/#text",
+ value="new content")],
+ expected)
+
+ def test_install_remove(self):
+ """ Test removing a node """
+ expected = copy.deepcopy(test_xdata)
+ expected.remove(expected.find("Attrs"))
+ self._install(
+ [lxml.etree.Element("Remove",
+ path='Test/*[#attribute/foo = "foo"]')],
+ expected)
+
+ def test_install_move(self):
+ """ Test moving a node """
+ expected = copy.deepcopy(test_xdata)
+ foo = expected.xpath("//Foo")[0]
+ expected.append(foo)
+ self._install(
+ [lxml.etree.Element("Move", source='Test/Children/Foo',
+ destination='Test/Foo')],
+ expected)
+
+ def test_install_clear(self):
+ """ Test clearing a node """
+ # TODO: clearing a node doesn't seem to work with the XML lens
+ #
+ # % augtool -b
+ # augtool> set /augeas/load/Xml/incl[3] "/tmp/test.xml"
+ # augtool> load
+ # augtool> clear '/files/tmp/test.xml/Test/Text/#text'
+ # augtool> save
+ # error: Failed to execute command
+ # saving failed (run 'print /augeas//error' for details)
+ # augtool> print /augeas//error
+ #
+ # The error isn't useful.
+ pass
+
+ def test_install_set_multi(self):
+ """ Test setting multiple nodes at once """
+ expected = copy.deepcopy(test_xdata)
+ for thing in expected.xpath("Children[@identical='true']/Thing"):
+ thing.text = "same"
+ self._install(
+ [lxml.etree.Element(
+ "SetMulti", value="same",
+ base='Test/Children[#attribute/identical = "true"]',
+ sub="Thing/#text")],
+ expected)
+
+ def test_install_insert(self):
+ """ Test inserting a node """
+ expected = copy.deepcopy(test_xdata)
+ children = expected.xpath("Children[@identical='true']")[0]
+ thing = lxml.etree.Element("Thing")
+ thing.text = "three"
+ children.append(thing)
+ self._install(
+ [lxml.etree.Element(
+ "Insert",
+ path='Test/Children[#attribute/identical = "true"]/Thing[2]',
+ label="Thing", where="after"),
+ lxml.etree.Element(
+ "Set",
+ path='Test/Children[#attribute/identical = "true"]/Thing[3]/#text',
+ value="three")],
+ expected)
+
+ def test_install_initial(self):
+ """ Test creating initial content and then modifying it """
+ os.unlink(self.tmpfile)
+ expected = copy.deepcopy(test_xdata)
+ expected.find("Text").text = "Changed content"
+ initial = lxml.etree.Element("Initial")
+ initial.text = test_data
+ modify = lxml.etree.Element("Set", path="Test/Text/#text",
+ value="Changed content")
+ self._install([initial, modify], expected, current_exists="false")
diff --git a/testsuite/Testsrc/Testlib/TestClient/TestTools/TestPOSIX/Testbase.py b/testsuite/Testsrc/Testlib/TestClient/TestTools/TestPOSIX/Testbase.py
index 49e9be2ba..d2f383f42 100644
--- a/testsuite/Testsrc/Testlib/TestClient/TestTools/TestPOSIX/Testbase.py
+++ b/testsuite/Testsrc/Testlib/TestClient/TestTools/TestPOSIX/Testbase.py
@@ -711,12 +711,13 @@ class TestPOSIXTool(TestTool):
gather_data_rv[idx] = val
ptool._gather_data.return_value = tuple(gather_data_rv)
+ stat_mode = 17407
mtime = 1344430414
+ stat_rv = (stat_mode, Mock(), Mock(), Mock(), Mock(), Mock(), Mock(),
+ Mock(), mtime, Mock())
+ gather_data_rv[0] = stat_rv
entry = reset()
entry.set("mtime", str(mtime))
- stat_rv = MagicMock()
- stat_rv.__getitem__.return_value = mtime
- gather_data_rv[0] = stat_rv
ptool._gather_data.return_value = tuple(gather_data_rv)
self.assertTrue(ptool._verify_metadata(entry))
ptool._gather_data.assert_called_with(entry.get("name"))
@@ -788,7 +789,7 @@ class TestPOSIXTool(TestTool):
ptool._gather_data.assert_called_with(entry.get("name"))
ptool._verify_acls.assert_called_with(entry,
path=entry.get("name"))
- mock_matchpathcon.assert_called_with(entry.get("name"), 0)
+ mock_matchpathcon.assert_called_with(entry.get("name"), stat_mode)
self.assertEqual(entry.get("current_exists", 'true'), 'true')
for attr, idx, val in expected:
self.assertEqual(entry.get(attr), val)
@@ -803,7 +804,7 @@ class TestPOSIXTool(TestTool):
ptool._gather_data.assert_called_with(entry.get("name"))
ptool._verify_acls.assert_called_with(entry,
path=entry.get("name"))
- mock_matchpathcon.assert_called_with(entry.get("name"), 0)
+ mock_matchpathcon.assert_called_with(entry.get("name"), stat_mode)
self.assertEqual(entry.get("current_exists", 'true'), 'true')
for attr, idx, val in expected:
self.assertEqual(entry.get(attr), val)
@@ -897,7 +898,7 @@ class TestPOSIXTool(TestTool):
filedef_rv.__iter__.return_value = iter(file_acls)
defacls = acls
- for akey, perms in acls.items():
+ for akey, perms in list(acls.items()):
defacls[('default', akey[1], akey[2])] = perms
self.assertItemsEqual(ptool._list_file_acls(path), defacls)
mock_isdir.assert_called_with(path)
@@ -1009,7 +1010,7 @@ class TestPOSIXTool(TestTool):
else:
return True
ptool._set_perms.side_effect = set_perms_rv
- self.assertFalse(ptool._makedirs(entry))
+ self.assertTrue(ptool._makedirs(entry))
self.assertItemsEqual(mock_exists.call_args_list,
[call("/test"), call("/test/foo"),
call("/test/foo/bar")])
diff --git a/testsuite/Testsrc/Testlib/TestClient/TestTools/TestPOSIXUsers.py b/testsuite/Testsrc/Testlib/TestClient/TestTools/TestPOSIXUsers.py
index 9478f7071..c207900f1 100644
--- a/testsuite/Testsrc/Testlib/TestClient/TestTools/TestPOSIXUsers.py
+++ b/testsuite/Testsrc/Testlib/TestClient/TestTools/TestPOSIXUsers.py
@@ -27,7 +27,14 @@ class TestPOSIXUsers(TestTool):
def get_obj(self, logger=None, setup=None, config=None):
if setup is None:
setup = MagicMock()
- setup.__getitem__.return_value = []
+ def getitem(key):
+ if key == 'encoding':
+ return 'UTF-8'
+ else:
+ return []
+
+ setup.__getitem__.side_effect = getitem
+
return TestTool.get_obj(self, logger, setup, config)
@patch("pwd.getpwall")
@@ -381,15 +388,15 @@ class TestPOSIXUsers(TestTool):
(lxml.etree.Element("POSIXUser", name="test", group="test",
home="/home/test", shell="/bin/zsh",
gecos="Test McTest"),
- ["-m", "-g", "test", "-d", "/home/test", "-s", "/bin/zsh",
+ ["-g", "test", "-d", "/home/test", "-s", "/bin/zsh",
"-c", "Test McTest"]),
(lxml.etree.Element("POSIXUser", name="test", group="test",
home="/home/test", shell="/bin/zsh",
gecos="Test McTest", uid="1001"),
- ["-m", "-u", "1001", "-g", "test", "-d", "/home/test",
+ ["-u", "1001", "-g", "test", "-d", "/home/test",
"-s", "/bin/zsh", "-c", "Test McTest"]),
(entry,
- ["-m", "-g", "test", "-G", "wheel,users", "-d", "/home/test",
+ ["-g", "test", "-G", "wheel,users", "-d", "/home/test",
"-s", "/bin/zsh", "-c", "Test McTest"])]
for entry, expected in cases:
for action in ["add", "mod", "del"]:
diff --git a/testsuite/Testsrc/Testlib/TestServer/TestPlugin/Testbase.py b/testsuite/Testsrc/Testlib/TestServer/TestPlugin/Testbase.py
index 318f5ceaa..870983f60 100644
--- a/testsuite/Testsrc/Testlib/TestServer/TestPlugin/Testbase.py
+++ b/testsuite/Testsrc/Testlib/TestServer/TestPlugin/Testbase.py
@@ -29,16 +29,11 @@ class TestDebuggable(Bcfg2TestCase):
def test_set_debug(self):
d = self.get_obj()
- d.debug_log = Mock()
self.assertEqual(True, d.set_debug(True))
self.assertEqual(d.debug_flag, True)
- self.assertTrue(d.debug_log.called)
-
- d.debug_log.reset_mock()
self.assertEqual(False, d.set_debug(False))
self.assertEqual(d.debug_flag, False)
- self.assertTrue(d.debug_log.called)
def test_toggle_debug(self):
d = self.get_obj()
@@ -80,24 +75,25 @@ class TestPlugin(TestDebuggable):
@patch("os.makedirs")
@patch("os.path.exists")
def test__init(self, mock_exists, mock_makedirs):
- core = Mock()
- core.setup = MagicMock()
-
- mock_exists.return_value = True
- p = self.get_obj(core=core)
- self.assertEqual(p.data, os.path.join(datastore, p.name))
- self.assertEqual(p.core, core)
- mock_exists.assert_any_call(p.data)
- self.assertFalse(mock_makedirs.called)
-
- mock_exists.reset_mock()
- mock_makedirs.reset_mock()
- mock_exists.return_value = False
- p = self.get_obj(core=core)
- self.assertEqual(p.data, os.path.join(datastore, p.name))
- self.assertEqual(p.core, core)
- mock_exists.assert_any_call(p.data)
- mock_makedirs.assert_any_call(p.data)
+ if self.test_obj.create:
+ core = Mock()
+ core.setup = MagicMock()
+
+ mock_exists.return_value = True
+ p = self.get_obj(core=core)
+ self.assertEqual(p.data, os.path.join(datastore, p.name))
+ self.assertEqual(p.core, core)
+ mock_exists.assert_any_call(p.data)
+ self.assertFalse(mock_makedirs.called)
+
+ mock_exists.reset_mock()
+ mock_makedirs.reset_mock()
+ mock_exists.return_value = False
+ p = self.get_obj(core=core)
+ self.assertEqual(p.data, os.path.join(datastore, p.name))
+ self.assertEqual(p.core, core)
+ mock_exists.assert_any_call(p.data)
+ mock_makedirs.assert_any_call(p.data)
@patch("os.makedirs")
def test_init_repo(self, mock_makedirs):
diff --git a/testsuite/Testsrc/Testlib/TestServer/TestPlugin/Testhelpers.py b/testsuite/Testsrc/Testlib/TestServer/TestPlugin/Testhelpers.py
index 94866cf39..ce17cb076 100644
--- a/testsuite/Testsrc/Testlib/TestServer/TestPlugin/Testhelpers.py
+++ b/testsuite/Testsrc/Testlib/TestServer/TestPlugin/Testhelpers.py
@@ -1,5 +1,4 @@
import os
-import re
import sys
import copy
import lxml.etree
@@ -7,6 +6,7 @@ import Bcfg2.Server
from Bcfg2.Compat import reduce
from mock import Mock, MagicMock, patch
from Bcfg2.Server.Plugin.helpers import *
+from Bcfg2.Server.Plugin.exceptions import PluginInitError
# add all parent testsuite directories to sys.path to allow (most)
# relative imports in python 2.4
@@ -28,6 +28,7 @@ def tostring(el):
class FakeElementTree(lxml.etree._ElementTree):
xinclude = Mock()
+ parse = Mock
class TestFunctions(Bcfg2TestCase):
@@ -76,6 +77,14 @@ class TestFunctions(Bcfg2TestCase):
class TestDatabaseBacked(TestPlugin):
test_obj = DatabaseBacked
+ def get_obj(self, core=None):
+ if not HAS_DJANGO:
+ if core is None:
+ core = MagicMock()
+ # disable the database
+ core.setup.cfp.getboolean.return_value = False
+ return TestPlugin.get_obj(self, core=core)
+
@skipUnless(HAS_DJANGO, "Django not found")
def test__use_db(self):
core = Mock()
@@ -90,13 +99,13 @@ class TestDatabaseBacked(TestPlugin):
Bcfg2.Server.Plugin.helpers.HAS_DJANGO = False
core = Mock()
+ core.setup.cfp.getboolean.return_value = False
db = self.get_obj(core)
self.assertFalse(db._use_db)
core = Mock()
core.setup.cfp.getboolean.return_value = True
- db = self.get_obj(core)
- self.assertFalse(db._use_db)
+ self.assertRaises(PluginInitError, self.get_obj, core)
Bcfg2.Server.Plugin.helpers.HAS_DJANGO = True
@@ -623,17 +632,9 @@ class TestXMLFileBacked(TestFileBacked):
self.assertIn("/test/test2.xml", xfb.extra_monitors)
fam = Mock()
- if self.should_monitor is not True:
- fam.reset_mock()
- xfb = self.get_obj(fam=fam)
- fam.reset_mock()
- xfb.add_monitor("/test/test3.xml")
- self.assertFalse(fam.AddMonitor.called)
- self.assertIn("/test/test3.xml", xfb.extra_monitors)
-
- if self.should_monitor is not False:
- fam.reset_mock()
- xfb = self.get_obj(fam=fam, should_monitor=True)
+ fam.reset_mock()
+ xfb = self.get_obj(fam=fam)
+ if xfb.fam:
xfb.add_monitor("/test/test4.xml")
fam.AddMonitor.assert_called_with("/test/test4.xml", xfb)
self.assertIn("/test/test4.xml", xfb.extra_monitors)
@@ -1131,14 +1132,14 @@ class TestXMLSrc(TestXMLFileBacked):
# ensure that the node object has the necessary interface
self.assertTrue(hasattr(self.test_obj.__node__, "Match"))
- @patch("%s.open" % builtins)
- def test_HandleEvent(self, mock_open):
+ @patch("lxml.etree.parse")
+ def test_HandleEvent(self, mock_parse):
xdata = lxml.etree.Element("Test")
lxml.etree.SubElement(xdata, "Path", name="path", attr="whatever")
xsrc = self.get_obj("/test/foo.xml")
xsrc.__node__ = Mock()
- mock_open.return_value.read.return_value = tostring(xdata)
+ mock_parse.return_value = xdata.getroottree()
if xsrc.__priority_required__:
# test with no priority at all
@@ -1147,20 +1148,20 @@ class TestXMLSrc(TestXMLFileBacked):
# test with bogus priority
xdata.set("priority", "cow")
- mock_open.return_value.read.return_value = tostring(xdata)
+ mock_parse.return_value = xdata.getroottree()
self.assertRaises(PluginExecutionError,
- xsrc.HandleEvent, Mock())
+ xsrc.HandleEvent, Mock())
# assign a priority to use in future tests
xdata.set("priority", "10")
- mock_open.return_value.read.return_value = tostring(xdata)
+ mock_parse.return_value = xdata.getroottree()
- mock_open.reset_mock()
+ mock_parse.reset_mock()
xsrc = self.get_obj("/test/foo.xml")
xsrc.__node__ = Mock()
xsrc.HandleEvent(Mock())
- mock_open.assert_called_with("/test/foo.xml")
- mock_open.return_value.read.assert_any_call()
+ mock_parse.assert_called_with("/test/foo.xml",
+ parser=Bcfg2.Server.XMLParser)
self.assertXMLEqual(xsrc.__node__.call_args[0][0], xdata)
self.assertEqual(xsrc.__node__.call_args[0][1], dict())
self.assertEqual(xsrc.pnode, xsrc.__node__.return_value)
diff --git a/testsuite/Testsrc/Testlib/TestServer/TestPlugins/TestAWSTags.py b/testsuite/Testsrc/Testlib/TestServer/TestPlugins/TestAWSTags.py
new file mode 100644
index 000000000..05e0bb9a1
--- /dev/null
+++ b/testsuite/Testsrc/Testlib/TestServer/TestPlugins/TestAWSTags.py
@@ -0,0 +1,140 @@
+import os
+import sys
+import lxml.etree
+import Bcfg2.Server.Plugin
+from mock import Mock, MagicMock, patch
+try:
+ from Bcfg2.Server.Plugins.AWSTags import *
+ HAS_BOTO = True
+except ImportError:
+ HAS_BOTO = False
+
+# add all parent testsuite directories to sys.path to allow (most)
+# relative imports in python 2.4
+path = os.path.dirname(__file__)
+while path != "/":
+ if os.path.basename(path).lower().startswith("test"):
+ sys.path.append(path)
+ if os.path.basename(path) == "testsuite":
+ break
+ path = os.path.dirname(path)
+from common import *
+from TestPlugin import TestPlugin, TestConnector, TestClientRunHooks
+
+config = '''
+<AWSTags>
+ <Tag name="name-only">
+ <Group>group1</Group>
+ <Group>group2</Group>
+ </Tag>
+ <Tag name="name-and-value" value="value">
+ <Group>group3</Group>
+ </Tag>
+ <Tag name="regex-(.*)">
+ <Group>group-$1</Group>
+ </Tag>
+ <Tag name="regex-value" value="(.*)">
+ <Group>group-$1</Group>
+ </Tag>
+</AWSTags>
+'''
+
+tags = {
+ "empty.example.com": {},
+ "no-matches.example.com": {"nameonly": "foo",
+ "Name": "no-matches",
+ "foo": "bar"},
+ "foo.example.com": {"name-only": "name-only",
+ "name-and-value": "wrong",
+ "regex-name": "foo"},
+ "bar.example.com": {"name-and-value": "value",
+ "regex-value": "bar"}}
+
+groups = {
+ "empty.example.com": [],
+ "no-matches.example.com": [],
+ "foo.example.com": ["group1", "group2", "group-name"],
+ "bar.example.com": ["group3", "group-value", "group-bar"]}
+
+
+def make_instance(name):
+ rv = Mock()
+ rv.private_dns_name = name
+ rv.tags = tags[name]
+ return rv
+
+
+instances = [make_instance(n) for n in tags.keys()]
+
+
+def get_all_instances(filters=None):
+ insts = [i for i in instances
+ if i.private_dns_name == filters['private-dns-name']]
+ res = Mock()
+ res.instances = insts
+ return [res]
+
+
+if HAS_BOTO:
+ class TestAWSTags(TestPlugin, TestClientRunHooks, TestConnector):
+ test_obj = AWSTags
+
+ def get_obj(self, core=None):
+ @patchIf(not isinstance(Bcfg2.Server.Plugins.AWSTags.connect_ec2,
+ Mock),
+ "Bcfg2.Server.Plugins.AWSTags.connect_ec2", Mock())
+ @patch("lxml.etree.Element", Mock())
+ def inner():
+ obj = TestPlugin.get_obj(self, core=core)
+ obj.config.data = config
+ obj.config.Index()
+ return obj
+ return inner()
+
+ @patch("Bcfg2.Server.Plugins.AWSTags.connect_ec2")
+ def test_connect(self, mock_connect_ec2):
+ """ Test connection to EC2 """
+ key_id = "a09sdbipasdf"
+ access_key = "oiilb234ipwe9"
+
+ def cfp_get(section, option):
+ if option == "access_key_id":
+ return key_id
+ elif option == "secret_access_key":
+ return access_key
+ else:
+ return Mock()
+
+ core = Mock()
+ core.setup.cfp.get = Mock(side_effect=cfp_get)
+ awstags = self.get_obj(core=core)
+ mock_connect_ec2.assert_called_with(
+ aws_access_key_id=key_id,
+ aws_secret_access_key=access_key)
+
+ def test_get_additional_data(self):
+ """ Test AWSTags.get_additional_data() """
+ awstags = self.get_obj()
+ awstags._ec2.get_all_instances = \
+ Mock(side_effect=get_all_instances)
+
+ for hostname, expected in tags.items():
+ metadata = Mock()
+ metadata.hostname = hostname
+ self.assertItemsEqual(awstags.get_additional_data(metadata),
+ expected)
+
+ def test_get_additional_groups_caching(self):
+ """ Test AWSTags.get_additional_groups() with caching enabled """
+ awstags = self.get_obj()
+ awstags._ec2.get_all_instances = \
+ Mock(side_effect=get_all_instances)
+
+ for hostname, expected in groups.items():
+ metadata = Mock()
+ metadata.hostname = hostname
+ actual = awstags.get_additional_groups(metadata)
+ msg = """%s has incorrect groups:
+actual: %s
+expected: %s""" % (hostname, actual, expected)
+ self.assertItemsEqual(actual, expected, msg)
diff --git a/testsuite/Testsrc/Testlib/TestServer/TestPlugins/TestCfg/TestCfgEncryptedGenerator.py b/testsuite/Testsrc/Testlib/TestServer/TestPlugins/TestCfg/TestCfgEncryptedGenerator.py
index 71a7410da..2bfec0e2d 100644
--- a/testsuite/Testsrc/Testlib/TestServer/TestPlugins/TestCfg/TestCfgEncryptedGenerator.py
+++ b/testsuite/Testsrc/Testlib/TestServer/TestPlugins/TestCfg/TestCfgEncryptedGenerator.py
@@ -1,6 +1,7 @@
import os
import sys
import lxml.etree
+import Bcfg2.Server.Plugins.Cfg
from mock import Mock, MagicMock, patch
from Bcfg2.Server.Plugins.Cfg.CfgEncryptedGenerator import *
from Bcfg2.Server.Plugin import PluginExecutionError
@@ -47,9 +48,10 @@ if can_skip or HAS_CRYPTO:
ceg = self.get_obj()
ceg.handle_event(event)
mock_handle_event.assert_called_with(ceg, event)
- mock_decrypt.assert_called_with("encrypted",
- setup=SETUP,
- algorithm=mock_get_algorithm.return_value)
+ mock_decrypt.assert_called_with(
+ "encrypted",
+ setup=Bcfg2.Server.Plugins.Cfg.SETUP,
+ algorithm=mock_get_algorithm.return_value)
self.assertEqual(ceg.data, "plaintext")
reset()
diff --git a/testsuite/Testsrc/Testlib/TestServer/TestPlugins/TestCfg/TestCfgPrivateKeyCreator.py b/testsuite/Testsrc/Testlib/TestServer/TestPlugins/TestCfg/TestCfgPrivateKeyCreator.py
index dc4b11241..e139a592b 100644
--- a/testsuite/Testsrc/Testlib/TestServer/TestPlugins/TestCfg/TestCfgPrivateKeyCreator.py
+++ b/testsuite/Testsrc/Testlib/TestServer/TestPlugins/TestCfg/TestCfgPrivateKeyCreator.py
@@ -31,6 +31,7 @@ class TestCfgPrivateKeyCreator(TestCfgCreator, TestStructFile):
should_monitor = False
def get_obj(self, name=None, fam=None):
+ Bcfg2.Server.Plugins.Cfg.CfgPublicKeyCreator.CFG = Mock()
return TestCfgCreator.get_obj(self, name=name)
@patch("Bcfg2.Server.Plugins.Cfg.CfgCreator.handle_event")
@@ -259,24 +260,6 @@ class TestCfgPrivateKeyCreator(TestCfgCreator, TestStructFile):
pkc.write_data.assert_called_with("privatekey", group="foo")
mock_rmtree.assert_called_with(datastore)
- reset()
- self.assertEqual(pkc.create_data(entry, metadata, return_pair=True),
- ("ssh-rsa publickey pubkey.filename\n",
- "privatekey"))
- pkc.XMLMatch.assert_called_with(metadata)
- pkc.get_specificity.assert_called_with(metadata,
- pkc.XMLMatch.return_value)
- pkc._gen_keypair.assert_called_with(metadata,
- pkc.XMLMatch.return_value)
- self.assertItemsEqual(mock_open.call_args_list,
- [call(privkey + ".pub"), call(privkey)])
- pkc.pubkey_creator.get_filename.assert_called_with(group="foo")
- pkc.pubkey_creator.write_data.assert_called_with(
- "ssh-rsa publickey pubkey.filename\n",
- group="foo")
- pkc.write_data.assert_called_with("privatekey", group="foo")
- mock_rmtree.assert_called_with(datastore)
-
inner()
if HAS_CRYPTO:
diff --git a/testsuite/Testsrc/Testlib/TestServer/TestPlugins/TestCfg/TestCfgPublicKeyCreator.py b/testsuite/Testsrc/Testlib/TestServer/TestPlugins/TestCfg/TestCfgPublicKeyCreator.py
index 04772cf9a..ef4610fae 100644
--- a/testsuite/Testsrc/Testlib/TestServer/TestPlugins/TestCfg/TestCfgPublicKeyCreator.py
+++ b/testsuite/Testsrc/Testlib/TestServer/TestPlugins/TestCfg/TestCfgPublicKeyCreator.py
@@ -26,6 +26,7 @@ class TestCfgPublicKeyCreator(TestCfgCreator, TestStructFile):
should_monitor = False
def get_obj(self, name=None, fam=None):
+ Bcfg2.Server.Plugins.Cfg.CfgPublicKeyCreator.CFG = Mock()
return TestCfgCreator.get_obj(self, name=name)
@patch("Bcfg2.Server.Plugins.Cfg.CfgCreator.handle_event")
@@ -37,41 +38,117 @@ class TestCfgPublicKeyCreator(TestCfgCreator, TestStructFile):
mock_HandleEvent.assert_called_with(pkc, evt)
mock_handle_event.assert_called_with(pkc, evt)
- def test_create_data(self):
+ @patch("os.unlink")
+ @patch("os.path.exists")
+ @patch("tempfile.mkstemp")
+ @patch("os.fdopen", Mock())
+ @patch("%s.open" % builtins)
+ def test_create_data(self, mock_open, mock_mkstemp, mock_exists,
+ mock_unlink):
metadata = Mock()
pkc = self.get_obj()
pkc.cfg = Mock()
+ pkc.core = Mock()
+ pkc.cmd = Mock()
+ pkc.write_data = Mock()
+ pubkey = "public key data"
privkey_entryset = Mock()
privkey_creator = Mock()
- pubkey = Mock()
- privkey = Mock()
- privkey_creator.create_data.return_value = (pubkey, privkey)
- privkey_entryset.best_matching.return_value = privkey_creator
+ privkey_creator.get_specificity = Mock()
+ privkey_creator.get_specificity.return_value = dict()
+ fileloc = pkc.get_filename()
pkc.cfg.entries = {"/home/foo/.ssh/id_rsa": privkey_entryset}
+ def reset():
+ privkey_creator.reset_mock()
+ pkc.cmd.reset_mock()
+ pkc.core.reset_mock()
+ pkc.write_data.reset_mock()
+ mock_exists.reset_mock()
+ mock_unlink.reset_mock()
+ mock_mkstemp.reset_mock()
+ mock_open.reset_mock()
+
# public key doesn't end in .pub
entry = lxml.etree.Element("Path", name="/home/bar/.ssh/bogus")
self.assertRaises(CfgCreationError,
pkc.create_data, entry, metadata)
+ self.assertFalse(pkc.write_data.called)
+
+ # cannot bind private key
+ reset()
+ pkc.core.Bind.side_effect = PluginExecutionError
+ entry = lxml.etree.Element("Path", name="/home/foo/.ssh/id_rsa.pub")
+ self.assertRaises(CfgCreationError,
+ pkc.create_data, entry, metadata)
+ self.assertFalse(pkc.write_data.called)
# private key not in cfg.entries
+ reset()
+ pkc.core.Bind.side_effect = None
+ pkc.core.Bind.return_value = "private key data"
entry = lxml.etree.Element("Path", name="/home/bar/.ssh/id_rsa.pub")
self.assertRaises(CfgCreationError,
pkc.create_data, entry, metadata)
+ self.assertFalse(pkc.write_data.called)
- # successful operation
+ # no privkey.xml defined
+ reset()
+ privkey_entryset.best_matching.side_effect = PluginExecutionError
+ entry = lxml.etree.Element("Path", name="/home/foo/.ssh/id_rsa.pub")
+ self.assertRaises(CfgCreationError,
+ pkc.create_data, entry, metadata)
+ self.assertFalse(pkc.write_data.called)
+
+ # successful operation, create new key
+ reset()
+ pkc.cmd.run.return_value = Mock()
+ pkc.cmd.run.return_value.success = True
+ pkc.cmd.run.return_value.stdout = pubkey
+ mock_mkstemp.return_value = (Mock(), str(Mock()))
+ mock_exists.return_value = False
+ privkey_entryset.best_matching.side_effect = None
+ privkey_entryset.best_matching.return_value = privkey_creator
entry = lxml.etree.Element("Path", name="/home/foo/.ssh/id_rsa.pub")
self.assertEqual(pkc.create_data(entry, metadata), pubkey)
+ self.assertTrue(pkc.core.Bind.called)
+ (privkey_entry, md) = pkc.core.Bind.call_args[0]
+ self.assertXMLEqual(privkey_entry,
+ lxml.etree.Element("Path",
+ name="/home/foo/.ssh/id_rsa"))
+ self.assertEqual(md, metadata)
+
privkey_entryset.get_handlers.assert_called_with(metadata, CfgCreator)
- privkey_entryset.best_matching.assert_called_with(metadata,
- privkey_entryset.get_handlers.return_value)
- self.assertXMLEqual(privkey_creator.create_data.call_args[0][0],
+ privkey_entryset.best_matching.assert_called_with(
+ metadata,
+ privkey_entryset.get_handlers.return_value)
+ mock_exists.assert_called_with(fileloc)
+ pkc.cmd.run.assert_called_with(["ssh-keygen", "-y", "-f",
+ mock_mkstemp.return_value[1]])
+ self.assertEqual(pkc.write_data.call_args[0][0], pubkey)
+ mock_unlink.assert_called_with(mock_mkstemp.return_value[1])
+ self.assertFalse(mock_open.called)
+
+ # successful operation, no need to create new key
+ reset()
+ mock_exists.return_value = True
+ mock_open.return_value = Mock()
+ mock_open.return_value.read.return_value = pubkey
+ pkc.cmd.run.return_value.stdout = None
+ self.assertEqual(pkc.create_data(entry, metadata), pubkey)
+ self.assertTrue(pkc.core.Bind.called)
+ (privkey_entry, md) = pkc.core.Bind.call_args[0]
+ self.assertXMLEqual(privkey_entry,
lxml.etree.Element("Path",
name="/home/foo/.ssh/id_rsa"))
- self.assertEqual(privkey_creator.create_data.call_args[0][1], metadata)
+ self.assertEqual(md, metadata)
- # no privkey.xml
- privkey_entryset.best_matching.side_effect = PluginExecutionError
- self.assertRaises(CfgCreationError,
- pkc.create_data, entry, metadata)
+ privkey_entryset.get_handlers.assert_called_with(metadata, CfgCreator)
+ privkey_entryset.best_matching.assert_called_with(
+ metadata,
+ privkey_entryset.get_handlers.return_value)
+ mock_exists.assert_called_with(fileloc)
+ mock_open.assert_called_with(fileloc)
+ self.assertFalse(mock_mkstemp.called)
+ self.assertFalse(pkc.write_data.called)
diff --git a/testsuite/Testsrc/Testlib/TestServer/TestPlugins/TestCfg/Test_init.py b/testsuite/Testsrc/Testlib/TestServer/TestPlugins/TestCfg/Test_init.py
index 2e758774e..fdfb3a9f7 100644
--- a/testsuite/Testsrc/Testlib/TestServer/TestPlugins/TestCfg/Test_init.py
+++ b/testsuite/Testsrc/Testlib/TestServer/TestPlugins/TestCfg/Test_init.py
@@ -6,7 +6,7 @@ import Bcfg2.Options
from Bcfg2.Compat import walk_packages
from mock import Mock, MagicMock, patch
from Bcfg2.Server.Plugins.Cfg import *
-from Bcfg2.Server.Plugin import PluginExecutionError
+from Bcfg2.Server.Plugin import PluginExecutionError, Specificity
# add all parent testsuite directories to sys.path to allow (most)
# relative imports in python 2.4
@@ -298,21 +298,20 @@ class TestCfgEntrySet(TestEntrySet):
for submodule in walk_packages(path=Bcfg2.Server.Plugins.Cfg.__path__,
prefix="Bcfg2.Server.Plugins.Cfg."):
expected.append(submodule[1].rsplit('.', 1)[-1])
- eset = self.get_obj()
- self.assertItemsEqual(expected,
- [h.__name__ for h in eset.handlers])
+ self.assertItemsEqual(expected, [h.__name__ for h in handlers()])
- def test_handle_event(self):
+ @patch("Bcfg2.Server.Plugins.Cfg.handlers")
+ def test_handle_event(self, mock_handlers):
eset = self.get_obj()
eset.entry_init = Mock()
- eset._handlers = [Mock(), Mock(), Mock()]
- for hdlr in eset.handlers:
+ mock_handlers.return_value = [Mock(), Mock(), Mock()]
+ for hdlr in mock_handlers.return_value:
hdlr.__name__ = "handler"
eset.entries = dict()
def reset():
eset.entry_init.reset_mock()
- for hdlr in eset.handlers:
+ for hdlr in mock_handlers.return_value:
hdlr.reset_mock()
# test that a bogus deleted event is discarded
@@ -322,7 +321,7 @@ class TestCfgEntrySet(TestEntrySet):
eset.handle_event(evt)
self.assertFalse(eset.entry_init.called)
self.assertItemsEqual(eset.entries, dict())
- for hdlr in eset.handlers:
+ for hdlr in mock_handlers.return_value:
self.assertFalse(hdlr.handles.called)
self.assertFalse(hdlr.ignore.called)
@@ -333,7 +332,7 @@ class TestCfgEntrySet(TestEntrySet):
evt.filename = os.path.join(datastore, "test.txt")
# test with no handler that handles
- for hdlr in eset.handlers:
+ for hdlr in mock_handlers.return_value:
hdlr.handles.return_value = False
hdlr.ignore.return_value = False
@@ -341,16 +340,16 @@ class TestCfgEntrySet(TestEntrySet):
eset.handle_event(evt)
self.assertFalse(eset.entry_init.called)
self.assertItemsEqual(eset.entries, dict())
- for hdlr in eset.handlers:
+ for hdlr in mock_handlers.return_value:
hdlr.handles.assert_called_with(evt, basename=eset.path)
hdlr.ignore.assert_called_with(evt, basename=eset.path)
# test with a handler that handles the entry
reset()
- eset.handlers[-1].handles.return_value = True
+ mock_handlers.return_value[-1].handles.return_value = True
eset.handle_event(evt)
- eset.entry_init.assert_called_with(evt, eset.handlers[-1])
- for hdlr in eset.handlers:
+ eset.entry_init.assert_called_with(evt, mock_handlers.return_value[-1])
+ for hdlr in mock_handlers.return_value:
hdlr.handles.assert_called_with(evt, basename=eset.path)
if not hdlr.return_value:
hdlr.ignore.assert_called_with(evt, basename=eset.path)
@@ -358,14 +357,14 @@ class TestCfgEntrySet(TestEntrySet):
# test with a handler that ignores the entry before one
# that handles it
reset()
- eset.handlers[0].ignore.return_value = True
+ mock_handlers.return_value[0].ignore.return_value = True
eset.handle_event(evt)
self.assertFalse(eset.entry_init.called)
- eset.handlers[0].handles.assert_called_with(evt,
+ mock_handlers.return_value[0].handles.assert_called_with(evt,
basename=eset.path)
- eset.handlers[0].ignore.assert_called_with(evt,
+ mock_handlers.return_value[0].ignore.assert_called_with(evt,
basename=eset.path)
- for hdlr in eset.handlers[1:]:
+ for hdlr in mock_handlers.return_value[1:]:
self.assertFalse(hdlr.handles.called)
self.assertFalse(hdlr.ignore.called)
@@ -377,7 +376,7 @@ class TestCfgEntrySet(TestEntrySet):
eset.entries[evt.filename] = Mock()
eset.handle_event(evt)
self.assertFalse(eset.entry_init.called)
- for hdlr in eset.handlers:
+ for hdlr in mock_handlers.return_value:
self.assertFalse(hdlr.handles.called)
self.assertFalse(hdlr.ignore.called)
eset.entries[evt.filename].handle_event.assert_called_with(evt)
@@ -387,7 +386,7 @@ class TestCfgEntrySet(TestEntrySet):
evt.code2str.return_value = "deleted"
eset.handle_event(evt)
self.assertFalse(eset.entry_init.called)
- for hdlr in eset.handlers:
+ for hdlr in mock_handlers.return_value:
self.assertFalse(hdlr.handles.called)
self.assertFalse(hdlr.ignore.called)
self.assertItemsEqual(eset.entries, dict())
@@ -462,7 +461,7 @@ class TestCfgEntrySet(TestEntrySet):
metadata = Mock()
# test basic entry, no validation, no filters, etc.
- eset._generate_data.return_value = "data"
+ eset._generate_data.return_value = ("data", None)
eset.get_handlers.return_value = []
bound = eset.bind_entry(entry, metadata)
eset.bind_info_to_entry.assert_called_with(entry, metadata)
@@ -475,7 +474,7 @@ class TestCfgEntrySet(TestEntrySet):
# test empty entry
entry = reset()
- eset._generate_data.return_value = ""
+ eset._generate_data.return_value = ("", None)
bound = eset.bind_entry(entry, metadata)
eset.bind_info_to_entry.assert_called_with(entry, metadata)
eset._generate_data.assert_called_with(entry, metadata)
@@ -486,7 +485,9 @@ class TestCfgEntrySet(TestEntrySet):
# test filters
entry = reset()
- eset._generate_data.return_value = "initial data"
+ generator = Mock()
+ generator.specific = Specificity(all=True)
+ eset._generate_data.return_value = ("initial data", generator)
filters = [Mock(), Mock()]
filters[0].modify_data.return_value = "modified data"
filters[1].modify_data.return_value = "final data"
@@ -508,7 +509,7 @@ class TestCfgEntrySet(TestEntrySet):
entry.set("encoding", "base64")
mock_b64encode.return_value = "base64 data"
eset.get_handlers.return_value = []
- eset._generate_data.return_value = "data"
+ eset._generate_data.return_value = ("data", None)
bound = eset.bind_entry(entry, metadata)
eset.bind_info_to_entry.assert_called_with(entry, metadata)
eset._generate_data.assert_called_with(entry, metadata)
@@ -559,7 +560,7 @@ class TestCfgEntrySet(TestEntrySet):
def reset():
for e in eset.entries.values():
- if e.specific is not None:
+ if hasattr(e.specific, "reset_mock"):
e.specific.reset_mock()
metadata = Mock()
@@ -576,7 +577,7 @@ class TestCfgEntrySet(TestEntrySet):
[eset.entries['test1.txt'],
eset.entries['test3.txt']])
for entry in eset.entries.values():
- if entry.specific is not None:
+ if hasattr(entry.specific.matches, "called"):
self.assertFalse(entry.specific.matches.called)
reset()
@@ -584,20 +585,22 @@ class TestCfgEntrySet(TestEntrySet):
[eset.entries['test6.txt']])
eset.entries['test6.txt'].specific.matches.assert_called_with(metadata)
for ename, entry in eset.entries.items():
- if ename != 'test6.txt' and entry.specific is not None:
+ if (ename != 'test6.txt' and
+ hasattr(entry.specific.matches, "called")):
self.assertFalse(entry.specific.matches.called)
reset()
self.assertItemsEqual(eset.get_handlers(metadata, CfgFilter), [])
eset.entries['test7.txt'].specific.matches.assert_called_with(metadata)
for ename, entry in eset.entries.items():
- if ename != 'test7.txt' and entry.specific is not None:
+ if (ename != 'test7.txt' and
+ hasattr(entry.specific.matches, "called")):
self.assertFalse(entry.specific.matches.called)
reset()
self.assertItemsEqual(eset.get_handlers(metadata, Mock), [])
for ename, entry in eset.entries.items():
- if entry.specific is not None:
+ if hasattr(entry.specific.matches, "called"):
self.assertFalse(entry.specific.matches.called)
def test_bind_info_to_entry(self):
@@ -692,7 +695,7 @@ class TestCfgEntrySet(TestEntrySet):
eset._create_data.reset_mock()
# test success
- self.assertEqual(eset._generate_data(entry, metadata),
+ self.assertEqual(eset._generate_data(entry, metadata)[0],
"data")
eset.get_handlers.assert_called_with(metadata, CfgGenerator)
eset.best_matching.assert_called_with(metadata,
@@ -709,7 +712,7 @@ class TestCfgEntrySet(TestEntrySet):
reset()
eset.best_matching.side_effect = PluginExecutionError
self.assertEqual(eset._generate_data(entry, metadata),
- eset._create_data.return_value)
+ (eset._create_data.return_value, None))
eset.get_handlers.assert_called_with(metadata, CfgGenerator)
eset.best_matching.assert_called_with(metadata,
eset.get_handlers.return_value)
diff --git a/testsuite/Testsrc/Testlib/TestServer/TestPlugins/TestMetadata.py b/testsuite/Testsrc/Testlib/TestServer/TestPlugins/TestMetadata.py
index 742946c42..a07fffba1 100644
--- a/testsuite/Testsrc/Testlib/TestServer/TestPlugins/TestMetadata.py
+++ b/testsuite/Testsrc/Testlib/TestServer/TestPlugins/TestMetadata.py
@@ -339,6 +339,7 @@ class TestXMLMetadataConfig(TestXMLFileBacked):
@patch('Bcfg2.Utils.locked', Mock(return_value=False))
@patch('fcntl.lockf', Mock())
+ @patch("Bcfg2.Server.Plugins.Metadata.XMLMetadataConfig.load_xml")
@patch('os.open')
@patch('os.fdopen')
@patch('os.unlink')
@@ -346,7 +347,7 @@ class TestXMLMetadataConfig(TestXMLFileBacked):
@patch('os.path.islink')
@patch('os.readlink')
def test_write_xml(self, mock_readlink, mock_islink, mock_rename,
- mock_unlink, mock_fdopen, mock_open):
+ mock_unlink, mock_fdopen, mock_open, mock_load_xml):
fname = "clients.xml"
config = self.get_obj(fname)
fpath = os.path.join(self.metadata.data, fname)
@@ -360,6 +361,7 @@ class TestXMLMetadataConfig(TestXMLFileBacked):
mock_unlink.reset_mock()
mock_fdopen.reset_mock()
mock_open.reset_mock()
+ mock_load_xml.reset_mock()
mock_islink.return_value = False
@@ -371,6 +373,7 @@ class TestXMLMetadataConfig(TestXMLFileBacked):
self.assertTrue(mock_fdopen.return_value.write.called)
mock_islink.assert_called_with(fpath)
mock_rename.assert_called_with(tmpfile, fpath)
+ mock_load_xml.assert_called_with()
# test: clients.xml.new is locked the first time we write it
def rv(fname, mode):
@@ -389,6 +392,7 @@ class TestXMLMetadataConfig(TestXMLFileBacked):
self.assertTrue(mock_fdopen.return_value.write.called)
mock_islink.assert_called_with(fpath)
mock_rename.assert_called_with(tmpfile, fpath)
+ mock_load_xml.assert_called_with()
# test writing a symlinked clients.xml
reset()
@@ -397,6 +401,7 @@ class TestXMLMetadataConfig(TestXMLFileBacked):
mock_readlink.return_value = linkdest
config.write_xml(fpath, get_clients_test_tree())
mock_rename.assert_called_with(tmpfile, linkdest)
+ mock_load_xml.assert_called_with()
# test failure of os.rename()
reset()
@@ -830,21 +835,18 @@ class TestMetadata(_TestMetadata, TestClientRunHooks, TestDatabaseBacked):
self.assertEqual(metadata.groups['group4'].category, 'category1')
self.assertEqual(metadata.default, "group1")
- all_groups = []
- negated_groups = []
+ all_groups = set()
+ negated_groups = set()
for group in get_groups_test_tree().xpath("//Groups/Client//*") + \
get_groups_test_tree().xpath("//Groups/Group//*"):
if group.tag == 'Group' and not group.getchildren():
if group.get("negate", "false").lower() == 'true':
- negated_groups.append(group.get("name"))
+ negated_groups.add(group.get("name"))
else:
- all_groups.append(group.get("name"))
- self.assertItemsEqual([g.name
- for g in metadata.group_membership.values()],
- all_groups)
- self.assertItemsEqual([g.name
- for g in metadata.negated_groups.values()],
- negated_groups)
+ all_groups.add(group.get("name"))
+ self.assertItemsEqual(metadata.ordered_groups, all_groups)
+ self.assertItemsEqual(metadata.group_membership.keys(), all_groups)
+ self.assertItemsEqual(metadata.negated_groups.keys(), negated_groups)
@patch("Bcfg2.Server.Plugins.Metadata.XMLMetadataConfig.load_xml", Mock())
def test_set_profile(self):
@@ -885,10 +887,13 @@ class TestMetadata(_TestMetadata, TestClientRunHooks, TestDatabaseBacked):
metadata = self.load_clients_data(metadata=self.load_groups_data())
if not metadata._use_db:
metadata.clients_xml.write = Mock()
+ metadata.core.build_metadata = Mock()
+ metadata.core.build_metadata.side_effect = \
+ lambda c: metadata.get_initial_metadata(c)
+
metadata.set_profile("client1", "group2", None)
mock_update_client.assert_called_with("client1",
dict(profile="group2"))
- metadata.clients_xml.write.assert_any_call()
self.assertEqual(metadata.clientgroups["client1"], ["group2"])
metadata.clients_xml.write.reset_mock()
@@ -910,8 +915,8 @@ class TestMetadata(_TestMetadata, TestClientRunHooks, TestDatabaseBacked):
self.assertEqual(metadata.clientgroups["uuid_new"], ["group1"])
@patch("Bcfg2.Server.Plugins.Metadata.XMLMetadataConfig.load_xml", Mock())
- @patch("socket.gethostbyaddr")
- def test_resolve_client(self, mock_gethostbyaddr):
+ @patch("socket.getnameinfo")
+ def test_resolve_client(self, mock_getnameinfo):
metadata = self.load_clients_data(metadata=self.load_groups_data())
metadata.session_cache[('1.2.3.3', None)] = (time.time(), 'client3')
self.assertEqual(metadata.resolve_client(('1.2.3.3', None)), 'client3')
@@ -928,22 +933,22 @@ class TestMetadata(_TestMetadata, TestClientRunHooks, TestDatabaseBacked):
cleanup_cache=True), 'client3')
self.assertEqual(metadata.session_cache, dict())
- mock_gethostbyaddr.return_value = ('client6', [], ['1.2.3.6'])
- self.assertEqual(metadata.resolve_client(('1.2.3.6', None)), 'client6')
- mock_gethostbyaddr.assert_called_with('1.2.3.6')
+ mock_getnameinfo.return_value = ('client6', [], ['1.2.3.6'])
+ self.assertEqual(metadata.resolve_client(('1.2.3.6', 6789)), 'client6')
+ mock_getnameinfo.assert_called_with(('1.2.3.6', 6789), socket.NI_NAMEREQD)
- mock_gethostbyaddr.reset_mock()
- mock_gethostbyaddr.return_value = ('alias3', [], ['1.2.3.7'])
- self.assertEqual(metadata.resolve_client(('1.2.3.7', None)), 'client4')
- mock_gethostbyaddr.assert_called_with('1.2.3.7')
+ mock_getnameinfo.reset_mock()
+ mock_getnameinfo.return_value = ('alias3', [], ['1.2.3.7'])
+ self.assertEqual(metadata.resolve_client(('1.2.3.7', 6789)), 'client4')
+ mock_getnameinfo.assert_called_with(('1.2.3.7', 6789), socket.NI_NAMEREQD)
- mock_gethostbyaddr.reset_mock()
- mock_gethostbyaddr.return_value = None
- mock_gethostbyaddr.side_effect = socket.herror
+ mock_getnameinfo.reset_mock()
+ mock_getnameinfo.return_value = None
+ mock_getnameinfo.side_effect = socket.herror
self.assertRaises(Bcfg2.Server.Plugin.MetadataConsistencyError,
metadata.resolve_client,
- ('1.2.3.8', None))
- mock_gethostbyaddr.assert_called_with('1.2.3.8')
+ ('1.2.3.8', 6789))
+ mock_getnameinfo.assert_called_with(('1.2.3.8', 6789), socket.NI_NAMEREQD)
@patch("Bcfg2.Server.Plugins.Metadata.XMLMetadataConfig.load_xml", Mock())
@patch("Bcfg2.Server.Plugins.Metadata.XMLMetadataConfig.write_xml", Mock())
@@ -1485,30 +1490,30 @@ class TestMetadata_NoClientsXML(TestMetadataBase):
"1.2.3.8"))
@patch("Bcfg2.Server.Plugins.Metadata.XMLMetadataConfig.load_xml", Mock())
- @patch("socket.gethostbyaddr")
- def test_resolve_client(self, mock_gethostbyaddr):
+ @patch("socket.getnameinfo")
+ def test_resolve_client(self, mock_getnameinfo):
metadata = self.load_clients_data(metadata=self.load_groups_data())
metadata.session_cache[('1.2.3.3', None)] = (time.time(), 'client3')
self.assertEqual(metadata.resolve_client(('1.2.3.3', None)), 'client3')
metadata.session_cache[('1.2.3.3', None)] = (time.time() - 100,
'client3')
- mock_gethostbyaddr.return_value = ("client3", [], ['1.2.3.3'])
+ mock_getnameinfo.return_value = ("client3", [], ['1.2.3.3'])
self.assertEqual(metadata.resolve_client(('1.2.3.3', None),
cleanup_cache=True), 'client3')
self.assertEqual(metadata.session_cache, dict())
- mock_gethostbyaddr.return_value = ('client6', [], ['1.2.3.6'])
- self.assertEqual(metadata.resolve_client(('1.2.3.6', None)), 'client6')
- mock_gethostbyaddr.assert_called_with('1.2.3.6')
+ mock_getnameinfo.return_value = ('client6', [], ['1.2.3.6'])
+ self.assertEqual(metadata.resolve_client(('1.2.3.6', 6789), socket.NI_NAMEREQD), 'client6')
+ mock_getnameinfo.assert_called_with(('1.2.3.6', 6789), socket.NI_NAMEREQD)
- mock_gethostbyaddr.reset_mock()
- mock_gethostbyaddr.return_value = None
- mock_gethostbyaddr.side_effect = socket.herror
+ mock_getnameinfo.reset_mock()
+ mock_getnameinfo.return_value = None
+ mock_getnameinfo.side_effect = socket.herror
self.assertRaises(Bcfg2.Server.Plugin.MetadataConsistencyError,
metadata.resolve_client,
- ('1.2.3.8', None))
- mock_gethostbyaddr.assert_called_with('1.2.3.8')
+ ('1.2.3.8', 6789), socket.NI_NAMEREQD)
+ mock_getnameinfo.assert_called_with(('1.2.3.8', 6789), socket.NI_NAMEREQD)
def test_handle_clients_xml_event(self):
pass
diff --git a/testsuite/Testsrc/Testlib/TestServer/TestPlugins/TestProbes.py b/testsuite/Testsrc/Testlib/TestServer/TestPlugins/TestProbes.py
index 0794db62e..2face023f 100644
--- a/testsuite/Testsrc/Testlib/TestServer/TestPlugins/TestProbes.py
+++ b/testsuite/Testsrc/Testlib/TestServer/TestPlugins/TestProbes.py
@@ -1,4 +1,5 @@
import os
+import re
import sys
import copy
import time
@@ -295,7 +296,9 @@ text
def inner():
return self.get_obj(core)
- return inner()
+ rv = inner()
+ rv.allowed_cgroups = [re.compile("^.*$")]
+ return rv
def test__init(self):
mock_load_data = Mock()
@@ -466,7 +469,7 @@ text
def test_load_data_db(self):
probes = self.get_probes_object(use_db=True)
probes.load_data()
- probes._load_data_db.assert_any_call()
+ probes._load_data_db.assert_any_call(client=None)
self.assertFalse(probes._load_data_xml.called)
@patch("lxml.etree.parse")
@@ -591,6 +594,37 @@ text
self.assertEqual(probes.cgroups[cname],
self.get_test_cgroups()[cname])
+ # test again, with an explicit list of allowed groups
+ probes.allowed_cgroups = [re.compile(r'^.*s$')]
+ for cname, cdata in self.get_test_probedata().items():
+ client = Mock()
+ client.hostname = cname
+ cgroups = []
+ cprobedata = ClientProbeDataSet()
+ for pname, pdata in cdata.items():
+ dataitem = lxml.etree.Element("Probe", name=pname)
+ if pname == "text":
+ # add some groups to the plaintext test to test
+ # group parsing
+ data = [pdata]
+ for group in self.get_test_cgroups()[cname]:
+ data.append("group:%s" % group)
+ dataitem.text = "\n".join(data)
+ else:
+ dataitem.text = str(pdata)
+
+ probes.ReceiveDataItem(client, dataitem, cgroups, cprobedata)
+
+ probes.cgroups[client.hostname] = cgroups
+ probes.probedata[client.hostname] = cprobedata
+ self.assertIn(client.hostname, probes.probedata)
+ self.assertIn(pname, probes.probedata[cname])
+ self.assertEqual(pdata, probes.probedata[cname][pname])
+ self.assertIn(client.hostname, probes.cgroups)
+ self.assertEqual(probes.cgroups[cname],
+ [g for g in self.get_test_cgroups()[cname]
+ if g.endswith("s")])
+
def test_get_additional_groups(self):
TestConnector.test_get_additional_groups(self)
diff --git a/testsuite/Testsrc/test_code_checks.py b/testsuite/Testsrc/test_code_checks.py
index a38710fd4..e1214a942 100644
--- a/testsuite/Testsrc/test_code_checks.py
+++ b/testsuite/Testsrc/test_code_checks.py
@@ -79,7 +79,9 @@ no_checks = {
"TCheetah.py",
"TGenshi.py"],
}
-
+if sys.version_info < (2, 6):
+ # multiprocessing core requires py2.6
+ no_checks['lib/Bcfg2/Server'].append('MultiprocessingCore.py')
try:
any
@@ -186,7 +188,7 @@ class CodeTestCase(Bcfg2TestCase):
cmd = self.command + self.full_args + extra_args + \
[os.path.join(srcpath, f) for f in files]
proc = Popen(cmd, stdout=PIPE, stderr=STDOUT, env=self.get_env())
- print(proc.communicate()[0])
+ print(proc.communicate()[0].decode())
self.assertEqual(proc.wait(), 0)
def _test_errors(self, files, extra_args=None):
@@ -198,7 +200,7 @@ class CodeTestCase(Bcfg2TestCase):
cmd = self.command + self.error_args + extra_args + \
[os.path.join(srcpath, f) for f in files]
proc = Popen(cmd, stdout=PIPE, stderr=STDOUT, env=self.get_env())
- print(proc.communicate()[0])
+ print(proc.communicate()[0].decode())
self.assertEqual(proc.wait(), 0)
@skipIf(not os.path.exists(srcpath), "%s does not exist" % srcpath)
@@ -321,7 +323,7 @@ class TestPylint(CodeTestCase):
args = self.command + self.error_args + extra_args + \
[os.path.join(srcpath, p) for p in files]
pylint = Popen(args, stdout=PIPE, stderr=STDOUT, env=self.get_env())
- output = pylint.communicate()[0]
+ output = pylint.communicate()[0].decode()
rv = pylint.wait()
for line in output.splitlines():
diff --git a/testsuite/before_install.sh b/testsuite/before_install.sh
index 5f1a59aaf..2c80036cd 100755
--- a/testsuite/before_install.sh
+++ b/testsuite/before_install.sh
@@ -1,4 +1,4 @@
-#!/bin/bash
+#!/bin/bash -ex
# before_install script for Travis-CI
@@ -8,6 +8,7 @@ sudo apt-get update -qq
sudo apt-get install -qq swig libxml2-utils
if [[ "$WITH_OPTIONAL_DEPS" == "yes" ]]; then
if [[ ${PYVER:0:1} == "2" ]]; then
- sudo apt-get install -qq python-selinux python-pylibacl yum
+ sudo apt-get install -y yum libaugeas0 augeas-lenses libacl1-dev \
+ libssl-dev
fi
fi
diff --git a/testsuite/common.py b/testsuite/common.py
index e26d0be61..7471795a6 100644
--- a/testsuite/common.py
+++ b/testsuite/common.py
@@ -13,6 +13,7 @@ import re
import sys
import codecs
import unittest
+import lxml.etree
from mock import patch, MagicMock, _patch, DEFAULT
from Bcfg2.Compat import wraps
@@ -222,8 +223,11 @@ class Bcfg2TestCase(unittest.TestCase):
lines = ['First has %d, Second has %d: %r' % diff
for diff in differences]
diffMsg = '\n'.join(lines)
- standardMsg = self._truncateMessage(standardMsg, diffMsg)
- msg = self._formatMessage(msg, standardMsg)
+ standardMsg += diffMsg
+ if msg is None:
+ msg = standardMsg
+ else:
+ msg = "%s : %s" % (standardMsg, msg)
self.fail(msg)
if not hasattr(unittest.TestCase, "assertRegexpMatches"):
@@ -262,24 +266,43 @@ class Bcfg2TestCase(unittest.TestCase):
"%s is not less than or equal to %s")
def assertXMLEqual(self, el1, el2, msg=None):
- """ Test that the two XML trees given are equal. Both
- elements and all children are expected to have ``name``
- attributes. """
- self.assertEqual(el1.tag, el2.tag, msg=msg)
- self.assertEqual(el1.text, el2.text, msg=msg)
- self.assertItemsEqual(el1.attrib.items(), el2.attrib.items(), msg=msg)
+ """ Test that the two XML trees given are equal. """
+ if msg is None:
+ msg = "XML trees are not equal: %s"
+ else:
+ msg += ": %s"
+ fullmsg = msg + "\nFirst: %s" % lxml.etree.tostring(el1) + \
+ "\nSecond: %s" % lxml.etree.tostring(el2)
+
+ self.assertEqual(el1.tag, el2.tag, msg=fullmsg % "Tags differ")
+ if el1.text is not None and el2.text is not None:
+ self.assertEqual(el1.text.strip(), el2.text.strip(),
+ msg=fullmsg % "Text content differs")
+ else:
+ self.assertEqual(el1.text, el2.text,
+ msg=fullmsg % "Text content differs")
+ self.assertItemsEqual(el1.attrib.items(), el2.attrib.items(),
+ msg=fullmsg % "Attributes differ")
self.assertEqual(len(el1.getchildren()),
- len(el2.getchildren()))
+ len(el2.getchildren()),
+ msg=fullmsg % "Different numbers of children")
+ matched = []
for child1 in el1.getchildren():
- cname = child1.get("name")
- self.assertIsNotNone(cname,
- msg="Element %s has no 'name' attribute" %
- child1.tag)
- children2 = el2.xpath("%s[@name='%s']" % (child1.tag, cname))
- self.assertEqual(len(children2), 1,
- msg="More than one %s element named %s" % \
- (child1.tag, cname))
- self.assertXMLEqual(child1, children2[0], msg=msg)
+ for child2 in el2.xpath(child1.tag):
+ if child2 in matched:
+ continue
+ try:
+ self.assertXMLEqual(child1, child2)
+ matched.append(child2)
+ break
+ except AssertionError:
+ continue
+ else:
+ assert False, \
+ fullmsg % ("Element %s is missing from second" %
+ lxml.etree.tostring(child1))
+ self.assertItemsEqual(el2.getchildren(), matched,
+ msg=fullmsg % "Second has extra element(s)")
class DBModelTestCase(Bcfg2TestCase):
@@ -394,4 +417,3 @@ try:
re_type = re._pattern_type
except AttributeError:
re_type = type(re.compile(""))
-
diff --git a/testsuite/ext/exception_messages.py b/testsuite/ext/exception_messages.py
index 877ba42a1..cd3d7112c 100644
--- a/testsuite/ext/exception_messages.py
+++ b/testsuite/ext/exception_messages.py
@@ -1,16 +1,30 @@
-from logilab import astng
-from pylint.interfaces import IASTNGChecker
+try:
+ from logilab import astng as ast
+ from pylint.interfaces import IASTNGChecker as IChecker
+ PYLINT = 0 # pylint 0.something
+except ImportError:
+ import astroid as ast
+ from pylint.interfaces import IAstroidChecker as IChecker
+ PYLINT = 1 # pylint 1.something
from pylint.checkers import BaseChecker
from pylint.checkers.utils import safe_infer
+if PYLINT == 0:
+ # this is not quite correct; later versions of pylint 0.* wanted a
+ # three-tuple for messages as well
+ msg = ('Exception raised without arguments',
+ 'Used when an exception is raised without any arguments')
+else:
+ msg = ('Exception raised without arguments',
+ 'exception-without-args',
+ 'Used when an exception is raised without any arguments')
+msgs = {'R9901': msg}
+
class ExceptionMessageChecker(BaseChecker):
- __implements__ = IASTNGChecker
+ __implements__ = IChecker
name = 'Exception Messages'
- msgs = \
- {'R9901': ('Exception raised without arguments',
- 'Used when an exception is raised without any arguments')}
options = (
('exceptions-without-args',
dict(default=('NotImplementedError',),
@@ -23,9 +37,9 @@ class ExceptionMessageChecker(BaseChecker):
def visit_raise(self, node):
if node.exc is None:
return
- if isinstance(node.exc, astng.Name):
+ if isinstance(node.exc, ast.Name):
raised = safe_infer(node.exc)
- if (isinstance(raised, astng.Class) and
+ if (isinstance(raised, ast.Class) and
raised.name not in self.config.exceptions_without_args):
self.add_message('R9901', node=node.exc)
diff --git a/testsuite/install.sh b/testsuite/install.sh
index 1ca89f40f..9de5b8c6d 100755
--- a/testsuite/install.sh
+++ b/testsuite/install.sh
@@ -1,4 +1,4 @@
-#!/bin/bash
+#!/bin/bash -ex
# install script for Travis-CI
@@ -7,20 +7,13 @@ pip install -r testsuite/requirements.txt --use-mirrors
PYVER=$(python -c 'import sys;print(".".join(str(v) for v in sys.version_info[0:2]))')
if [[ "$WITH_OPTIONAL_DEPS" == "yes" ]]; then
- pip install --use-mirrors genshi PyYAML pyinotify
- if [[ $PYVER == "2.5" ]]; then
- # markdown 2.2+ doesn't work on py2.5
- pip install --use-mirrors simplejson 'markdown<2.2'
- fi
+ pip install --use-mirrors genshi PyYAML pyinotify boto 'django<1.5' \
+ pylibacl
+ easy_install https://fedorahosted.org/released/python-augeas/python-augeas-0.4.1.tar.gz
if [[ ${PYVER:0:1} == "2" ]]; then
# django supports py3k, but South doesn't, and the django bits
# in bcfg2 require South
- pip install cheetah 'django<1.5' 'South<0.8' M2Crypto
- fi
-else
- # python < 2.6 requires M2Crypto for SSL communication, not just
- # for encryption support
- if [[ $PYVER == "2.5" || $PYVER == "2.4" ]]; then
- pip install --use-mirrors M2crypto
+ pip install cheetah 'South<0.8'
+ pip install m2crypto
fi
fi
diff --git a/testsuite/pylintrc.conf b/testsuite/pylintrc.conf
index 14ccd1d23..653c68426 100644
--- a/testsuite/pylintrc.conf
+++ b/testsuite/pylintrc.conf
@@ -156,7 +156,7 @@ zope=no
# List of members which are set dynamically and missed by pylint inference
# system, and so shouldn't trigger E0201 when accessed. Python regular
# expressions are accepted.
-generated-members=objects,DoesNotExist,isoformat,filter,save,count,get,add,id
+generated-members=objects,DoesNotExist,isoformat,filter,save,count,get,add,id,MultipleObjectsReturned
[MISCELLANEOUS]
diff --git a/testsuite/requirements.txt b/testsuite/requirements.txt
index 2d6dbc557..898249389 100644
--- a/testsuite/requirements.txt
+++ b/testsuite/requirements.txt
@@ -2,6 +2,6 @@ lxml
nose
mock
sphinx
-pylint
+pylint<1.0
pep8
python-daemon
diff --git a/tools/README b/tools/README
index 9e7f667e3..5505573c8 100644
--- a/tools/README
+++ b/tools/README
@@ -55,12 +55,12 @@ encap-util-xml.sh
export.py
- Export a tagged version of the Bcfg2 source
-export.sh
- - Export a tagged version of the Bcfg2 source
-
generate-manpages.bash
- Generate man pages from the Sphinx source
+git_commit.py
+ - Trigger script to commit local changes back to a git repository
+
hostbasepush.py
- Call the Hostbase.rebuildState XML-RPC method
@@ -100,4 +100,3 @@ upgrade
yum-listpkgs-xml.py
- Produces a list of all packages installed and available in a
format suitable for use by Packages or Pkgmgr
-
diff --git a/tools/bcfg2-cron b/tools/bcfg2-cron
index fe0e6e90d..9a93c2e44 100755
--- a/tools/bcfg2-cron
+++ b/tools/bcfg2-cron
@@ -1,6 +1,6 @@
#!/bin/sh
#
-# Script to run bcfg2 with cron.
+# Script to run bcfg2 with cron.
#
# This script is designed so that bcfg2-cron can be invoked from both
# /etc/cron.daily and /etc/cron.hourly. This allows the administrators to
diff --git a/tools/bcfg2-profile-templates.py b/tools/bcfg2-profile-templates.py
index 93314f1e3..2b0ca6d63 100755
--- a/tools/bcfg2-profile-templates.py
+++ b/tools/bcfg2-profile-templates.py
@@ -5,6 +5,7 @@
import sys
import time
import math
+import signal
import logging
import operator
import Bcfg2.Logger
@@ -17,6 +18,19 @@ def stdev(nums):
return math.sqrt(sum((n - mean)**2 for n in nums) / float(len(nums)))
+def get_sigint_handler(core):
+ """ Get a function that handles SIGINT/Ctrl-C by shutting down the
+ core and exiting properly."""
+
+ def hdlr(sig, frame): # pylint: disable=W0613
+ """ Handle SIGINT/Ctrl-C by shutting down the core and exiting
+ properly. """
+ core.shutdown()
+ os._exit(1) # pylint: disable=W0212
+
+ return hdlr
+
+
def main():
optinfo = dict(
client=Bcfg2.Options.Option("Benchmark templates for one client",
@@ -49,10 +63,11 @@ def main():
logger = logging.getLogger(sys.argv[0])
core = Bcfg2.Server.Core.BaseCore(setup)
+ signal.signal(signal.SIGINT, get_sigint_handler(core))
logger.info("Bcfg2 server core loaded")
core.load_plugins()
logger.debug("Plugins loaded")
- core.fam.handle_events_in_interval(0.1)
+ core.block_for_fam_events(handle_events=True)
logger.debug("Repository events processed")
if setup['args']:
diff --git a/tools/bcfg2_local.py b/tools/bcfg2_local.py
index 8c164e52e..78a46ba5c 100755
--- a/tools/bcfg2_local.py
+++ b/tools/bcfg2_local.py
@@ -20,7 +20,7 @@ class LocalCore(BaseCore):
Bcfg2.Server.Core.BaseCore.__init__(self, setup=setup)
setup['syslog'], setup['logging'] = saved
self.load_plugins()
- self.fam.handle_events_in_interval(0.1)
+ self.block_for_fam_events(handle_events=True)
def _daemonize(self):
return True
@@ -47,7 +47,10 @@ class LocalProxy(object):
func = getattr(self.core, attr)
if func.exposed:
def inner(*args, **kwargs):
- args = ((self.ipaddr, self.hostname), ) + args
+ # the port portion of the addresspair tuple isn't
+ # actually used, so it's safe to hardcode 6789
+ # here.
+ args = ((self.ipaddr, 6789), ) + args
return func(*args, **kwargs)
return inner
raise AttributeError(attr)
diff --git a/tools/export.py b/tools/export.py
index 5cd0d5c41..dc2878e3e 100755
--- a/tools/export.py
+++ b/tools/export.py
@@ -136,8 +136,7 @@ E.G. 1.2.0pre1 is a valid version.
tarname = '/tmp/%s-%s.tar.gz' % (pkgname, version)
- newchangelog = \
-"""bcfg2 (%s-0.0) unstable; urgency=low
+ newchangelog = """bcfg2 (%s-0.0) unstable; urgency=low
* New upstream release
@@ -164,56 +163,6 @@ E.G. 1.2.0pre1 is a valid version.
print(help_message)
quit()
- if version_info['build'] == '':
- rpmchangelog = ["* %s %s <%s> %s-1\n" %
- (datetime.datetime.now().strftime("%a %b %d %Y"),
- name, email, version_release),
- "- New upstream release\n", "\n"]
- else:
- rpmchangelog = ["* %s %s <%s> %s-0.%s.%s\n" %
- (datetime.datetime.now().strftime("%a %b %d %Y"),
- name, email, version_release,
- version_info['build'][-1], version_info['build']),
- "- New upstream release\n", "\n"]
-
- # write out the new RPM changelog
- specs = ["misc/bcfg2.spec", "misc/bcfg2-selinux.spec", "redhat/bcfg2.spec.in"]
- if options.dryrun:
- print("*** Add the following to the top of the %%changelog section in %s:\n%s\n"
- % (rpmchangelog, " and ".join(specs)))
- else:
- for fname in specs:
- try:
- lines = open(fname).readlines()
- for lineno in range(len(lines)):
- if lines[lineno].startswith("%changelog"):
- break
- else:
- print("No %changelog section found in %s" % fname)
- continue
- for line in reversed(rpmchangelog):
- lines.insert(lineno + 1, line)
- open(fname, 'w').write("".join(lines))
- except:
- err = sys.exc_info()[1]
- print("Could not write %s: %s" % (fname, err))
- print(help_message)
- quit()
-
- # Update redhat directory versions
- if options.dryrun:
- print("*** Replace redhat/VERSIONS content with '%s'."
- % version_release)
- print("*** Replace redhat/RELEASE content with '%s'."
- % version_info['build'])
- else:
- with open('redhat/VERSION', 'w') as f:
- f.write("%s\n" % version_release)
- f.close()
- with open('redhat/RELEASE', 'w') as f:
- f.write("0.0%s\n" % version_info['build'])
- f.close()
-
# update solaris version
find_and_replace('solaris/Makefile', 'VERS=',
'VERS=%s-1\n' % version,
@@ -228,14 +177,28 @@ E.G. 1.2.0pre1 is a valid version.
startswith=True,
dryrun=options.dryrun)
# update solaris IPS version
- find_and_replace('solaris-ips/MANIFEST.bcfg2.header', 'set name=pkg.fmri value="pkg://bcfg2/bcfg2@',
- 'set name=pkg.fmri value="pkg://bcfg2/bcfg2@%s"' % version,
- startswith=True,
- dryrun=options.dryrun)
- find_and_replace('solaris-ips/MANIFEST.bcfg2-server.header', 'set name=pkg.fmri value="pkg://bcfg2/bcfg2-server@',
- 'set name=pkg.fmri value="pkg://bcfg2/bcfg2-server@%s"' % version,
- startswith=True,
- dryrun=options.dryrun)
+ find_and_replace('solaris-ips/Makefile', 'VERS=',
+ 'VERS=%s-1\n' % version,
+ startswith=True,
+ dryrun=options.dryrun)
+ find_and_replace('solaris-ips/MANIFEST.bcfg2.header',
+ 'set name=pkg.fmri value="pkg://bcfg2/bcfg2@',
+ 'set name=pkg.fmri value="pkg://bcfg2/bcfg2@%s"\n' % version,
+ startswith=True,
+ dryrun=options.dryrun)
+ find_and_replace('solaris-ips/MANIFEST.bcfg2-server.header',
+ 'set name=pkg.fmri value="pkg://bcfg2/bcfg2-server@',
+ 'set name=pkg.fmri value="pkg://bcfg2/bcfg2-server@%s"\n' % version,
+ startswith=True,
+ dryrun=options.dryrun)
+ find_and_replace('solaris-ips/pkginfo.bcfg2', 'VERSION=',
+ 'VERSION="%s"\n' % version,
+ startswith=True,
+ dryrun=options.dryrun)
+ find_and_replace('solaris-ips/pkginfo.bcfg2-server', 'VERSION=',
+ 'VERSION="%s"\n' % version,
+ startswith=True,
+ dryrun=options.dryrun)
# set new version in Bcfg2/version.py
find_and_replace('src/lib/Bcfg2/version.py',
'__version__ =',
@@ -248,40 +211,59 @@ E.G. 1.2.0pre1 is a valid version.
find_and_replace('misc/bcfg2-selinux.spec', 'Version:',
'Version: %s\n' % version_release,
dryrun=options.dryrun)
- if version_info['build'] == '':
- find_and_replace('misc/bcfg2.spec', 'Release: ',
- 'Release: 1\n',
+ if version_info['build'].startswith('rc'):
+ find_and_replace('misc/bcfg2.spec', 'global _rc ',
+ '%%global _rc %s\n' % version_info['build'],
dryrun=options.dryrun)
- find_and_replace('misc/bcfg2-selinux.spec', 'Release: ',
- 'Release: 1\n',
+ find_and_replace('misc/bcfg2-selinux.spec', 'global _rc ',
+ '%%global _rc %s\n' % version_info['build'],
+ dryrun=options.dryrun)
+ elif version_info['build'].startswith('pre'):
+ find_and_replace('misc/bcfg2.spec', 'global _pre ',
+ '%%global _pre %s\n' % version_info['build'],
+ dryrun=options.dryrun)
+ find_and_replace('misc/bcfg2-selinux.spec', 'global _pre ',
+ '%%global _pre %s\n' % version_info['build'],
dryrun=options.dryrun)
else:
+ # comment out pre/rc
+ find_and_replace('misc/bcfg2.spec', 'global _pre ',
+ '#%%global _pre 2\n',
+ dryrun=options.dryrun)
+ find_and_replace('misc/bcfg2-selinux.spec', 'global _pre ',
+ '#%%global _pre 2\n',
+ dryrun=options.dryrun)
+ find_and_replace('misc/bcfg2.spec', 'global _rc ',
+ '#%%global _rc 1\n',
+ dryrun=options.dryrun)
+ find_and_replace('misc/bcfg2-selinux.spec', 'global _rc ',
+ '#%%global _rc 1\n',
+ dryrun=options.dryrun)
+
find_and_replace('misc/bcfg2.spec', 'Release: ',
- 'Release: 0.%s.%s\n' %
- (version_info['build'][-1], version_info['build']),
+ 'Release: 1%{?_pre_rc}%{?dist}\n',
+ startswith=True,
dryrun=options.dryrun)
find_and_replace('misc/bcfg2-selinux.spec', 'Release: ',
- 'Release: 0.%s.%s\n' %
- (version_info['build'][-1], version_info['build']),
+ 'Release: 1%{?_pre_rc}%{?dist}\n',
+ startswith=True,
dryrun=options.dryrun)
find_and_replace('misc/bcfg2.spec', '%setup',
- '%%setup -q -n %%{name}-%%{version}%s\n' %
- version_info['build'],
+ '%setup -q -n %{name}-%{version}%{?_pre_rc}\n',
startswith=True,
dryrun=options.dryrun)
find_and_replace('misc/bcfg2-selinux.spec', '%setup',
- '%%setup -q -n %%{name}-%%{version}%s\n' %
- version_info['build'],
+ '%setup -q -n %{name}-%{version}%{?_pre_rc}\n',
startswith=True,
dryrun=options.dryrun)
find_and_replace('misc/bcfg2.spec', 'BuildRoot',
'BuildRoot: %%{_tmppath}/%%{name}-%%{version}%s-%%{release}-root-%%(%%{__id_u} -n)\n' %
- version_info['build'],
+ version_info['build'],
startswith=True,
dryrun=options.dryrun)
find_and_replace('misc/bcfg2-selinux.spec', 'BuildRoot',
'BuildRoot: %%{_tmppath}/%%{name}-%%{version}%s-%%{release}-root-%%(%%{__id_u} -n)\n' %
- version_info['build'],
+ version_info['build'],
startswith=True,
dryrun=options.dryrun)
# fix pre problem noted in
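
The hunk above relies throughout on a find_and_replace() helper defined earlier in the release script (outside this diff). A rough, hypothetical sketch of what such a helper could look like, inferred only from the call signature visible above (filename, match string, replacement line, plus optional startswith and dryrun flags); the real implementation may differ:

    def find_and_replace(fname, match, replacement, startswith=False,
                         dryrun=False):
        """Replace the first line of fname that matches, or report a dry run."""
        with open(fname) as handle:
            lines = handle.readlines()
        for idx, line in enumerate(lines):
            hit = line.startswith(match) if startswith else match in line
            if hit:
                if dryrun:
                    print("*** Would replace line %d of %s with %r"
                          % (idx + 1, fname, replacement))
                else:
                    lines[idx] = replacement
                    with open(fname, 'w') as handle:
                        handle.writelines(lines)
                return True
        print("No line matching %r found in %s" % (match, fname))
        return False

With a build component such as 'rc1', the calls above leave the spec files with '%global _rc rc1' and 'Release: 1%{?_pre_rc}%{?dist}'; the exact Release string RPM composes from that depends on how %_pre_rc is defined in the spec, which this diff does not show.
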
diff --git a/tools/export.sh b/tools/export.sh
deleted file mode 100755
index 632067f95..000000000
--- a/tools/export.sh
+++ /dev/null
@@ -1,50 +0,0 @@
-#!/usr/bin/env sh
-
-pkgname="bcfg2"
-repo="https://svn.mcs.anl.gov/repos/bcfg"
-version="${1}"
-expath="/tmp/${pkgname}-${version}/"
-tarname="/tmp/${pkgname}-${version}.tar.gz"
-url=`svn info | grep URL | awk '{print $2}'`
-
-if [ -z "${version}" ] ; then
- echo "Usage: $0 <version>"
- exit 1
-fi
-
-# update the version
-tmpbase=`basename $0`
-deblogtmp=`mktemp /tmp/${tmpbase}.XXXXXX`
-majorver=`/usr/bin/python -c "print '${version}'[:5]"`
-minorver=`/usr/bin/python -c "print '${version}'[5:]"`
-printf "name: "
-read name
-printf "email: "
-read email
-cat > deblogtmp << EOF
-bcfg2 (${majorver}-0.0${minorver}) unstable; urgency=low
-
- * New upstream release
-
- -- ${name} <${email}> `/bin/date -R`
-
-EOF
-sed -i "s/^\(Version:\) [:digits:]*.*$/\1 ${version}/" misc/bcfg2.spec
-cat debian/changelog >> deblogtmp
-mv deblogtmp debian/changelog
-echo ${majorver} > redhat/VERSION
-echo 0.0${minorver} > redhat/RELEASE
-sed -i "s/\(version=\).*/\1\"${version}\",/" setup.py
-sed -i "s/^\(VERS\).*/\1=${version}-1/" solaris/Makefile
-svn ci -m "Version bump to ${version}"
-
-# tag the release
-tagstr=`echo ${version} | sed -e 's/\./_/g'`
-svn copy "$url" "${repo}/tags/${pkgname}_${tagstr}" -m "tagged ${version} release"
-svn export . "${expath}"
-svn log -v "${repo}/tags/${pkgname}_${tagstr}" > "${expath}/ChangeLog"
-cd /tmp
-
-tar czf "${tarname}" "${pkgname}-${version}"
-gpg --armor --output "${tarname}".gpg --detach-sig "${tarname}"
-scp "${tarname}"* terra.mcs.anl.gov:/mcs/ftp/pub/bcfg
diff --git a/tools/git_commit.py b/tools/git_commit.py
new file mode 100755
index 000000000..cc4061f25
--- /dev/null
+++ b/tools/git_commit.py
@@ -0,0 +1,181 @@
+#!/usr/bin/env python
+""" Trigger script to commit selected changes to a local repository
+back to git. To use this script, enable the Trigger plugin, put this
+script in /var/lib/bcfg2/Trigger/, and create /etc/bcfg2-commit.conf.
+
+The config file, /etc/bcfg2-commit.conf, may contain four options in
+the [global] section:
+
+* "config" is the path to the Bcfg2 server config file. (Default:
+ /etc/bcfg2.conf)
+* "commit" is a comma-separated list of globs giving the paths that
+ should be committed back to the repository. Default is 'SSLCA/*,
+ SSHbase/*, Cfg/*', which will commit data back for SSLCA, SSHbase,
+ Cfg, FileProbes, etc., but not, for instance, Probes/probed.xml.
+ You may wish to add Metadata/clients.xml to the commit list.
+* "debug" and "verbose" let you set the log level for git_commit.py
+ itself.
+"""
+
+
+import os
+import sys
+import git
+import logging
+import Bcfg2.Logger
+import Bcfg2.Options
+from Bcfg2.Compat import ConfigParser
+from fnmatch import fnmatch
+
+# config file path
+CONFIG = "/etc/bcfg2-commit.conf"
+
+# config defaults. all config options are in the [global] section
+DEFAULTS = dict(config='/etc/bcfg2.conf',
+ commit="SSLCA/*, SSHbase/*, Cfg/*")
+
+
+def list_changed_files(repo):
+ return [d for d in repo.index.diff(None)
+ if (d.a_blob is not None and not d.deleted_file and
+ not d.renamed and not d.new_file)]
+
+
+def add_to_commit(patterns, path, repo, relpath):
+ progname = os.path.basename(sys.argv[0])
+ logger = logging.getLogger(progname)
+ for pattern in patterns:
+ if fnmatch(path, os.path.join(relpath, pattern)):
+ logger.debug("%s: Adding %s to commit" % (progname, path))
+ repo.index.add([path])
+ return True
+ return False
+
+
+def parse_options():
+ config = ConfigParser.SafeConfigParser(DEFAULTS)
+ config.read(CONFIG)
+
+ optinfo = dict(
+ profile=Bcfg2.Options.CLIENT_PROFILE,
+ dryrun=Bcfg2.Options.CLIENT_DRYRUN,
+ groups=Bcfg2.Options.Option("Groups",
+ default=[],
+ cmd="-g",
+ odesc='<group>:<group>',
+ cook=Bcfg2.Options.colon_split))
+ optinfo.update(Bcfg2.Options.CLI_COMMON_OPTIONS)
+ optinfo.update(Bcfg2.Options.SERVER_COMMON_OPTIONS)
+ argv = [Bcfg2.Options.CFILE.cmd, config.get("global", "config")]
+ argv.extend(sys.argv[1:])
+ setup = Bcfg2.Options.OptionParser(optinfo, argv=argv)
+ setup.parse(argv)
+
+ setup['commit'] = Bcfg2.Options.list_split(config.get("global",
+ "commit"))
+ for opt in ['debug', 'verbose']:
+ try:
+ setup[opt] = config.getboolean("global", opt)
+ except ConfigParser.NoOptionError:
+ pass
+
+ try:
+ hostname = setup['args'][0]
+ except IndexError:
+ print(setup.hm)
+ raise SystemExit(1)
+ return (setup, hostname)
+
+
+def setup_logging(setup):
+ progname = os.path.basename(sys.argv[0])
+ log_args = dict(to_syslog=setup['syslog'], to_console=sys.stdout.isatty(),
+ to_file=setup['logging'], level=logging.WARNING)
+ if setup['debug']:
+ log_args['level'] = logging.DEBUG
+ elif setup['verbose']:
+ log_args['level'] = logging.INFO
+ Bcfg2.Logger.setup_logging(progname, **log_args)
+ return logging.getLogger(progname)
+
+
+def main():
+ progname = os.path.basename(sys.argv[0])
+ setup, hostname = parse_options()
+ logger = setup_logging(setup)
+ if setup['dryrun']:
+ logger.info("%s: In dry-run mode, changes will not be committed" %
+ progname)
+
+ if setup['vcs_root']:
+ gitroot = os.path.realpath(setup['vcs_root'])
+ else:
+ gitroot = os.path.realpath(setup['repo'])
+ logger.info("%s: Using Git repo at %s" % (progname, gitroot))
+ try:
+ repo = git.Repo(gitroot)
+ except: # pylint: disable=W0702
+ logger.error("%s: Error setting up Git repo at %s: %s" %
+ (progname, gitroot, sys.exc_info()[1]))
+ return 1
+
+ # canonicalize the repo path so that git will recognize it as
+ # being inside the git repo
+ bcfg2root = os.path.realpath(setup['repo'])
+
+ if not bcfg2root.startswith(gitroot):
+ logger.error("%s: Bcfg2 repo %s is not inside Git repo %s" %
+ (progname, bcfg2root, gitroot))
+ return 1
+
+ # relative path to Bcfg2 root from VCS root
+ if gitroot == bcfg2root:
+ relpath = ''
+ else:
+ relpath = bcfg2root[len(gitroot) + 1:]
+
+ new = 0
+ changed = 0
+ logger.debug("%s: Untracked files: %s" % (progname, repo.untracked_files))
+ for path in repo.untracked_files:
+ if add_to_commit(setup['commit'], path, repo, relpath):
+ new += 1
+ else:
+ logger.debug("%s: Not adding %s to commit" % (progname, path))
+ logger.debug("%s: Untracked files after building commit: %s" %
+ (progname, repo.untracked_files))
+
+ changes = list_changed_files(repo)
+ logger.info("%s: Changed files: %s" % (progname,
+ [d.a_blob.path for d in changes]))
+ for diff in changes:
+ if add_to_commit(setup['commit'], diff.a_blob.path, repo, relpath):
+ changed += 1
+ else:
+ logger.debug("%s: Not adding %s to commit" % (progname,
+ diff.a_blob.path))
+ logger.info("%s: Changed files after building commit: %s" %
+ (progname, [d.a_blob.path for d in list_changed_files(repo)]))
+
+ if new + changed > 0:
+ logger.debug("%s: Committing %s new files and %s changed files" %
+ (progname, new, changed))
+ if setup['dryrun']:
+ logger.warning("%s: In dry-run mode, skipping commit and push" %
+ progname)
+ else:
+ output = repo.index.commit("Auto-commit with %s from %s run" %
+ (progname, hostname))
+ if output:
+ logger.debug("%s: %s" % (progname, output))
+ remote = repo.remote()
+ logger.debug("%s: Pushing to remote %s at %s" % (progname, remote,
+ remote.url))
+ output = remote.push()
+ if output:
+ logger.debug("%s: %s" % (progname, output))
+ else:
+ logger.info("%s: No changes to commit" % progname)
+
+if __name__ == '__main__':
+ sys.exit(main())
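
For reference, an illustrative /etc/bcfg2-commit.conf matching the options documented in the docstring of git_commit.py above. The option names and defaults come from that docstring and the DEFAULTS dict; the specific values shown here are examples only:

    # /etc/bcfg2-commit.conf (example)
    [global]
    # Bcfg2 server config to load for repo/vcs_root settings
    config = /etc/bcfg2.conf
    # globs, relative to the Bcfg2 repository root, to commit back
    commit = SSLCA/*, SSHbase/*, Cfg/*, Metadata/clients.xml
    # log levels for git_commit.py itself
    verbose = true
    debug = false

The commit globs are matched with fnmatch against paths relative to the VCS root, as add_to_commit() above shows.
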
diff --git a/tools/posixusers_baseline.py b/tools/posixusers_baseline.py
index c45e54f1a..4c78a757e 100755
--- a/tools/posixusers_baseline.py
+++ b/tools/posixusers_baseline.py
@@ -46,8 +46,8 @@ def main():
config = lxml.etree.parse(setup['file']).getroot()
else:
config = lxml.etree.Element("Configuration")
- users = POSIXUsers(logging.getLogger('posixusers_baseline.py'),
- setup, config)
+ logger = logging.getLogger('posixusers_baseline.py')
+ users = POSIXUsers(logger, setup, config)
baseline = lxml.etree.Element("Bundle", name="posixusers_baseline")
for entry in users.FindExtra():
@@ -59,7 +59,12 @@ def main():
continue
entry.set(attr, str(data[idx]))
if entry.tag == 'POSIXUser':
- entry.set("group", grp.getgrgid(data[3])[0])
+ try:
+ entry.set("group", grp.getgrgid(data[3])[0])
+ except KeyError:
+ logger.warning("User %s is a member of nonexistent group %s" %
+ (entry.get("name"), data[3]))
+ entry.set("group", str(data[3]))
for group in users.user_supplementary_groups(entry):
memberof = lxml.etree.SubElement(entry, "MemberOf",
group=group[0])
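
The try/except added above handles users whose primary GID has no entry in the group database. A small standalone illustration of the failure mode and the fallback behaviour; GID 99999 is just a value that is unlikely to exist on most systems:

    import grp

    def primary_group_name(gid):
        """Return the group name for gid, or the numeric GID as a string."""
        try:
            return grp.getgrgid(gid)[0]
        except KeyError:
            # No group database entry for this GID; mirror the script's
            # fallback of recording the numeric GID instead.
            return str(gid)

    print(primary_group_name(0))       # typically 'root' (or 'wheel')
    print(primary_group_name(99999))   # likely falls back to '99999'
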
diff --git a/tools/upgrade/1.3/migrate_configs.py b/tools/upgrade/1.3/migrate_configs.py
index b7adb2528..76b2392e7 100755
--- a/tools/upgrade/1.3/migrate_configs.py
+++ b/tools/upgrade/1.3/migrate_configs.py
@@ -16,13 +16,13 @@ def copy_section(src_file, tgt_cfg, section, newsection=None):
tgt_cfg.add_section(newsection)
except ConfigParser.DuplicateSectionError:
print("[%s] section already exists in %s, adding options" %
- (newsection, setup['cfile']))
+ (newsection, setup['configfile']))
for opt in cfg.options(section):
val = cfg.get(section, opt)
if tgt_cfg.has_option(newsection, opt):
print("%s in [%s] already populated in %s, skipping" %
- (opt, newsection, setup['cfile']))
- print(" %s: %s" % (setup['cfile'],
+ (opt, newsection, setup['configfile']))
+ print(" %s: %s" % (setup['configfile'],
tgt_cfg.get(newsection, opt)))
print(" %s: %s" % (src_file, val))
else:
@@ -43,7 +43,7 @@ def main():
if os.path.exists(rules_conf):
remove.append(rules_conf)
copy_section(rules_conf, setup.cfp, "rules")
-
+
# move packages config out of packages.conf and into bcfg2.conf
pkgs_conf = os.path.join(setup['repo'], 'Packages', 'packages.conf')
if os.path.exists(pkgs_conf):
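
For context, copy_section() (partially visible above) merges a section from a stand-alone config file into the parsed bcfg2.conf. A rough, self-contained reconstruction of that pattern, based only on the context lines in this hunk; the real function also prints a notice instead of silently skipping options that are already set:

    try:
        import configparser                      # Python 3
    except ImportError:
        import ConfigParser as configparser      # Python 2

    def copy_section(src_file, tgt_cfg, section, newsection=None):
        """Copy [section] from src_file into the ConfigParser tgt_cfg."""
        if newsection is None:
            newsection = section
        cfg = configparser.ConfigParser()
        cfg.read(src_file)
        if not cfg.has_section(section):
            return
        try:
            tgt_cfg.add_section(newsection)
        except configparser.DuplicateSectionError:
            pass  # section already exists; merge options into it
        for opt in cfg.options(section):
            if not tgt_cfg.has_option(newsection, opt):
                tgt_cfg.set(newsection, opt, cfg.get(section, opt))
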
diff --git a/tools/upgrade/1.3/migrate_dbstats.py b/tools/upgrade/1.3/migrate_dbstats.py
index 07def2ac8..34430e3df 100755
--- a/tools/upgrade/1.3/migrate_dbstats.py
+++ b/tools/upgrade/1.3/migrate_dbstats.py
@@ -10,11 +10,12 @@ import time
import Bcfg2.Logger
import Bcfg2.Options
from django.core.cache import cache
-from django.db import connection, transaction, backend
+from django.db import connection, backend
from Bcfg2.Server.Admin.Reports import Reports
from Bcfg2.Reporting import models as new_models
from Bcfg2.Reporting.utils import BatchFetch
+from Bcfg2.Reporting.Compat import transaction
from Bcfg2.Server.Reports.reports import models as legacy_models
logger = logging.getLogger(__name__)
@@ -38,7 +39,7 @@ def _quote(value):
return _our_backend.quote_name(value)
-@transaction.commit_on_success
+@transaction.atomic
def _migrate_perms():
"""helper"""
@@ -57,7 +58,7 @@ def _migrate_perms():
return fperms
-@transaction.commit_on_success
+@transaction.atomic
def _migrate_transaction(inter, entries, fperms):
"""helper"""
@@ -187,7 +188,7 @@ def _shove(old_table, new_table, columns):
cursor.close()
-@transaction.commit_on_success
+@transaction.atomic
def migrate_stage1():
logger.info("Migrating clients")
try:
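
The decorator swap above depends on Bcfg2.Reporting.Compat exposing a transaction object that works across Django versions; that module is not part of this diff. A plausible sketch of such a shim, assuming it simply aliases the old commit_on_success decorator to the atomic name on Django releases (before 1.6) that lack it:

    # Hypothetical Django transaction compatibility shim (the real
    # Bcfg2.Reporting.Compat may differ).  transaction.atomic exists from
    # Django 1.6 on; older releases only have commit_on_success, which
    # offers roughly the same "run this function in one transaction"
    # behaviour for this script's purposes.
    from django.db import transaction

    if not hasattr(transaction, 'atomic'):
        transaction.atomic = transaction.commit_on_success

Callers then import transaction from the compat module and decorate with @transaction.atomic regardless of the installed Django version.
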
diff --git a/tools/upgrade/1.3/migrate_perms_to_mode.py b/tools/upgrade/1.3/migrate_perms_to_mode.py
index 18abffec2..ee440bc8e 100755
--- a/tools/upgrade/1.3/migrate_perms_to_mode.py
+++ b/tools/upgrade/1.3/migrate_perms_to_mode.py
@@ -3,7 +3,8 @@
import lxml.etree
import os
import sys
-
+from fnmatch import fnmatch
+from Bcfg2.Compat import any
import Bcfg2.Options
@@ -53,9 +54,15 @@ def convertstructure(structfile):
writefile(structfile, xdata)
+def skip_path(path, setup):
+ return any(fnmatch(path, p) or fnmatch(os.path.basename(path), p)
+ for p in setup['ignore'])
+
+
def main():
opts = dict(repo=Bcfg2.Options.SERVER_REPOSITORY,
configfile=Bcfg2.Options.CFILE,
+ ignore=Bcfg2.Options.SERVER_FAM_IGNORE,
plugins=Bcfg2.Options.SERVER_PLUGINS)
setup = Bcfg2.Options.OptionParser(opts)
setup.parse(sys.argv[1:])
@@ -64,11 +71,17 @@ def main():
for plugin in setup['plugins']:
if plugin in ['Base', 'Bundler', 'Rules']:
for root, dirs, files in os.walk(os.path.join(repo, plugin)):
+ if skip_path(root, setup):
+ continue
for fname in files:
+ if skip_path(fname, setup):
+ continue
convertstructure(os.path.join(root, fname))
if plugin not in ['Cfg', 'TGenshi', 'TCheetah', 'SSHbase', 'SSLCA']:
continue
for root, dirs, files in os.walk(os.path.join(repo, plugin)):
+ if skip_path(root, setup):
+ continue
for fname in files:
if fname == 'info.xml':
convertinfo(os.path.join(root, fname))
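
Finally, the new skip_path() helper above skips any path whose full name or basename matches one of the server's FAM ignore globs. A standalone illustration of that matching logic; the patterns listed here are examples rather than the actual defaults of the ignore option used above:

    import os
    from fnmatch import fnmatch

    # Example ignore globs; the real list comes from the setup['ignore']
    # option (Bcfg2.Options.SERVER_FAM_IGNORE) used in the script above.
    IGNORE = ['*~', '.#*', '*.swp', '.git']

    def skip_path(path, ignore=IGNORE):
        """True if path or its basename matches any ignore pattern."""
        return any(fnmatch(path, pat) or fnmatch(os.path.basename(path), pat)
                   for pat in ignore)

    for candidate in ['Rules/packages.xml', 'Rules/packages.xml~',
                      'Bundler/.#web.xml', '.git']:
        print("%-25s skipped=%s" % (candidate, skip_path(candidate)))
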