-rw-r--r--  .travis.yml  31
-rw-r--r--  README  3
-rwxr-xr-x [-rw-r--r--]  debian/bcfg2.cron.daily  14
-rwxr-xr-x [-rw-r--r--]  debian/bcfg2.cron.hourly  14
-rw-r--r--  debian/changelog  6
-rw-r--r--  debian/control  2
-rw-r--r--  doc/appendix/files/ntp.txt  93
-rw-r--r--  doc/appendix/guides/authentication.txt  2
-rw-r--r--  doc/appendix/guides/fedora.txt  14
-rw-r--r--  doc/appendix/guides/nat_howto.txt  6
-rw-r--r--  doc/appendix/guides/ubuntu.txt  14
-rw-r--r--  doc/appendix/guides/web-reports-install.txt  16
-rw-r--r--  doc/conf.py  2
-rw-r--r--  doc/development/client-driver.txt  2
-rw-r--r--  doc/reports/dynamic.txt  18
-rw-r--r--  doc/server/backends.txt  44
-rw-r--r--  doc/server/configurationentries.txt  167
-rw-r--r--  doc/server/database.txt  53
-rw-r--r--  doc/server/index.txt  3
-rw-r--r--  doc/server/info.txt  126
-rw-r--r--  doc/server/plugins/connectors/properties.txt  194
-rw-r--r--  doc/server/plugins/connectors/puppetenc.txt  123
-rw-r--r--  doc/server/plugins/connectors/templatehelper.txt  18
-rw-r--r--  doc/server/plugins/generators/cfg.txt  129
-rw-r--r--  doc/server/plugins/generators/nagiosgen.txt  10
-rw-r--r--  doc/server/plugins/generators/packages.txt  224
-rw-r--r--  doc/server/plugins/generators/rules.txt  361
-rw-r--r--  doc/server/plugins/generators/semodules.txt  66
-rw-r--r--  doc/server/plugins/generators/sshbase.txt  4
-rw-r--r--  doc/server/plugins/generators/sslca.txt  8
-rw-r--r--  doc/server/plugins/generators/tgenshi/clientsxml.txt  2
-rw-r--r--  doc/server/plugins/generators/tgenshi/index.txt  2
-rw-r--r--  doc/server/plugins/grouping/metadata.txt  239
-rw-r--r--  doc/server/plugins/misc/trigger.txt  4
-rw-r--r--  doc/server/plugins/plugin-roles.txt  134
-rw-r--r--  doc/server/plugins/probes/fileprobes.txt  57
-rw-r--r--  doc/server/plugins/probes/index.txt  95
-rw-r--r--  doc/server/selinux.txt  97
-rw-r--r--  examples/bcfg2.confHostbase  3
-rw-r--r--  gentoo/bcfg2-1.3.0.ebuild (renamed from gentoo/bcfg2-1.2.2.ebuild)  14
-rw-r--r--  man/bcfg2-admin.8  474
-rw-r--r--  man/bcfg2-build-reports.8  74
-rw-r--r--  man/bcfg2-crypt.8  83
-rw-r--r--  man/bcfg2-info.8  263
-rw-r--r--  man/bcfg2-lint.8  239
-rw-r--r--  man/bcfg2-lint.conf.5  259
-rw-r--r--  man/bcfg2-ping-sweep.8  20
-rw-r--r--  man/bcfg2-reports.8  168
-rw-r--r--  man/bcfg2-server.8  104
-rw-r--r--  man/bcfg2.1  353
-rw-r--r--  man/bcfg2.conf.5  819
-rw-r--r--  misc/bcfg2.spec  44
-rw-r--r--  osx/Makefile  4
-rw-r--r--  redhat/VERSION  2
-rw-r--r--  reports/reports.wsgi  2
-rw-r--r--  reports/site_media/bcfg2_base.css  16
-rw-r--r--  schemas/base.xsd  3
-rw-r--r--  schemas/bundle.xsd  129
-rw-r--r--  schemas/clients.xsd  4
-rw-r--r--  schemas/defaults.xsd  27
-rw-r--r--  schemas/info.xsd  7
-rw-r--r--  schemas/metadata.xsd  51
-rw-r--r--  schemas/packages.xsd  7
-rw-r--r--  schemas/pathentry.xsd  21
-rw-r--r--  schemas/pkgtype.xsd  2
-rw-r--r--  schemas/rules.xsd  113
-rw-r--r--  schemas/services.xsd  33
-rw-r--r--  schemas/servicetype.xsd  14
-rw-r--r--  schemas/types.xsd  138
-rwxr-xr-x  setup.py  10
-rw-r--r--  solaris/Makefile  7
-rw-r--r--  solaris/gen-prototypes.sh  2
-rw-r--r--  solaris/pkginfo.bcfg2  2
-rw-r--r--  solaris/pkginfo.bcfg2-server  2
-rw-r--r--  solaris/prototype.bcfg2-server  3
-rw-r--r--  src/lib/Bcfg2/Bcfg2Py3k.py  58
-rw-r--r--  src/lib/Bcfg2/Client/Frame.py  210
-rw-r--r--  src/lib/Bcfg2/Client/Tools/APK.py  4
-rw-r--r--  src/lib/Bcfg2/Client/Tools/APT.py  46
-rw-r--r--  src/lib/Bcfg2/Client/Tools/Action.py  9
-rw-r--r--  src/lib/Bcfg2/Client/Tools/Chkconfig.py  46
-rw-r--r--  src/lib/Bcfg2/Client/Tools/DebInit.py  5
-rw-r--r--  src/lib/Bcfg2/Client/Tools/OpenCSW.py  33
-rw-r--r--  src/lib/Bcfg2/Client/Tools/POSIX.py  943
-rw-r--r--  src/lib/Bcfg2/Client/Tools/POSIX/Device.py  66
-rw-r--r--  src/lib/Bcfg2/Client/Tools/POSIX/Directory.py  90
-rw-r--r--  src/lib/Bcfg2/Client/Tools/POSIX/File.py  225
-rw-r--r--  src/lib/Bcfg2/Client/Tools/POSIX/Hardlink.py  43
-rw-r--r--  src/lib/Bcfg2/Client/Tools/POSIX/Nonexistent.py  45
-rw-r--r--  src/lib/Bcfg2/Client/Tools/POSIX/Permissions.py  11
-rw-r--r--  src/lib/Bcfg2/Client/Tools/POSIX/Symlink.py  46
-rw-r--r--  src/lib/Bcfg2/Client/Tools/POSIX/__init__.py  151
-rw-r--r--  src/lib/Bcfg2/Client/Tools/POSIX/base.py  642
-rw-r--r--  src/lib/Bcfg2/Client/Tools/Portage.py  27
-rw-r--r--  src/lib/Bcfg2/Client/Tools/RPMng.py  124
-rw-r--r--  src/lib/Bcfg2/Client/Tools/RcUpdate.py  40
-rw-r--r--  src/lib/Bcfg2/Client/Tools/SELinux.py  716
-rw-r--r--  src/lib/Bcfg2/Client/Tools/SMF.py  5
-rw-r--r--  src/lib/Bcfg2/Client/Tools/Systemd.py  17
-rw-r--r--  src/lib/Bcfg2/Client/Tools/Upstart.py  5
-rw-r--r--  src/lib/Bcfg2/Client/Tools/YUM24.py  21
-rw-r--r--  src/lib/Bcfg2/Client/Tools/YUMng.py  121
-rw-r--r--  src/lib/Bcfg2/Client/Tools/__init__.py  117
-rw-r--r--  src/lib/Bcfg2/Client/Tools/launchd.py  5
-rwxr-xr-x  src/lib/Bcfg2/Client/Tools/rpmtools.py  23
-rw-r--r--  src/lib/Bcfg2/Component.py  52
-rwxr-xr-x  src/lib/Bcfg2/Encryption.py  75
-rw-r--r--  src/lib/Bcfg2/Logger.py  42
-rw-r--r--  src/lib/Bcfg2/Options.py  1073
-rw-r--r--  src/lib/Bcfg2/Proxy.py  121
-rw-r--r--  src/lib/Bcfg2/SSLServer.py  13
-rw-r--r--  src/lib/Bcfg2/Server/Admin/Bundle.py  20
-rw-r--r--  src/lib/Bcfg2/Server/Admin/Client.py  42
-rw-r--r--  src/lib/Bcfg2/Server/Admin/Compare.py  3
-rw-r--r--  src/lib/Bcfg2/Server/Admin/Group.py  63
-rw-r--r--  src/lib/Bcfg2/Server/Admin/Init.py  79
-rw-r--r--  src/lib/Bcfg2/Server/Admin/Pull.py  11
-rw-r--r--  src/lib/Bcfg2/Server/Admin/Query.py  11
-rw-r--r--  src/lib/Bcfg2/Server/Admin/Reports.py  81
-rw-r--r--  src/lib/Bcfg2/Server/Admin/Syncdb.py  36
-rw-r--r--  src/lib/Bcfg2/Server/Admin/Tidy.py  7
-rw-r--r--  src/lib/Bcfg2/Server/Admin/Viz.py  2
-rw-r--r--  src/lib/Bcfg2/Server/Admin/__init__.py  11
-rw-r--r--  src/lib/Bcfg2/Server/BuiltinCore.py  103
-rw-r--r--  src/lib/Bcfg2/Server/CherryPyCore.py  131
-rw-r--r--  src/lib/Bcfg2/Server/Core.py  458
-rw-r--r--  src/lib/Bcfg2/Server/FileMonitor.py  315
-rw-r--r--  src/lib/Bcfg2/Server/FileMonitor/Fam.py  82
-rw-r--r--  src/lib/Bcfg2/Server/FileMonitor/Gamin.py  64
-rw-r--r--  src/lib/Bcfg2/Server/FileMonitor/Inotify.py  126
-rw-r--r--  src/lib/Bcfg2/Server/FileMonitor/Pseudo.py  25
-rw-r--r--  src/lib/Bcfg2/Server/FileMonitor/__init__.py  143
-rw-r--r--  src/lib/Bcfg2/Server/Hostbase/backends.py  5
-rw-r--r--  src/lib/Bcfg2/Server/Hostbase/ldapauth.py  1
-rw-r--r--  src/lib/Bcfg2/Server/Lint/Bundles.py  54
-rw-r--r--  src/lib/Bcfg2/Server/Lint/Comments.py  5
-rw-r--r--  src/lib/Bcfg2/Server/Lint/Deltas.py  25
-rw-r--r--  src/lib/Bcfg2/Server/Lint/Duplicates.py  5
-rwxr-xr-x  src/lib/Bcfg2/Server/Lint/Genshi.py  1
-rw-r--r--  src/lib/Bcfg2/Server/Lint/GroupNames.py  78
-rw-r--r--  src/lib/Bcfg2/Server/Lint/GroupPatterns.py  35
-rw-r--r--  src/lib/Bcfg2/Server/Lint/InfoXML.py  41
-rw-r--r--  src/lib/Bcfg2/Server/Lint/Pkgmgr.py  38
-rw-r--r--  src/lib/Bcfg2/Server/Lint/RequiredAttrs.py  163
-rw-r--r--  src/lib/Bcfg2/Server/Lint/TemplateHelper.py  64
-rw-r--r--  src/lib/Bcfg2/Server/Lint/Validate.py  62
-rw-r--r--  src/lib/Bcfg2/Server/Lint/__init__.py  10
-rw-r--r--  src/lib/Bcfg2/Server/Plugin.py  688
-rw-r--r--  src/lib/Bcfg2/Server/Plugins/BB.py  83
-rw-r--r--  src/lib/Bcfg2/Server/Plugins/Base.py  5
-rw-r--r--  src/lib/Bcfg2/Server/Plugins/Bundler.py  138
-rw-r--r--  src/lib/Bcfg2/Server/Plugins/Cfg/CfgCheetahGenerator.py  9
-rw-r--r--  src/lib/Bcfg2/Server/Plugins/Cfg/CfgEncryptedCheetahGenerator.py  14
-rw-r--r--  src/lib/Bcfg2/Server/Plugins/Cfg/CfgEncryptedGenerator.py  63
-rw-r--r--  src/lib/Bcfg2/Server/Plugins/Cfg/CfgEncryptedGenshiGenerator.py  26
-rw-r--r--  src/lib/Bcfg2/Server/Plugins/Cfg/CfgGenshiGenerator.py  72
-rw-r--r--  src/lib/Bcfg2/Server/Plugins/Cfg/CfgInfoXML.py  8
-rw-r--r--  src/lib/Bcfg2/Server/Plugins/Cfg/CfgLegacyInfo.py  4
-rw-r--r--  src/lib/Bcfg2/Server/Plugins/Cfg/__init__.py  100
-rw-r--r--  src/lib/Bcfg2/Server/Plugins/DBStats.py  52
-rw-r--r--  src/lib/Bcfg2/Server/Plugins/Decisions.py  5
-rw-r--r--  src/lib/Bcfg2/Server/Plugins/Deps.py  21
-rw-r--r--  src/lib/Bcfg2/Server/Plugins/FileProbes.py  62
-rw-r--r--  src/lib/Bcfg2/Server/Plugins/GroupPatterns.py  53
-rw-r--r--  src/lib/Bcfg2/Server/Plugins/Hostbase.py  23
-rw-r--r--  src/lib/Bcfg2/Server/Plugins/Ldap.py  2
-rw-r--r--  src/lib/Bcfg2/Server/Plugins/Metadata.py  957
-rw-r--r--  src/lib/Bcfg2/Server/Plugins/NagiosGen.py  25
-rw-r--r--  src/lib/Bcfg2/Server/Plugins/Ohai.py  2
-rw-r--r--  src/lib/Bcfg2/Server/Plugins/Packages/Apt.py  17
-rw-r--r--  src/lib/Bcfg2/Server/Plugins/Packages/Collection.py  75
-rw-r--r--  src/lib/Bcfg2/Server/Plugins/Packages/Pac.py  7
-rw-r--r--  src/lib/Bcfg2/Server/Plugins/Packages/PackagesSources.py  32
-rw-r--r--  src/lib/Bcfg2/Server/Plugins/Packages/Source.py  45
-rw-r--r--  src/lib/Bcfg2/Server/Plugins/Packages/Yum.py  211
-rw-r--r--  src/lib/Bcfg2/Server/Plugins/Packages/__init__.py  99
-rw-r--r--  src/lib/Bcfg2/Server/Plugins/Pkgmgr.py  56
-rw-r--r--  src/lib/Bcfg2/Server/Plugins/Probes.py  247
-rw-r--r--  src/lib/Bcfg2/Server/Plugins/Properties.py  111
-rw-r--r--  src/lib/Bcfg2/Server/Plugins/PuppetENC.py  117
-rw-r--r--  src/lib/Bcfg2/Server/Plugins/SEModules.py  45
-rw-r--r--  src/lib/Bcfg2/Server/Plugins/SGenshi.py  97
-rw-r--r--  src/lib/Bcfg2/Server/Plugins/SSHbase.py  12
-rw-r--r--  src/lib/Bcfg2/Server/Plugins/SSLCA.py  110
-rw-r--r--  src/lib/Bcfg2/Server/Plugins/ServiceCompat.py  32
-rw-r--r--  src/lib/Bcfg2/Server/Plugins/Snapshots.py  27
-rw-r--r--  src/lib/Bcfg2/Server/Plugins/Statistics.py  26
-rw-r--r--  src/lib/Bcfg2/Server/Plugins/Svcmgr.py  10
-rw-r--r--  src/lib/Bcfg2/Server/Plugins/TCheetah.py  9
-rw-r--r--  src/lib/Bcfg2/Server/Plugins/TGenshi.py  18
-rw-r--r--  src/lib/Bcfg2/Server/Plugins/TemplateHelper.py  120
-rw-r--r--  src/lib/Bcfg2/Server/Plugins/Trigger.py  65
-rw-r--r--  src/lib/Bcfg2/Server/Plugins/__init__.py  2
-rwxr-xr-x  src/lib/Bcfg2/Server/Reports/importscript.py  307
-rw-r--r--  src/lib/Bcfg2/Server/Reports/reports/fixtures/initial_version.xml  43
-rw-r--r--  src/lib/Bcfg2/Server/Reports/reports/models.py  139
-rw-r--r--  src/lib/Bcfg2/Server/Reports/reports/sql/client.sql  7
-rw-r--r--  src/lib/Bcfg2/Server/Reports/reports/templates/base-timeview.html  5
-rw-r--r--  src/lib/Bcfg2/Server/Reports/reports/templates/base.html  3
-rw-r--r--  src/lib/Bcfg2/Server/Reports/reports/templates/clients/detail.html  56
-rw-r--r--  src/lib/Bcfg2/Server/Reports/reports/templates/clients/detailed-list.html  18
-rw-r--r--  src/lib/Bcfg2/Server/Reports/reports/templates/clients/index.html  1
-rw-r--r--  src/lib/Bcfg2/Server/Reports/reports/templates/clients/manage.html  2
-rw-r--r--  src/lib/Bcfg2/Server/Reports/reports/templates/config_items/common.html  42
-rw-r--r--  src/lib/Bcfg2/Server/Reports/reports/templates/config_items/entry_status.html  30
-rw-r--r--  src/lib/Bcfg2/Server/Reports/reports/templates/config_items/listing.html  22
-rw-r--r--  src/lib/Bcfg2/Server/Reports/reports/templates/widgets/filter_bar.html  16
-rw-r--r--  src/lib/Bcfg2/Server/Reports/reports/templatetags/bcfg2_tags.py  130
-rw-r--r--  src/lib/Bcfg2/Server/Reports/reports/templatetags/syntax_coloring.py  10
-rw-r--r--  src/lib/Bcfg2/Server/Reports/reports/urls.py  9
-rw-r--r--  src/lib/Bcfg2/Server/Reports/reports/views.py  266
-rw-r--r--  src/lib/Bcfg2/Server/Reports/settings.py  161
-rw-r--r--  src/lib/Bcfg2/Server/Reports/updatefix.py  281
-rwxr-xr-x  src/lib/Bcfg2/Server/Reports/utils.py  4
-rw-r--r--  src/lib/Bcfg2/Server/SchemaUpdater/Changes/1_0_x.py  11
-rw-r--r--  src/lib/Bcfg2/Server/SchemaUpdater/Changes/1_1_x.py  59
-rw-r--r--  src/lib/Bcfg2/Server/SchemaUpdater/Changes/1_2_x.py  15
-rw-r--r--  src/lib/Bcfg2/Server/SchemaUpdater/Changes/1_3_0.py  27
-rw-r--r--  src/lib/Bcfg2/Server/SchemaUpdater/Changes/__init__.py  0
-rw-r--r--  src/lib/Bcfg2/Server/SchemaUpdater/Routines.py  279
-rw-r--r--  src/lib/Bcfg2/Server/SchemaUpdater/__init__.py  257
-rw-r--r--  src/lib/Bcfg2/Server/Snapshots/model.py  8
-rw-r--r--  src/lib/Bcfg2/Server/__init__.py  11
-rw-r--r--  src/lib/Bcfg2/Server/models.py  77
-rwxr-xr-x  src/lib/Bcfg2/manage.py  14
-rw-r--r--  src/lib/Bcfg2/settings.py  161
-rw-r--r--  src/lib/Bcfg2/version.py  115
-rwxr-xr-x  src/sbin/bcfg2  164
-rwxr-xr-x  src/sbin/bcfg2-admin  35
-rwxr-xr-x  src/sbin/bcfg2-build-reports  6
-rwxr-xr-x  src/sbin/bcfg2-crypt  362
-rwxr-xr-x  src/sbin/bcfg2-info  225
-rwxr-xr-x  src/sbin/bcfg2-lint  95
-rwxr-xr-x  src/sbin/bcfg2-ping-sweep  70
-rwxr-xr-x  src/sbin/bcfg2-reports  576
-rwxr-xr-x  src/sbin/bcfg2-server  82
-rwxr-xr-x  src/sbin/bcfg2-test  30
-rwxr-xr-x  src/sbin/bcfg2-yum-helper  213
-rw-r--r--  testsuite/Testlib/TestClient/TestTools/TestPOSIX/TestDevice.py  144
-rw-r--r--  testsuite/Testlib/TestClient/TestTools/TestPOSIX/TestDirectory.py  159
-rw-r--r--  testsuite/Testlib/TestClient/TestTools/TestPOSIX/TestFile.py  447
-rw-r--r--  testsuite/Testlib/TestClient/TestTools/TestPOSIX/TestHardlink.py  85
-rw-r--r--  testsuite/Testlib/TestClient/TestTools/TestPOSIX/TestNonexistent.py  91
-rw-r--r--  testsuite/Testlib/TestClient/TestTools/TestPOSIX/TestPermissions.py  5
-rw-r--r--  testsuite/Testlib/TestClient/TestTools/TestPOSIX/TestSymlink.py  81
-rw-r--r--  testsuite/Testlib/TestClient/TestTools/TestPOSIX/Test__init.py  252
-rw-r--r--  testsuite/Testlib/TestClient/TestTools/TestPOSIX/Testbase.py  991
-rw-r--r--  testsuite/Testlib/TestClient/TestTools/TestPOSIX/__init__.py  0
-rw-r--r--  testsuite/Testlib/TestClient/TestTools/__init__.py  0
-rw-r--r--  testsuite/Testlib/TestClient/__init__.py  0
-rw-r--r--  testsuite/Testlib/TestOptions.py  244
-rw-r--r--  testsuite/Testlib/TestServer/TestPlugin.py  2296
-rw-r--r--  testsuite/Testlib/TestServer/TestPlugins/TestMetadata.py  1087
-rw-r--r--  testsuite/Testlib/TestServer/TestPlugins/TestProbes.py  549
-rw-r--r--  testsuite/Testlib/TestServer/TestPlugins/TestSEModules.py  109
-rw-r--r--  testsuite/Testlib/TestServer/TestPlugins/TestTemplateHelper.py  120
-rw-r--r--  testsuite/Testlib/TestServer/TestPlugins/__init__.py  0
-rw-r--r--  testsuite/Testlib/TestServer/__init__.py  0
-rw-r--r--  testsuite/Testlib/__init__.py  0
-rw-r--r--  testsuite/__init__.py  0
-rwxr-xr-x  testsuite/before_install.sh  9
-rw-r--r--  testsuite/common.py  287
-rwxr-xr-x  testsuite/install.sh  21
-rw-r--r--  testsuite/requirements.txt  4
-rwxr-xr-x  tools/accounts2xml.py  4
-rwxr-xr-x  tools/bcfg2-profile-templates.py  4
-rwxr-xr-x  tools/bcfg2_svnlog.py  2
-rwxr-xr-x  tools/export.py  28
-rw-r--r--  tools/manpagegen/bcfg2-admin.8.ronn  220
-rw-r--r--  tools/manpagegen/bcfg2-build-reports.8.ronn  34
-rw-r--r--  tools/manpagegen/bcfg2-crypt.8.ronn  108
-rw-r--r--  tools/manpagegen/bcfg2-info.8.ronn  110
-rw-r--r--  tools/manpagegen/bcfg2-lint.8.ronn  119
-rw-r--r--  tools/manpagegen/bcfg2-lint.conf.5.ronn  114
-rw-r--r--  tools/manpagegen/bcfg2-reports.8.ronn  82
-rw-r--r--  tools/manpagegen/bcfg2-server.8.ronn  43
-rw-r--r--  tools/manpagegen/bcfg2.1.ronn  158
-rw-r--r--  tools/manpagegen/bcfg2.conf.5.ronn  539
-rw-r--r--  tools/manpagegen/generate-manpages.bash  17
-rwxr-xr-x  tools/selinux_baseline.py  51
-rwxr-xr-x  tools/upgrade/1.3/migrate_configs.py  42
-rwxr-xr-x  tools/upgrade/1.3/migrate_info.py  45
-rwxr-xr-x  tools/upgrade/1.3/service_modes.py  50
-rwxr-xr-x [-rw-r--r--]  tools/yum-listpkgs-xml.py  1
284 files changed, 22115 insertions, 8666 deletions
diff --git a/.travis.yml b/.travis.yml
new file mode 100644
index 000000000..8786dcc77
--- /dev/null
+++ b/.travis.yml
@@ -0,0 +1,31 @@
+language: python
+python:
+ - "2.5"
+ - "2.6"
+ - "2.7"
+ - "3.2"
+env:
+ - WITH_OPTIONAL_DEPS=yes
+ - WITH_OPTIONAL_DEPS=no
+matrix:
+ exclude:
+ - python: "3.2"
+ env: WITH_OPTIONAL_DEPS=yes
+before_install:
+ - testsuite/before_install.sh
+install:
+ - testsuite/install.sh
+ - pip install -e .
+script:
+ - nosetests testsuite
+branches:
+ except:
+ - maint
+ - 1.1.0-stable
+ - py3k
+notifications:
+ email: chris.a.st.pierre@gmail.com
+ irc:
+ channels:
+ - "irc.freenode.org#bcfg2"
+ use_notice: true
diff --git a/README b/README
index 7bc3388ce..1d55387ff 100644
--- a/README
+++ b/README
@@ -46,4 +46,5 @@ Want to help
* Wiki: http://bcfg2.org/wiki/Contribute
-Bcfg2 is licensed under BSD, for more details check COPYING.
+Bcfg2 is licensed under a Simplified (2-clause) BSD license, for more
+details check COPYRIGHT.
diff --git a/debian/bcfg2.cron.daily b/debian/bcfg2.cron.daily
index 92e8ff02c..b28b2062b 100644..100755
--- a/debian/bcfg2.cron.daily
+++ b/debian/bcfg2.cron.daily
@@ -1,3 +1,13 @@
#!/bin/sh
-[ -x /usr/lib/bcfg2/bcfg2-cron ] || exit 0
-/usr/lib/bcfg2/bcfg2-cron --daily > /dev/null 2>&1 || true
+BCFG2CRON=
+if [ -x /usr/libexec/bcfg2-cron ]; then
+ BCFG2CRON=/usr/libexec/bcfg2-cron
+elif [ -x /usr/lib/bcfg2/bcfg2-cron ]; then
+ BCFG2CRON=/usr/lib/bcfg2/bcfg2-cron
+elif type bcfg2-cron >/dev/null 2>&1; then
+ BCFG2CRON=bcfg2-cron
+else
+ echo "No bcfg2-cron command found"
+ exit 1
+fi
+$BCFG2CRON --daily 2>&1 | logger -t bcfg2-cron -p daemon.info
diff --git a/debian/bcfg2.cron.hourly b/debian/bcfg2.cron.hourly
index 1fdb9c30e..300792885 100644..100755
--- a/debian/bcfg2.cron.hourly
+++ b/debian/bcfg2.cron.hourly
@@ -1,3 +1,13 @@
#!/bin/sh
-[ -x /usr/lib/bcfg2/bcfg2-cron ] || exit 0
-/usr/lib/bcfg2/bcfg2-cron --hourly > /dev/null 2>&1 || true
+BCFG2CRON=
+if [ -x /usr/libexec/bcfg2-cron ]; then
+ BCFG2CRON=/usr/libexec/bcfg2-cron
+elif [ -x /usr/lib/bcfg2/bcfg2-cron ]; then
+ BCFG2CRON=/usr/lib/bcfg2/bcfg2-cron
+elif type bcfg2-cron >/dev/null 2>&1; then
+ BCFG2CRON=bcfg2-cron
+else
+ echo "No bcfg2-cron command found"
+ exit 1
+fi
+$BCFG2CRON --hourly 2>&1 | logger -t bcfg2-cron -p daemon.info
diff --git a/debian/changelog b/debian/changelog
index 2557dd66a..c77acc0e7 100644
--- a/debian/changelog
+++ b/debian/changelog
@@ -1,3 +1,9 @@
+bcfg2 (1.2.3-0.0) unstable; urgency=low
+
+ * New upstream release
+
+ -- Sol Jerome <sol.jerome@gmail.com> Tue, 03 Jul 2012 09:33:50 -0500
+
bcfg2 (1.2.2-0.0) unstable; urgency=low
* New upstream release
diff --git a/debian/control b/debian/control
index 7835334da..ae8b6a2d5 100644
--- a/debian/control
+++ b/debian/control
@@ -21,7 +21,7 @@ Description: Configuration management client
Package: bcfg2-server
Architecture: all
-Depends: ${python:Depends}, ${misc:Depends}, python-lxml (>= 0.9), libxml2-utils (>= 2.6.23), lsb-base (>= 3.1-9), ucf, bcfg2 (= ${binary:Version}), openssl, python-ssl | python2.6 | python3.0 | python3.1 | python3.2, python-gamin
+Depends: ${python:Depends}, ${misc:Depends}, python-lxml (>= 0.9), libxml2-utils (>= 2.6.23), lsb-base (>= 3.1-9), ucf, bcfg2 (= ${binary:Version}), openssl, python-ssl | python2.6 | python3.0 | python3.1 | python3.2, python-pyinotify | python-gamin
XB-Python-Version: >= 2.4
Recommends: graphviz, patch
Suggests: python-cheetah, python-genshi (>= 0.4.4), python-profiler, python-sqlalchemy (>= 0.5.0), python-django, mail-transport-agent, bcfg2-doc (= ${binary:Version})
diff --git a/doc/appendix/files/ntp.txt b/doc/appendix/files/ntp.txt
index ec1fa3094..e14816f6e 100644
--- a/doc/appendix/files/ntp.txt
+++ b/doc/appendix/files/ntp.txt
@@ -19,95 +19,92 @@ Package only
------------
Our example starts with the bare minimum configuration setup. We have
-a client, a profile group, a list of packages, and a base configuration.
+a client, a profile group, a list of packages, and an NTP bundle.
``Metadata/clients.xml``:
.. code-block:: xml
- <Clients version='3.0'>
- <Client profile='fedora' pingable='N' pingtime='0' name='foo.bar.com'/>
+ <Clients>
+ <Client profile='server' name='foo.bar.com'/>
</Clients>
``Metadata/groups.xml``:
.. code-block:: xml
- <Groups version='3.0'>
- <Group profile='true' name='fedora' toolset='rh'/>
+ <Groups>
+ <Group profile='true' name='server'>
+ <Bundle name="ntp"/>
+ </Group>
</Groups>
-``Base/base.xml``:
+``Bundler/ntp.xml``:
.. code-block:: xml
- <Base>
- <Group name='fedora'>
- <Package name='ntp'/>
- </Group>
- </Base>
+ <Bundle name="ntp">
+ <Package name='ntp'/>
+ </Bundle>
``Pkgmgr/packages.xml``:
.. code-block:: xml
<PackageList type='rpm' priority='0'>
- <Package name='ntp' version='4.2.0.a.20050816-11.FC5'/>
+ <Package name='ntp' version='4.2.0.a.20050816-11.FC5'/>
</PackageList>
+(This can also be performed more elegantly with the
+:ref:`server-plugins-generators-packages` plugin.)
+
Add service
-----------
-Configure the service, and add it to the base.
+Configure the service, and add it to Rules.
-``Svcmgr/services.xml``:
+``Rules/services.xml``:
.. code-block:: xml
<Services priority='0'>
- <Service name='ntpd' status='on'/>
+ <Service name='ntpd' status='on'/>
</Services>
-``Base/base.xml``:
+``Bundler/ntp.xml``:
.. code-block:: xml
- <Base>
- <Group name='fedora'>
- <Package name='ntp'/>
- <Service name='ntpd'/>
- </Group>
- </Base>
+ <Bundle name="ntp">
+ <Package name='ntp'/>
+ <Service name='ntpd'/>
+ </Bundle>
Add config file
---------------
-Setup an ``etc/`` directory structure, and add it to the base.::
+Set up an ``etc/`` directory structure, and add it to the bundle::
# cat Cfg/etc/ntp.conf/ntp.conf
server ntp1.utexas.edu
``Base/base.xml``:
+``Bundler/ntp.xml``:
+
.. code-block:: xml
- <Base>
- <Group name='fedora'>
- <Package name='ntp'/>
- <Service name='ntpd'/>
- <Path name='/etc/ntp.conf'/>
- </Group>
- </Base>
+ <Bundle name="ntp">
+ <Package name='ntp'/>
+ <Service name='ntpd'/>
+ <Path name='/etc/ntp.conf'/>
+ </Bundle>
Create a bundle
---------------
-The above configuration layout works fine for a single service, but
-that method of organization would quickly become a nightmare as you
-approach the number of packages, services, and config files required
-to represent a fully configured host. Bundles allow the grouping of
-related configuration entries that are used to provide a single
-service. This is done for several reasons:
+Bundles allow the grouping of related configuration entries that are
+used to provide a single service. This is done for several reasons:
* Grouping related things in one place makes it easier to add those
entries for multiple groups of clients
@@ -128,10 +125,10 @@ logically grouped together. We use a bundle to accomplish this.
.. code-block:: xml
- <Bundle name='ntp' version='2.0'>
- <Package name='ntp'/>
- <Service name='ntpd'/>
- <Path name='/etc/ntp.conf'/>
+ <Bundle name='ntp'>
+ <Package name='ntp'/>
+ <Service name='ntpd'/>
+ <Path name='/etc/ntp.conf'/>
</Bundle>
After this bundle is created, it must be associated with a group
@@ -143,15 +140,15 @@ install this bundle.
.. code-block:: xml
<Groups>
- ...
- <Group name='fedora'>
- <Bundle name='ntp'/>
- </Group>
- ...
+ ...
+ <Group profile='true' name='server'>
+ <Bundle name="ntp"/>
+ </Group>
+ ...
</Groups>
-Once this bundle is created, a client reconfigure will install
-these entries. If any are modified, then the *ntpd* service will
-be restarted. If you only want ntp configurations to be updated (and
+Once this bundle is created, a client reconfigure will install these
+entries. If any are modified, then the *ntpd* service will be
+restarted. If you only want ntp configurations to be updated (and
nothing else), the bcfg2 client can be run with a ``-b <bundle name>``
option that will only update entries in the specified bundle.
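
For reference, a hedged sketch of such a bundle-limited run follows; the ``ntp`` bundle name comes from the example above, while ``-v`` (verbose) and ``-n`` (dry run) are standard client flags::

    # preview the ntp bundle only, without changing anything
    bcfg2 -v -n -b ntp
    # apply the ntp bundle for real once the preview looks right
    bcfg2 -v -b ntp
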
diff --git a/doc/appendix/guides/authentication.txt b/doc/appendix/guides/authentication.txt
index dab122f80..68a232f6f 100644
--- a/doc/appendix/guides/authentication.txt
+++ b/doc/appendix/guides/authentication.txt
@@ -62,7 +62,7 @@ How Authentication Works
#. Next, the ip address is verified against the client record. If the
address doesn't match, then the client must be set to
- location=floating
+ floating='true'
#. Finally, the password is verified. If the client is set to secure
mode, the only its per-client password is accepted. If it is not set
diff --git a/doc/appendix/guides/fedora.txt b/doc/appendix/guides/fedora.txt
index f8dea2192..1e49084ef 100644
--- a/doc/appendix/guides/fedora.txt
+++ b/doc/appendix/guides/fedora.txt
@@ -143,18 +143,20 @@ The ``bcfg2.conf`` file contains only standard plugins so far.
[statistics]
sendmailpath = /usr/lib/sendmail
- database_engine = sqlite3
+
+ [database]
+ engine = sqlite3
# 'postgresql', 'mysql', 'mysql_old', 'sqlite3' or 'ado_mssql'.
- database_name =
+ name =
# Or path to database file if using sqlite3.
#<repository>/etc/brpt.sqlite is default path if left empty
- database_user =
+ user =
# Not used with sqlite3.
- database_password =
+ password =
# Not used with sqlite3.
- database_host =
+ host =
# Not used with sqlite3.
- database_port =
+ port =
[communication]
protocol = xmlrpc/ssl
diff --git a/doc/appendix/guides/nat_howto.txt b/doc/appendix/guides/nat_howto.txt
index 818d3e644..b3492e871 100644
--- a/doc/appendix/guides/nat_howto.txt
+++ b/doc/appendix/guides/nat_howto.txt
@@ -43,14 +43,14 @@ the Client entry in clients.xml will look something like this:
.. code-block:: xml
- <Client profile="desktop" name="test1" pingable="N"
- uuid='9001ec29-1531-4b16-8198-a71bea093d0a' location='floating'/>
+ <Client profile="desktop" name="test1"
+ uuid='9001ec29-1531-4b16-8198-a71bea093d0a' floating='true'/>
Alternatively, the Client entry can be setup like this:
.. code-block:: xml
- <Client profile="desktop" name="test1" pingable="N"
+ <Client profile="desktop" name="test1"
uuid='9001ec29-1531-4b16-8198-a71bea093d0a' address='ip-address-of-NAT'/>
The difference between these definitions is explained in detail in the
diff --git a/doc/appendix/guides/ubuntu.txt b/doc/appendix/guides/ubuntu.txt
index f72247220..5a67d0a37 100644
--- a/doc/appendix/guides/ubuntu.txt
+++ b/doc/appendix/guides/ubuntu.txt
@@ -121,18 +121,20 @@ Replace Pkgmgr with Packages in the plugins line of ``bcfg2.conf``::
[statistics]
sendmailpath = /usr/lib/sendmail
- database_engine = sqlite3
+
+ [database]
+ engine = sqlite3
# 'postgresql', 'mysql', 'mysql_old', 'sqlite3' or 'ado_mssql'.
- database_name =
+ name =
# Or path to database file if using sqlite3.
#<repository>/etc/brpt.sqlite is default path if left empty
- database_user =
+ user =
# Not used with sqlite3.
- database_password =
+ password =
# Not used with sqlite3.
- database_host =
+ host =
# Not used with sqlite3.
- database_port =
+ port =
[communication]
protocol = xmlrpc/ssl
diff --git a/doc/appendix/guides/web-reports-install.txt b/doc/appendix/guides/web-reports-install.txt
index c03682974..489a7673d 100644
--- a/doc/appendix/guides/web-reports-install.txt
+++ b/doc/appendix/guides/web-reports-install.txt
@@ -52,18 +52,18 @@ then have something like this::
[statistics]
sendmailpath = /usr/lib/sendmail
- database_engine = sqlite3
+
+ [database]
+ engine = sqlite3
# 'postgresql', 'mysql', 'mysql_old', 'sqlite3' or 'ado_mssql'.
- database_name =
- # Or path to database file if using sqlite3.
- #<repository>/etc/brpt.sqlite is default path if left empty
- database_user =
+ name =
+ user =
# Not used with sqlite3.
- database_password =
+ password =
# Not used with sqlite3.
- database_host =
+ host =
# Not used with sqlite3.
- database_port =
+ port =
Restart apache and point a browser to your Bcfg2 server.
diff --git a/doc/conf.py b/doc/conf.py
index 5903b009a..b3d6ade7a 100644
--- a/doc/conf.py
+++ b/doc/conf.py
@@ -55,7 +55,7 @@ else:
# The short X.Y version.
version = '1.2'
# The full version, including alpha/beta/rc tags.
-release = '1.2.2'
+release = '1.2.3'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
diff --git a/doc/development/client-driver.txt b/doc/development/client-driver.txt
index 32bb0aff4..c42d2b964 100644
--- a/doc/development/client-driver.txt
+++ b/doc/development/client-driver.txt
@@ -20,7 +20,7 @@ an existing driver, and the process that was used to create it.
* Otherwise, subclass ``Bcfg2.Client.Tools.Tool`` (from here referenced
as branch [T])
-#. Set ``__name__`` to "RPM"
+#. Set ``name`` to "RPM"
#. Add any required executable programs to ``__execs__``
#. Set ``__handles__`` to a list of (**entry.tag**, **entry.get('type')**)
tuples. This determines which entries the Tool module can be used
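
To make the checklist above concrete, here is a minimal sketch of a driver skeleton; only ``Bcfg2.Client.Tools.Tool``, ``name``, ``__execs__``, and ``__handles__`` come from the steps above, and the specific values are illustrative assumptions rather than the full driver::

    # Sketch of a skeletal client tool driver (values are examples only)
    import Bcfg2.Client.Tools

    class RPM(Bcfg2.Client.Tools.Tool):
        """Skeleton following the steps above."""
        name = 'RPM'                        # set ``name`` to "RPM"
        __execs__ = ['/bin/rpm']            # required executable programs
        __handles__ = [('Package', 'rpm')]  # (entry.tag, entry type) pairs
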
diff --git a/doc/reports/dynamic.txt b/doc/reports/dynamic.txt
index 8267bffe3..99cce43a2 100644
--- a/doc/reports/dynamic.txt
+++ b/doc/reports/dynamic.txt
@@ -120,19 +120,19 @@ Notes on Alternative Databases
------------------------------
If you choose to use a different database, you'll need to edit
-``/etc/bcfg2.conf``. These fields should be updated in the [statistics]
+``/etc/bcfg2.conf``. These fields should be updated in the [database]
section:
-* database_engine
+* engine
- * ex: database_engine = mysql
- * ex: database_engine = postgresql_psycopg2
+ * ex: engine = mysql
+ * ex: engine = postgresql_psycopg2
-* database_name
-* database_user
-* database_password
-* database_host
-* database_port (optional)
+* name
+* user
+* password
+* host
+* port (optional)
Summary and Features
====================
diff --git a/doc/server/backends.txt b/doc/server/backends.txt
new file mode 100644
index 000000000..71ecac10b
--- /dev/null
+++ b/doc/server/backends.txt
@@ -0,0 +1,44 @@
+.. -*- mode: rst -*-
+
+.. _server-backends:
+
+===============
+Server Backends
+===============
+
+.. versionadded:: 1.3.0
+
+Bcfg2 supports two different server backends: a builtin server
+based on the Python SimpleXMLRPCServer object, and a server that uses
+CherryPy (http://www.cherrypy.org). Each one has advantages and
+disadvantages.
+
+The builtin server:
+
+* Is very stable and mature;
+* Supports certificate authentication;
+* Works on Python 2.4;
+* Is slow with larger numbers of clients.
+
+The CherryPy server:
+
+* Is very new and potentially buggy;
+* Does not support certificate authentication, only password
+ authentication;
+* Requires CherryPy 3.2, which requires Python 2.5;
+* Is faster with large numbers of clients.
+
+Basically, the builtin server should be used unless you have a
+particular need for performance, and can sacrifice certificate
+authentication.
+
+To select which backend to use, set the ``backend`` option in the
+``[server]`` section of ``/etc/bcfg2.conf``. Options are:
+
+* ``cherrypy``
+* ``builtin``
+* ``best`` (the default; currently the same as ``builtin``)
+
+If the certificate authentication issues (a limitation in CherryPy
+itself) can be resolved and the CherryPy server proves to be stable,
+it will likely become the default (and ``best``) in a future release.
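
As a minimal sketch of the selection described above, choosing the CherryPy backend would mean adding one option to ``bcfg2.conf`` (``cherrypy`` is one of the three documented values)::

    [server]
    backend = cherrypy
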
diff --git a/doc/server/configurationentries.txt b/doc/server/configurationentries.txt
index 8e669b90a..fb1589926 100644
--- a/doc/server/configurationentries.txt
+++ b/doc/server/configurationentries.txt
@@ -1,138 +1,13 @@
.. -*- mode: rst -*-
-.. NOTE: these are relative links (change when directory structure
-.. changes)
-
-.. _Base: plugins/structures/base
-.. _Bundler: plugins/structures/bundler
-.. _Cfg: plugins/generators/cfg.html
-.. _TGenshi: plugins/generators/tgenshi
-.. _TCheetah: plugins/generators/tcheetah.html
-.. _Rules: plugins/generators/rules.html
-
.. _server-configurationentries:
=====================
Configuration Entries
=====================
-This page describes the names and semantics of each of the configuration
-entries used by Bcfg2.
-
-Non-POSIX entries
-=================
-
-+-------------+---------------------+--------------------------------------------+
-| TagName | Description | Attributes |
-+=============+=====================+============================================+
-| Action | Command | name, command, when, timing, status, build |
-+-------------+---------------------+--------------------------------------------+
-| Package | Software Packages | name, type, version, url |
-+-------------+---------------------+--------------------------------------------+
-| PostInstall | PostInstall command | name |
-+-------------+---------------------+--------------------------------------------+
-| Service | System Services | name, type, status, target |
-+-------------+---------------------+--------------------------------------------+
-
-.. note::
-
- PostInstall entries are deprecated in favor of Action entries. In
- fact, a PostInstall entry is simply a specific type of Action.
- Basically, the following are equivalent:
-
- .. code-block:: xml
-
- <PostInstall name='foo'/>
-
- and
-
- .. code-block:: xml
-
- <Action timing='post' when='modified' name='n' command='foo' status='ignore'/>
-
-POSIX entries
-=============
-
-.. versionadded:: 1.0.0
-
-The unified POSIX Path entries prevent inconsistent configuration
-specifications of multiple entries for a given path. The following table
-describes the various types available for new **Path** entries.
-
-The abstract specification of these entries (i.e. In `Bundler`_)
-will only contain a *name* attribute. The type will be added by the
-plugin that handles the entry in the case of `Cfg`_, `TGenshi`_, or
-`TCheetah`_. If the entry is handled by the `Rules`_ plugin (i.e. it is
-a device, directory, hardlink, symlink, etc), then you will specify both
-the *type* and any other necessary attributes in `Rules`_.
-
-Running ``bcfg2-lint`` will check your configuration specification for
-the presence of any mandatory attributes that are necessary for the
-Path type specified.
-
-.. note:: A tool for converting old POSIX entries is available in the
- Bcfg2 source directory at tools/posixunified.py
-
-+-------------+----------------------+-----------------+--------------------------+
-| Type | Replacement/New | Description | Attributes |
-+=============+======================+=================+==========================+
-| device | New | Create block, | name, owner, group, |
-| | | character, and | dev_type |
-| | | fifo devices | (block, char, fifo), |
-| | | | major/minor |
-| | | | (for block/char devices) |
-+-------------+----------------------+-----------------+--------------------------+
-| directory | Replaces Directory | Directories | name, owner, group, |
-| | entries | | perms, prune |
-+-------------+----------------------+-----------------+--------------------------+
-| file | Replaces ConfigFile | Configuration | name, owner, group, |
-| | entries | File | perms, encoding, empty |
-| | | | |
-| | | | **Note:** see below |
-+-------------+----------------------+-----------------+--------------------------+
-| hardlink | New | Create | name, to |
-| | | hardlinks | |
-+-------------+----------------------+-----------------+--------------------------+
-| symlink | Replaces SymLink | SymLinks | name, to |
-| | entries | | |
-+-------------+----------------------+-----------------+--------------------------+
-| ignore | New | Ignore files | name |
-| | | that cause | |
-| | | package | |
-| | | verification | |
-| | | failures | |
-| | | (currently | |
-| | | applies to only | |
-| | | APT and YUMng) | |
-+-------------+----------------------+-----------------+--------------------------+
-| nonexistent | New | Specify a path | name, recursive |
-| | | that should not | |
-| | | exist | |
-+-------------+----------------------+-----------------+--------------------------+
-| permissions | Replaces Permissions | Permissions of | name, owner, group, |
-| | entries | POSIX entities | perms, recursive |
-| | | | |
-+-------------+----------------------+-----------------+--------------------------+
-| vcs | New | Create version | vcstype (git), |
-| | | control | sourceurl, revision |
-| | | checkout | |
-+-------------+----------------------+-----------------+--------------------------+
-
-Keep in mind that permissions for files handled by Cfg/TGenshi/TCheetah
-are still handled via the traditional :ref:`server-info` mechanisms.
-
-Additional information
-----------------------
-
-This section describes some additional behavior relating to POSIX entry
-attributes.
-
-Recursive permissions
-^^^^^^^^^^^^^^^^^^^^^
-
-As per the request in ticket 871, Path type='permissions' entries allow you to
-set a recursive attribute which allows the owner/group to be set recursively
-for a directory.
+The full semantics of each configuration entry is documented with the
+:ref:`server-plugins-generators-rules` plugin.
.. _boundentries:
@@ -178,13 +53,14 @@ Use Cases
Examples
--------
-* Consider the case of ``/etc/hosts`` on linux and ``/etc/inet/hosts`` on
- solaris. These files contain the same data in the same format,
+* Consider the case of ``/etc/hosts`` on linux and ``/etc/inet/hosts``
+ on solaris. These files contain the same data in the same format,
and should typically be synchronized, however, exist in different
locations. Classically, one would need to create one entry for each
- in `Cfg`_ or `TCheetah`_ and perform manual synchronization. Or,
- you could use symlinks and pray. Altsrc is driven from the bundle
- side. For example:
+ in :ref:`server-plugins-generators-cfg` or
+ :ref:`server-plugins-generators-tcheetah` and perform manual
+ synchronization. Or, you could use symlinks and pray. Altsrc is
+ driven from the bundle side. For example:
.. code-block:: xml
@@ -220,10 +96,12 @@ Examples
named "openssl" with different types.
* Finally, consider the case where there exist complicated, but
- completely independent specifications for the same configuration entry
- but different groups of clients. The following bundle will allow the use
- of two different `TCheetah`_ templates ``/etc/firewall-rules-external``
- and ``/etc/firewall-rules-internal`` for different clients based on
+ completely independent specifications for the same configuration
+ entry but different groups of clients. The following bundle will
+ allow the use of two different
+ :ref:`server-plugins-generators-tcheetah` templates
+ ``/etc/firewall-rules-external`` and
+ ``/etc/firewall-rules-internal`` for different clients based on
their group membership.
.. code-block:: xml
@@ -239,11 +117,13 @@ Examples
</Bundle>
* Consider the case where a variety of files can be constructed by a
- single template (`TCheetah`_ or `TGenshi`_). It would be possible to
- copy this template into the proper location for each file, but that
- requires proper synchronization upon modification and knowing up front
- what the files will all be called. Instead, the following bundle allows
- the use of a single template for all proper config file instances.
+ single template (:ref:`server-plugins-generators-tcheetah` or
+ :ref:`server-plugins-generators-tgenshi-index`). It would be
+ possible to copy this template into the proper location for each
+ file, but that requires proper synchronization upon modification and
+ knowing up front what the files will all be called. Instead, the
+ following bundle allows the use of a single template for all proper
+ config file instances.
.. code-block:: xml
@@ -253,5 +133,6 @@ Examples
<Path name='/etc/sysconfig/network-scripts/ifcfg-eth2' altsrc='/etc/ifcfg-template'/>
</Bundle>
- altsrc can be used as a parameter for any entry type, and can be used
- in any structure, including `Bundler`_ and `Base`_.
+ altsrc can be used as a parameter for any entry type, and can be
+ used in any structure, including
+ :ref:`server-plugins-structures-bundler-index`.
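
To ground the first use case above, a hedged sketch of an altsrc-driven bundle follows; the group names and the ``hosts`` bundle name are assumptions for the example::

    <Bundle name='hosts'>
      <Group name='solaris'>
        <Path name='/etc/inet/hosts' altsrc='/etc/hosts'/>
      </Group>
      <Group name='linux'>
        <Path name='/etc/hosts'/>
      </Group>
    </Bundle>

With this in place, both platforms are served the same ``/etc/hosts`` data, but Solaris clients install it at ``/etc/inet/hosts``.
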
diff --git a/doc/server/database.txt b/doc/server/database.txt
new file mode 100644
index 000000000..70dc43319
--- /dev/null
+++ b/doc/server/database.txt
@@ -0,0 +1,53 @@
+.. -*- mode: rst -*-
+
+.. _server-database:
+
+========================
+Global Database Settings
+========================
+
+.. versionadded:: 1.3.0
+
+Several Bcfg2 plugins, including
+:ref:`server-plugins-grouping-metadata` and
+:ref:`server-plugins-probes-index`, can use a relational
+database to store data. They use the global database settings in
+``bcfg2.conf``, described in this document, to connect.
+
+.. note::
+
+ The :ref:`server-plugins-statistics-dbstats` plugin and the
+ :ref:`reports-dynamic` do *not* currently use the global database
+ settings. They use their own separate database configuration.
+
+Configuration Options
+=====================
+
+All of the following options should go in the ``[database]`` section
+of ``/etc/bcfg2.conf``.
+
++-------------+------------------------------------------------------------+-------------------------------+
+| Option name | Description | Default |
++=============+============================================================+===============================+
+| engine | The full name of the Django database backend to use. See | "django.db.backends.sqlite3" |
+| | https://docs.djangoproject.com/en/dev/ref/settings/#engine | |
+| | for available options | |
++-------------+------------------------------------------------------------+-------------------------------+
+| name | The name of the database | "/var/lib/bcfg2/bcfg2.sqlite" |
++-------------+------------------------------------------------------------+-------------------------------+
+| user | The user to connect to the database as | None |
++-------------+------------------------------------------------------------+-------------------------------+
+| password | The password to connect to the database with | None |
++-------------+------------------------------------------------------------+-------------------------------+
+| host | The host to connect to | "localhost" |
++-------------+------------------------------------------------------------+-------------------------------+
+| port | The port to connect to | None |
++-------------+------------------------------------------------------------+-------------------------------+
+
+Database Schema Sync
+====================
+
+After making changes to the configuration options or adding a plugin
+that uses the global database, you should run ``bcfg2-admin syncdb``
+to resync the database schema.
+
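
Tying the options above together, a hypothetical ``[database]`` section for a PostgreSQL setup might look like the following (the database name, credentials, and host are invented for the example)::

    [database]
    engine = django.db.backends.postgresql_psycopg2
    name = bcfg2
    user = bcfg2
    password = s3cr3t
    host = db.example.com
    port = 5432

After editing these options, run ``bcfg2-admin syncdb`` as described above.
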
diff --git a/doc/server/index.txt b/doc/server/index.txt
index 9c427a0f4..1b832dbee 100644
--- a/doc/server/index.txt
+++ b/doc/server/index.txt
@@ -28,3 +28,6 @@ clients.
info
snapshots/index
bcfg2-info
+ selinux
+ backends
+ database
diff --git a/doc/server/info.txt b/doc/server/info.txt
index d949aab68..5c2279b73 100644
--- a/doc/server/info.txt
+++ b/doc/server/info.txt
@@ -1,8 +1,5 @@
.. -*- mode: rst -*-
-.. NOTE: these are relative links (change when directory structure
-.. changes)
-
.. _server-info:
====
@@ -14,84 +11,74 @@ Various file properties for entries served by the :ref:`Cfg
<server-plugins-generators-tgenshi-index>`, :ref:`TCheetah
<server-plugins-generators-tcheetah>`, and :ref:`SSHbase
<server-plugins-generators-sshbase>` plugins are controlled through
-the use of ``:info``, ``info``, or ``info.xml`` files.
+the use of ``info.xml`` files.
+
+By default, these plugins are set to write files to the filesystem
+with owner **root**, group **root**, and mode **644** (read and write
+for owner, read only for group and other). These options, and a few
+others, can be overridden through use of ``info.xml`` files. Each
+config file directory can have an ``info.xml`` file if needed.
-By default, these plugins are set to write files to the filesystem with
-owner **root**, group **root**, and mode **644** (read and write for
-owner, read only for group and other). These options, and a few others,
-can be overridden through use of ``:info`` or ``info`` files. Each config
-file directory can have a ``:info`` or ``info`` file if needed. The
-possible fields in an info file are:
+An ``info.xml`` file consists of a ``<FileInfo>`` tag containing an
+``<Info>`` tag; the following attributes are allowed on the ``<Info>`` tag:
+------------+-------------------+----------------------------------+---------+
| Field | Possible values | Description | Default |
+============+===================+==================================+=========+
-| encoding: | ascii | base64 | Encoding of the file. Use | ascii |
+| encoding | ascii | base64 | Encoding of the file. Use | ascii |
| | | base64 for binary files | |
+------------+-------------------+----------------------------------+---------+
-| group: | Any valid group | Sets group of the file | root |
+| owner | Any valid user | Sets owner of the file | root |
+------------+-------------------+----------------------------------+---------+
-| important: | true | false | Important entries are | false |
-| | | installed first during client | |
-| | | execution | |
+| group | Any valid group | Sets group of the file | root |
++------------+-------------------+----------------------------------+---------+
+| perms | Numeric file mode | Sets the permissions of the file | 0644 |
+| | | 'inherit' | (or inherits from the files on | |
+| | | disk if set to 'inherit') | |
+------------+-------------------+----------------------------------+---------+
-| owner: | Any valid user | Sets owner of the file | root |
+| secontext | A valid SELinux | Sets the SELinux context of the | default |
+| | context string or | file, or sets to the default | |
+| | '__default__' | context set by policy if set to | |
+| | | '__default__' | |
+------------+-------------------+----------------------------------+---------+
-| paranoid: | true | false | Backup file before replacement? | true |
+| important | true | false | Important entries are | false |
+| | | installed first during client | |
+| | | execution | |
+------------+-------------------+----------------------------------+---------+
-| perms: | Numeric file mode | Sets the permissions of the file | 0644 |
-| | | 'inherit' | (or inherits from the files on | |
-| | | disk if set to inherit) | |
+| paranoid | true | false | Backup file before replacement? | true |
+------------+-------------------+----------------------------------+---------+
-| sensitive: | true | false | The contents of sensitive | false |
+| sensitive | true | false | The contents of sensitive | false |
| | | entries aren't included in | |
| | | reports | |
+------------+-------------------+----------------------------------+---------+
-A sample info file for CGI script on a web server might look like::
+A sample ``info.xml`` file for a CGI script on a web server might look like:
- owner: www
- group: www
- perms: 0755
+.. code-block:: xml
+
+ <FileInfo>
+ <Info owner="www" group="www" perms="0755"/>
+ </FileInfo>
Back to the fstab example again, our final ``Cfg/etc/fstab/`` directory
might look like::
- :info
+ info.xml
fstab
fstab.G50_server
fstab.G99_fileserver
fstab.H_host.example.com
-Important attribute
-===================
+See :ref:`server-selinux` for more information on the ``secontext``
+attribute and managing SELinux in general.
-.. versionadded:: 1.1.0
-
-Having important entries hardcoded into the various client tools has
-worked relatively well so far. However, this method allows for a bit
-more flexibility as the entries can be controlled via the configuration
-specification.
-
-+------------+-------------------+----------------------------------+---------+
-| Field | Possible values | Description | Default |
-+============+===================+==================================+=========+
-| important: | true | false | Important entries are | root |
-| | | installed first during client | |
-| | | execution | |
-+------------+-------------------+----------------------------------+---------+
-
-.. _server-info-info-xml:
-
-info.xml files
-==============
-
-``info.xml`` files add the ability to specify different sets of file
-metadata on a group by group or host by host basis, or by path (for
-files using :ref:`altsrc <server-plugins-structures-altsrc>`). These
-files are XML, and work similarly to those used by :ref:`Rules
-<server-plugins-generators-rules>` or :ref:`Pkgmgr
-<server-plugins-generators-pkgmgr>`.
+``info.xml`` files also have the ability to specify different sets of
+file metadata on a group by group or host by host basis, or by path
+(for files using :ref:`altsrc
+<server-plugins-structures-altsrc>`). These files are XML, and work
+similarly to those used by :ref:`Rules
+<server-plugins-generators-rules>` or :ref:`Bundler
+<server-plugins-structures-bundler-index>`.
The following specifies a different global set of permissions
(root/sys/0651) than on clients in group webserver or named
@@ -120,3 +107,34 @@ of permissions depending on the path of the file::
<Info owner="root" group="root" perms="0600"/>
</Path>
</FileInfo>
+
+.. versionadded:: 1.3.0
+
+You can also specify ACLs as children of ``<Info>`` tags in
+``info.xml``. See :ref:`server-plugins-generators-rules-acls` for
+more information on the formatting of ACL tags.
+
+:info and info files
+====================
+
+Historically, Bcfg2 also accepted the use of ``:info`` and ``info``
+files, which function the same as ``info.xml``, but are not XML. They
+lack the ability to specify different permissions based on client,
+group, or path, and cannot be used to specify ACLs, either.
+
+.. note::
+
+ ``:info`` and ``info`` files are deprecated and will be removed in
+ a future release.
+
+An example ``:info`` or ``info`` file would look like::
+
+ owner: www
+ group: www
+ perms: 0755
+
+All attributes allowed on the ``<Info>`` tag of an ``info.xml`` file
+can be used in an ``:info`` or ``info`` file.
+
+You should not use more than one ``:info``, ``info``, or ``info.xml``
+file for a single entry.
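
To illustrate the ACL support mentioned above, a rough sketch of an ``info.xml`` carrying an ACL child on its ``<Info>`` tag might look like this; the exact ``ACL`` attribute names are defined by the Rules ACL documentation referenced earlier, so treat them as an assumption here::

    <FileInfo>
      <Info owner="root" group="webteam" perms="0770">
        <ACL type="access" scope="user" user="www-data" perms="rx"/>
      </Info>
    </FileInfo>
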
diff --git a/doc/server/plugins/connectors/properties.txt b/doc/server/plugins/connectors/properties.txt
index 7695e902c..b56c2a488 100644
--- a/doc/server/plugins/connectors/properties.txt
+++ b/doc/server/plugins/connectors/properties.txt
@@ -33,25 +33,95 @@ be checked when running ``bcfg2-lint``. For instance, given::
``dns-config.xml`` will be validated against ``dns-config.xsd``.
+Although Properties files are technically freeform XML, the top-level
+XML tag should be ``<Properties>``.
+
Usage
=====
-Specific property files can be referred to in
-templates as ``metadata.Properties[<filename>]``. The
-``xdata`` attribute is an LXML element object. (Documented
-`here <http://codespeak.net/lxml/tutorial.html#the-element-class>`_)
+Specific property files can be referred to in templates as
+``metadata.Properties[<filename>]``. The ``xdata`` attribute is an
+lxml.etree._Element object. (Documented `here
+<http://codespeak.net/lxml/tutorial.html#the-element-class>`_)
+
+In addition to the ``xdata`` attribute that can be used to access the
+raw data, the following access methods are defined:
-Currently, only one access method is defined for this data, ``Match``.
-``Match`` parses the Group and Client tags in the file and returns a
-list of elements that apply to the client described by a set of
-metadata. (See :ref:`server-plugins-structures-bundler-index` for
-more details on how Group and Client tags are parsed.) For instance::
+* ``Match()`` parses the Group and Client tags in the file and returns
+ a list of elements that apply to the client described by a set of
+ metadata. For instance::
{% python
ntp_servers = [el.text
- for el in metadata.Properties['ntp.xml'].Match(metadata):
+ for el in metadata.Properties['ntp.xml'].Match(metadata)
if el.tag == "Server"]
%}
+* ``XMLMatch()`` parses the Group and Client tags in the file and
+ returns an XML document containing only the data that applies to the
+ client described by a set of metadata. (The Group and Client tags
+ themselves are also removed, leaving only the tags and data
+ contained in them.) For instance::
+
+ {% python
+ ntp_servers = [el.text
+ for el in metadata.Properties['ntp.xml'].XMLMatch(metadata).findall("//Server")]
+ %}
+
+As we formulate more common use cases, we will add them to the
+``PropertyFile`` class as methods. This will simplify templates.
+
+You can also access the XML data that comprises a property file
+directly in one of several ways:
+
+* ``metadata.Properties['prop-file'].xdata`` is an lxml.etree._Element
+ object representing the top-level element in the file.
+* ``metadata.Properties['prop-file'].data`` is the raw contents of the
+ property file as a string.
+* ``metadata.Properties['prop-file'].entries`` is a list of
+ lxml.etree._Element objects representing the direct children of the
+ top-level element. (I.e., everything directly under the
+ ``<Properties>`` tag.)
+
+.. _server-plugins-connectors-properties-automatch:
+
+Automatch
+=========
+
+.. versionadded:: 1.3.0
+
+You can enable ``XMLMatch()`` for all Property files by setting
+``automatch`` to ``true`` in the ``[properties]`` section of
+``bcfg2.conf``. This makes ``metadata.Properties`` values
+lxml.etree._Element objects that contain only matching data. (This
+makes it impossible to do
+:ref:`server-plugins-connectors-properties-write-back` as a
+side-effect.)
+
+In Python terms, setting ``automatch=true`` is the same as doing the
+following at the top of each template::
+
+ {% python
+ for prop in metadata.Properties.values():
+ prop = prop.XMLMatch(metadata)
+ %}
+
+The example above that describes ``XMLMatch()`` would then become
+simply::
+
+ {% python
+ ntp_servers = [el.text
+ for el in metadata.Properties['ntp.xml'].findall("//Server")]
+ %}
+
+You can also enable automatch for individual Property files by setting
+the attribute ``automatch="true"`` in the top-level ``<Property>`` tag.
+
+.. _server-plugins-connectors-properties-write-back:
+
+Writing to Properties files
+===========================
+
+.. versionadded:: 1.2.0
If you need to make persistent changes to properties data, you can use
the ``write`` method of the ``PropertyFile`` class::
@@ -68,20 +138,82 @@ the ``write`` method of the ``PropertyFile`` class::
The ``write`` method checks the data in the object against its schema
before writing it; see `Data Structures`_ for details.
-As we formulate more common use cases, we will add them to the
-``PropertyFile`` class as methods. This will simplify templates.
+Note that use of the ``write`` method can cause race conditions if you
+run more than one Bcfg2 server. In that case, you can disable
+Properties write-back entirely by setting the following
+in ``bcfg2.conf``::
-You can also access the XML data that comprises a property file
-directly in one of several ways:
+ [properties]
+ writes_enabled = false
-* ``metadata.Properties['prop-file'].xdata`` is an lxml.etree._Element
- object representing the top-level element in the file.
-* ``metadata.Properties['prop-file'].data`` is the raw contents of the
- property file as a string.
-* ``metadata.Properties['prop-file'].entries`` is a list of
- lxml.etree._Element objects representing the direct children of the
- top-level element. (I.e., everything directly under the
- ``<Properties>`` tag.)
+.. _server-plugins-connectors-properties-encrypted:
+
+Encrypted Properties data
+=========================
+
+.. versionadded:: 1.3.0
+
+You can encrypt selected data in Properties files to protect that data
+from other people who need access to the repository. See
+:ref:`server-plugins-generators-cfg-configuring-encryption` for
+details on configuring encryption passphrases. The data is decrypted
+transparently on-the-fly by the server; you never need to decrypt the
+data in your templates.
+
+.. note::
+
+ This feature is *not* intended to secure the files against a
+ malicious attacker who has gained access to your Bcfg2 server, as
+ the encryption passphrases are held in plaintext in
+ ``bcfg2.conf``. This is only intended to make it easier to use a
+ single Bcfg2 repository with multiple admins who should not
+ necessarily have access to each other's sensitive data.
+
+Properties files are encrypted on a per-element basis; that is, rather
+than encrypting the whole file, only the character content of
+individual elements is encrypted. This makes it easier to track
+changes to the file in a VCS, and also lets unprivileged users work
+with the other data in the file. Only character content of an element
+can be encrypted; attribute content and XML elements themselves cannot
+be encrypted.
+
+To encrypt a file, use ``bcfg2-crypt``, e.g.::
+
+ bcfg2-crypt foo.xml
+
+If the top-level tag of a Properties file is not ``<Properties>``,
+then you need to use the ``--properties`` flag to ``bcfg2-crypt``::
+
+ bcfg2-crypt --properties foo.xml
+
+The first time you run ``bcfg2-crypt`` on a Properties file, it will
+encrypt all character data of all elements. Additionally, it will add
+``encrypted="<key name>"`` to each element that has encrypted character
+data. It also adds ``encryption="true"`` to the top-level
+``<Properties>`` tag as a flag to the server that it should try to
+decrypt the data in that file. (If you are using Properties schemas,
+you will need to make sure to add support for these attributes.) On
+subsequent runs, only those elements flagged with ``encrypted="*"``
+are encrypted or decrypted.
+
+To decrypt a Properties file, simply re-run ``bcfg2-crypt``::
+
+ bcfg2-crypt foo.xml
+
+This decrypts the encrypted elements, but it does *not* remove the
+``encrypted`` attribute; this way, you can decrypt a Properties
+file, modify the contents, and then simply re-run ``bcfg2-crypt`` to
+encrypt it again. If you added elements that you also want to be
+encrypted, you can either add the ``encrypted`` attribute to
+them manually, or run::
+
+ bcfg2-crypt --xpath '*' foo.xml
+
+You can also use the ``--xpath`` option to specify more restrictive
+XPath expressions to only encrypt a subset of elements, or to encrypt
+different elements with different passphrases. Alternatively, you can
+manually set the ``encrypted`` attribute on various elements and
+``bcfg2-crypt`` will automatically do the right thing.
Accessing Properties contents from TGenshi
==========================================
@@ -89,3 +221,21 @@ Accessing Properties contents from TGenshi
Access contents of ``Properties/auth.xml``::
${metadata.Properties['auth.xml'].xdata.find('file').find('bcfg2.key').text}
+
+Configuration
+=============
+
+``bcfg2.conf`` contains several miscellaneous configuration options
+for the Properties plugin, which can be set in the ``[properties]``
+section. Any booleans in the config file accept the values "1", "yes",
+"true", and "on" for True, and "0", "no", "false", and "off" for
+False.
+
+It understands the following directives:
+
+* ``automatch``: Enable
+ :ref:`server-plugins-connectors-properties-automatch`. Default is
+ false.
+* ``writes_enabled``: Enable
+ :ref:`server-plugins-connectors-properties-write-back`. Default is
+ true.
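+
+For example, a hypothetical ``[properties]`` section that enables
+automatch and disables write-back would look like this::
+
+    [properties]
+    automatch = yes
+    writes_enabled = no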
diff --git a/doc/server/plugins/connectors/puppetenc.txt b/doc/server/plugins/connectors/puppetenc.txt
new file mode 100644
index 000000000..dc472c546
--- /dev/null
+++ b/doc/server/plugins/connectors/puppetenc.txt
@@ -0,0 +1,123 @@
+.. -*- mode: rst -*-
+
+.. _server-plugins-connectors-puppetenc:
+
+=========
+PuppetENC
+=========
+
+PuppetENC is a connector plugin that adds support for Puppet External
+Node Classifiers
+(`<http://docs.puppetlabs.com/guides/external_nodes.html>`_), or ENCs.
+
+Output Format
+=============
+
+The PuppetENC plugin implements the Puppet 2.6.5+ ENC output format
+with some modifications. The basic output format is described `here
+<http://docs.puppetlabs.com/guides/external_nodes.html#puppet-265-and-higher>`_.
+The following modifications apply:
+
+* ``classes`` are considered to be Bcfg2 groups. (This is basically
+ just a difference in terminology between Puppet and Bcfg2; Bcfg2
+ calls "groups" what Puppet calls "classes.")
+* As an alternative to the Puppet-specific ``classes`` value, you may
+ use ``groups`` if you are writing an ENC from scratch specifically
+ for Bcfg2.
+* Since Bcfg2 does not have the notion of parameterized classes, any
+ class parameters provided will be merged in with the ``parameters``
+ dict.
+* ``parameters`` are presented as connector data. (See Usage
+ below.)
+* The ``environment`` value is not supported. If present, PuppetENC
+ will issue a warning and skip it.
+
+The ``parameters`` from separate ENCs are all merged together,
+including parameters from any parameterized classes. This is a
+shallow merge; in other words, only the top-level keys are
+considered. For instance, assuming you had one ENC that produced::
+
+ parameters:
+ ntp_servers:
+ - 0.pool.ntp.org
+ - ntp1.example.com
+
+And another that produced::
+
+ parameters:
+ ntp_servers:
+ - ntp2.example.com
+
+This would result in connector data that included *either* the first
+value of ``ntp_servers`` *or* the second, but not both; this would
+depend on the order in which the ENCs were run, which is
+non-deterministic and should not be relied upon. However, if you had
+one ENC that produced::
+
+ parameters:
+ ntp_servers:
+ - 0.pool.ntp.org
+ - ntp1.example.com
+
+And another that produced::
+
+ parameters:
+ mail_servers:
+ - mail.example.com
+
+Then the connector data would consist of::
+
+ {"ntp_servers": ["0.pool.ntp.org", "ntp1.example.com"],
+ "mail_servers": ["mail.example.com"]}
+
+Usage
+=====
+
+To use the PuppetENC plugin, first do ``mkdir
+/var/lib/bcfg2/PuppetENC``. Add ``PuppetENC`` to your ``plugins``
+line in ``/etc/bcfg2.conf``. Now you can place any ENCs you wish to
+run in ``/var/lib/bcfg2/PuppetENC``. Note that ENCs are run each time
+client metadata is generated, so if you have a large number of ENCs or
+ENCs that are very time-consuming, they could have a significant
+impact on server performance. In that case, it could be worthwhile to
+write a dedicated Connector plugin.
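+
+At its simplest, an ENC is just an executable that prints YAML to
+standard output. A hypothetical static ENC (the file name, groups,
+and parameter are purely illustrative) might look like this::
+
+    #!/bin/sh
+    # /var/lib/bcfg2/PuppetENC/static-groups
+    # Assign two groups and one parameter to every client
+    cat <<EOF
+    ---
+    groups:
+      - basic
+      - ntp-client
+    parameters:
+      ntp_server: 0.pool.ntp.org
+    EOF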
+
+PuppetENC parameters can be accessed in templates as
+``metadata.PuppetENC``, which is a dict of all parameter data merged
+together. For instance, given the following ENC output::
+
+ ---
+ classes:
+ common:
+ puppet:
+ ntp:
+ ntpserver: 0.pool.ntp.org
+ aptsetup:
+ additional_apt_repos:
+ - deb localrepo.example.com/ubuntu lucid production
+ - deb localrepo.example.com/ubuntu lucid vendor
+ parameters:
+ ntp_servers:
+ - 0.pool.ntp.org
+ - ntp.example.com
+ mail_server: mail.example.com
+ iburst: true
+ environment: production
+
+``metadata.PuppetENC`` would contain::
+
+ {'additional_apt_repos': ['deb localrepo.example.com/ubuntu lucid production',
+ 'deb localrepo.example.com/ubuntu lucid vendor'],
+ 'iburst': True,
+ 'mail_server': 'mail.example.com',
+ 'ntp_servers': ['0.pool.ntp.org', 'ntp.example.com'],
+ 'ntpserver': '0.pool.ntp.org'}
+
+(Note that the duplication of NTP server data doesn't make this an
+especially *good* example; it's just the official Puppet example.)
+
+So, in a template you could do something like::
+
+ {% for repo in metadata.PuppetENC['additional_apt_repos'] %}\
+ ${repo}
+ {% end %}\
diff --git a/doc/server/plugins/connectors/templatehelper.txt b/doc/server/plugins/connectors/templatehelper.txt
index 24d7f18b5..374aeb171 100644
--- a/doc/server/plugins/connectors/templatehelper.txt
+++ b/doc/server/plugins/connectors/templatehelper.txt
@@ -23,9 +23,8 @@ will be read and added to matching client metadata objects. See
:ref:`writing-templatehelpers` below for more information on how to
write TemplateHelper scripts.
-TemplateHelper supports group- and host-specific helpers, so you could
-create, e.g., ``foo.py.G80_test`` to create a helper that only applied
-to machines in the group ``test``.
+TemplateHelper does not support group- or host-specific helpers. All
+helpers will be available to all clients.
.. _writing-templatehelpers:
@@ -34,15 +33,14 @@ Writing Helpers
A helper module is just a Python module with three special conditions:
-* The filename must end with ``.py`` (before any specificity
- strings, e.g., ``.G80_foo`` or ``.H_blah.example.com``
+* The filename must end with ``.py``
* The module must have an attribute, ``__export__``, that lists all of
the classes, functions, variables, or other symbols you wish to
export from the module.
-* ``data``, ``handle_event``, ``name``, and ``specific`` are reserved
- names. You should not include symbols with a reserved name in
- ``__export__``. Additionally, including symbols that start with an
- underscore or double underscore is bad form, and may also produce
+* ``data``, ``name``, ``fam``, ``Index``, and ``HandleEvent`` are
+ reserved names. You should not include symbols with a reserved name
+ in ``__export__``. Additionally, including symbols that start with
+ an underscore or double underscore is bad form, and may also produce
errors.
See ``examples/TemplateHelper`` for examples of helper modules.
@@ -60,7 +58,7 @@ a HelperModule object will has, as attributes, all symbols listed in
def hello(metadata):
return "Hello, %s!" % metadata.hostname
-To use this in a TGenshi template, we could do::
+To use this in a Genshi template, we could do::
${metadata.TemplateHelper['hello'].hello(metadata)}
diff --git a/doc/server/plugins/generators/cfg.txt b/doc/server/plugins/generators/cfg.txt
index ba02b929d..6c848fddb 100644
--- a/doc/server/plugins/generators/cfg.txt
+++ b/doc/server/plugins/generators/cfg.txt
@@ -35,24 +35,24 @@ templating -- see below).
Group-Specific Files
====================
-It is often that you want one version of a config file for all of your
-machines except those in a particular group. For example, ``/etc/fstab``
-should look alike on all of your desktop machines, but should be
-different on your file servers. Bcfg2 can handle this case through use
-of group-specific files.
+It is often the case that you want one version of a config file for
+all of your machines except those in a particular group. For example,
+``/etc/fstab`` should look alike on all of your desktop machines, but
+should be different on your file servers. Bcfg2 can handle this case
+through use of group-specific files.
As mentioned above, all Cfg entries live in like-named directories
at the end of their directory tree. In the case of fstab, the file at
``Cfg/etc/fstab/fstab`` will be handed out by default to any client that
asks for a copy of ``/etc/fstab``. Group-specific files are located in
-the same directory and are named with the syntax::
+the same directory and are named with the following syntax::
/path/to/filename/filename.GNN_groupname
-in which **NN** is a priority number where **00** is lowest and
-**99** is highest, and **groupname** is the name of a group defined in
+**NN** is a priority number where **00** is lowest and **99**
+is highest, and **groupname** is the name of a group defined in
``Metadata/groups.xml``. Back to our fstab example, we might have a
-``Cfg/etc/fstab/`` directory that looks like::
+``Cfg/etc/fstab/`` directory that looks like this::
fstab
fstab.G50_server
@@ -139,6 +139,99 @@ using different host-specific or group-specific files. For example:
Cfg/etc/fstab/fstab.H_host.example.com.genshi
Cfg/etc/fstab/fstab.G50_server.cheetah
+Encrypted Files
+===============
+
+.. versionadded:: 1.3.0
+
+Bcfg2 allows you to encrypt files stored in ``Cfg/`` to protect the
+data in them from other people who need access to the repository. See
+also :ref:`server-plugins-connectors-properties-encrypted` for
+information on encrypting elements in Properties files, which is often
+more friendly for tracking changes in a VCS.
+
+.. note::
+
+ This feature is *not* intended to secure the files against a
+ malicious attacker who has gained access to your Bcfg2 server, as
+ the encryption passphrases are held in plaintext in
+ ``bcfg2.conf``. This is only intended to make it easier to use a
+ single Bcfg2 repository with multiple admins who should not
+ necessarily have access to each other's sensitive data.
+
+Encrypting Files
+----------------
+
+An encrypted file should end with ``.crypt``, e.g.::
+
+ Cfg/etc/foo.conf
+ Cfg/etc/foo.conf/foo.conf.crypt
+ Cfg/etc/foo.conf/foo.conf.G10_foo.crypt
+
+Encrypted Genshi or Cheetah templates can have the extensions in
+either order, e.g.::
+
+ Cfg/etc/foo.conf/foo.conf.crypt.genshi
+ Cfg/etc/foo.conf/foo.conf.G10_foo.genshi.crypt
+ Cfg/etc/foo.conf/foo.conf.H_bar.example.com.crypt.cheetah
+
+To encrypt a file, you can use ``bcfg2-crypt``, e.g.::
+
+ bcfg2-crypt foo.conf
+
+Once you are satisfied that the file has been encrypted as you wish,
+you can remove the plaintext version, or you can use the ``--remove``
+flag of ``bcfg2-crypt``.
+
+To decrypt a file, simply run ``bcfg2-crypt`` again::
+
+ bcfg2-crypt foo.conf
+
+See the ``bcfg2-crypt`` man page for more information.
+
+``bcfg2-crypt`` simply performs AES-256 encryption, and is
+more-or-less equivalent to the following commands (encryption and
+decryption, respectively)::
+
+ openssl enc -aes-256-cbc -k <passphrase> -in foo.conf -out foo.conf.crypt -a
+ openssl enc -d -aes-256-cbc -k <passphrase> -in foo.conf.crypt -out foo.conf -a
+
+.. _server-plugins-generators-cfg-configuring-encryption:
+
+Configuring Encryption
+----------------------
+
+To configure encryption, add a ``[encryption]`` section to
+``bcfg2.conf`` with any number of name-passphrase pairs. When
+decrypting a file, *all* passphrases will be tried; the passphrase
+name is currently purely cosmetic, but at some point in the future the
+ability to give Bcfg2 a "hint" about which passphrase to use will be
+added.
+
+For instance::
+
+ [encryption]
+ foo_team=P4ssphr4se
+ bar_team=Pa55phra5e
+
+This would define two separate encryption passphrases, presumably for
+use by two separate teams. The passphrase names are completely
+arbitrary.
+
+Note that this does entail a chicken-and-egg problem. In order for
+the Bcfg2 server to be able to decrypt encrypted files, the
+passphrases must exist in ``bcfg2.conf`` in plaintext; but, if you're
+encrypting data, presumably you don't want to include those plaintext
+passphrases in your Bcfg2 repository, so you'll want to encrypt
+``bcfg2.conf``. The best way to solve this is:
+
+#. On your Bcfg2 server, manually add the ``[encryption]`` section to
+ ``bcfg2.conf`` and restart the Bcfg2 server.
+#. Update ``bcfg2.conf`` in your Bcfg2 repository with the
+ passphrases, and encrypt it.
+
+The first (manual) step breaks the mutual dependency.
+
Deltas
======
@@ -237,13 +330,19 @@ For ``sudoers``, a very simple validator is::
This uses the ``visudo`` command's built-in validation.
-.. note:
+If you wish to disable validation, this can be done with the following
+setting in ``bcfg2.conf``::
+
+ [cfg]
+ validation=no
- Before 1.3 is released, it will be possible to disable validation
- in the configuration, but enable it for ``bcfg2-test``. This is
- recommended for heavily-used servers, since running an external
- command is fairly resource intensive and could quickly exhaust the
- file descriptors of a server.
+If you have a very large number of validators, you may wish to disable
+validation by default to avoid slowing down the generation of
+configurations on the server, and use ``bcfg2-test`` (for instance, as
+a post-commit hook or as part of a code review process) to run
+validation. You can do this by setting ``validation=no`` in
+``bcfg2.conf`` as described above, and then calling ``bcfg2-test``
+with the ``--cfg-validation`` flag.
File permissions
================
diff --git a/doc/server/plugins/generators/nagiosgen.txt b/doc/server/plugins/generators/nagiosgen.txt
index b839c10ca..ee99b2dc1 100644
--- a/doc/server/plugins/generators/nagiosgen.txt
+++ b/doc/server/plugins/generators/nagiosgen.txt
@@ -20,7 +20,7 @@ Create the NagiosGen directory::
Create default host, and group specs in:
-* /var/lib/bcfg2/NagiosGen/default-host.cfg::
+``/var/lib/bcfg2/NagiosGen/default-host.cfg``::
define host{
name default
@@ -44,7 +44,7 @@ Create default host, and group specs in:
retry_interval 1
}
-* /var/lib/bcfg2/NagiosGen/default-group.cfg::
+``/var/lib/bcfg2/NagiosGen/default-group.cfg``::
define service{
name default-service
@@ -73,7 +73,7 @@ Create default host, and group specs in:
Create group configuration files (Named identical to Bcfg2 groups) and
add services, and commands specific to the hostgroup (Bcfg2 group) in
-* /var/lib/bcfg2/NagiosGen/base-group.cfg::
+``/var/lib/bcfg2/NagiosGen/base-group.cfg``::
define hostgroup{
hostgroup_name base
@@ -100,7 +100,7 @@ add services, and commands specific to the hostgroup (Bcfg2 group) in
hostgroup_name base
}
-* /var/lib/bcfg2/NagiosGen/web-server-group.cfg::
+``/var/lib/bcfg2/NagiosGen/web-server-group.cfg``::
define hostgroup{
hostgroup_name web-server
@@ -182,7 +182,7 @@ options for hosts or groups. E.g.:
<NagiosGen>
<Group name="datacenter-2">
<Option name="parents">dc-2-switch</Option>
- <Group>
+ </Group>
<Group name="non-production">
<Option name="notification_period">workhours</Option>
<Option name="notification_options">d</Option>
diff --git a/doc/server/plugins/generators/packages.txt b/doc/server/plugins/generators/packages.txt
index 62574be76..43db251b0 100644
--- a/doc/server/plugins/generators/packages.txt
+++ b/doc/server/plugins/generators/packages.txt
@@ -126,17 +126,19 @@ Disabling dependency resolution
.. versionadded:: 1.1.0
-Dependency resolution can be disabled by adding this to
-``Packages/packages.conf`` in the ``global`` section::
+Dependency resolution can be disabled by adding the following setting
+to ``bcfg2.conf`` in the ``packages`` section::
- [global]
+ [packages]
resolver=0
All metadata processing can be disabled as well::
- [global]
+ [packages]
metadata=0
+This setting implies disabling the resolver.
+
Blacklisting faulty dependencies
--------------------------------
@@ -145,7 +147,9 @@ Packages, please file a bug report so that we can fix the problem in
future releases. In the meantime, you can work around this issue by
blacklisting the offending Package in your Sources. The blacklist
element should immediately follow the Component section of your source
-and should look like the following::
+and should look like the following:
+
+.. code-block:: xml
<Blacklist>unwanted-packagename</Blacklist>
@@ -162,7 +166,9 @@ If you have yum libraries installed, Packages can automatically handle
GPG signing keys for Yum and Pulp repositories. (You do not need to
use the native yum resolver; if yum libraries are available, GPG
signing keys can be handled automatically.) Simply specify the URL to
-the GPG key(s) for a repository in ``sources.xml``::
+the GPG key(s) for a repository in ``sources.xml``:
+
+.. code-block:: xml
<Source type="yum"
rawurl="http://mirror.example.com/centos6-x86_64/RPMS.os">
@@ -176,17 +182,53 @@ With the keys specified thusly, Packages will include the keys in the
generated yum config file, and will ensure that the keys are imported
on the client.
-There is no need to specify ``<GPGKey>`` tags for :ref:``Pulp sources
-<pulp-source-support>``; that data is pulled directly from the Pulp
+There is no need to specify ``<GPGKey>`` tags for :ref:`Pulp sources
+<pulp-source-support>`; that data is pulled directly from the Pulp
REST API.
+Arbitrary Repo Options
+----------------------
+
+.. versionadded:: 1.2.3
+
+You can specify arbitrary options to be added to the repository config
+on the server side, if you are using the native yum libraries, and on
+the client side if you are using the ability of Packages to
+automatically generate your Yum config. To do this, add an
+``<Options>`` tag to a Source; all of its attributes will be added
+verbatim to the repository in the generated config. For instance:
+
+.. code-block:: xml
+
+ <Source type="yum" rawurl="http://mirror.example.com/centos-6-os">
+ <Arch>x86_64</Arch>
+ <Options proxy="http://proxy.example.com"/>
+ </Source>
+
+If you are using native yum libraries and need to set options only on
+the Bcfg2 server, you can set the ``serveronly`` attribute to "true";
+or, if you need to set options only on the client, you can set the
+``clientonly`` attribute to "true". For instance, if your Bcfg2
+server needed to use a proxy to access a repo, and you wanted to
+expire metadata caches very quickly on the client, you could do:
+
+.. code-block:: xml
+
+ <Source type="yum" rawurl="http://mirror.example.com/centos-6-os">
+ <Arch>x86_64</Arch>
+ <Options serveronly="true" proxy="http://proxy.example.com"/>
+ <Options clientonly="true" metadata_expire="0"/>
+ </Source>
+
.. _packages-exampleusage:
Example usage
=============
Create a ``sources.xml`` file in the Packages directory that looks
-something like this::
+something like this:
+
+.. code-block:: xml
<Sources>
<Group name="ubuntu-intrepid">
@@ -228,7 +270,9 @@ something like this::
<Source type="apt" essential="false" ...>
-Yum sources can be similarly specified::
+Yum sources can be similarly specified:
+
+.. code-block:: xml
<Sources>
<Group name="centos-5.2">
@@ -248,8 +292,10 @@ Yum sources can be similarly specified::
For sources with a **URL** attribute, the **Version** attribute is
also necessary.
-:ref:``Pulp sources <pulp-source-support>`` are very simple to specify
-due to the amount of data that can be queried from Pulp itself::
+:ref:`Pulp sources <pulp-source-support>` are very simple to specify
+due to the amount of data that can be queried from Pulp itself:
+
+.. code-block:: xml
<Sources>
<Group name="centos-6-x86_64">
@@ -353,20 +399,13 @@ be validated using ``bcfg2-lint``.
.. note:: The schema requires that elements be specified in the above order.
-Limitations
-===========
-
-Packages does not do traditional caching as other plugins
-do. Modifying sources in the Packages ``sources.xml`` file requires a
-server restart for the time being. You do not have to restart the
-server after changing ``packages.conf`` or after adding new sources to
-``sources.xml``.
-
Package Checking and Verification
=================================
In order to do disable per-package verification Pkgmgr style, you will
-need to use :ref:`BoundEntries <boundentries>`, e.g.::
+need to use :ref:`BoundEntries <boundentries>`, e.g.:
+
+.. code-block:: xml
<BoundPackage name="mem-agent" priority="1" version="auto"
type="yum" verify="false"/>
@@ -380,10 +419,10 @@ Generating Client APT/Yum Configurations
.. versionadded:: 1.2.0
The Packages plugin has native support for generating Yum configs.
-You must set ``yum_config`` in ``Packages/packages.conf`` to the path
-to the yum config file you want to generate::
+You must set ``yum_config`` in ``bcfg2.conf`` to the path to the yum
+config file you want to generate::
- [global]
+ [packages]
yum_config=/etc/yum.repos.d/all.repo
Then add the corresponding Path entry to your Yum bundle.
@@ -414,7 +453,7 @@ resolution and other routines so that the Bcfg2 server can be run on a
host that does not support Yum itself. If you run the Bcfg2 server on
a machine that does have Yum libraries, however, you can enable use of
those native libraries in Bcfg2 by setting ``use_yum_libraries`` to
-``1`` in the ``[yum]`` section of ``Packages/packages.conf``.
+``1`` in the ``[packages:yum]`` section of ``bcfg2.conf``.
Benefits to this include:
@@ -440,23 +479,24 @@ Configuring the Yum Helper
Due to poor memory management by the Yum API, the long-lived
bcfg2-server process uses an external short-lived helper,
``bcfg2-yum-helper``, to do the actual Yum API calls for native yum
-library support. By default, Bcfg2 looks for this helper at
-``/usr/sbin/bcfg2-yum-helper``. If you have installed the helper
-elsewhere, you will need to configure that location with the
-``helper`` option in the ``[yum]`` section, e.g.::
+library support. By default, Bcfg2 looks for this helper in
+``$PATH``, or, failing that, at ``/usr/sbin/bcfg2-yum-helper``. If
+you have installed the helper elsewhere, you will need to configure
+that location with the ``helper`` option in the ``[packages:yum]``
+section, e.g.::
- [yum]
+ [packages:yum]
use_yum_libraries = 1
helper = /usr/local/sbin/bcfg2-yum-helper
Setting Yum Options
-------------------
-In ``Packages/packages.conf``, any options you set in the ``[yum]``
+In ``bcfg2.conf``, any options you set in the ``[packages:yum]``
section other than ``use_yum_libraries`` and ``helper`` will be passed
along verbatim to the configuration of the Yum objects used in the
-Bcfg2 server. The following options are set by default, and should
-not generally be overridden:
+Bcfg2 server. The following options are set by default, and should not
+generally be overridden:
* ``cachedir`` is set to a hashed value unique to each distinct Yum
configuration. Don't set this unless you know what you're doing.
@@ -472,14 +512,18 @@ Package Groups
Yum package groups are supported by the native Yum libraries. To
include a package group, use the ``group`` attribute of the
``Package`` tag. You can use either the short group ID or the long
-group name::
+group name:
+
+.. code-block:: xml
<Package group="SNMP Support"/>
<Package group="system-management-snmp"/>
By default, only those packages considered the "default" packages in a
group will be installed. You can change this behavior using the
-"type" attribute::
+"type" attribute:
+
+.. code-block:: xml
<Package group="development" type="optional"/>
<Package group="Administration Tools" type="mandatory"/>
@@ -506,7 +550,9 @@ Pulp Support
Bcfg2 contains explicit support for repositories managed by Pulp
(http://pulpproject.org/). Due to the amount of data about a
repository that can be retrieved directly from Pulp, the only thing
-necessary to configure a Pulp repo is the repo ID::
+necessary to configure a Pulp repo is the repo ID:
+
+.. code-block:: xml
<Sources>
<Group name="centos-6-x86_64">
@@ -521,8 +567,8 @@ server must have a valid ``/etc/pulp/consumer/consumer.conf`` that is
readable by the user your Bcfg2 server runs as; the Pulp server,
URLs, and so on, are determined from this.
-Secondly, in ``Packages/packages.conf`` you must set the following
-options in the ``[pulp]`` section:
+Secondly, in ``bcfg2.conf`` you must set the following
+options in the ``[packages:pulp]`` section:
* ``username`` and ``password``: The username and password of a Pulp
user that will be used to register new clients and bind them to
@@ -643,49 +689,73 @@ multiple data sources need to be multiplexed.
The APT source in ``src/lib/Server/Plugins/Packages.py`` provides a
relatively simple implementation of a source.
-packages.conf
+Configuration
=============
-``packages.conf`` contains miscellaneous configuration options for the
+``bcfg2.conf`` contains miscellaneous configuration options for the
Packages plugin. Any booleans in the config file accept the values
"1", "yes", "true", and "on" for True, and "0", "no", "false", and
-"off" for False
+"off" for False. For historical reasons, ``resolver`` and
+``metadata`` also accept "enabled" and "disabled".
It understands the following directives:
-[global] section
-----------------
-
-* ``resolver``: Enable dependency resolution. Default is ``1``
- (true). For historical reasons, this also accepts "enabled" and
- "disabled".
-* ``metadata``: Enable metadata processing. Default is ``1``
- (true). For historical reasons, this also accepts "enabled" and
- "disabled".
-* ``yum_config``: The path at which to generate Yum configs. No
- default.
-* ``apt_config``: The path at which to generate APT configs. No
- default.
-* ``gpg_keypath``: The path on the client RPM GPG keys will be copied
- to before they are imported on the client. Default is
- "/etc/pki/rpm-gpg".
-* ``version``: Set the version attribute used when binding
- Packages. Default is ``auto``.
-
-[yum] section
--------------
-
-* ``use_yum_libraries``: Whether or not to use the :ref:`native yum
- library support <native-yum-libraries>`. Default is ``0`` (false).
-
-All other options in the ``[yum]`` section will be passed along
-verbatim to the Yum configuration if you are using the native Yum
-library support.
-
-[pulp] section
---------------
+[packages] section
+------------------
-* ``username`` and ``password``: The username and password of a Pulp
- user that will be used to register new clients and bind them to
- repositories. Membership in the default ``consumer-users`` role is
- sufficient.
++-------------+------------------------------------------------------+----------+-----------------------------+
+| Name | Description | Values | Default |
++=============+======================================================+==========+=============================+
+| resolver | Enable dependency resolution | Boolean | True |
++-------------+------------------------------------------------------+----------+-----------------------------+
+| metadata | Enable metadata processing. Disabling ``metadata`` | Boolean | True |
+| | implies disabling ``resolver`` as well. | | |
++-------------+------------------------------------------------------+----------+-----------------------------+
+| yum_config | The path at which to generate Yum configs. | String | /etc/yum.repos.d/bcfg2.repo |
++-------------+------------------------------------------------------+----------+-----------------------------+
+| apt_config | The path at which to generate APT configs. | String | /etc/apt/sources.d/bcfg2 |
++-------------+------------------------------------------------------+----------+-----------------------------+
+| gpg_keypath | The path on the client RPM GPG keys will be copied | String | /etc/pki/rpm-gpg |
+| | to before they are imported on the client. | | |
++-------------+------------------------------------------------------+----------+-----------------------------+
+| version | Set the version attribute used when binding Packages | any|auto | auto |
++-------------+------------------------------------------------------+----------+-----------------------------+
+| cache | Path where Packages will store its cache | String | <repo>/Packages/cache |
++-------------+------------------------------------------------------+----------+-----------------------------+
+| keycache | Path where Packages will cache downloaded GPG keys | String | <repo>/Packages/keys |
++-------------+------------------------------------------------------+----------+-----------------------------+
+
+
+[packages:yum] section
+----------------------
+
++-------------------+----------------------------------------------------------+---------+-----------+
+| Name | Description | Values | Default |
++===================+==========================================================+=========+===========+
+| use_yum_libraries | Whether or not to use the | Boolean | False |
+| | :ref:`native yum library support <native-yum-libraries>` | | |
++-------------------+----------------------------------------------------------+---------+-----------+
+| helper | Path to ``bcfg2-yum-helper`` | String | See below |
++-------------------+----------------------------------------------------------+---------+-----------+
+
+To find ``bcfg2-yum-helper`` if none is specified, Bcfg2 looks first
+in ``$PATH`` and then in ``/usr/sbin/bcfg2-yum-helper`` for the
+helper.
+
+All other options in the ``[packages:yum]`` section will be passed
+along verbatim to the Yum configuration if you are using the native
+Yum library support.
+
+[packages:pulp] section
+-----------------------
+
++----------+-----------------------------------------------------+--------+---------+
+| Name | Description | Values | Default |
++==========+=====================================================+========+=========+
+| username | The username of a Pulp user that will be used to | String | None |
+| | register new clients and bind them to repositories. | | |
++----------+-----------------------------------------------------+--------+---------+
+| password | The password of the Pulp user | String | None |
++----------+-----------------------------------------------------+--------+---------+
+
+The user should be a member of the default ``consumer-users`` role.
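+
+Putting it together, a ``bcfg2.conf`` snippet using these sections
+might look like this (all values are illustrative only)::
+
+    [packages]
+    resolver = yes
+    yum_config = /etc/yum.repos.d/all.repo
+
+    [packages:yum]
+    use_yum_libraries = 1
+
+    [packages:pulp]
+    username = pulpadmin
+    password = secret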
diff --git a/doc/server/plugins/generators/rules.txt b/doc/server/plugins/generators/rules.txt
index c084c5681..107ec148a 100644
--- a/doc/server/plugins/generators/rules.txt
+++ b/doc/server/plugins/generators/rules.txt
@@ -46,6 +46,10 @@ Group membership may be negated.
Tag Attributes in Rules
=======================
+Running ``bcfg2-lint`` will check your configuration specification for
+the presence of any attributes that are mandatory for the type of
+entry specified.
+
Rules Tag
---------
@@ -118,7 +122,14 @@ Service Tag
+------------+-------------------------------+---------------------------------------------------------+
| Name | Description | Values |
+============+===============================+=========================================================+
-| mode | Per Service Mode (New in 1.0) | (manual | default | supervised | interactive_only ) |
+| restart | Whether to restart the | ( true | false | interactive ) |
+| | service when the bundle | |
+| | changes (new in 1.3; replaces | |
+| | "mode" attribute) | |
++------------+-------------------------------+---------------------------------------------------------+
+| install | Whether to install the | ( true | false ) |
+| | service (new in 1.3; replaces | |
+| | "mode" attribute) | |
+------------+-------------------------------+---------------------------------------------------------+
| name | Service name or regular | String or regex |
| | expression | |
@@ -139,29 +150,33 @@ Service Tag
| | (Upstart services only) | |
+------------+-------------------------------+---------------------------------------------------------+
-Service mode descriptions
-^^^^^^^^^^^^^^^^^^^^^^^^^
-
-.. versionadded:: 1.0.0
-
-* manual
-
- * do not start/stop/restart this service
- * service installation is not performed
+Service mode specification
+^^^^^^^^^^^^^^^^^^^^^^^^^^
-* interactive_only
+.. versionadded:: 1.3.0
- * only attempt to start/stop/restart this service if the client is run interactively
- * service installation is performed
+In the 1.3.0 release, the "mode" attribute has been replaced by a pair
+of attributes, "restart" and "install," which control how a service is
+handled with more granularity than the old "mode" attribute allowed.
+The old "mode" attribute values map to the new attributes as follows:
-* default
++-----------------------------+------------------------------------------+
+| Mode attribute | Equivalent |
++=============================+==========================================+
+| ``mode="default"`` | ``restart="true" install="true"`` |
++-----------------------------+------------------------------------------+
+| ``mode="interactive_only"`` | ``restart="interactive" install="true"`` |
++-----------------------------+------------------------------------------+
+| ``mode="supervised"`` | ``restart="true" install="true"`` |
++-----------------------------+------------------------------------------+
+| ``mode="manual"`` | ``restart="false" install="false"`` |
++-----------------------------+------------------------------------------+
- * perform appropriate service operations
+The default is ``restart="true" install="true"``.
-* supervised
-
- * default and ensure service is running (or stopped) when verification is performed
- * deprecates supervised='true'
+Previously, "supervised" could be used to start a service during the
+verification phase; this is no longer supported. Services that have
+been stopped on a client will be started during the install phase.
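+
+For example, a hypothetical bound Service entry using the new
+attributes (the service name and type are illustrative) might look
+like this:
+
+.. code-block:: xml
+
+    <Service name="sshd" type="chkconfig" status="on"
+             restart="interactive" install="true"/>
+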
Service status descriptions
^^^^^^^^^^^^^^^^^^^^^^^^^^^
@@ -206,6 +221,12 @@ The Path tag has different values depending on the *type* attribute of
the path specified in your configuration. Below is a set of tables which
describe the attributes available for various Path types.
+Note that ``secontext`` below expects a full context, not just the
+type. For instance, "``system_u:object_r:etc_t:s0``", not just
+``etc_t``. You can also specify "``__default__``", which will restore
+the context of the file to the default set by policy. See
+:ref:`server-selinux` for more information.
+
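+For instance, a hypothetical bound directory entry with an explicit
+SELinux context might look like this:
+
+.. code-block:: xml
+
+    <Path type="directory" name="/var/www/html" owner="root"
+          group="root" perms="0755"
+          secontext="system_u:object_r:httpd_sys_content_t:s0"/>
+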
Attributes common to all Path tags:
+----------+---------------------------------------------------+-----------------+
@@ -218,46 +239,58 @@ Attributes common to all Path tags:
device
^^^^^^
-+----------+---------------------+-------------------+
-| Name | Description | Values |
-+==========+=====================+===================+
-| dev_type | Type of device | (block|char|fifo) |
-+----------+---------------------+-------------------+
-| owner | Device owner | String |
-+----------+---------------------+-------------------+
-| group | Device group | String |
-+----------+---------------------+-------------------+
-| major | Major number (block | integer |
-| | or char devices) | |
-+----------+---------------------+-------------------+
-| minor | Minor number (block | integer |
-| | or char devices) | |
-+----------+---------------------+-------------------+
++-----------+---------------------+-------------------+
+| Name | Description | Values |
++===========+=====================+===================+
+| dev_type | Type of device | (block|char|fifo) |
++-----------+---------------------+-------------------+
+| owner | Device owner | String |
++-----------+---------------------+-------------------+
+| group | Device group | String |
++-----------+---------------------+-------------------+
+| secontext | SELinux context | String |
++-----------+---------------------+-------------------+
+| major | Major number (block | integer |
+| | or char devices) | |
++-----------+---------------------+-------------------+
+| minor | Minor number (block | integer |
+| | or char devices) | |
++-----------+---------------------+-------------------+
directory
^^^^^^^^^
-+-------+------------------------------+------------+
-| Name | Description | Values |
-+=======+==============================+============+
-| perms | Permissions of the directory | String |
-+-------+------------------------------+------------+
-| owner | Owner of the directory | String |
-+-------+------------------------------+------------+
-| group | Group Owner of the directory | String |
-+-------+------------------------------+------------+
-| prune | prune unspecified entries | true|false |
-| | from the Directory | |
-+-------+------------------------------+------------+
++-----------+------------------------------+------------+
+| Name | Description | Values |
++===========+==============================+============+
+| perms | Permissions of the directory | String |
++-----------+------------------------------+------------+
+| owner | Owner of the directory | String |
++-----------+------------------------------+------------+
+| group | Group Owner of the directory | String |
++-----------+------------------------------+------------+
+| secontext | SELinux context | String |
++-----------+------------------------------+------------+
+| prune | prune unspecified entries | true|false |
+| | from the Directory | |
++-----------+------------------------------+------------+
hardlink
^^^^^^^^
-+------+----------------------+--------+
-| Name | Description | Values |
-+======+======================+========+
-| to | File to link to | String |
-+------+----------------------+--------+
++-----------+------------------------------+--------+
+| Name | Description | Values |
++===========+==============================+========+
+| to | File to link to | String |
++-----------+------------------------------+--------+
+| perms | Permissions of the hardlink | String |
++-----------+------------------------------+--------+
+| owner | Owner of the hardlink | String |
++-----------+------------------------------+--------+
+| group | Group Owner of the hardlink | String |
++-----------+------------------------------+--------+
+| secontext | SELinux context | String |
++-----------+------------------------------+--------+
nonexistent
^^^^^^^^^^^
@@ -274,15 +307,17 @@ nonexistent
permissions
^^^^^^^^^^^
-+-------+--------------------------+--------+
-| Name | Description | Values |
-+=======+==========================+========+
-| perms | Permissions of the file. | String |
-+-------+--------------------------+--------+
-| owner | Owner of the file. | String |
-+-------+--------------------------+--------+
-| group | Group of the file. | String |
-+-------+--------------------------+--------+
++-----------+--------------------------+--------+
+| Name | Description | Values |
++===========+==========================+========+
+| perms | Permissions of the file. | String |
++-----------+--------------------------+--------+
+| owner | Owner of the file. | String |
++-----------+--------------------------+--------+
+| group | Group of the file. | String |
++-----------+--------------------------+--------+
+| secontext | SELinux context | String |
++-----------+--------------------------+--------+
symlink
^^^^^^^
@@ -293,6 +328,205 @@ symlink
| to | File to link to | String |
+------+----------------------+--------+
+.. _server-plugins-generators-rules-acls:
+
+ACLs
+^^^^
+
+.. versionadded:: 1.3.0
+
+ACLs on a Path entry are specified not by attributes on the tag but by
+child ``<ACL>`` tags. For instance:
+
+.. code-block:: xml
+
+ <Path name="/etc/foo" type="directory" owner="root" group="root"
+ perms="0775">
+ <ACL type="default" scope="user" user="foouser" perms="rw"/>
+ <ACL type="default" scope="group" group="users" perms="rx"/>
+ </Path>
+
+The ACL tag has the following attributes:
+
++-------+---------------------------------------------------+----------------+
+| Name | Description | Values |
++=======+===================================================+================+
+| type | ACL type | default|access |
++-------+---------------------------------------------------+----------------+
+| scope | ACL scope | user|group |
++-------+---------------------------------------------------+----------------+
+| user | User the ACL applies to (with ``scope="user"``) | String |
++-------+---------------------------------------------------+----------------+
+| group | Group the ACL applies to (with ``scope="group"``) | String |
++-------+---------------------------------------------------+----------------+
+| perms | Permissions for the ACL | See below |
++-------+---------------------------------------------------+----------------+
+
+The ``perms`` attribute can either be a single octal digit (e.g.,
+``6`` would indicate read and write, but not execute), or a symbolic
+mode including 'r', 'w', and 'x'. You can include '-' for operations
+that are not permitted, but it's not required. I.e., all of the
+following are identical::
+
+ perms="5"
+ perms="rx"
+ perms="r-x"
+
+It is not currently possible to manually set an effective rights mask;
+the mask will be automatically calculated from the given ACLs when
+they are applied.
+
+Note that it is possible to set ACLs that demand different permissions
+on a file than those specified in the ``perms`` attribute on the
+``Path`` tag. For instance:
+
+.. code-block:: xml
+
+ <Path name="/etc/foo" perms="0644" group="root" owner="root">
+ <ACL type="access" scope="user" user="foouser" perms="rwx"/>
+ </Path>
+
+In this case, we've specified permissions of ``0644``, but the
+effective rights mask will be "rwx," so setting the ACL will change
+the permissions to ``0674``. When this happens, Bcfg2 will change the
+permissions and set the ACLs on every run and the entry will be
+eternally marked as bad.
+
+SELinux Tag
+-----------
+
+The SELinux tag has different values depending on the *type* attribute
+of the SELinux entry specified in your configuration. Below is a set
+of tables which describe the attributes available for various SELinux
+types. The types (except for ``module``) correspond to ``semanage``
+subcommands.
+
+Note that the ``selinuxtype`` attribute takes only an SELinux type,
+not a full context; e.g., "``etc_t``", not
+"``system_u:object_r:etc_t:s0``".
+
+As it can be very tedious to create a baseline of all existing SELinux
+entries, you can use ``selinux_baseline.py`` located in the ``tools/``
+directory to do that for you.
+
+In certain cases, it may be necessary to create multiple SELinux
+entries with the same name. For instance, "root" is both an SELinux
+user and an SELinux login record; or a given fcontext may need two
+different SELinux types depending on whether it's a symlink or a plain
+file. In these (few) cases, it is necessary to create BoundSELinux
+entries directly in Bundler rather than using abstract SELinux entries
+in Bundler and binding them with Rules.
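+
+For example, a hypothetical bundle binding two fcontext entries with
+the same name (the SELinux type names are purely illustrative) might
+look like this:
+
+.. code-block:: xml
+
+    <Bundle name="foo">
+      <BoundSELinux type="fcontext" name="/opt/foo(/.*)?"
+                    selinuxtype="httpd_sys_content_t"/>
+      <BoundSELinux type="fcontext" name="/opt/foo(/.*)?"
+                    selinuxtype="bin_t" filetype="symlink"/>
+    </Bundle>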
+
+See :ref:`server-selinux` for more information.
+
+boolean
+^^^^^^^
+
++-------+----------------------+---------+----------+
+| Name | Description | Values | Required |
++=======+======================+=========+==========+
+| name | Name of the boolean | String | Yes |
++-------+----------------------+---------+----------+
+| value | Value of the boolean | on|off | Yes |
++-------+----------------------+---------+----------+
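+
+In Rules, a bound boolean entry might look like this (the boolean
+name is illustrative):
+
+.. code-block:: xml
+
+    <SELinux type="boolean" name="httpd_can_network_connect" value="on"/>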
+
+port
+^^^^
+
++-------------+------------------------+---------------------------+----------+
+| Name | Description | Values | Required |
++=============+========================+===========================+==========+
+| name | Port number or range | ``<port>/<proto>`` or | Yes |
+| | and protocol (tcp|udp) | ``<start>-<end>/<proto>`` | |
++-------------+------------------------+---------------------------+----------+
+| selinuxtype | SELinux type to apply | String | Yes |
+| | to this port | | |
++-------------+------------------------+---------------------------+----------+
+
+fcontext
+^^^^^^^^
+
++-------------+-------------------------+---------------------+----------+
+| Name | Description | Values | Required |
++=============+=========================+=====================+==========+
+| name | File specification | String | Yes |
++-------------+-------------------------+---------------------+----------+
+| selinuxtype | SELinux type to apply | String | Yes |
+| | to files matching this | | |
+| | specification | | |
++-------------+-------------------------+---------------------+----------+
+| filetype | File type to match. | (regular|directory| | No |
+| | Default: all | symlink|pipe|all| | |
+| | | socket|block|char) | |
++-------------+-------------------------+---------------------+----------+
+
+node
+^^^^
+
++-------------+------------------------------------+------------------+----------+
+| Name | Description | Values | Required |
++=============+====================================+==================+==========+
+| name | IP address and netmask of node. | <addr>/<netmask> | Yes |
+| | Netmask can be numeric (/16) or | | |
+| | dotted-quad (/255.255.0.0) | | |
++-------------+------------------------------------+------------------+----------+
+| selinuxtype | SELinux type to apply to this node | String | Yes |
++-------------+------------------------------------+------------------+----------+
+| proto | Protocol | (ipv4|ipv6) | Yes |
++-------------+------------------------------------+------------------+----------+
+| netmask | Netmask | String | Yes |
++-------------+------------------------------------+------------------+----------+
+
+login
+^^^^^
+
++-------------+-------------------------------+-----------+----------+
+| Name | Description | Values | Required |
++=============+===============================+===========+==========+
+| name | Unix username | String | Yes |
++-------------+-------------------------------+-----------+----------+
+| selinuxuser | SELinux username | String | Yes |
++-------------+-------------------------------+-----------+----------+
+
+user
+^^^^
+
++-------------+-------------------------------+-----------+----------+
+| Name | Description | Values | Required |
++=============+===============================+===========+==========+
+| name | SELinux username | String | Yes |
++-------------+-------------------------------+-----------+----------+
+| roles | Space-separated list of roles | String | No |
++-------------+-------------------------------+-----------+----------+
+| prefix | Home directory context prefix | String | No |
++-------------+-------------------------------+-----------+----------+
+
+interface
+^^^^^^^^^
+
++-------------+-------------------------+-------------+----------+
+| Name | Description | Values | Required |
++=============+=========================+=============+==========+
+| name | Interface name | String | Yes |
++-------------+-------------------------+-------------+----------+
+| selinuxtype | SELinux type to apply | String | Yes |
+| | to this interface | | |
++-------------+-------------------------+-------------+----------+
+
+permissive
+^^^^^^^^^^
+
++-------------+------------------------------------+-------------+----------+
+| Name | Description | Values | Required |
++=============+====================================+=============+==========+
+| name | SELinux type to make permissive | String | Yes |
++-------------+------------------------------------+-------------+----------+
+
+module
+^^^^^^
+
+See :ref:`server-plugins-generators-semodules`
+
Rules Directory
===============
@@ -356,14 +590,11 @@ Using Regular Expressions in Rules
If you wish, you can configure the Rules plugin to support regular
expressions. This entails a small performance and memory usage
-penalty. To do so, create a file, "Rules/rules.conf", and add the
-following text::
+penalty. To do so, add the following setting to ``bcfg2.conf``::
[rules]
regex = yes
-You will have to restart the Bcfg2 server after making that change.
-
With regular expressions enabled, you can use a regex in the ``name``
attribute to match multiple abstract configuration entries.
@@ -372,4 +603,4 @@ name="bcfg2".../>`` will *not* match a Service named ``bcfg2-server``;
you'd have to explicitly specify ``<Service name="bcfg2.*".../>``.
Note that only one Rule can apply to any abstract entry, so you cannot
-specify multiple regexs to match the same rule.
+specify multiple regexes to match the same rule.
diff --git a/doc/server/plugins/generators/semodules.txt b/doc/server/plugins/generators/semodules.txt
new file mode 100644
index 000000000..9a6bbaefd
--- /dev/null
+++ b/doc/server/plugins/generators/semodules.txt
@@ -0,0 +1,66 @@
+.. -*- mode: rst -*-
+
+.. _server-plugins-generators-semodules:
+
+=========
+SEModules
+=========
+
+.. versionadded:: 1.3.0
+
+The SEModules plugin handles SELinux module entries. It supports
+group- and host-specific module versions, and enabling/disabling
+modules.
+
+You can use ``selinux_baseline.py`` located in the tools/ directory to
+create a baseline of all of your installed modules.
+
+See :ref:`server-selinux` for more information.
+
+Usage
+=====
+
+To use the SEModules plugin, first do ``mkdir
+/var/lib/bcfg2/SEModules``. Add ``SEModules`` to your ``plugins``
+line in ``/etc/bcfg2.conf`` and restart bcfg2-server.
+
+The SEModules directory contains modules in a layout similar to the
+Cfg plugin: at the top level, SEModules should contain directories
+named after the modules you want to install, and each of those
+directories can contain a global module, plus any number of group- and
+host-specific modules. For instance::
+
+ $ ls -F SEModules
+ foo.pp/ bar.pp/
+ $ ls SEModules/foo.pp/
+ foo.pp
+ foo.pp.G50_server
+ foo.pp.H_baz.example.com
+
+For more information on this directory layout, see
+:ref:`server-plugins-generators-cfg`.
+
+Entries
+=======
+
+SEModules handles ``<SELinux>`` entries with the ``module`` type. For
+instance:
+
+.. code-block:: xml
+
+ <Bundle name="foo">
+ <SELinux type="module" name="foo.pp"/>
+ </Bundle>
+
+The ``.pp`` extension is optional.
+
+.. note::
+
+ If you use a ``BoundSELinux`` tag, you must *not* include the
+ ``.pp`` extension. Using ``BoundSELinux`` directly is not
+ recommended, though.
+
+You can also install a disabled module:
+
+.. code-block:: xml
+
+ <SELinux type="module" name="foo" disabled="true"/>
diff --git a/doc/server/plugins/generators/sshbase.txt b/doc/server/plugins/generators/sshbase.txt
index e6d51a335..b62a4a454 100644
--- a/doc/server/plugins/generators/sshbase.txt
+++ b/doc/server/plugins/generators/sshbase.txt
@@ -125,8 +125,8 @@ Permissions and Metadata
.. versionadded:: 1.2.0
-SSHbase supports use of an :ref:`info.xml <server-info-info-xml>` file
-to control the permissions and other metadata for the keys and
+SSHbase supports use of an :ref:`info.xml <server-info>` file to
+control the permissions and other metadata for the keys and
``ssh_known_hosts`` file. You can use the ``<Path>`` directive in
``info.xml`` to change the metadata for different keys, e.g.::
diff --git a/doc/server/plugins/generators/sslca.txt b/doc/server/plugins/generators/sslca.txt
index 8e33148cb..d2b051535 100644
--- a/doc/server/plugins/generators/sslca.txt
+++ b/doc/server/plugins/generators/sslca.txt
@@ -33,7 +33,7 @@ must contain full (not relative) paths.
#. Add SSLCA to the **plugins** line in ``/etc/bcfg2.conf`` and restart the
server -- This enabled the SSLCA plugin on the Bcfg2 server.
-#. Add a section to your ``/etc/bcfg2.conf`` called sslca_foo, replacing foo
+#. Add a section to your ``/etc/bcfg2.conf`` called ``sslca_foo``, replacing foo
with the name you wish to give your CA so you can reference it in certificate
definitions.
@@ -51,6 +51,12 @@ must contain full (not relative) paths.
specification. If you're using a self signing CA this would be the CA cert
that you generated.
+#. Optionally, add ``verify_certs = false`` if you don't wish to
+ perform certificate verification on the certs SSLCA generates.
+ Verification includes ``openssl verify`` to verify the CA chain,
+ and ensuring that both the key file and certificate file contain
+ the same key.
+
#. Once all this is done, you should have a section in your ``/etc/bcfg2.conf``
that looks similar to the following::
diff --git a/doc/server/plugins/generators/tgenshi/clientsxml.txt b/doc/server/plugins/generators/tgenshi/clientsxml.txt
index 7a8d1fcc4..87d6d728a 100644
--- a/doc/server/plugins/generators/tgenshi/clientsxml.txt
+++ b/doc/server/plugins/generators/tgenshi/clientsxml.txt
@@ -65,7 +65,7 @@ Possible improvements:
name="${name}"
uuid="${name}"
password="${metadata.Properties['passwords.xml'].xdata.find('password').find('bcfg2-client').find(name).text}"
- location="floating"
+ floating="true"
secure="true"
/>\
{% end %}\
diff --git a/doc/server/plugins/generators/tgenshi/index.txt b/doc/server/plugins/generators/tgenshi/index.txt
index 21ef8f17f..0d4a7ffb0 100644
--- a/doc/server/plugins/generators/tgenshi/index.txt
+++ b/doc/server/plugins/generators/tgenshi/index.txt
@@ -17,7 +17,7 @@ on the client in the created files.
To begin, you will need to download and install the Genshi templating engine.
-To install on CentOS or RHEL 5, run::
+To install on CentOS or RHEL, run::
sudo yum install python-genshi
diff --git a/doc/server/plugins/grouping/metadata.txt b/doc/server/plugins/grouping/metadata.txt
index 305857578..88bb0c460 100644
--- a/doc/server/plugins/grouping/metadata.txt
+++ b/doc/server/plugins/grouping/metadata.txt
@@ -6,11 +6,11 @@
Metadata
========
-The metadata mechanism has two types of information, client metadata and
-group metadata. The client metadata describes which top level group a
-client is associated with.The group metadata describes groups in terms
-of what bundles and other groups they include. Each aspect grouping
-and clients' memberships are reflected in the ``Metadata/groups.xml`` and
+The metadata mechanism has two types of information: client metadata
+and group metadata. The client metadata describes which top level
+group a client is associated with. The group metadata describes groups
+in terms of what bundles and other groups they include. Group data and
+clients' memberships are reflected in the ``Metadata/groups.xml`` and
``Metadata/clients.xml`` files, respectively.
Usage of Groups in Metadata
@@ -37,11 +37,11 @@ describe one host. A sample file is below:
.. code-block:: xml
<Clients version="3.0">
- <Client profile="backup-server" pingable="Y" pingtime="0" name="backup.example.com"/>
- <Client profile="console-server" pingable="Y" pingtime="0" name="con.example.com"/>
- <Client profile="kerberos-master" pingable="Y" pingtime="0" name="kdc.example.com"/>
- <Client profile="mail-server" pingable="Y" pingtime="0" name="mail.example.com"/>
- <Client name='foo' address='10.0.0.1' pingable='N' pingtime='-1'>
+ <Client profile="backup-server" name="backup.example.com"/>
+ <Client profile="console-server" name="con.example.com"/>
+ <Client profile="kerberos-master" name="kdc.example.com"/>
+ <Client profile="mail-server" name="mail.example.com"/>
+ <Client name='foo' address='10.0.0.1'>
<Alias name='foo-mgmt' address='10.1.0.1'/>
</Client>
</Clients>
@@ -85,20 +85,14 @@ Additionally, the following properties can be specified:
| address | Establishes an extra IP address that | ip address |
| | resolves to this client. | |
+----------+----------------------------------------+----------------+
-| location | Requires requests to come from an IP | fixed|floating |
-| | address that matches the client | |
-| | record. | |
+| floating | Allows requests to come from any IP, | true|false |
+| | rather than requiring requests to come | |
+| | from an IP associated with the client | |
+----------+----------------------------------------+----------------+
| password | Establishes a per-node password that | String |
| | can be used instead of the global | |
| | password. | |
+----------+----------------------------------------+----------------+
-| pingable | If the client is pingable (deprecated; | Y|N |
-| | for old reporting system) | |
-+----------+----------------------------------------+----------------+
-| pingtime | Last time the client was pingable | String |
-| | (deprecated; for old reporting system) | |
-+----------+----------------------------------------+----------------+
| secure | Requires the use of the per-client | true|false |
| | password for this client. | |
+----------+----------------------------------------+----------------+
@@ -107,9 +101,50 @@ Additionally, the following properties can be specified:
| | resolution. | |
+----------+----------------------------------------+----------------+
+Floating can also be configured by setting ``location="floating"``,
+but that is deprecated.
+
For detailed information on client authentication see
:ref:`appendix-guides-authentication`
+================
+Clients Database
+================
+
+.. versionadded:: 1.3.0
+
+It is also possible to store client records in a database rather than
+writing back to ``clients.xml``. This provides several advantages:
+
+* ``clients.xml`` will never be written by the server, removing an
+ area of contention between the user and server.
+* ``clients.xml`` can be removed entirely for many sites.
+* The Bcfg2 client list can be queried by other machines without
+ obtaining and parsing ``clients.xml``.
+* A single client list can be shared amongst multiple Bcfg2 servers.
+
+In general, storing clients in the database works almost the same as
+``clients.xml``. ``groups.xml`` is parsed identically. If
+``clients.xml`` is present, it is parsed, but ``<Client>`` tags in
+``clients.xml`` *do not* assert client existence; they are only used
+to set client options *if* the client exists (in the database). That
+is, the two purposes of ``clients.xml`` -- to track which clients
+exist, and to set client options -- have been separated.
+
+With the improvements in ``groups.xml`` parsing in 1.3, client groups
+can now be set directly in ``groups.xml`` with ``<Client>`` tags. (See
+:ref:`metadata-client-tag` for more details.) As a result,
+``clients.xml`` is only necessary if you need to set
+options (e.g., aliases, floating clients, per-client passwords, etc.)
+on clients.
+
+To use the database backend instead of ``clients.xml``, set
+``use_database`` in the ``[metadata]`` section of ``bcfg2.conf`` to
+``true``. You will also need to configure the :ref:`Global Server
+Database Settings <server-database>`.
+
+The ``clients.xml``-based model remains the default.
+
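+A minimal sketch of the relevant ``bcfg2.conf`` excerpt (the
+``[database]`` connection settings themselves are described in the
+documentation linked above):
+
+.. code-block:: ini
+
+    [metadata]
+    use_database = true
+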
Metadata/groups.xml
===================
@@ -118,31 +153,88 @@ definitions. Here's a simple ``Metadata/groups.xml`` file:
.. code-block:: xml
- <Groups version='3.0'>
+ <Groups>
<Group name='mail-server' profile='true'
- public='false'
comment='Top level mail server group' >
<Bundle name='mail-server'/>
<Bundle name='mailman-server'/>
<Group name='apache-server'/>
- <Group name='rhel-as-5-x86'/>
<Group name='nfs-client'/>
<Group name='server'/>
+ <Group name='rhel5'>
+ <Group name='sendmail-server'/>
+ </Group>
+ <Group name='rhel6'>
+ <Group name='postfix-server'/>
+ </Group>
</Group>
- <Group name='rhel-as-5-x86'>
- <Group name='rhel'/>
+ <Group name='rhel'>
+ <Group name='selinux-enabled'/>
</Group>
- <Group name='apache-server'/>
- <Group name='nfs-client'/>
- <Group name='server'/>
- <Group name='rhel'/>
+ <Group name='oracle-server'>
+ <Group name='selinux-enabled' negate='true'/>
+ </Group>
+ <Client name='foo.example.com'>
+ <Group name='oracle-server'/>
+ <Group name='apache-server'/>
+ </Client>
</Groups>
+A Group or Client tag that does not contain any child tags is a
+declaration of membership; a Group or Client tag that does contain
+children is a conditional. So the example above does not assign
+either the ``rhel5`` or ``rhel6`` groups to machines in the
+``mail-server`` group, but conditionally assigns the
+``sendmail-server`` or ``postfix-server`` groups depending on the OS
+of the client. (Presumably in this example the OS groups are set by a
+probe.)
+
+Consequently, a client that is RHEL 5 and a member of the
+``mail-server`` profile group would also be a member of the
+``apache-server``, ``nfs-client``, ``server``, and ``sendmail-server``
+groups; a RHEL 6 client that is a member of the ``mail-server``
+profile group would be a member of the ``apache-server``,
+``nfs-client``, ``server``, and ``postfix-server`` groups.
+
+Client tags in ``groups.xml`` allow you to supplement the profile
+group declarations in ``clients.xml`` and/or client group assignments
+with the :ref:`server-plugins-grouping-grouppatterns` plugin. They
+should be used sparingly. (They are more useful when you are using
+the database backend for client records.)
+
+You can also declare that a group should be negated; this allows you
+to set defaults and override them efficiently. Negation is applied
+after other group memberships are calculated, so it doesn't matter how
+many times a client is assigned to a group or how many times it is
+negated; a single group negation is sufficient to remove a client from
+that group. For instance, in the following example,
+``foo.example.com`` is **not** a member of ``selinux-enabled``, even
+though it is a member of the ``foo-server`` and ``every-server``
+groups:
-Nested/chained groups definitions are conjunctive (logical and). For
-instance, in the above example, a client associated with the Profile
-Group ``mail-server`` is also a member of the ``apache-server``,
-``rhel-as-5-x86``, ``nfs-client``, ``server``, and ``rhel`` groups.
+.. code-block:: xml
+
+ <Groups>
+ <Group name="foo-server">
+ <Group name="apache-server"/>
+ <Group name="selinux-enabled"/>
+ </Group>
+ <Group name="apache-server">
+ <Group name="selinux-enabled"/>
+ </Group>
+ <Group name="every-server">
+ <Group name="selinux-enabled"/>
+ </Group>
+ <Client name="foo.example.com">
+ <Group name="selinux-enabled" negate="true"/>
+ </Client>
+ </Groups>
+
+.. note::
+
+ Nested Group conditionals, Client tags, and negated Group tags are
+ all new in 1.3.0.
+
+The order of entries in ``groups.xml`` does not matter.
Groups describe clients in terms of abstract, disjoint aspects. Groups
can be combined to form complex descriptions of clients that use
@@ -171,33 +263,62 @@ Metadata Group Tag
The Group Tag has the following possible attributes:
-+----------+------------------------------------------+--------------+
-| Name | Description | Values |
-+==========+==========================================+==============+
-| name | Name of the group | String |
-+----------+------------------------------------------+--------------+
-| profile | If a client can be directly associated | True|False |
-| | with this group | |
-+----------+------------------------------------------+--------------+
-| public | If a client can freely associate itself | True|False |
-| | with this group. For use with the | |
-| | *bcfg2 -p* option on the client. | |
-+----------+------------------------------------------+--------------+
-| category | A group can only contain one instance of | String |
-| | a group in any one category. This | |
-| | provides the basis for representing | |
-| | groups which are conjugates of one | |
-| | another in a rigorous way. It also | |
-| | provides the basis for negation. | |
-+----------+------------------------------------------+--------------+
-| default | Set as the profile to use for clients | True|False |
-| | that are not associated with a profile | |
-| | in ``clients.xml`` | |
-+----------+------------------------------------------+--------------+
-| comment | English text description of group | String |
-+----------+------------------------------------------+--------------+
-
-Groups can also contain other groups and bundles.
++----------+----------------------------------------------+--------------+
+| Name | Description | Values |
++==========+==============================================+==============+
+| name | Name of the group | String |
++----------+----------------------------------------------+--------------+
+| profile | If a client can be directly associated with | True|False |
+| | this group | |
++----------+----------------------------------------------+--------------+
+| public | If a client can freely associate itself with | True|False |
+| | this group. For use with the ``bcfg2 -p`` | |
+| | option on the client. | |
++----------+----------------------------------------------+--------------+
+| category | A group can only contain one instance of a | String |
+| | group in any one category. This provides the | |
+| | basis for representing groups which are | |
+| | conjugates of one another in a rigorous way. | |
++----------+----------------------------------------------+--------------+
+| default | Set as the profile to use for clients that | True|False |
+| | are not associated with a profile in | |
+| | ``clients.xml`` | |
++----------+----------------------------------------------+--------------+
+| comment | English text description of group | String |
++----------+----------------------------------------------+--------------+
+| negate | When used as a conditional, only apply the | True|False |
+| | children if the named group does not match. | |
+| | When used as a declaration, do not apply | |
+| | the named group to matching clients. | |
++----------+----------------------------------------------+--------------+
+
+The ``profile``, ``public``, ``category``, ``default``, and
+``comment`` attributes are only parsed if a Group tag either a) is the
+direct child of a Groups tag (i.e., at the top level of an XML file);
+or b) has no children. This matches legacy behavior in Bcfg2 1.2 and
+earlier.
+
+Groups can also contain other groups, clients, and bundles.
+
+.. _metadata-client-tag:
+
+Metadata Client Tag
+-------------------
+
+The Client Tag has the following possible attributes:
+
++----------+-----------------------------------------------+--------------+
+| Name | Description | Values |
++==========+===============================================+==============+
+| name | Name of the client | String |
++----------+-----------------------------------------------+--------------+
+| negate | Only apply the child tags if the named client | True|False |
+| | does not match. | |
++----------+-----------------------------------------------+--------------+
+
+Clients can also contain groups, other clients (although that's likely
+nonsensical), and bundles.
+
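+For example, a negated Client conditional can be used to apply a
+group to every client *except* a given host; the host and group names
+below are hypothetical:
+
+.. code-block:: xml
+
+    <Client name="legacy.example.com" negate="true">
+      <Group name="selinux-enabled"/>
+    </Client>
+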
Use of XInclude
===============
diff --git a/doc/server/plugins/misc/trigger.txt b/doc/server/plugins/misc/trigger.txt
index 7547f2fdd..224b7444b 100644
--- a/doc/server/plugins/misc/trigger.txt
+++ b/doc/server/plugins/misc/trigger.txt
@@ -6,8 +6,8 @@
Trigger
=======
-Trigger is a plugin that calls external scripts (on the server) when
-clients are configured.
+Trigger is a plugin that calls external scripts (on the server) at the
+end of each client run.
Setup
=====
diff --git a/doc/server/plugins/plugin-roles.txt b/doc/server/plugins/plugin-roles.txt
index 2ce7e21ff..b14323952 100644
--- a/doc/server/plugins/plugin-roles.txt
+++ b/doc/server/plugins/plugin-roles.txt
@@ -6,124 +6,62 @@
Plugin Roles
============
-This documents available plugin roles.
+* Metadata
-1. list of plugin roles
+ * Initial metadata construction
+ * Connector data accumulation
+ * ClientMetadata instance delivery
+ * Introspection interface (for bcfg2-info & co)
- +---------------+--------------------+--------+
- | Role | Class | Status |
- +===============+====================+========+
- | Metadata | Metadata | done |
- +---------------+--------------------+--------+
- | Connector | Connector | done |
- +---------------+--------------------+--------+
- | Probing | Probing | done |
- +---------------+--------------------+--------+
- | Structure | Structure | done |
- +---------------+--------------------+--------+
- | Structure Val | StructureValidator | done |
- +---------------+--------------------+--------+
- | Generator | Generator | done |
- +---------------+--------------------+--------+
- | Goals Val | GoalValidator | done |
- +---------------+--------------------+--------+
- | Statistics | Statistics | done |
- +---------------+--------------------+--------+
- | Pull Source | PullSource | done |
- +---------------+--------------------+--------+
- | Pull Target | PullTarget | done |
- +---------------+--------------------+--------+
- | Version | Version | done |
- +---------------+--------------------+--------+
- | Decision | Decision | done |
- +---------------+--------------------+--------+
- | Remote | Remote | none |
- +---------------+--------------------+--------+
- | Syncing | Syncing | none |
- +---------------+--------------------+--------+
+* Connector
-2. Plugin Capabilities
+ * Provide additional data for ClientMetadata instances
- * Metadata
+* Probing
- * Initial metadata construction
- * Connector data accumulation
- * ClientMetadata instance delivery
- * Introspection interface (for bcfg2-info & co)
+ * Send executable probes to clients and receive data responses
- * Connector
+* Structure
- * Provide additional data for ClientMetadata instances
+ * Produce a list of configuration entries that should be included in
+ client configurations
+ * Each structure plugin produces a list of structures
+ * Core verifies that each bundle listed has been constructed
- * Probing
+* StructureValidator
- * send executable probes to clients and receive data responses
+ * Validate a client entry list's internal consistency, modifying if
+ needed
- * Structure
+* Generator
- * Produce a list of configuration entries that should be included in client configurations
- * Each structure plugin is produces a list of structures
- * Core verifies that each bundle listed has been constructed
+* GoalValidator
- * Structure Validation
+ * Validate client goals, modifying if needed
- * Validate a client entry list's internal consistency, modifying if needed
+* PullSource
- * Generator
- * Goals Validation
+ * Plugin can provide entry information about clients
- * Validate client goals, modifying if needed
+* PullTarget
- * Pull Source
+ * Plugin can accept entry data and merge it into the specification
- * Plugin can provide entry information about clients
+* Version
- * Pull Target
+ * Plugin can read revision information from VCS of choice
+ * Will provide an interface for producing commits made by the bcfg2-server
- * Plugin can accept entry data and merge it into the specification
+* Decision
- * Version
+* ClientRunHooks
- * Plugin can read revision information from VCS of choice
- * Will provide an interface for producing commits made by the bcfg2-server
+ * Provides hooks executed at the start and end of each client run
- * Decision
+Configuration of plugins
+========================
-3. Configuration of plugins
-
- Plugin configuration will be simplified substantially. Now, a single
- list of plugins (including plugins of all capabilities) is specified
- upon startup (either via bcfg2.conf or equivalent). This mechanism
- replaces the current split configuration mechanism where generators,
- structures, and other plugins are listed independently. Instead, all
- plugins included in the startup list will be initialized, and each
- will be enabled in all roles that it supports. This will remove a
- current source of confusion and potential configuration errors,
- wherein a plugin is enabled for an improper set of goals. (ie Cfg
- enabled as a structure, etc) This does remove the possibility of
- partially enabling a plugin for one of its roles without activating it
- across the board, but I think this is a corner case, which will be
- poorly supported by plugin implementers. If needed, this use case can
- be explicitly supported by the plugin author, through use of a config
- file directive.
-
-4. User Visible Changes
-
- Connector data is added to ClientMetadata instances using the name of
- the connector plugin. This means that the dictionary of key/val probe
- pairs included with metadata is now available as metadata.Probes
- (instead of metadata.probes). Once properties are available the same
- way, they will likewise change names to metadata.Properties from their
- current name.
-
- Plugin configuration will change. A single field "plugins" in
- bcfg2.conf will supercede the combination of the "generators" and
- "structures" fields.
-
- Default loading of needed plugins is now explicit; this means that
- Statistics (if used) should be listed in the plugins line of
- bcfg2.conf.
-
-5. Notes
-
- * Need to ensure bundle accumulation occurs with connector groups
+A single list of plugins (including plugins of all capabilities) is
+specified upon startup (either via bcfg2.conf or equivalent). All
+plugins included in the startup list are initialized, and each is
+enabled in all roles that it supports.
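+
+A minimal sketch of such a plugin list in ``bcfg2.conf`` (the exact
+set of plugins will vary from site to site):
+
+.. code-block:: ini
+
+    [server]
+    plugins = Metadata,Bundler,Rules,Cfg,SSHbase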
diff --git a/doc/server/plugins/probes/fileprobes.txt b/doc/server/plugins/probes/fileprobes.txt
new file mode 100644
index 000000000..0baec2c59
--- /dev/null
+++ b/doc/server/plugins/probes/fileprobes.txt
@@ -0,0 +1,57 @@
+.. _server-plugins-probes-fileprobes:
+
+==========
+FileProbes
+==========
+
+The FileProbes plugin allows you to probe a client for a file,
+which is then added to the :ref:`server-plugins-generators-cfg`
+specification. If the file changes on the client, FileProbes can
+either update it in the specification or allow Cfg to replace it.
+
+FileProbes will not probe a file if there's already a file in Cfg that
+will apply to the client. So if, for instance, you have a generic
+file in ``Cfg/etc/foo.conf/foo.conf`` that applies to all hosts,
+FileProbes will not retrieve ``/etc/foo.conf`` from the client (unless
+``update`` is enabled; see Configuration_ below).
+
+When a new config file is first probed, an ``info.xml`` file is also
+written to enforce the permissions from that client. Subsequent
+probes from other clients will not modify or overwrite the data in
+``info.xml``. (This ensures that any manual changes you make to
+``info.xml`` for that file are not circumvented.)
+
+Configuration
+=============
+
+FileProbes is configured in ``FileProbes/config.xml``, which might
+look something like:
+
+.. code-block:: xml
+
+ <FileProbes>
+ <FileProbe name="/etc/foo.conf"/>
+ <Group name="blah-servers">
+ <FileProbe name="/etc/blah.conf" update="true"/>
+ </Group>
+ <Client name="bar.example.com">
+ <FileProbe name="/var/lib/bar.gz" encoding="base64"/>
+ </Client>
+ </FileProbes>
+
+This will result in ``/etc/foo.conf`` being retrieved from all
+clients; if it changes on a client, it will be overwritten by the
+version that was retrieved initially.
+
+Clients in the ``blah-servers`` group will be probed for
+``/etc/blah.conf``; if it changes on a client, those changes will be
+written into the Bcfg2 specification. If the file is deleted from a
+client, it will be rewritten from Bcfg2.
+
+``bar.example.com`` will be probed for ``/var/lib/bar.gz``, which
+contains non-ASCII characters and so needs to use base64 encoding when
+transferring the file.
+
+The paths probed by FileProbes must also be included as Path entries
+in your bundles in order to be handled properly by Cfg. Permissions
+are handled as usual, with ``info.xml`` files in Cfg.
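+
+As a sketch, the bundle entry corresponding to the first probed file
+above might look like the following (the bundle name is hypothetical):
+
+.. code-block:: xml
+
+    <Bundle name="foo">
+      <Path name="/etc/foo.conf"/>
+    </Bundle>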
diff --git a/doc/server/plugins/probes/index.txt b/doc/server/plugins/probes/index.txt
index 95aa2d0ce..26c656374 100644
--- a/doc/server/plugins/probes/index.txt
+++ b/doc/server/plugins/probes/index.txt
@@ -155,6 +155,39 @@ the client-specific one will be used.
If you want to to detect information about the client operating system,
the :ref:`server-plugins-probes-ohai` plugin can help.
+Data Storage
+============
+
+.. versionadded:: 1.3.0
+
+The Probes plugin stores the output of client probes locally on the
+Bcfg2 server in order to ensure that probe data and groups are
+available on server startup (rather than having to wait until all
+probes have run every time the server is restarted) and available to
+:ref:`bcfg2-info <server-bcfg2-info>` and related tools. There are
+two options for storing this data: ``Probes/probed.xml``, a plain XML
+file stored in the Bcfg2 specification, or a database.
+
+Advantages and disadvantages of using the database:
+
+* The database is easier to query from other machines, for instance if
+ you run ``bcfg2-info`` or ``bcfg2-test`` on a machine that is not
+ your Bcfg2 server.
+* The database allows multiple Bcfg2 servers to share probe data.
+* The database is likely to handle probe data writes (which happen on
+ every client run) more quickly, since it can only write the probes
+ whose data has changed.
+* The database is likely to handle probe data reads (which happen only
+ on server startup) more slowly, since it must query a database
+ rather than the local filesystem. Once the data has been read in
+ initially (from XML file or from the database) it is kept in memory.
+
+To use the database-backed storage model, set ``use_database`` in the
+``[probes]`` section of ``bcfg2.conf`` to ``true``. You will also
+need to configure the :ref:`server-database`.
+
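+A minimal sketch of the relevant ``bcfg2.conf`` excerpt (database
+connection settings are covered by the server database
+documentation):
+
+.. code-block:: ini
+
+    [probes]
+    use_database = true
+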
+The file-based storage model is the default, although that is likely
+to change in future versions of Bcfg2.
Other examples
==============
@@ -170,64 +203,10 @@ Other examples
producttype
serial-console-speed
+Other Probing plugins
+=====================
+
.. toctree::
- :hidden:
ohai
-
-.. _server-plugins-probes-fileprobes:
-
-FileProbes
-==========
-
-The FileProbes plugin allows you to probe a client for a file,
-which is then added to the :ref:`server-plugins-generators-cfg`
-specification. If the file changes on the client, FileProbes can
-either update it in the specification or allow Cfg to replace it.
-
-FileProbes will not probe a file if there's already a file in Cfg that
-will apply to the client. So if, for instance, you have a generic
-file in ``Cfg/etc/foo.conf/foo.conf`` that applies to all hosts,
-FileProbes will not retrieve ``/etc/foo.conf`` from the client (unless
-``update`` is enabled; see Configuration_ below).
-
-When a new config file is first probed, an ``info.xml`` file is also
-written to enforce the permissions from that client. Subsequent
-probes from other clients will not modify or overwrite the data in
-``info.xml``. (This ensures that any manual changes you make to
-``info.xml`` for that file are not circumvented.)
-
-Configuration
--------------
-
-FileProbes is configured in ``FileProbes/config.xml``, which might
-look something like:
-
-.. code-block:: xml
-
- <FileProbes>
- <FileProbe name="/etc/foo.conf"/>
- <Group name="blah-servers">
- <FileProbe name="/etc/blah.conf" update="true"/>
- </Group>
- <Client name="bar.example.com">
- <FileProbe name="/var/lib/bar.gz" base64="true"/>
- </Client>
- </FileProbes>
-
-This will result in ``/etc/foo.conf`` being retrieved from all
-clients; if it changes on a client, it will be overwritten by the
-version that was retrieved initially.
-
-Clients in the ``blah-servers`` group will be probed for
-``/etc/blah.conf``; if it changes on a client, those changes will be
-written into the Bcfg2 specification. If the file is deleted from a
-client, it will be rewritten from Bcfg2.
-
-``bar.example.com`` will be probed for ``/var/lib/bar.gz``, which
-contains non-ASCII characters and so needs to use base64 encoding when
-transferring the file.
-
-The paths probed by FileProbes must also be included as Path entries
-in your bundles in order to be handled properly by Cfg. Permissions
-are handled as usual, with ``info.xml`` files in Cfg.
+ fileprobes
diff --git a/doc/server/selinux.txt b/doc/server/selinux.txt
new file mode 100644
index 000000000..0cbf0985e
--- /dev/null
+++ b/doc/server/selinux.txt
@@ -0,0 +1,97 @@
+.. -*- mode: rst -*-
+
+.. _server-selinux:
+
+=======
+SELinux
+=======
+
+.. versionadded:: 1.3.0
+
+Bcfg2 has the ability to handle the majority of SELinux entries with
+the ``SELinux`` entry type, which handles modules (with the
+:ref:`server-plugins-generators-semodules` plugin), file contexts,
+users and user mappings, permissive domains, nodes, and interfaces.
+In addition, ``info.xml`` files and most types of the ``Path`` tag can
+accept an ``secontext`` attribute to set the context of that entry.
+The full semantics of each configuration entry is documented with the
+:ref:`server-plugins-generators-rules` plugin.
+
+.. note:: The ``secontext`` attribute takes a *full* context,
+ e.g., "``system_u:object_r:etc_t:s0``"; the ``selinuxtype``
+ attribute always takes *only* an SELinux type, e.g.,
+ "``etc_t``". ``secontext`` (but not ``selinuxtype``) can
+ also accept the special value "``__default__``", which will
+ restore the context on the Path entry in question to the
+ default supplied by the SELinux policy.
+
+In its current version, the SELinux support in Bcfg2 is not sufficient
+to manage MCS/MLS policies.
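+
+For example, a hypothetical ``Path`` entry in Rules that pins an
+explicit context might look like the following (all attribute values
+here are illustrative):
+
+.. code-block:: xml
+
+    <Path type="file" name="/etc/foo.conf" owner="root" group="root"
+          perms="0644" secontext="system_u:object_r:etc_t:s0"/>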
+
+Extra Entries
+=============
+
+As it can be very tedious to create a baseline of all existing SELinux
+entries, you can use ``selinux_baseline.py`` located in the ``tools/``
+directory to do that for you.
+
+The definition of an "extra" entry depends on the
+version of SELinux available; the SELinux APIs have been extremely
+fluid, so many features available in newer versions are not available
+in older versions. Newer SELinux versions (e.g., in recent versions
+of Fedora) can be queried for only entries that have been locally
+modified; on these versions of SELinux, only locally modified entries
+will be considered extra. On older SELinux versions (e.g., on RHEL
+5), however, that functionality is missing, so *all* SELinux entries
+will be considered extra, making ``selinux_baseline.py`` quite
+necessary.
+
+``selinux_baseline.py`` writes a bundle to stdout that contains
+``BoundSELinux`` entries for the appropriate SELinux entities. It
+does this rather than writing separate Bundle/Rules files because of the
+:ref:`server-selinux-duplicate-entries` problem.
+
+.. _server-selinux-duplicate-entries:
+
+Duplicate Entries
+=================
+
+In certain cases, it may be necessary to create multiple SELinux
+entries with the same name. For instance, "root" is both an SELinux
+user and an SELinux login record, so to manage both, you would have
+the following in Bundler:
+
+.. code-block:: xml
+
+ <SELinux name="root"/>
+ <SELinux name="root"/>
+
+And in Rules:
+
+.. code-block:: xml
+
+ <SELinux type="login" selinuxuser="root" name="root"/>
+ <SELinux type="user" prefix="user" name="root"
+ roles="system_r sysadm_r user_r"/>
+
+But Rules has no way to tell which "root" is which, and you will get
+errors. In these cases, it is necessary to use ``BoundSELinux`` tags
+directly in Bundler. (See :ref:`boundentries` for more details on
+bound entries.) For instance:
+
+.. code-block:: xml
+
+ <BoundSELinux type="login" selinuxuser="root" name="root"/>
+ <BoundSELinux type="user" prefix="user" name="root"
+ roles="system_r sysadm_r user_r"/>
+
+It may also be necessary to use ``BoundSELinux`` tags if a single
+fcontext needs two different SELinux types depending on whether it's a
+symlink or a plain file. For instance:
+
+.. code-block:: xml
+
+ <BoundSELinux type="fcontext" filetype="symlink"
+ name="/etc/localtime" selinuxtype="etc_t"/>
+ <BoundSELinux type="fcontext" filetype="regular"
+ name="/etc/localtime" selinuxtype="locale_t"/>
diff --git a/examples/bcfg2.confHostbase b/examples/bcfg2.confHostbase
index b130cf4f7..c9420e34a 100644
--- a/examples/bcfg2.confHostbase
+++ b/examples/bcfg2.confHostbase
@@ -1,7 +1,6 @@
[server]
repository = /var/lib/bcfg2
-structures = Bundler,Base
-generators = SSHbase,Cfg,Pkgmgr,Svcmgr
+plugins = Bundler,Rules,Metadata,SSHbase,Cfg
[statistics]
sendmailpath = /usr/sbin/sendmail
diff --git a/gentoo/bcfg2-1.2.2.ebuild b/gentoo/bcfg2-1.3.0.ebuild
index c054446fe..68bfa24ea 100644
--- a/gentoo/bcfg2-1.2.2.ebuild
+++ b/gentoo/bcfg2-1.3.0.ebuild
@@ -1,14 +1,15 @@
-# Copyright 1999-2011 Gentoo Foundation
+# Copyright 1999-2012 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
-# $Header: /var/cvsroot/gentoo-x86/app-admin/bcfg2/bcfg2-1.2.0.ebuild,v 1.1 2011/12/28 07:56:20 xmw Exp $
+# $Header: $
+
+EAPI="4"
-EAPI="3"
PYTHON_DEPEND="2:2.6"
SUPPORT_PYTHON_ABIS="1"
# ssl module required.
RESTRICT_PYTHON_ABIS="2.4 2.5 3.*"
-inherit distutils
+inherit distutils eutils
DESCRIPTION="configuration management tool"
HOMEPAGE="http://bcfg2.org"
@@ -17,16 +18,17 @@ SRC_URI="ftp://ftp.mcs.anl.gov/pub/bcfg/${P}.tar.gz"
LICENSE="BSD"
SLOT="0"
KEYWORDS="~amd64 ~x86 ~amd64-linux ~x86-linux ~x64-solaris"
-IUSE="doc genshi server"
+IUSE="doc cheetah genshi server"
DEPEND="dev-python/setuptools
doc? ( dev-python/sphinx )"
RDEPEND="app-portage/gentoolkit
+ cheetah? ( dev-python/cheetah )
genshi? ( dev-python/genshi )
server? (
virtual/fam
dev-python/lxml
- dev-libs/libgamin[python] )"
+ || ( dev-python/pyinotify dev-libs/libgamin[python] ) )"
PYTHON_MODNAME="Bcfg2"
diff --git a/man/bcfg2-admin.8 b/man/bcfg2-admin.8
index 6607344d8..bed51ff9a 100644
--- a/man/bcfg2-admin.8
+++ b/man/bcfg2-admin.8
@@ -1,214 +1,260 @@
-.TH "bcfg2-admin" 8
-.SH NAME
-bcfg2-admin \- Perform repository administration tasks
-.SH SYNOPSIS
-.B bcfg2-admin
-.I [-C config-file]
-.I <mode>
-.I <mode args>
-.I <mode options>
-.SH DESCRIPTION
-.PP
-.B bcfg2-admin
-Perform Bcfg2 repository administration
-.SH OPTIONS
-.PP
-.B \-C <config-file>
-.RS
-Specify the location of the configuration file (if it is not in
-/etc/bcfg2.conf).
-.RE
-.SH MODES
-.PP
-.B init
-.RS
-Initialize a new repository (interactive).
-.RE
-.B backup
-.RS
-Create an archive of the whole Bcfg2 repository.
-.RE
-.B bundle <action>
-.RS
-Display details about the available bundles.
-.RE
-.B client <action> <client> [attribute=value]
-.RS
-Add, edit, or remove clients entries in metadata.
-.RE
-.B query [g=group] [p=profile] [-f output-file] [-n] [-c]
-.RS
-Search for clients based on group or profile.
-.RE
-.B compare <old> <new>
-.RS
-Compare two client configurations. Can be used to verify consistent
-behavior between releases. Determine differences between files or
-directories.
-.RE
-.B minestruct <client> [-f xml-file] [-g groups]
-.RS
-Build structure entries based on client statistics extra entries.
-.RE
-.B pull <client> <entry-type> <entry-name>
-.RS
-Install configuration information into repo based on client bad
-entries.
-.RE
-.B reports [init|load_stats|purge|scrub|update]
-.RS
-Interact with the dynamic reporting system.
-.RE
-.B snapshots [init|dump|query|reports]
-.RS
-Interact with the Snapshots database.
-.RE
-.B tidy
-.RS
-Remove unused files from repository.
-.RE
-.B viz [-H] [-b] [-k] [-o png-file]
-.RS
-Create a graphviz diagram of client, group and bundle information.
-.RE
-.SH BUNDLE OPTIONS
-.PP
-.B mode
-.RS
-List all available xml bundles 'list-xml' or for all available genshi
-bundles 'list-genshi'. 'show' provides an interactive dialog to get
-details about the available bundles.
-.RE
-.SH CLIENT OPTIONS
-.PP
-.B mode
-.RS
-Add a client 'add', delete a client 'del', or 'list' all client entries.
-.RE
-.B client
-.RS
-Specify the client's name.
-.RE
-.B attribute=value
-.RS
-Set attribute values when adding a new client. Allowed attributes
-are 'profile', 'uuid', 'password', 'location', 'secure', and 'address'.
-.RE
-.SH QUERY OPTIONS
-.PP
-.B g=group
-.RS
-Specify a group to search within.
-.RE
-.B p=profile
-.RS
-Specify a profile to search within.
-.RE
-.B \-f <output-file>
-.RS
-Write the results of the query to a file.
-.RE
-.B \-n
-.RS
-Print the results, one on each line.
-.RE
-.B \-c
-.RS
-Print the results, separated by commas.
-.RE
-.SH COMPARE OPTIONS
-.PP
-.B old
-.RS
-Specify the location of the old configuration file.
-.RE
-.B new
-.RS
-Specify the location of the new configuration file.
-.RE
-.SH MINESTRUCT OPTIONS
-.PP
-.B client
-.RS
-Client whose metadata is to be searched for extra entries.
-.RE
-.B \-g <groups>
-.RS
-Hierarchy of groups in which to place the extra entries in.
-.RE
-.B \-f <xml-output-file>
-.RS
-Specify the xml file in which to write the extra entries.
-.RE
-.SH PULL OPTIONS
-.PP
-.B client
-.RS
-Specify the name of the client to search for.
-.RE
-.B entry-type
-.RS
-Specify the type of the entry to pull.
-.RE
-.B entry-name
-.RS
-Specify the name of the entry to pull.
-.RE
-.SH REPORTS OPTIONS
-.PP
-.B init
-.RS
-Initialize the database.
-.RE
-.B load_stats [-s] [-c] [-03]
-.RS
-Load statistics data.
-.RE
-.B purge [--client [n]] [--days [n]] [--expired]
-.RS
-Purge historic and expired data.
-.RE
-.B scrub
-.RS
-Scrub the database for duplicate reasons and orphaned entries.
-.RE
-.B update
-.RS
-Apply any updates to the reporting database.
-.RE
-.SH SNAPSHOTS OPTIONS
-.PP
-.B init
-.RS
-Initialize the snapshots database.
-.RE
-.B query
-.RS
-Query the snapshots database.
-.RE
-.B dump
-.RS
-Dump some of the contents of the snapshots database.
-.RE
-.B reports [-a] [-b] [-e] [--date=<MM-DD-YYYY>]
-.RS
-Generate reports for clients in the snapshots database.
-.RE
-.SH VIZ OPTIONS
-.PP
-.B \-H
-.RS
-Include hosts in diagram.
-.RE
-.B \-b
-.RS
-Include bundles in diagram.
-.RE
-.B \-o <output file>
-.RS
-Write to outfile file instead of stdout.
-.RE
-.B \-k
-.RS
-Add a shape/color key.
-.RE
+.
+.TH "BCFG2\-ADMIN" "8" "August 2012" "" ""
+.
+.SH "NAME"
+\fBbcfg2\-admin\fR \- Perform repository administration tasks
+.
+.SH "SYNOPSIS"
+\fBbcfg2\-admin\fR [\-C \fIconfigfile\fR] \fImode\fR [\fImode args\fR] [\fImode options\fR]
+.
+.SH "DESCRIPTION"
+\fBbcfg2\-admin\fR is used to perform Bcfg2 repository administration
+.
+.SH "OPTIONS"
+.
+.TP
+\fB\-C\fR \fIconfigfile\fR
+Specify alternate bcfg2\.conf location\.
+.
+.TP
+\fB\-E\fR \fIencoding\fR
+Specify the encoding of Cfg files\.
+.
+.TP
+\fB\-Q\fR \fIrepository path\fR
+Specify the path to the server repository\.
+.
+.TP
+\fB\-S\fR \fIhttps://server:port\fR
+Manually specify the server location (as opposed to using the value in bcfg2\.conf)\.
+.
+.TP
+\fB\-d\fR
+Enable debugging output\.
+.
+.TP
+\fB\-h\fR
+Print Usage information\.
+.
+.TP
+\fB\-o\fR \fIlogfile path\fR
+Writes a log to the specified path\.
+.
+.TP
+\fB\-v\fR
+Enable verbose output\.
+.
+.TP
+\fB\-x\fR \fIpassword\fR
+Use ’password’ for client communication\.
+.
+.TP
+\fB\-\-ssl\-key=\fR\fIssl key\fR
+Specify the path to the SSL key\.
+.
+.SH "MODES"
+.
+.TP
+\fBbackup\fR
+Create an archive of the entire Bcfg2 repository\.
+.
+.TP
+\fBbundle\fR \fIaction\fR
+Display details about the available bundles (See \fI\fBBUNDLE OPTIONS\fR\fR below)\.
+.
+.TP
+\fBclient\fR \fIaction\fR \fIclient\fR [attribute=value]
+Add, edit, or remove clients entries in metadata (See \fI\fBCLIENT OPTIONS\fR\fR below)\.
+.
+.TP
+\fBcompare\fR \fIold\fR \fInew\fR
+Compare two client configurations\. Can be used to verify consistent behavior between releases\. Determine differences between files or directories (See \fI\fBCOMPARE OPTIONS\fR\fR below)\.
+.
+.TP
+\fBinit\fR
+Initialize a new repository (interactive)\.
+.
+.TP
+\fBminestruct\fR \fIclient\fR [\-f xml\-file] [\-g groups]
+Build structure entries based on client statistics extra entries (See \fI\fBMINESTRUCT OPTIONS\fR\fR below)\.
+.
+.TP
+\fBperf\fR
+Query server for performance data\.
+.
+.TP
+\fBpull\fR \fIclient\fR \fIentry\-type\fR \fIentry\-name\fR
+Install configuration information into repo based on client bad entries (See \fI\fBPULL OPTIONS\fR\fR below)\.
+.
+.TP
+\fBquery\fR [g=group] [p=profile] [\-f output\-file] [\-n] [\-c]
+Search for clients based on group or profile (See \fI\fBQUERY OPTIONS\fR\fR below)\.
+.
+.TP
+\fBreports\fR [init|load_stats|purge|scrub|update]
+Interact with the dynamic reporting system (See \fI\fBREPORTS OPTIONS\fR\fR below)\.
+.
+.TP
+\fBsnapshots\fR [init|dump|query|reports]
+Interact with the Snapshots database (See \fI\fBSNAPSHOTS OPTIONS\fR\fR below)\.
+.
+.TP
+\fBsyncdb\fR
+Sync the Django ORM with the configured database\.
+.
+.TP
+\fBtidy\fR
+Remove unused files from repository\.
+.
+.TP
+\fBviz\fR [\-H] [\-b] [\-k] [\-o png\-file]
+Create a graphviz diagram of client, group and bundle information (See \fI\fBVIZ OPTIONS\fR\fR below)\.
+.
+.TP
+\fBxcmd\fR
+Provides an XML\-RPC command interface to the bcfg2\-server\.
+.
+.SS "BUNDLE OPTIONS"
+.
+.TP
+\fBmode\fR
+Use ’list\-xml’ to list all available XML bundles, or ’list\-genshi’ to list all available Genshi bundles\. ’show’ provides an interactive dialog to get details about the available bundles\.
+.
+.SS "CLIENT OPTIONS"
+.
+.TP
+\fBmode\fR
+Use ’add’ to add a client, ’del’ to delete a client, or ’list’ to list all client entries\.
+.
+.TP
+\fBclient\fR
+Specify the client’s name\.
+.
+.TP
+\fBattribute=value\fR
+Set attribute values when adding a new client\. Allowed attributes are ’profile’, ’uuid’, ’password’, ’location’, ’secure’, and ’address’\.
+.
+.SS "QUERY OPTIONS"
+.
+.TP
+\fBb=bundle\fR
+Specify a bundle to search for within client metadata\.
+.
+.TP
+\fBg=group\fR
+Specify a group to search within\.
+.
+.TP
+\fBp=profile\fR
+Specify a profile to search within\.
+.
+.TP
+\fB\-f\fR \fIoutput file\fR
+Write the results of the query to a file\.
+.
+.TP
+\fB\-n\fR
+Print the results, one on each line\.
+.
+.TP
+\fB\-c\fR
+Print the results, separated by commas\.
+.
+.SS "COMPARE OPTIONS"
+.
+.TP
+\fBold\fR
+Specify the location of the old configuration file\.
+.
+.TP
+\fBnew\fR
+Specify the location of the new configuration file\.
+.
+.SS "MINESTRUCT OPTIONS"
+.
+.TP
+\fBclient\fR
+Client whose metadata is to be searched for extra entries\.
+.
+.TP
+\fB\-g\fR \fIgroups\fR
+Hierarchy of groups in which to place the extra entries\.
+.
+.TP
+\fB\-f\fR \fIxml output file\fR
+Specify the xml file in which to write the extra entries\.
+.
+.SS "PULL OPTIONS"
+.
+.TP
+\fBclient\fR
+Specify the name of the client to search for\.
+.
+.TP
+\fBentry type\fR
+Specify the type of the entry to pull\.
+.
+.TP
+\fBentry name\fR
+Specify the name of the entry to pull\.
+.
+.SS "REPORTS OPTIONS"
+.
+.TP
+\fBinit\fR
+Initialize the database\.
+.
+.TP
+\fBload_stats\fR [\-s] [\-c] [\-03]
+Load statistics data\.
+.
+.TP
+\fBpurge\fR [\-\-client [n]] [\-\-days [n]] [\-\-expired]
+Purge historic and expired data\.
+.
+.TP
+\fBscrub\fR
+Scrub the database for duplicate reasons and orphaned entries\.
+.
+.TP
+\fBupdate\fR
+Apply any updates to the reporting database\.
+.
+.SS "SNAPSHOTS OPTIONS"
+.
+.TP
+\fBinit\fR
+Initialize the snapshots database\.
+.
+.TP
+\fBquery\fR
+Query the snapshots database\.
+.
+.TP
+\fBdump\fR
+Dump some of the contents of the snapshots database\.
+.
+.TP
+\fBreports\fR [\-a] [\-b] [\-e] [\-\-date=\fIMM\-DD\-YYYY\fR]
+Generate reports for clients in the snapshots database\.
+.
+.SS "VIZ OPTIONS"
+.
+.TP
+\fB\-H\fR
+Include hosts in diagram\.
+.
+.TP
+\fB\-b\fR
+Include bundles in diagram\.
+.
+.TP
+\fB\-o\fR \fIoutput file\fR
+Write to the specified output file instead of stdout\.
+.
+.TP
+\fB\-k\fR
+Add a shape/color key\.
+.
+.SH "SEE ALSO"
+bcfg2\-info(8), bcfg2\-server(8)
diff --git a/man/bcfg2-build-reports.8 b/man/bcfg2-build-reports.8
index a14dea728..3c61e8356 100644
--- a/man/bcfg2-build-reports.8
+++ b/man/bcfg2-build-reports.8
@@ -1,40 +1,36 @@
-.TH "bcfg2-build-reports" 8
-.SH NAME
-bcfg2-build-reports \- Generate state reports for Bcfg2 clients
-.SH SYNOPSIS
-.B bcfg2-build-reports
-.I [-A] [-c] [-s] [-N]
-.SH DESCRIPTION
-.PP
-.B bcfg2-build-reports
-Build all client state reports. See the Bcfg2 manual for report setup
-information.
-.SH OPTIONS
-.PP
-.B "\-A"
-.RS
-Displays all data.
-.RE
-.B "\-c <configuration file>"
-.RS
-Specify an alternate report configuration path. The default is
-repo/etc/reports-configuration.xml.
-.RE
-.B "\-h"
-.RS
-Produce a help message.
-.RE
-.B "\-s <statistics Path>"
-.RS
-Use an alternative path for the statistics file. The default is
-repo/etc/statistics.xml
-.RE
-.B "\-N"
-.RS
-No pinging.
-.RE
+.
+.TH "BCFG2\-BUILD\-REPORTS" "8" "August 2012" "" ""
+.
+.SH "NAME"
+\fBbcfg2\-build\-reports\fR \- Generate state reports for Bcfg2 clients
+.
+.SH "SYNOPSIS"
+\fBbcfg2\-build\-reports\fR [\fI\-A\fR] [\fI\-c\fR] [\fI\-s\fR] [\fI\-N\fR]
+.
+.SH "DESCRIPTION"
+\fBbcfg2\-build\-reports\fR is used to build all client state reports\. See the Bcfg2 manual for report setup information\.
+.
+.SH "OPTIONS"
+.
+.TP
+\fB\-A\fR
+Displays all data\.
+.
+.TP
+\fB\-c\fR \fIconfiguration file\fR
+Specify an alternate report configuration path\. The default is repo/etc/reports\-configuration\.xml\.
+.
+.TP
+\fB\-h\fR
+Produce a help message\.
+.
+.TP
+\fB\-s\fR \fIstatistics path\fR
+Use an alternative path for the statistics file\. The default is repo/etc/statistics\.xml\.
+.
+.TP
+\fB\-N\fR
+No pinging\.
+.
.SH "SEE ALSO"
-.BR bcfg(1),
-.BR bcfg2-server(8)
-.SH "BUGS"
-None currently known
+bcfg2(1), bcfg2\-server(8)
diff --git a/man/bcfg2-crypt.8 b/man/bcfg2-crypt.8
new file mode 100644
index 000000000..a73f3e066
--- /dev/null
+++ b/man/bcfg2-crypt.8
@@ -0,0 +1,83 @@
+.
+.TH "BCFG2\-CRYPT" "8" "August 2012" "" ""
+.
+.SH "NAME"
+\fBbcfg2\-crypt\fR \- Bcfg2 encryption and decryption utility
+.
+.SH "SYNOPSIS"
+\fBbcfg2\-crypt\fR [\fI\-C configfile\fR] [\-\-decrypt|\-\-encrypt] [\-\-cfg|\-\-properties] [\-\-remove] [\-\-xpath \fIxpath\fR] [\-p \fIpassphrase\-or\-name\fR] [\-v] \fIfilename\fR [\fIfilename\fR\.\.\.]
+.
+.SH "DESCRIPTION"
+\fBbcfg2\-crypt\fR performs encryption and decryption of Cfg and Properties files\. It\'s often sufficient to run \fBbcfg2\-crypt\fR with only the name of the file you wish to encrypt or decrypt; it can usually figure out what to do\.
+.
+.SH "OPTIONS"
+.
+.TP
+\fB\-C\fR \fIconfigfile\fR
+Specify alternate bcfg2\.conf location
+.
+.TP
+\fB\-\-decrypt\fR, \fB\-\-encrypt\fR
+Specify which operation you\'d like to perform\. \fBbcfg2\-crypt\fR can usually determine which is necessary based on the contents of each file\.
+.
+.TP
+\fB\-\-cfg\fR
+Tell \fBbcfg2\-crypt\fR that an XML file should be encrypted in its entirety rather than element\-by\-element\. This is only necessary if the file is an XML file whose name ends with \fB\.xml\fR and whose top\-level tag is \fB<Properties>\fR\. See \fIMODES\fR below for details\.
+.
+.TP
+\fB\-\-properties\fR
+Tell \fBbcfg2\-crypt\fR to process a file as an XML Properties file, and encrypt the text of each element separately\. This is necessary if, for example, you\'ve used a different top\-level tag than \fB<Properties>\fR in your Properties files\. See \fIMODES\fR below for details\.
+.
+.TP
+\fB\-\-remove\fR
+Remove the plaintext file after it has been encrypted\. Only meaningful for Cfg files\.
+.
+.TP
+\fB\-\-xpath <xpath>\fR
+Encrypt the character content of all elements that match the specified XPath expression\. The default is \fB*[@encrypted]\fR or \fB*\fR; see \fIMODES\fR below for more details\. Only meaningful for Properties files\.
+.
+.TP
+\fB\-p <passphrase>\fR
+Specify the name of a passphrase specified in the \fB[encryption]\fR section of \fBbcfg2\.conf\fR\. See \fISELECTING PASSPHRASE\fR below for more details\.
+.
+.TP
+\fB\-v\fR
+Be verbose\.
+.
+.TP
+\fB\-h\fR
+Display help and exit\.
+.
+.SH "MODES"
+\fBbcfg2\-crypt\fR can encrypt Cfg files or Properties files; they are handled very differently\.
+.
+.TP
+Cfg
+When \fBbcfg2\-crypt\fR is used on a Cfg file, the entire file is encrypted\. This is the default behavior on files that are not XML, or that are XML but whose top\-level tag is not \fB<Properties>\fR\. This can be enforced by use of the \fB\-\-cfg\fR option\.
+.
+.TP
+Properties
+When \fBbcfg2\-crypt\fR is used on a Properties file, it encrypts the character content of elements matching the XPath expression given by \fB\-\-xpath\fR\. By default the expression is \fB*[@encrypted]\fR, which matches all elements with an \fBencrypted\fR attribute\. If you are encrypting a file and that expression doesn\'t match any elements, then the default is \fB*\fR, which matches everything\. When \fBbcfg2\-crypt\fR encrypts the character content of an element, it also adds the \fBencrypted\fR attribute, set to the name of the passphrase used to encrypt that element\. When it decrypts an element it does not remove \fBencrypted\fR, though; this lets you easily and efficiently run \fBbcfg2\-crypt\fR against a single Properties file to encrypt and decrypt it without needing to specify a long list of options\. See the online Bcfg2 docs on Properties files for more information on how this works\.
+.
+.SH "SELECTING PASSPHRASE"
+The passphrase used to encrypt or decrypt a file is discovered in the following order:
+.
+.IP "\(bu" 4
+First, the passphrase given on the command line using \fB\-p\fR is used\.
+.
+.IP "\(bu" 4
+Next, if exactly one passphrase is specified in \fBbcfg2\.conf\fR, it will be used\.
+.
+.IP "\(bu" 4
+Next, if operating in Properties mode, \fBbcfg2\-crypt\fR will attempt to read the name of the passphrase from the encrypted elements\.
+.
+.IP "\(bu" 4
+Next, if decrypting, all passphrases will be tried sequentially\.
+.
+.IP "\(bu" 4
+If no passphrase has been determined at this point, an error is produced and the file being encrypted or decrypted is skipped\.
+.
+.IP "" 0
+.
+.SH "SEE ALSO"
+bcfg2\-server(8)
diff --git a/man/bcfg2-info.8 b/man/bcfg2-info.8
index a644926b5..bb515079f 100644
--- a/man/bcfg2-info.8
+++ b/man/bcfg2-info.8
@@ -1,131 +1,134 @@
-.TH "bcfg2-info" 8
-.SH NAME
-bcfg2-info \- Creates a local version of the bcfg2 server core for
-state observation
-.SH SYNOPSIS
-.B bcfg2-info
-.I [\-C <config file>] [\-E <encoding>] [\-Q <repository path>] [\-h] [\-p] [\-x <password>]
-.I <mode>
-.I <mode args>
-.I <mode options>
-.SH DESCRIPTION
-.PP
-.B bcfg2-info
-Instantiate an instance of the Bcfg2 core for data examination and
-debugging purposes.
-.SH OPTIONS
-.PP
-.B "\-C <config file>"
-.RS
-Specify the location of the configuration file (if it is not in
-/etc/bcfg2.conf).
-.RE
-.B "\-E <encoding>"
-.RS
-Specify the encoding of config files.
-.RE
-.B "\-Q <repository path>
-.RS
-Specify the server repository path.
-.RE
-.B "\-d"
-.RS
-Run in debug mode.
-.RE
-.B "\-h"
-.RS
-Give a bit of help about the command line arguments and
-options. After this bcfg2-info exits.
-.RE
-.B "\-p"
-.RS
-Specify a profile.
-.RE
-.B "\-x <password>"
-.RS
-Set the communication password.
-.RE
-.SH MODES
-.PP
-.B build <hostname> <filename>
-.RS
-Build config for hostname, writing to filename.
-.RE
-.B builddir <hostname> <dirname>
-.RS
-Build config for hostname, writing separate files to dirname.
-.RE
-.B buildall <directory>
-.RS
-Build configs for all clients in directory.
-.RE
-.B buildbundle <filename> <hostname>
-.RS
-Build bundle for hostname (not written to disk). If filename is a bundle
-template, it is rendered.
-.RE
-.B buildfile [--altsrc=<altsrc>] <filename> <hostname>
-.RS
-Build config file for hostname (not written to disk).
-.RE
-.B bundles
-.RS
-Print out group/bundle information.
-.RE
-.B clients
-.RS
-Print out client/profile information.
-.RE
-.B config
-.RS
-Print out the configuration of the Bcfg2 server.
-.RE
-.B debug
-.RS
-Shell out to native python interpreter.
-.RE
-.B event_debug
-.RS
-Display filesystem events as they are processed.
-.RE
-.B groups
-.RS
-List groups
-.RE
-.B help
-.RS
-Print the list of available commands.
-.RE
-.B mappings <type*> <name*>
-.RS
-Print generator mappings for optional type and name.
-.RE
-.B profile <command> <args>
-.RS
-Profile a single bcfg2-info command.
-.RE
-.B quit
-.RS
-Exit bcfg2-info command line.
-.RE
-.B showentries <hostname> <type>
-.RS
-Show abstract configuration entries for a given host.
-.RE
-.B showclient <client1> <client2>
-.RS
-Show metadata for given hosts.
-.RE
-.B update
-.RS
-Process pending file events.
-.RE
-.B version
-.RS
-Print version of this tool.
-.RE
+.
+.TH "BCFG2\-INFO" "8" "August 2012" "" ""
+.
+.SH "NAME"
+\fBbcfg2\-info\fR \- Creates a local version of the Bcfg2 server core for state observation
+.
+.SH "SYNOPSIS"
+\fBbcfg2\-info\fR [\fI\-C configfile\fR] [\-E \fIencoding\fR] [\-Q \fIrepository path\fR] [\-h] [\-p] [\-x \fIpassword\fR] [\fImode\fR] [\fImode args\fR] [\fImode options\fR]
+.
+.SH "DESCRIPTION"
+\fBbcfg2\-info\fR instantiates an instance of the Bcfg2 core for data examination and debugging purposes\.
+.
+.SH "OPTIONS"
+.
+.TP
+\fB\-C\fR \fIconfigfile\fR
+Specify alternate bcfg2\.conf location
+.
+.TP
+\fB\-E\fR \fIencoding\fR
+Specify the encoding of config files\.
+.
+.TP
+\fB\-Q\fR \fIrepository path\fR
+Specify the server repository path\.
+.
+.TP
+\fB\-d\fR
+Run in debug mode\.
+.
+.TP
+\fB\-h\fR
+Give a bit of help about the command line arguments and options\. After this bcfg2\-info exits\.
+.
+.TP
+\fB\-p\fR
+Specify a profile\.
+.
+.TP
+\fB\-x\fR \fIpassword\fR
+Set the communication password\.
+.
+.SH "MODES"
+.
+.TP
+\fBbuild\fR \fIhostname\fR \fIfilename\fR
+Build config for hostname, writing to filename\.
+.
+.TP
+\fBbuildall\fR \fIdirectory\fR
+Build configs for all clients in directory\.
+.
+.TP
+\fBbuildallfile\fR \fIdirectory\fR \fIfilename\fR [\fIhostnames\fR]
+Build config file for all clients in directory\.
+.
+.TP
+\fBbuildbundle\fR \fIfilename\fR \fIhostname\fR
+Build bundle for hostname (not written to disk)\. If filename is a bundle template, it is rendered\.
+.
+.TP
+\fBbuilddir\fR \fIhostname\fR \fIdirname\fR
+Build config for hostname, writing separate files to dirname\.
+.
+.TP
+\fBbuildfile\fR [\-\-altsrc=\fIaltsrc\fR] \fIfilename\fR \fIhostname\fR
+Build config file for hostname (not written to disk)\.
+.
+.TP
+\fBbundles\fR
+Print out group/bundle information\.
+.
+.TP
+\fBclients\fR
+Print out client/profile information\.
+.
+.TP
+\fBconfig\fR
+Print out the configuration of the Bcfg2 server\.
+.
+.TP
+\fBdebug\fR
+Shell out to native python interpreter\.
+.
+.TP
+\fBevent_debug\fR
+Display filesystem events as they are processed\.
+.
+.TP
+\fBgroups\fR
+List groups\.
+.
+.TP
+\fBhelp\fR
+Print the list of available commands\.
+.
+.TP
+\fBmappings\fR [\fIentry type\fR] [\fIentry name\fR]
+Print generator mappings for optional type and name\.
+.
+.TP
+\fBpackageresolve\fR \fIhostname\fR \fIpackage\fR [\fIpackage\fR\.\.\.]
+Resolve the specified set of packages\.
+.
+.TP
+\fBpackagesources\fR \fIhostname\fR
+Show package sources\.
+.
+.TP
+\fBprofile\fR \fIcommand\fR \fIargs\fR
+Profile a single bcfg2\-info command\.
+.
+.TP
+\fBquit\fR
+Exit bcfg2\-info command line\.
+.
+.TP
+\fBshowentries\fR \fIhostname\fR \fItype\fR
+Show abstract configuration entries for a given host\.
+.
+.TP
+\fBshowclient\fR \fIclient1\fR \fIclient2\fR
+Show metadata for given hosts\.
+.
+.TP
+\fBupdate\fR
+Process pending file events\.
+.
+.TP
+\fBversion\fR
+Print version of this tool\.
+.
.SH "SEE ALSO"
-.BR bcfg2(1),
-.BR bcfg2-server(8)
-.SH "BUGS"
-None currently known
+bcfg2(1), bcfg2\-server(8)
diff --git a/man/bcfg2-lint.8 b/man/bcfg2-lint.8
index 25fa30f9e..7a5a69b7a 100644
--- a/man/bcfg2-lint.8
+++ b/man/bcfg2-lint.8
@@ -1,174 +1,99 @@
-.TH "bcfg2-lint" 8
-.SH NAME
-bcfg2-lint \- Check Bcfg2 specification for validity, common mistakes,
-and style
-
-.SH SYNOPSIS
-.B bcfg2-lint
-.I [OPTIONS]
-.I [<plugin> [<plugin>...]]
-
-.SH DESCRIPTION
-.PP
-.B bcfg2-lint
-This script checks the Bcfg2 specification for schema validity, common
-mistakes, and other criteria. It can be quite helpful in finding
-typos or malformed data.
-
-.B bcfg2-lint
-exits with a return value of 2 if errors were found, and 3
-if warnings (but no errors) were found. Any other non-0 exit value
-denotes some failure in the script itself.
-
-.B bcfg2-lint
-is a rewrite of the older
-.B bcfg2-repo-validate
-tool.
-
-.SH OPTIONS
-
+.
+.TH "BCFG2\-LINT" "8" "August 2012" "" ""
+.
+.SH "NAME"
+\fBbcfg2\-lint\fR \- Check Bcfg2 specification for validity, common mistakes, and style
+.
+.SH "SYNOPSIS"
+\fBbcfg2\-lint\fR [\fIoptions\fR] [\fIplugin\fR [\fIplugin\fR\.\.\.]]
+.
+.SH "DESCRIPTION"
+\fBbcfg2\-lint\fR checks the Bcfg2 specification for schema validity, common mistakes, and other criteria\. It can be quite helpful in finding typos or malformed data\.
+.
+.P
+\fBbcfg2\-lint\fR exits with a return value of 2 if errors were found, and 3 if warnings (but no errors) were found\. Any other non\-0 exit value denotes some failure in the script itself\.
+.
+.P
+\fBbcfg2\-lint\fR is a rewrite of the older bcfg2\-repo\-validate tool\.
+.
+.SH "OPTIONS"
+.
.TP
-.BR "-v"
-Be verbose.
-
+\fB\-C\fR \fIconfigfile\fR
+Specify alternate bcfg2\.conf location\.
+.
.TP
-.BR "-C"
-Specify path to bcfg2.conf (default /etc/bcfg2.conf)
-
+\fB\-Q\fR
+Specify the server repository path\.
+.
.TP
-.BR "--lint-config"
-Specify path to bcfg2-lint.conf (default /etc/bcfg2-lint.conf)
-
+\fB\-v\fR
+Be verbose\.
+.
.TP
-.BR "-Q"
-Specify path to Bcfg2 repository (default /var/lib/bcfg2)
-
+\fB\-\-lint\-config\fR
+Specify path to bcfg2\-lint\.conf (default \fB/etc/bcfg2\-lint\.conf\fR)\.
+.
.TP
-.BR "--stdin"
-Rather than operating on all files in the Bcfg2 specification, only
-validate a list of files supplied on stdin. This mode is particularly
-useful in pre-commit hooks.
-
+\fB\-\-stdin\fR
+Rather than operating on all files in the Bcfg2 specification, only validate a list of files supplied on stdin\. This mode is particularly useful in pre\-commit hooks\.
+.
+.IP
This makes a few assumptions:
-
-Metadata files will only be checked if a valid chain of XIncludes can
-be followed all the way from clients.xml or groups.xml. Since there
-are multiple formats of metadata stored in Metadata/ (i.e., clients
-and groups), there is no way to determine which sort of data a file
-contains unless there is a valid chain of XIncludes. It may be useful
-to always specify all metadata files should be checked, even if not
-all of them have changed.
-
-Property files will only be validated if both the property file itself
-and its matching schema are included on stdin.
-
+.
+.IP
+Metadata files will only be checked if a valid chain of XIncludes can be followed all the way from clients\.xml or groups\.xml\. Since there are multiple formats of metadata stored in Metadata/ (i\.e\., clients and groups), there is no way to determine which sort of data a file contains unless there is a valid chain of XIncludes\. It may be useful to always specify all metadata files should be checked, even if not all of them have changed\.
+.
+.IP
+Property files will only be validated if both the property file itself and its matching schema are included on stdin\.
+.
.TP
-.BR "--require-schema"
-Require property files to have matching schema files
-
-.RE
-
+\fB\-\-require\-schema\fR
+Require property files to have matching schema files\.
+.
.SH "PLUGINS"
-
-See
-.BR bcfg2-lint.conf(5)
-for more information on the configuration of the plugins listed below.
-
+See \fBbcfg2\-lint\.conf\fR(5) for more information on the configuration of the plugins listed below\.
+.
.TP
-.BR Bundles
-Check the specification for several issues with Bundler: bundles
-referenced in metadata but not found in
-.I Bundler/
-; bundles whose
-.I name
-attribute does not match the filename; and Genshi template bundles
-that use the
-.I <Group>
-tag (which is not processed in templated bundles).
-
+\fBBundles\fR
+Check the specification for several issues with Bundler: bundles referenced in metadata but not found in \fBBundler/\fR; bundles whose \fIname\fR attribute does not match the filename; and Genshi template bundles that use the \fI<Group>\fR tag (which is not processed in templated bundles)\.
+.
.TP
-.BR Comments
-Check the specification for VCS keywords and any comments that are
-required. By default, this only checks that the
-.I $Id$
-keyword is included and expanded in all files. You may specify VCS
-keywords to check and comments to be required in the config file.
-(For instance, you might require that every file have a "Maintainer"
-comment.)
-
-In XML files, only comments are checked for the keywords and comments
-required.
-
+\fBComments\fR
+Check the specification for VCS keywords and any comments that are required\. By default, this only checks that the \fI$Id$\fR keyword is included and expanded in all files\. You may specify VCS keywords to check and comments to be required in the config file\. (For instance, you might require that every file have a "Maintainer" comment\.)
+.
+.IP
+In XML files, only comments are checked for the keywords and comments required\.
+.
.TP
-.BR Duplicates
-Check for several types of duplicates in the Metadata: duplicate
-groups; duplicate clients; and multiple default groups.
-
+\fBDuplicates\fR
+Check for several types of duplicates in the Metadata: duplicate groups; duplicate clients; and multiple default groups\.
+.
.TP
-.BR InfoXML
-Check that certain attributes are specified in
-.I info.xml
-files. By default, requires that
-.I owner
-,
-.I group
-, and
-.I perms
-are specified. Can also require that an
-.I info.xml
-exists for all Cfg files, and that paranoid mode be enabled for all
-files.
-
+\fBInfoXML\fR
+Check that certain attributes are specified in \fBinfo\.xml\fR files\. By default, requires that \fIowner\fR, \fIgroup\fR, and \fIperms\fR are specified\. Can also require that an \fBinfo\.xml\fR exists for all Cfg files, and that paranoid mode be enabled for all files\.
+.
.TP
-.BR MergeFiles
-Suggest that similar probes and config files be merged into single
-probes or TGenshi templates.
-
+\fBMergeFiles\fR
+Suggest that similar probes and config files be merged into single probes or TGenshi templates\.
+.
.TP
-.BR Pkgmgr
-Check for duplicate packages specified in Pkgmgr.
-
+\fBPkgmgr\fR
+Check for duplicate packages specified in Pkgmgr\.
+.
.TP
-.BR RequiredAttrs
-Check that all
-.I <Path>
-and
-.I <BoundPath>
-tags have the attributes that are required by their type. (E.g., a
-path of type
-.I "symlink"
-must have
-.I name
-and
-.I to
-specified to be valid. This sort of validation is beyond the scope of
-an XML schema.
-
+\fBRequiredAttrs\fR
+Check that all \fIPath\fR and \fIBoundPath\fR tags have the attributes that are required by their type (e\.g\., a path of type symlink must have name and to specified to be valid)\. This sort of validation is beyond the scope of an XML schema\.
+.
.TP
-.BR Validate
-Validate the Bcfg2 specification against the XML schemas.
-
-Property files are freeform XML, but if a
-.I .xsd
-file with a matching filename is provided, then schema validation will
-be performed on property files individually as well. For instance, if
-you have a property file named
-.I ntp.xml
-then by placing a schema for that file in
-.I ntp.xsd
-schema validation will be performed on
-.I ntp.xml
-.
-
-
-.SH "SEE ALSO"
-.BR bcfg2(1),
-.BR bcfg2-server(8),
-.BR bcfg2-lint.conf(5)
-
+\fBValidate\fR
+Validate the Bcfg2 specification against the XML schemas\.
+.
+.IP
+Property files are freeform XML, but if a \fB\.xsd\fR file with a matching filename is provided, then schema validation will be performed on property files individually as well\. For instance, if you have a property file named \fBntp\.xml\fR then by placing a schema for that file in \fBntp\.xsd\fR schema validation will be performed on \fBntp\.xml\fR\.
+.
.SH "BUGS"
-
-bcfg2-lint may not handle some older plugins as well as it handles
-newer ones. For instance, there may be some places where it expects
-all of your configuration files to be handled by Cfg rather than by a
-mix of Cfg and TGenshi or TCheetah.
+\fBbcfg2\-lint\fR may not handle some older plugins as well as it handles newer ones\. For instance, there may be some places where it expects all of your configuration files to be handled by Cfg rather than by a mix of Cfg and TGenshi or TCheetah\.
+.
+.SH "SEE ALSO"
+bcfg2(1), bcfg2\-server(8), bcfg2\-lint\.conf(5)
diff --git a/man/bcfg2-lint.conf.5 b/man/bcfg2-lint.conf.5
index 10a812874..d23afa8dc 100644
--- a/man/bcfg2-lint.conf.5
+++ b/man/bcfg2-lint.conf.5
@@ -1,174 +1,99 @@
-.TH bcfg2-lint.conf 5
-
-.SH NAME
-bcfg2-lint.conf - configuration parameters for bcfg2-lint
-
-.SH DESCRIPTION
-.TP
-bcfg2-lint.conf includes configuration parameters for
-.I bcfg2-lint
-
-.SH FILE FORMAT
-The file is INI-style and consists of sections and options. A section
-begins with the name of the sections in square brackets and continues
-until the next section begins.
-
-Options are specified in the form 'name = value'.
-
-The file is line-based each newline-terminated line represents either
-a comment, a section name or an option.
-
-Any line beginning with a hash (#) is ignored, as are lines containing
-only whitespace.
-
-The file consists of one
-.I [lint]
-section, up to one
-.I [errors]
-section, and then any number of plugin-specific sections, documented below. (Note that this makes it quite feasible to combine your
-.B bcfg2-lint.conf
-into your
-.B bcfg2.conf(5)
-file, if you so desire.)
-
-.SH GLOBAL OPTIONS
-These options apply to
-.I bcfg2-lint
-generally, and must be in the
-.I [lint]
-section.
-
-.TP
-.BR plugins
-A comma-delimited list of plugins to run. By default, all plugins are
-run. This can be overridden by listing plugins on the command line.
-See
-.B bcfg2-lint(8)
-for a list of the available plugins.
-
-.SH ERROR HANDLING
-Error handling is configured in the
-.I [errors]
-section. Each option should be the name of an error and one of
-.I "error"
-,
-.I "warning"
-, or
-.I "silent"
-, which tells
-.B bcfg2-lint(8)
-how to handle the warning. Error names and their defaults can be
-displayed by running
-.B bcfg2-lint(8)
-with the
-.B --list-errors
-option.
-
-.SH PLUGIN OPTIONS
-
-These options apply only to a single plugin. Each option should be in
-a section named for its plugin; for instance, options for the InfoXML
-plugin would be in a section called
-.I [InfoXML]
-.
-
-If a plugin is not listed below, then it has no configuration.
-
-In many cases, the behavior of a plugin can be configured by modifying
-how errors from it are handled. See
-.B ERROR HANDLING
-, above.
-
+.
+.TH "BCFG2\-LINT\.CONF" "5" "August 2012" "" ""
+.
+.SH "NAME"
+\fBbcfg2\-lint\.conf\fR \- configuration parameters for bcfg2\-lint
+.
+.SH "DESCRIPTION"
+\fBbcfg2\-lint\.conf\fR includes configuration parameters for \fBbcfg2\-lint\fR\.
+.
+.SH "FILE FORMAT"
+The file is INI\-style and consists of sections and options\. A section begins with the name of the sections in square brackets and continues until the next section begins\.
+.
+.P
+Options are specified in the form "name=value"\.
+.
+.P
+The file is line\-based; each newline\-terminated line represents either a comment, a section name or an option\.
+.
+.P
+Any line beginning with a hash (#) is ignored, as are lines containing only whitespace\.
+.
+.P
+The file consists of one \fB[lint]\fR section, up to one \fB[errors]\fR section, and then any number of plugin\-specific sections, documented below\. (Note that this makes it quite feasible to combine your \fBbcfg2\-lint\.conf\fR into your \fBbcfg2\.conf\fR(5) file, if you so desire)\.
+.
+.SH "GLOBAL OPTIONS"
+These options apply to \fBbcfg2\-lint\fR generally, and must be in the \fB[lint]\fR section\.
+.
.TP
-.BR Comments
-
-The
-.I Comments
-plugin configuration specifies which VCS keywords and comments are
-required for which file types. The valid types of file are
-.I "global"
-(all file types),
-.I "bundler"
-(non-templated bundle files),
-.I "sgenshi"
-(templated bundle files),
-.I "properties"
-(property files),
-.I "cfg"
-(non-templated Cfg files),
-.I "tgenshi"
-(templated Cfg files),
-.I "infoxml"
-(info.xml files), and
-.I "probe"
-(probe files).
-
-The specific types (i.e., types other than "global") all supplement
-global; they do not override it. The exception is if you specify an
-empty option, e.g.:
-
-.nf
+\fBplugins\fR
+A comma\-delimited list of plugins to run\. By default, all plugins are run\. This can be overridden by listing plugins on the command line\. See \fBbcfg2\-lint\fR(8) for a list of the available plugins\.
+.
+.SH "ERROR HANDLING"
+Error handling is configured in the \fB[errors]\fR section\. Each option should be the name of an error and one of \fIerror\fR, \fIwarning\fR, or \fIsilent\fR, which tells \fBbcfg2\-lint\fR(8) how to handle the warning\. Error names and their defaults can be displayed by running \fBbcfg2\-lint\fR(8) with the \fB\-\-list\-errors\fR option\.
+.
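+.P
+For example, a minimal \fBbcfg2\-lint\.conf\fR that runs only two plugins and downgrades one error might look like the following (the error name shown is illustrative; use \fB\-\-list\-errors\fR for the actual names):
+.
+.IP "" 4
+.nf
+[lint]
+plugins = InfoXML,Validate
+
+[errors]
+no\-infoxml = warning
+.fi
+.
+.IP "" 0
+.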
+.SH "PLUGIN OPTIONS"
+These options apply only to a single plugin\. Each option should be in a section named for its plugin; for instance, options for the InfoXML plugin would be in a section called \fB[InfoXML]\fR\.
+.
+.P
+If a plugin is not listed below, then it has no configuration\.
+.
+.P
+In many cases, the behavior of a plugin can be configured by modifying how errors from it are handled\. See \fI\fBERROR HANDLING\fR\fR, above\.
+.
+.SS "Comments"
+The \fBComments\fR plugin configuration specifies which VCS keywords and comments are required for which file types\. The valid types of file are \fIglobal\fR (all file types), \fIbundler\fR (non\-templated bundle files), \fIsgenshi\fR (templated bundle files), \fIproperties\fR (property files), \fIcfg\fR (non\-templated Cfg files), \fItgenshi\fR (templated Cfg files), \fIinfoxml\fR (info\.xml files), and \fIprobe\fR (probe files)\.
+.
+.P
+The specific types (i\.e\., types other than "global") all supplement global; they do not override it\. The exception is if you specify an empty option, e\.g\.:
+.
+.P
cfg_keywords =
-.fi
-
-By default, the
-.I $Id$
-keyword is checked for and nothing else.
-
-Multiple keywords or comments should be comma-delimited.
-
-\(bu
-.B <type>_keywords
-
-Ensure that files of the specified type have the given VCS keyword.
-Do
-.I not
-include the dollar signs. I.e.:
-
-.nf
+.
+.P
+By default, the \fI$Id$\fR keyword is checked for and nothing else\.
+.
+.P
+Multiple keywords or comments should be comma\-delimited\.
+.
+.P
+· \fB<type>_keywords\fR
+.
+.P
+Ensure that files of the specified type have the given VCS keyword\. Do \fInot\fR include the dollar signs\. I\.e\.:
+.
+.P
infoxml_keywords = Revision
-.fi
-
-.I not:
-
-.nf
+.
+.P
+\fInot\fR:
+.
+.P
infoxml_keywords = $Revision$
-.fi
-
-\(bu
-.B <type>_comments
-
-Ensure that files of the specified type have a comment containing the
-given string. In XML files, only comments are checked. In plain text
-files, all lines are checked since comment characters may vary.
-
+.
+.P
+· \fB<type>_comments\fR
+.
+.P
+Ensure that files of the specified type have a comment containing the given string\. In XML files, only comments are checked\. In plain text files, all lines are checked since comment characters may vary\.
+.
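+.P
+For example, to require the \fI$Id$\fR keyword and a "Maintainer" comment in all file types, one might use:
+.
+.IP "" 4
+.nf
+[Comments]
+global_keywords = Id
+global_comments = Maintainer
+.fi
+.
+.IP "" 0
+.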
+.SS "InfoXML"
+.
.TP
-.BR InfoXML
-
-\(bu
-.B required_attrs
-A comma-delimited list of attributes to require on
-.I <Info>
-tags. Default is "owner,group,perms".
-
+\fBrequired_attrs\fR
+A comma\-delimited list of attributes to require on \fB<Info>\fR tags\. Default is "owner,group,perms"\.
+.
+.SS "MergeFiles"
+.
.TP
-.BR MergeFiles
-
-\(bu
-.B threshold
-The threshold at which MergeFiles will suggest merging config files
-and probes. Default is 75% similar.
-
+\fBthreshold\fR
+The threshold at which MergeFiles will suggest merging config files and probes\. Default is 75% similar\.
+.
+.SS "Validate"
+.
.TP
-.BR Validate
-
-\(bu
-.B schema
-The full path to the XML Schema files. Default is
-"/usr/share/bcfg2/schema". This can be overridden with the
-.I --schema
-command-line option
-
-.SH SEE ALSO
-.BR bcfg2-lint(8)
-
+\fBschema\fR
+The full path to the XML Schema files\. Default is \fB/usr/share/bcfg2/schema\fR\. This can be overridden with the \fI\-\-schema\fR command\-line option\.
+.
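+.P
+For example, to point the Validate plugin at a non\-default schema location (the path shown is illustrative):
+.
+.IP "" 4
+.nf
+[Validate]
+schema = /srv/bcfg2/schemas
+.fi
+.
+.IP "" 0
+.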
+.SH "SEE ALSO"
+bcfg2\-lint(8)
diff --git a/man/bcfg2-ping-sweep.8 b/man/bcfg2-ping-sweep.8
deleted file mode 100644
index 54eaa8e76..000000000
--- a/man/bcfg2-ping-sweep.8
+++ /dev/null
@@ -1,20 +0,0 @@
-.TH "bcfg2-ping-sweep" 8
-.SH NAME
-bcfg2-ping-sweep \- Update pingable and pingtime attributes in
-clients.xml
-.SH SYNOPSIS
-.B bcfg2-ping-sweep
-.SH "DESCRIPTION"
-.PP
-\fBbcfg2-ping-sweep\fR traverses the list of clients in
-Metadata/clients.xml and updates their pingable/pingtime attributes. The
-pingtime value is set to the last time the client was pinged (not the
-RTT value).
-.SH OPTIONS
-.PP
-.B None
-.SH "SEE ALSO"
-.BR bcfg(1),
-.BR bcfg2-server(8)
-.SH "BUGS"
-None currently known
diff --git a/man/bcfg2-reports.8 b/man/bcfg2-reports.8
index 51399e1c9..b2c0cad43 100644
--- a/man/bcfg2-reports.8
+++ b/man/bcfg2-reports.8
@@ -1,94 +1,76 @@
-.TH "bcfg2-reports" 8
-.SH NAME
-bcfg2-reports \- Query reporting system for client status
-.SH SYNOPSIS
-.B bcfg2-reports
-.I [-v]
-.SH DESCRIPTION
-.PP
-\fBbcfg2-reports\fR allows you to retrieve data from the database about
-clients, and the states of their current interactions. It also allows
-you to change the expired/unexpired states.
-The utility runs as a standalone application. It does, however, use
-the models from /src/lib/Server/Reports/reports/models.py.
-.SH OPTIONS
-.PP
-.B "\-a"
-.RS
-Shows all hosts, including expired hosts.
-.RE
-.B "\-b NAME"
-.RS
-Single-host mode \- shows bad entries from the current interaction of
-NAME. NAME is the name of the entry.
-.RE
-.B "-c\"
-.RS
-Shows only clean hosts.
-.RE
-.B "\-d"
-.RS
-Shows only dirty hosts.
-.RE
-.B "\-e NAME"
-.RS
-Single host mode \- shows extra entries from the current interaction
-of NAME. NAME is the name of the entry.
-.RE
-.B "\-h"
-.RS
-Shows help and usage info about bcfg2-reports.
-.RE
-.B "\-m NAME"
-.RS
-Single-host mode \- shows modified entries from the current interaction
-of NAME. NAME is the name of the entry.
-.RE
-.B "\-s NAME"
-.RS
-Single host mode \- shows bad, modified, and extra entries from the
-current interaction of NAME. NAME is the name of the entry.
-.RE
-.B "\-x NAME"
-.RS
-Toggles expired/unexpired state of NAME. NAME is the name of the entry.
-.RE
-.B "\-\-badentry=KIND,NAME"
-.RS
-Shows only hosts whose current interaction has bad entries in of KIND
-kind and NAME name; if a single argument ARG1 is given, then KIND,NAME
-pairs will be read from a file of name ARG1. KIND is the type of entry
-(Package, Path, Service, etc). NAME is the name of the entry.
-.RE
-.B "\-\-extraentry=KIND,NAME"
-.RS
-Shows only hosts whose current interaction has extra entries in of KIND
-kind and NAME name; if a single argument ARG1 is given, then KIND,NAME
-pairs will be read from a file of name ARG1. KIND is the type of entry
-(Package, Path, Service, etc). NAME is the name of the entry.
-.RE
-.B "\-\-fields=ARG1,ARG2,..."
-.RS
-Only displays the fields ARG1,ARG2,... (name, time, state, total, good,
-bad)
-.RE
-.B "\-\-modifiedentry=KIND,NAME"
-.RS
-Shows only hosts whose current interaction has modified entries in of
-KIND kind and NAME name; if a single argument ARG1 is given, then
-KIND,NAME pairs will be read from a file of name ARG1. KIND is the type
-of entry (Package, Path, Service, etc). NAME is the name of the entry.
-.RE
-.B "\-\-sort=ARG1,ARG2,..."
-.RS
-Sorts output on ARG1,ARG2,... (name, time, state, total, good, bad)
-.RE
-.B "\-\-stale"
-.RS
-Shows hosts which haven't run in the last 24 hours
-.RE
+.
+.TH "BCFG2\-REPORTS" "8" "August 2012" "" ""
+.
+.SH "NAME"
+\fBbcfg2\-reports\fR \- Query reporting system for client status
+.
+.SH "SYNOPSIS"
+\fBbcfg2\-reports\fR [\-a] [\-b \fINAME\fR] [\-c] [\-d] [\-e \fINAME\fR] [\-h] [\-m \fINAME\fR] [\-s \fINAME\fR] [\-x \fINAME\fR] [\-\-badentry=\fIKIND,NAME\fR] [\-\-extraentry=\fIKIND,NAME\fR] [\-\-fields=<ARG1,ARG2,\.\.\.>] [\-\-modifiedentry=\fIKIND,NAME\fR] [\-\-sort=<ARG1,ARG2,\.\.\.>] [\-\-stale] [\-v]
+.
+.SH "DESCRIPTION"
+\fBbcfg2\-reports\fR allows you to retrieve data from the database about clients, and the states of their current interactions\. It also allows you to change the expired/unexpired states\. The utility runs as a standalone application\. It does, however, use the models from \fB/src/lib/Server/Reports/reports/models\.py\fR\.
+.
+.SH "OPTIONS"
+.
+.TP
+\fB\-a\fR
+Shows all hosts, including expired hosts\.
+.
+.TP
+\fB\-b\fR \fIhostname\fR
+Single host mode \- shows bad entries from the current interaction of \fIhostname\fR\.
+.
+.TP
+\fB\-c\fR
+Shows only clean hosts\.
+.
+.TP
+\fB\-d\fR
+Shows only dirty hosts\.
+.
+.TP
+\fB\-e\fR \fIhostname\fR
+Single host mode \- shows extra entries from the current interaction of \fIhostname\fR\.
+.
+.TP
+\fB\-h\fR
+Shows help and usage info about \fBbcfg2\-reports\fR\.
+.
+.TP
+\fB\-m\fR \fIhostname\fR
+Single host mode \- shows modified entries from the current interaction of \fIhostname\fR\.
+.
+.TP
+\fB\-s\fR \fIhostname\fR
+Single host mode \- shows bad, modified, and extra entries from the current interaction of \fIhostname\fR\.
+.
+.TP
+\fB\-x\fR \fIhostname\fR
+Toggles expired/unexpired state of \fIhostname\fR\.
+.
+.TP
+\fB\-\-badentry=\fR\fIentry type, entry name\fR
+Shows only hosts whose current interaction has bad entries of type \fIentry type\fR and name \fIentry name\fR\. If a single argument ARG1 is given, then \fIentry type\fR,\fIentry name\fR pairs will be read from the file named ARG1\.
+.
+.TP
+\fB\-\-extraentry=\fR\fIentry type, entry name\fR
+Shows only hosts whose current interaction has extra entries of type \fIentry type\fR and name \fIentry name\fR\. If a single argument ARG1 is given, then \fIentry type\fR,\fIentry name\fR pairs will be read from the file named ARG1\.
+.
+.TP
+\fB\-\-fields=\fR<ARG1,ARG2,\.\.\.>
+Only displays the fields \fIARG1,ARG2,\.\.\.\fR (name, time, state, total, good, bad)\.
+.
+.TP
+\fB\-\-modifiedentry=\fR\fIentry type, entry name\fR
+Shows only hosts whose current interaction has modified entries of type \fIentry type\fR and name \fIentry name\fR\. If a single argument ARG1 is given, then \fIentry type\fR,\fIentry name\fR pairs will be read from the file named ARG1\.
+.
+.TP
+\fB\-\-sort=\fR<ARG1,ARG2,\.\.\.>
+Sorts output on ARG1,ARG2,\.\.\. (name, time, state, total, good, bad)\.
+.
+.TP
+\fB\-\-stale\fR
+Shows hosts which haven’t run in the last 24 hours\.
+.
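+.P
+For example, to list the name and state of hosts whose current interaction has a bad \fBPackage\fR entry named \fBopenssh\fR (a hypothetical entry name), one might run:
+.
+.IP "" 4
+.nf
+bcfg2\-reports \-\-badentry=Package,openssh \-\-fields=name,state
+.fi
+.
+.IP "" 0
+.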
.SH "SEE ALSO"
-.BR bcfg2(1),
-.BR bcfg2-server(8)
-.SH "BUGS"
-None currently known
+bcfg2(1), bcfg2\-server(8)
diff --git a/man/bcfg2-server.8 b/man/bcfg2-server.8
index 2d132ce6d..955f541c9 100644
--- a/man/bcfg2-server.8
+++ b/man/bcfg2-server.8
@@ -1,58 +1,48 @@
-.TH "bcfg2-server" 8
-.SH NAME
-bcfg2-server \- Server for client configuration specifications
-.SH SYNOPSIS
-.B bcfg2-server
-.I [-D <pidfile>] [-d] [-v] [-C <Client>]
-.SH DESCRIPTION
-.PP
-.B bcfg2-server
-This daemon serves configurations to clients based on the data in its
-repository.
-.SH OPTIONS
-.PP
-.B \-d
-.RS
-Run bcfg2 in debug mode.
-.RE
-.B \-v
-.RS
-Run bcfg2 in verbose mode.
-.RE
-.B "\-C <ConfigFile Path>"
-.RS
-Use an alternative path for bcfg2.conf. The default is /etc/bcfg2.conf
-.RE
-.B \-D
-.RS
-Daemonize, placing the program pid in the specified pidfile.
-.RE
-.B \-o <LogFile Path>
-.RS
-Writes a log to the specified path.
-.RE
-.B \-E <encoding>
-.RS
-Unicode encoding of config files.
-.RE
-.B \-x <password>
-.RS
-Set server password.
-.RE
-.B \-S <server url>
-.RS
-Set server address.
-.RE
-.B \-Q <repo path>
-.RS
-Set repository path.
-.RE
-.B \-\-ssl\-key=<ssl key>
-.RS
-Set path to SSL key.
-.RE
+.
+.TH "BCFG2\-SERVER" "8" "August 2012" "" ""
+.
+.SH "NAME"
+\fBbcfg2\-server\fR \- Server for client configuration specifications
+.
+.SH "SYNOPSIS"
+\fBbcfg2\-server\fR [\-d] [\-v] [\-C \fIconfigfile\fR] [\-D \fIpidfile\fR] [\-E \fIencoding\fR] [\-Q \fIrepo path\fR] [\-S \fIserver url\fR] [\-o \fIlogfile\fR] [\-x \fIpassword\fR] [\-\-ssl\-key=\fIssl key\fR]
+.
+.SH "DESCRIPTION"
+\fBbcfg2\-server\fR is the daemon component of Bcfg2 which serves configurations to clients based on the data in its repository\.
+.
+.SH "OPTIONS"
+.
+.TP
+\fB\-C\fR \fIconfigfile\fR
+Specify alternate bcfg2\.conf location\.
+.
+.TP
+\fB\-D\fR \fIpidfile\fR
+Daemonize, placing the program pid in the specified pidfile\.
+.
+.TP
+\fB\-E\fR \fIencoding\fR
+Specify alternate encoding (default is UTF\-8)\.
+.
+.TP
+\fB\-Q\fR \fIrepo path\fR
+Set repository path\.
+.
+.TP
+\fB\-S\fR \fIserver url\fR
+Set server address\.
+.
+.TP
+\fB\-d\fR
+Run \fBbcfg2\-server\fR in debug mode\.
+.
+.TP
+\fB\-v\fR
+Run \fBbcfg2\-server\fR in verbose mode\.
+.
+.TP
+\fB\-\-ssl\-key=\fR\fIssl key\fR
+Set path to SSL key\.
+.
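+.P
+For example, to daemonize the server and write its pid to a pidfile (the paths shown are illustrative):
+.
+.IP "" 4
+.nf
+bcfg2\-server \-C /etc/bcfg2\.conf \-D /var/run/bcfg2\-server\.pid
+.fi
+.
+.IP "" 0
+.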
.SH "SEE ALSO"
-.BR bcfg2(1),
-.BR bcfg2-lint(8)
-.SH "BUGS"
-None currently known
+bcfg2(1), bcfg2\-lint(8)
diff --git a/man/bcfg2.1 b/man/bcfg2.1
index 661153a15..2aa219756 100644
--- a/man/bcfg2.1
+++ b/man/bcfg2.1
@@ -1,180 +1,175 @@
-.TH "bcfg2" 1
-.SH NAME
-bcfg2 \- reconfigure machine based on settings in Bcfg2
-.SH SYNOPSIS
-.B bcfg2
-.I [\-d] [\-v] [\-p] [\-c cache file] [\-e] [\-f config file] [\-I] [\-q] [\-z] [\-b bundle] [\-r removal mode] [\-\-ca\-cert=file] [\-\-ssl\-cns=list] [\-\-ssl\-cert=file] [\-\-ssl\-key=file]
-.SH DESCRIPTION
-.TP
-.BR bcfg2
-Runs the Bcfg2 configuration process on the current host. This process
-consists of first fetching and executing probes, uploading probe
-results, fetching the client configuration, checking the current
-client state, attempting to install the desired configuration, and
-finally uploading statistics about the Bcfg2 execution and client
-state.
-
-.SH OPTIONS
-.TP
-.BR "\-C <configfile>"
-Specify alternate bcfg2.conf location.
-
-.TP
-.BR "\-D <driver1>,<driver2>"
-Specify a set of Bcfg2 tool drivers. NOTE: only drivers listed will be
-loaded. (IE, if you don't include POSIX, you will be unable to
-verify/install ConfigFiles, etc).
-
-.TP
-.BR "\-E <encoding>"
-Specify the encoding of Cfg files.
-
-.TP
-.BR "\-I"
-Run bcfg2 in interactive mode. The user will be prompted before each
-change.
-
-.TP
-.BR "\-O"
-Omit lock check.
-
-.TP
-.BR "\-P"
-Run bcfg2 in paranoid mode. Diffs will be logged for
-configuration files marked as paranoid by the Bcfg2 server.
-
-.TP
-.BR "\-R <retry count>"
-Specify the number of times that the client will attempt to retry
-network communication.
-
-.TP
-.BR "\-S https://server:port"
-Manually specify the server location (as opposed to using the value in
-bcfg2.conf).
-
-.TP
-.BR "\-b <bundle1>:<bundle2>"
-Run bcfg2 against one or multiple bundles in the configuration.
-
-.TP
-.BR "\-c <cachefile>"
-Cache a copy of the configuration in cachefile.
-
-.TP
-.BR "\-\-ca\-cert=<ca cert>"
-Specifiy the path to the SSL CA certificate.
-
-.TP
-.BR "\-d"
-Run bcfg2 in debug mode.
-
-.TP
-.BR "\-e"
-When in verbose mode, display extra entry information (temporary until
-verbosity rework).
-
-.TP
-.BR "\-f <specification path>"
-Configure from a file rather than querying the server.
-
-.TP
-.BR "\-h"
-Print Usage information.
-
-.TP
-.BR "\-k"
-Run in bulletproof mode. This currently only affects behavior in the
-debian toolset; it calls apt\-get update and clean and
-dpkg \-\-configure \-\-pending.
-
-.TP
-.BR "\-l <whitelist|blacklist|none>"
-Run the client in the server decision list mode (unless "none" is
-specified, which can be done in order to override the decision list mode
-specified in bcfg2.conf). This approach is needed when particular
-changes are deemed "high risk". It gives the ability to centrally
-specify these changes, but only install them on clients when
-administrator supervision is available. Because collaborative
-configuration is one of the remaining hard issues in configuration
-management, these issues typically crop up in environments with several
-administrators and much configuration variety. (This setting will be
-ignored if the -f option is also specified.)
-
-.TP
-.BR "\-n"
-Run bcfg2 in dry\-run mode. No changes will be made to the
-system.
-
-.TP
-.BR "\-o <LogFile Path>"
-Writes a log to the specified path.
-
-.TP
-.BR "\-p <profile>"
-Assert a profile for the current client.
-
-.TP
-.BR "\-q"
-Run bcfg2 in quick mode. Package checksum verification won't be
-performed. This mode relaxes the constraints of correctness, and thus
-should only be used in safe conditions.
-
-.TP
-.BR "\-Q"
-Run bcfg2 in "bundle quick" mode, where only entries in a bundle are
-verified or installed. This runs much faster than -q, but doesn't provide
-statistics to the server at all. In order for this option to work, the
--b option must also be provided. This option is incompatible with -r.
-
-.TP
-.BR "\-r <mode>"
-Cause bcfg2 to remove extra configuration elements it detects. Mode is
-one of all, Services, or Packages. All removes all entries. Likewise,
-Services and Packages remove only the extra configuration elements of
-the respective type.
-
-.TP
-.BR "\-s <service mode>"
-Set bcfg2 interaction level for services. Default behavior is to
-modify all services affected by reconfiguration. build mode attempts
-to stop all services started. disabled suppresses all attempts to
-modify services.
-
-.TP
-.BR "\-\-ssl\-cert=<ssl cert>"
-Specifiy the path to the SSL certificate.
-
-.TP
-.BR "\-\-ssl\-cns=<CommonName1:CommonName2 ...>"
-List of acceptable SSL server Common Names.
-
-.TP
-.BR "\-\-ssl\-key=<ssl key>"
-Specifiy the path to the SSL key.
-
-.TP
-.BR "\-u <user>"
-Attempt to authenticate as 'user'.
-
-.TP
-.BR "\-x <password>"
-Use 'password' for client communication.
-
-.TP
-.BR "\-t <timeout>"
-Set the timeout (in seconds) for client communication. Default is 90
-seconds.
-
-.TP
-.BR "\-v"
-Run bcfg2 in verbose mode.
-
-.TP
-.BR "\-z"
-Only configure independent entries, ignore bundles.
-.RE
+.
+.TH "BCFG2" "1" "August 2012" "" ""
+.
+.SH "NAME"
+\fBbcfg2\fR \- Bcfg2 client tool
+.
+.SH "SYNOPSIS"
+\fBbcfg2\fR [\fIoptions\fR] [\fI\.\.\.\fR]
+.
+.SH "DESCRIPTION"
+\fBbcfg2\fR runs the Bcfg2 configuration process on the current host\. This process consists of the following steps\.
+.
+.IP "\(bu" 4
+Fetch and execute probes
+.
+.IP "\(bu" 4
+Upload probe results
+.
+.IP "\(bu" 4
+Fetch the client configuration
+.
+.IP "\(bu" 4
+Check the current client state
+.
+.IP "\(bu" 4
+Attempt to install the desired configuration
+.
+.IP "\(bu" 4
+Upload statistics about the Bcfg2 execution and client state
+.
+.IP "" 0
+.
+.SH "OPTIONS"
+.
+.TP
+\fB\-B\fR
+Configure everything except the given bundle(s)\.
+.
+.TP
+\fB\-C\fR \fIconfigfile\fR
+Specify alternate bcfg2\.conf location\.
+.
+.TP
+\fB\-D\fR [\fIdriver1\fR,\fIdriver2\fR]
+Specify a set of Bcfg2 tool drivers\.
+.
+.IP
+\fINOTE: only drivers listed will be loaded\. (e\.g\., if you do not include POSIX, you will be unable to verify/install Path entries)\.\fR
+.
+.TP
+\fB\-E\fR \fIencoding\fR
+Specify the encoding of Cfg files\.
+.
+.TP
+\fB\-I\fR
+Run bcfg2 in interactive mode\. The user will be prompted before each change\.
+.
+.TP
+\fB\-O\fR
+Omit lock check\.
+.
+.TP
+\fB\-P\fR
+Run bcfg2 in paranoid mode\. Diffs will be logged for configuration files marked as paranoid by the Bcfg2 server\.
+.
+.TP
+\fB\-R\fR \fIretry count\fR
+Specify the number of times that the client will attempt to retry network communication\.
+.
+.TP
+\fB\-S\fR \fIhttps://server:port\fR
+Manually specify the server location (as opposed to using the value in bcfg2\.conf)\.
+.
+.TP
+\fB\-Z\fR
+Do not configure independent entries\.
+.
+.TP
+\fB\-b\fR [\fIbundle1:bundle2\fR]
+Run bcfg2 against one or multiple bundles in the configuration\.
+.
+.TP
+\fB\-c\fR \fIcachefile\fR
+Cache a copy of the configuration in cachefile\.
+.
+.TP
+\fB\-\-ca\-cert=\fR\fIca cert\fR
+Specify the path to the SSL CA certificate\.
+.
+.TP
+\fB\-d\fR
+Run bcfg2 in debug mode\.
+.
+.TP
+\fB\-e\fR
+When in verbose mode, display extra entry information (temporary until verbosity rework)\.
+.
+.TP
+\fB\-f\fR \fIspecification path\fR
+Configure from a file rather than querying the server\.
+.
+.TP
+\fB\-h\fR
+Print Usage information\.
+.
+.TP
+\fB\-k\fR
+Run in bulletproof mode\. This currently only affects behavior in the debian toolset; it calls apt\-get update and clean and dpkg \-\-configure \-\-pending\.
+.
+.TP
+\fB\-l\fR \fIwhitelist|blacklist|none\fR
+Run the client in the server decision list mode (unless "none" is specified, which can be done in order to override the decision list mode specified in bcfg2\.conf)\. This approach is needed when particular changes are deemed "high risk"\. It gives the ability to centrally specify these changes, but only install them on clients when administrator supervision is available\. Because collaborative configuration is one of the remaining hard issues in configuration management, these issues typically crop up in environments with several administrators and much configuration variety\. (This setting will be ignored if the \-f option is also specified)\.
+.
+.TP
+\fB\-n\fR
+Run bcfg2 in dry\-run mode\. No changes will be made to the system\.
+.
+.TP
+\fB\-o\fR \fIlogfile path\fR
+Writes a log to the specified path\.
+.
+.TP
+\fB\-p\fR \fIprofile\fR
+Assert a profile for the current client\.
+.
+.TP
+\fB\-q\fR
+Run bcfg2 in quick mode\. Package checksum verification won’t be performed\. This mode relaxes the constraints of correctness, and thus should only be used in safe conditions\.
+.
+.TP
+\fB\-Q\fR
+Run bcfg2 in "bundle quick" mode, where only entries in a bundle are verified or installed\. This runs much faster than \-q, but doesn’t provide statistics to the server at all\. In order for this option to work, the \-b option must also be provided\. This option is incompatible with \-r\.
+.
+.TP
+\fB\-r\fR \fImode\fR
+Cause bcfg2 to remove extra configuration elements it detects\. Mode is one of all, Services, or Packages\. All removes all entries\. Likewise, Services and Packages remove only the extra configuration elements of the respective type\.
+.
+.TP
+\fB\-s\fR \fIservice mode\fR
+Set bcfg2 interaction level for services\. Default behavior is to modify all services affected by reconfiguration\. build mode attempts to stop all services started\. disabled suppresses all attempts to modify services\.
+.
+.TP
+\fB\-\-ssl\-cert=\fR\fIssl cert\fR
+Specify the path to the SSL certificate\.
+.
+.TP
+\fB\-\-ssl\-cns=\fR[\fICN1:CN2\fR]
+List of acceptable SSL server Common Names\.
+.
+.TP
+\fB\-\-ssl\-key=\fR\fIssl key\fR
+Specify the path to the SSL key\.
+.
+.TP
+\fB\-u\fR \fIuser\fR
+Attempt to authenticate as ’user’\.
+.
+.TP
+\fB\-x\fR \fIpassword\fR
+Use ’password’ for client communication\.
+.
+.TP
+\fB\-t\fR \fItimeout\fR
+Set the timeout (in seconds) for client communication\. Default is 90 seconds\.
+.
+.TP
+\fB\-v\fR
+Run bcfg2 in verbose mode\.
+.
+.TP
+\fB\-z\fR
+Only configure independent entries, ignore bundles\.
+.
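+.P
+For example, to preview the changes for a single bundle in verbose dry\-run mode (the bundle name \fBntp\fR is illustrative), one might run:
+.
+.IP "" 4
+.nf
+bcfg2 \-v \-n \-b ntp
+.fi
+.
+.IP "" 0
+.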
.SH "SEE ALSO"
-.BR bcfg2-server(8),
-.BR bcfg2-info(8)
-.SH "BUGS"
+bcfg2\-server(8), bcfg2\-info(8)
diff --git a/man/bcfg2.conf.5 b/man/bcfg2.conf.5
index 684586892..b62d223c9 100644
--- a/man/bcfg2.conf.5
+++ b/man/bcfg2.conf.5
@@ -1,447 +1,392 @@
-.TH bcfg2.conf 5
-
-.SH NAME
-bcfg2.conf - configuration parameters for Bcfg2
-
-.SH DESCRIPTION
-.TP
-bcfg2.conf includes configuration parameters for the Bcfg2 server and
-client.
-
-.SH FILE FORMAT
-The file is INI-style and consists of sections and options. A section
-begins with the name of the sections in square brackets and continues
-until the next section begins.
-
-Options are specified in the form 'name = value'.
-
-The file is line-based each newline-terminated line represents either
-a comment, a section name or an option.
-
-Any line beginning with a hash (#) is ignored, as are lines containing
-only whitespace.
-
-
-.SH SERVER OPTIONS
-These options are only necessary on the Bcfg2 server. They are
-specified in the [server] section of the configuration file.
-
-.TP
-.B repository
-Specifies the path to the Bcfg2 repository containing all of the
-configuration specifications. The repository should be created
-using the 'bcfg2-admin init' command.
-
-.TP
-.B filemonitor
-The file monitor used to watch for changes in the repository.
-Values of 'gamin', 'fam', or 'pseudo' are valid.
-
-.TP
-.B listen_all
-This setting tells the server to listen on all available interfaces. The
-default is to only listen on those interfaces specified by the bcfg2
-setting in the components section of bcfg2.conf.
-
-.TP
-.B plugins
-A comma-delimited list of enabled server plugins. Currently available
-plugins are:
-
-\(bu
-.B Account
-The account plugin manages authentication data, including:
-
- * /etc/passwd
- * /etc/group
- * /etc/security/limits.conf
- * /etc/sudoers
- * /root/.ssh/authorized_keys
-
-\(bu
-.B Actions
-
-Action entries are commands that are executed either before bundle
-installation, after bundle installation or both. If exit status is
-observed, a failing pre-action will cause no modification of the
-enclosing bundle to be performed; all entries included in that bundle
-will not be modified. Failing actions are reported through Bcfg2's
-reporting system, so they can be centrally observed.
-
-\(bu
-.B BB
-The BB plugin maps users to machines and metadata to machines.
-(experimental)
-
-\(bu
-.B Base
-A structure plugin that provides the ability to add lists of unrelated
-entries into client configuration entry inventories. Base works much
-like Bundler in its file format. This structure plugin is good for
-the pile of independent configs needed for most actual systems.
-
-\(bu
-.B Bundler
-Bundler is used to describe groups of inter-dependent configuration
-entries, such as the combination of packages, configuration files,
-and service activations that comprise typical Unix daemons. Bundles
-are used to add groups of configuration entries to the inventory of
-client configurations, as opposed to describing particular versions
-of those
-entries.
-
-\(bu
-.B Bzr
-The Bzr plugin allows you to track changes to your Bcfg2 repository
-using a GNU Bazaar version control backend. Currently, it enables
-you to get revision information out of your repository for reporting
-purposes.
-
-\(bu
-.B Cfg
-The Cfg plugin provides a repository to describe configuration file
-contents for clients. In its simplest form, the Cfg repository is
-just a directory tree modeled off of the directory tree on your client
-machines.
-
-\(bu
-.B Cvs
-The Cvs plugin allows you to track changes to your Bcfg2 repository
-using a Concurrent version control backend. Currently, it enables you
-to get revision information out of your repository for reporting
-purposes. (experimental)
-
-\(bu
-.B Darcs
-The Darcs plugin allows you to track changes to your Bcfg2 repository
-using a Darcs version control backend. Currently, it enables you to
-get revision information out of your repository for reporting purposes.
-(experimental)
-
-\(bu
-.B DBStats
-Direct to database statistics plugin. (0.9.6 and later)
-
-\(bu
-.B Decisions
-The Decisions plugin has support for a centralized set of per-entry
-installation decisions. This approach is needed when particular
-changes are deemed "high risk"; this gives the ability to centrally
-specify these changes, but only install them on clients when
-administrator supervision is available. (0.9.6 and later)
-
-\(bu
-.B Deps
-The Deps plugin allows you to make a series of assertions like
-"Package X requires Package Y (and optionally also Package Z etc.)
-
-\(bu
-.B Editor
-The Editor plugin allows you to partially manage configuration for
-a file. Its use is not recommended and not well documented.
-
-\(bu
-.B Fossil
-The Fossil plugin allows you to track changes to your Bcfg2 repository
-using a Fossil SCM version control backend. Currently, it enables
-you to get revision information out of your repository for reporting
-purposes.
-
-\(bu
-.B Git
-The Git plugin allows you to track changes to your Bcfg2 repository
-using a Git version control backend. Currently, it enables you to
-get revision information out of your repository for reporting purposes.
-
-\(bu
-.B GroupPatterns
-The GroupPatterns plugin is a connector that can assign clients group
-membership based on patterns in client hostnames.
-
-\(bu
-.B Hg
-The Hg plugin allows you to track changes to your Bcfg2 repository
-using a Mercurial version control backend. Currently, it enables you
-to get revision information out of your repository for reporting
-purposes. (experimental)
-
-\(bu
-.B Hostbase
-The Hostbase plugin is an IP management system built on top of Bcfg2.
-
-\(bu
-.B Metadata
-The Metadata plugin is the primary method of specifying Bcfg2 server
-metadata.
-
-\(bu
-.B NagiosGen
-NagiosGen is a Bcfg2 plugin that dynamically generates Nagios
-configuration files based on Bcfg2 data.
-
-\(bu
-.B Ohai
-The Ohai plugin is used to detect information about the client
-operating system. The data is reported back to the server using
-JSON. (experimental)
-
-\(bu
-.B POSIXCompat
-The POSIXCompat plugin provides a compatibility layer which turns
-new-style (1.0) POSIX entries into old-style entries which are
-compatible with previous releases.
-
-\(bu
-.B Packages
-The Packages plugin is an alternative to Pkgmgr for specifying
-package entries for clients. Where Pkgmgr explicitly specifies
-package entry information, Packages delegates control of package
-version information to the underlying package manager, installing
-the latest version available from through those channels.
-
-\(bu
-.B Pkgmgr
-The Pkgmgr plugin resolves the Abstract Configuration Entity
-"Package" to a package specification that the client can use to
-detect, verify and install the specified package.
-
-\(bu
-.B Probes
-The Probes plugin gives you the ability to gather information from a
-client machine before you generate its configuration. This information
-can be used with the various templating systems to generate
-configuration based on the results.
-
-\(bu
-.B Properties
-The Properties plugin is a connector plugin that adds information
-from properties files into client metadata instances. (1.0 and later)
-
-\(bu
-.B Rules
-The Rules plugin resolves Abstract Configuration Entities to literal
-configuration entries suitable for the client drivers to consume.
-
-\(bu
-.B SGenshi (Deprecated)
-See Bundler.
-
-\(bu
-.B Snapshots
-The Snapshots plugin stores various aspects of a client's state when
-the client checks in to the server.
-
-\(bu
-.B SSHbase
-The SSHbase generator plugin manages ssh host keys (both v1 and v2)
-for hosts. It also manages the ssh_known_hosts file. It can integrate
-host keys from other management domains and similarly export its keys.
-
-\(bu
-.B Svn
-The Svn plugin allows you to track changes to your Bcfg2 repository
-using a Subversion backend. Currently, it enables you to get revision
-information out of your repository for reporting purposes.
-
-\(bu
-.B TCheetah
-The TCheetah plugin allows you to use the cheetah templating system
-to create files. It also allows you to include the results of probes
-executed on the client in the created files.
-
-\(bu
-.B TGenshi
-The TGenshi plugin allows you to use the Genshi templating system to
-create files. It also allows you to include the results of probes
-executed on the client in the created files.
-
-\(bu
-.B Trigger
-Trigger is a plugin that calls external scripts when clients are
-configured.
-
-.TP
-.B prefix
-Specifies a prefix if the Bcfg2 installation isn't placed in the
-default location (eg. /usr/local).
-
-.SH MDATA OPTIONS
-These options affect the default metadata settings for Paths with
-type='file'.
-
-.TP
-.B owner
+.
+.TH "BCFG2\.CONF" "5" "August 2012" "" ""
+.
+.SH "NAME"
+\fBbcfg2\.conf\fR \- configuration parameters for Bcfg2
+.
+.SH "DESCRIPTION"
+\fBbcfg2\.conf\fR includes configuration parameters for the Bcfg2 server and client\.
+.
+.SH "FILE FORMAT"
+The file is INI\-style and consists of sections and options\. A section begins with the name of the sections in square brackets and continues until the next section begins\.
+.
+.P
+Options are specified in the form "name=value"\.
+.
+.P
+The file is line\-based; each newline\-terminated line represents either a comment, a section name or an option\.
+.
+.P
+Any line beginning with a hash (#) is ignored, as are lines containing only whitespace\.
+.
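+.P
+An illustrative fragment combining several of the sections described below (all values are examples only):
+.
+.IP "" 4
+.nf
+# Bcfg2 server and client settings
+[server]
+repository = /var/lib/bcfg2
+plugins = Bundler,Cfg,Metadata,Rules,SSHbase
+
+[communication]
+protocol = xmlrpc/ssl
+password = secret
+
+[components]
+bcfg2 = https://10\.3\.1\.6:6789
+.fi
+.
+.IP "" 0
+.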
+.SH "SERVER OPTIONS"
+These options are only necessary on the Bcfg2 server\. They are specified in the \fB[server]\fR section of the configuration file\.
+.
+.TP
+\fBrepository\fR
+Specifies the path to the Bcfg2 repository containing all of the configuration specifications\. The repository should be created using the \fBbcfg2\-admin init\fR command\.
+.
+.TP
+\fBfilemonitor\fR
+The file monitor used to watch for changes in the repository\. The default is the best available monitor\. The following values are valid:
+.
+.IP
+\fBinotify\fR, \fBgamin\fR, \fBfam\fR, \fBpseudo\fR
+.
+.TP
+\fBignore_files\fR
+A comma\-separated list of globs that should be ignored by the file monitor\. Default values are:
+.
+.IP
+\fB*~\fR, \fB*#\fR, \fB\.#*\fR, \fB*\.swp\fR, \fB\.*\.swx\fR, \fBSCCS\fR, \fB\.svn\fR, \fB4913\fR, \fB\.gitignore\fR
+.
+.TP
+\fBlisten_all\fR
+This setting tells the server to listen on all available interfaces\. The default is to only listen on those interfaces specified by the bcfg2 setting in the components section of \fBbcfg2\.conf\fR\.
+.
+.TP
+\fBplugins\fR
+A comma\-delimited list of enabled server plugins\. Currently available plugins are:
+.
+.IP
+\fBAccount\fR, \fBActions\fR, \fBBase\fR, \fBBundler\fR, \fBBzr\fR, \fBCfg\fR, \fBCvs\fR, \fBDarcs\fR, \fBDBStats\fR, \fBDecisions\fR, \fBDeps\fR, \fBEditor\fR, \fBFossil\fR, \fBGit\fR, \fBGroupPatterns\fR, \fBHg\fR, \fBHostbase\fR, \fBMetadata\fR, \fBNagiosGen\fR, \fBOhai\fR, \fBPackages\fR, \fBPkgmgr\fR, \fBProbes\fR, \fBProperties\fR, \fBRules\fR, \fBSnapshots\fR, \fBSSHbase\fR, \fBSvn\fR, \fBSvn2\fR, \fBTCheetah\fR, \fBTGenshi\fR, \fBTrigger\fR
+.
+.IP
+Descriptions of each plugin can be found in their respective sections below\.
+.
+.TP
+\fBprefix\fR
+Specifies a prefix if the Bcfg2 installation isn’t placed in the default location (e\.g\. /usr/local)\.
+.
+.SS "Account Plugin"
+The account plugin manages authentication data, including the following\.
+.
+.IP "\(bu" 4
+\fB/etc/passwd\fR
+.
+.IP "\(bu" 4
+\fB/etc/group\fR
+.
+.IP "\(bu" 4
+\fB/etc/security/limits\.conf\fR
+.
+.IP "\(bu" 4
+\fB/etc/sudoers\fR
+.
+.IP "\(bu" 4
+\fB/root/\.ssh/authorized_keys\fR
+.
+.IP "" 0
+.
+.SS "Base Plugin"
+A structure plugin that provides the ability to add lists of unrelated entries into client configuration entry inventories\. Base works much like Bundler in its file format\. This structure plugin is good for the pile of independent configs needed for most actual systems\.
+.
+.SS "Bundler Plugin"
+Bundler is used to describe groups of inter\-dependent configuration entries, such as the combination of packages, configuration files, and service activations that comprise typical Unix daemons\. Bundles are used to add groups of configuration entries to the inventory of client configurations, as opposed to describing particular versions of those entries\.
+.
+.SS "Bzr Plugin"
+The Bzr plugin allows you to track changes to your Bcfg2 repository using a GNU Bazaar version control backend\. Currently, it enables you to get revision information out of your repository for reporting purposes\.
+.
+.SS "Cfg Plugin"
+The Cfg plugin provides a repository to describe configuration file contents for clients\. In its simplest form, the Cfg repository is just a directory tree modeled off of the directory tree on your client machines\.
+.
+.SS "Cvs Plugin (experimental)"
+The Cvs plugin allows you to track changes to your Bcfg2 repository using a Concurrent version control backend\. Currently, it enables you to get revision information out of your repository for reporting purposes\.
+.
+.SS "Darcs Plugin (experimental)"
+The Darcs plugin allows you to track changes to your Bcfg2 repository using a Darcs version control backend\. Currently, it enables you to get revision information out of your repository for reporting purposes\.
+.
+.SS "DBStats Plugin"
+Direct to database statistics plugin\.
+.
+.SS "Decisions Plugin"
+The Decisions plugin has support for a centralized set of per\-entry installation decisions\. This approach is needed when particular changes are deemed "\fIhigh risk\fR"; this gives the ability to centrally specify these changes, but only install them on clients when administrator supervision is available\.
+.
+.SS "Deps Plugin"
+The Deps plugin allows you to make a series of assertions like "Package X requires Package Y (and optionally also Package Z etc\.)"
+.
+.SS "Editor Plugin"
+The Editor plugin attempts to allow you to partially manage configuration for a file\. Its use is not recommended and not well documented\.
+.
+.SS "Fossil Plugin"
+The Fossil plugin allows you to track changes to your Bcfg2 repository using a Fossil SCM version control backend\. Currently, it enables you to get revision information out of your repository for reporting purposes\.
+.
+.SS "Git Plugin"
+The Git plugin allows you to track changes to your Bcfg2 repository using a Git version control backend\. Currently, it enables you to get revision information out of your repository for reporting purposes\.
+.
+.SS "GroupPatterns Plugin"
+The GroupPatterns plugin is a connector that can assign clients group membership based on patterns in client hostnames\.
+.
+.SS "Hg Plugin (experimental)"
+The Hg plugin allows you to track changes to your Bcfg2 repository using a Mercurial version control backend\. Currently, it enables you to get revision information out of your repository for reporting purposes\.
+.
+.SS "Hostbase Plugin"
+The Hostbase plugin is an IP management system built on top of Bcfg2\.
+.
+.SS "Metadata Plugin"
+The Metadata plugin is the primary method of specifying Bcfg2 server metadata\.
+.
+.SS "NagiosGen Plugin"
+NagiosGen is a Bcfg2 plugin that dynamically generates Nagios configuration files based on Bcfg2 data\.
+.
+.SS "Ohai Plugin (experimental)"
+The Ohai plugin is used to detect information about the client operating system\. The data is reported back to the server using JSON\.
+.
+.SS "Packages Plugin"
+The Packages plugin is an alternative to Pkgmgr for specifying package entries for clients\. Where Pkgmgr explicitly specifies package entry information, Packages delegates control of package version information to the underlying package manager, installing the latest version available through those channels\.
+.
+.SS "Pkgmgr Plugin"
+The Pkgmgr plugin resolves the Abstract Configuration Entity "Package" to a package specification that the client can use to detect, verify and install the specified package\.
+.
+.SS "Probes Plugin"
+The Probes plugin gives you the ability to gather information from a client machine before you generate its configuration\. This information can be used with the various templating systems to generate configuration based on the results\.
+.
+.SS "Properties Plugin"
+The Properties plugin is a connector plugin that adds information from properties files into client metadata instances\.
+.
+.SS "Rules Plugin"
+The Rules plugin provides literal configuration entries that resolve the abstract configuration entries normally found in the Bundler and Base plugins\. The literal entries in Rules are suitable for consumption by the appropriate client drivers\.
+.
+.SS "Snapshots Plugin"
+The Snapshots plugin stores various aspects of a client’s state when the client checks in to the server\.
+.
+.SS "SSHbase Plugin"
+The SSHbase generator plugin manages ssh host keys (both v1 and v2) for hosts\. It also manages the ssh_known_hosts file\. It can integrate host keys from other management domains and similarly export its keys\.
+.
+.SS "Svn Plugin"
+The Svn plugin allows you to track changes to your Bcfg2 repository using a Subversion backend\. Currently, it enables you to get revision information out of your repository for reporting purposes\.
+.
+.SS "Svn2 Plugin"
+The Svn2 plugin extends on the capabilities in the Svn plugin\. It provides Update and Commit methods which provide hooks for modifying subversion\-backed Bcfg2 repositories\.
+.
+.SS "TCheetah Plugin"
+The TCheetah plugin allows you to use the cheetah templating system to create files\. It also allows you to include the results of probes executed on the client in the created files\.
+.
+.SS "TGenshi Plugin"
+The TGenshi plugin allows you to use the Genshi templating system to create files\. It also allows you to include the results of probes executed on the client in the created files\.
+.
+.SS "Trigger Plugin"
+The Trigger plugin provides a method for calling external scripts when clients are configured\.
+.
+.SH "CLIENT OPTIONS"
+These options only affect client functionality, specified in the \fB[client]\fR section\.
+.
+.TP
+\fBdecision\fR
+Specify the server decision list mode (whitelist or blacklist)\. (This setting will be ignored if the client is called with the \-f option\.)
+.
+.TP
+\fBdrivers\fR
+Specify tool driver set to use\. This option can be used to explicitly specify the client tool drivers you want to use when the client is run\.
+.
+.TP
+\fBparanoid\fR
+Run the client in paranoid mode\.
+.
+.SH "COMMUNICATION OPTIONS"
+Specified in the \fB[communication]\fR section\. These options define settings used for client\-server communication\.
+.
+.TP
+\fBca\fR
+The path to a file containing the CA certificate\. This file is required on the server, and optional on clients\. However, if the cacert is not present on clients, the server cannot be verified\.
+.
+.TP
+\fBcertificate\fR
+The path to a file containing a PEM formatted certificate which signs the key with the ca certificate\. This setting is required on the server in all cases, and required on clients if using client certificates\.
+.
+.TP
+\fBkey\fR
+Specifies the path to a file containing the SSL Key\. This is required on the server in all cases, and required on clients if using client certificates\.
+.
+.TP
+\fBpassword\fR
+Required on both the server and clients\. On the server, sets the password clients need to use to communicate\. On a client, sets the password to use to connect to the server\.
+.
+.TP
+\fBprotocol\fR
+Communication protocol to use\. Defaults to xmlrpc/ssl\.
+.
+.TP
+\fBretries\fR
+A client\-only option\. Number of times to retry network communication\.
+.
+.TP
+\fBserverCommonNames\fR
+A client\-only option\. A colon\-separated list of Common Names the client will accept in the SSL certificate presented by the server\.
+.
+.TP
+\fBuser\fR
+A client\-only option\. The UUID of the client\.
+.
+.SH "COMPONENT OPTIONS"
+Specified in the \fB[components]\fR section\.
+.
+.TP
+\fBbcfg2\fR
+URL of the server\. On the server this specifies which interface and port the server listens on\. On the client, this specifies where the client will attempt to contact the server\.
+.
+.IP
+e\.g\. \fBbcfg2 = https://10\.3\.1\.6:6789\fR
+.
+.TP
+\fBencoding\fR
+Text encoding of configuration files\. Defaults to UTF\-8\.
+.
+.SH "LOGGING OPTIONS"
+Specified in the \fB[logging]\fR section\. These options control the server logging functionality\.
+.
+.TP
+\fBpath\fR
+Server log file path\.
+.
+.SH "MDATA OPTIONS"
+These options affect the default metadata settings for Paths with type=’file’\.
+.
+.TP
+\fBowner\fR
Global owner for Paths (defaults to root)
-
+.
.TP
-.B group
+\fBgroup\fR
Global group for Paths (defaults to root)
-
+.
.TP
-.B perms
+\fBperms\fR
Global permissions for Paths (defaults to 644)
-
+.
.TP
-.B paranoid
+\fBparanoid\fR
Global paranoid settings for Paths (defaults to false)
-
+.
.TP
-.B sensitive
+\fBsensitive\fR
Global sensitive settings for Paths (defaults to false)
-
-
-.SH CLIENT OPTIONS
-These options only affect client functionality, specified in the
-[client] section.
-
-.TP
-.B decision
-Specify the server decision list mode (whitelist or blacklist). (This
-setting will be ignored if the client is called with the -f option.)
-
-.TP
-.B drivers
-Specify tool driver set to use. This option can be used to explicitly
-specify the client tool drivers you want to use when the client is run.
-
-.TP
-.B paranoid
-Run the client in paranoid mode.
-
-
-.SH STATISTICS OPTIONS
-Server-only, specified in the [statistics] section. These options
-control the statistics collection functionality of the server.
-
-.TP
-.B database_engine
-The database engine used by the statistics module. One of either
-\[oq]postgresql\[cq], \[oq]mysql\[cq], \[oq]sqlite3\[cq], or
-\[oq]ado_mssql\[cq].
-
-.TP
-.B database_name
-The name of the database to use for statistics data. If
-\[oq]database_engine\[cq] is set to \[oq]sqlite3\[cq] this is a file
-path to sqlite file and defaults to $REPOSITORY_DIR/etc/brpt.sqlite
-
-.TP
-.B database_user
-User for database connections. Not used for sqlite3.
-
-.TP
-.B database_password
-Password for database connections. Not used for sqlite3.
-
-.TP
-.B database_host
-Host for database connections. Not used for sqlite3.
-
-.TP
-.B database_port
-Port for database connections. Not used for sqlite3.
-
-.TP
-.B time_zone
-Specify a time zone other than that used on the system. (Note that this
-will cause the bcfg2 server to log messages in this time zone as well).
-
-
-.SH COMMUNICATION OPTIONS
-Specified in the [communication] section. These options define
-settings used for client-server communication.
-
-.TP
-.B ca
-The path to a file containing the CA certificate. This file is
-required on the server, and optional on clients. However, if the
-cacert is not present on clients, the server cannot be verified.
-
-.TP
-.B certificate
-The path to a file containing a PEM formatted certificate which
-signs the key with the ca certificate. This setting is required on
-the server in all cases, and required on clients if using client
-certificates.
-
-.TP
-.B key
-Specifies the path to a file containing the SSL Key. This is required
-on the server in all cases, and required on clients if using client
-certificates.
-
-.TP
-.B password
-Required on both the server and clients. On the server, sets the
-password clients need to use to communicate. On a client, sets the
-password to use to connect to the server.
-
-.TP
-.B protocol
-Communication protocol to use. Defaults to xmlrpc/ssl.
-
-.TP
-.B retries
-A client-only option. Number of times to retry network communication.
-
-.TP
-.B serverCommonNames
-A client-only option. A colon-separated list of Common Names the client
-will accept in the SSL certificate presented by the server.
-
-.TP
-.B user
-A client-only option. The UUID of the client.
-
-.SH PARANOID OPTIONS
-These options allow for finer-grained control of the paranoid mode
-on the Bcfg2 client. They are specified in the [paranoid] section
-of the configuration file.
-
-.TP
-.B path
-Custom path for backups created in paranoid mode. The default is in
-/var/cache/bcfg2.
-
-.TP
-.B max_copies
-Specify a maximum number of copies for the server to keep when running
-in paranoid mode. Only the most recent versions of these copies will
-be kept.
-
-.SH COMPONENT OPTIONS
-Specified in the [components] section.
-
-.TP
-.B bcfg2
-URL of the server. On the server this specifies which interface and
-port the server listens on. On the client, this specifies where the
-client will attempt to contact the server.
-eg: bcfg2 = https://10.3.1.6:6789
-
-.TP
-.B encoding
-Text encoding of configuration files. Defaults to UTF-8.
-
-.SH LOGGING OPTIONS
-Specified in the [logging] section. These options control the server
-logging functionality.
-
-.B path
-Server log file path.
-
-.SH SNAPSHOTS OPTIONS
-Specified in the [snapshots] section. These options control the server
-snapshots functionality.
-
-.B driver
+.
+.SH "PACKAGES OPTIONS"
+The following options are specified in the \fB[packages]\fR section of the configuration file\.
+.
+.TP
+\fBresolver\fR
+Enable dependency resolution\. Default is 1 (true)\.
+.
+.TP
+\fBmetadata\fR
+Enable metadata processing\. Default is 1 (true)\. If metadata is disabled, it’s implied that resolver is also disabled\.
+.
+.TP
+\fByum_config\fR
+The path at which to generate Yum configs\. No default\.
+.
+.TP
+\fBapt_config\fR
+The path at which to generate APT configs\. No default\.
+.
+.TP
+\fBgpg_keypath\fR
+The path on the client where RPM GPG keys will be copied before they are imported on the client\. Default is \fB/etc/pki/rpm\-gpg\fR\.
+.
+.TP
+\fBversion\fR
+Set the version attribute used when binding Packages\. Default is auto\.
+.
+.P
+The following options are specified in the \fB[packages:yum]\fR section of the configuration file\.
+.
+.TP
+\fBuse_yum_libraries\fR
+By default, Bcfg2 uses an internal implementation of Yum’s dependency resolution and other routines so that the Bcfg2 server can be run on a host that does not support Yum itself\. If you run the Bcfg2 server on a machine that does have Yum libraries, however, you can enable use of those native libraries in Bcfg2 by setting this to 1\.
+.
+.TP
+\fBhelper\fR
+Path to bcfg2\-yum\-helper\. By default, Bcfg2 looks first in $PATH and then in \fB/usr/sbin/bcfg2\-yum\-helper\fR for the helper\.
+.
+.P
+All other options in the \fB[packages:yum]\fR section will be passed along verbatim to the Yum configuration if you are using the native Yum library support\.
+.
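+.P
+For example, to enable the native Yum libraries and pass one Yum option through verbatim (the \fBgpgcheck\fR value is illustrative):
+.
+.IP "" 4
+.nf
+[packages:yum]
+use_yum_libraries = 1
+gpgcheck = 1
+.fi
+.
+.IP "" 0
+.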
+.P
+The following options are specified in the \fB[packages:pulp]\fR section of the configuration file\.
+.
+.TP
+\fBusername\fR
+The username of a Pulp user that will be used to register new clients and bind them to repositories\.
+.
+.TP
+\fBpassword\fR
+The password of a Pulp user that will be used to register new clients and bind them to repositories\.
+.
+.SH "PARANOID OPTIONS"
+These options allow for finer\-grained control of the paranoid mode on the Bcfg2 client\. They are specified in the \fB[paranoid]\fR section of the configuration file\.
+.
+.TP
+\fBpath\fR
+Custom path for backups created in paranoid mode\. The default is in \fB/var/cache/bcfg2\fR\.
+.
+.TP
+\fBmax_copies\fR
+Specify a maximum number of copies for the server to keep when running in paranoid mode\. Only the most recent versions of these copies will be kept\.
+.
+.SH "SNAPSHOTS OPTIONS"
+Specified in the \fB[snapshots]\fR section\. These options control the server snapshots functionality\.
+.
+.TP
+\fBdriver\fR
sqlite
-
-.B database
-The name of the database to use for statistics data.
-eg: $REPOSITORY_DIR/etc/bcfg2.sqlite
-
-.SH SEE ALSO
-.BR bcfg2(1),
-.BR bcfg2-server(8)
-
+.
+.TP
+\fBdatabase\fR
+The name of the database to use for statistics data\.
+.
+.IP
+e\.g\. \fB$REPOSITORY_DIR/etc/bcfg2\.sqlite\fR
+.
+.SH "SSLCA OPTIONS"
+These options are necessary to configure the SSLCA plugin and can be found in the \fB[sslca_default]\fR section of the configuration file\.
+.
+.TP
+\fBconfig\fR
+Specifies the location of the openssl configuration file for your CA\.
+.
+.TP
+\fBpassphrase\fR
+Specifies the passphrase for the CA’s private key (if necessary)\. If no passphrase exists, it is assumed that the private key is stored unencrypted\.
+.
+.TP
+\fBchaincert\fR
+Specifies the location of your ssl chaining certificate\. This is used when pre\-existing certificate hostfiles are found, so that they can be validated and only regenerated if they no longer meet the specification\. If you’re using a self\-signing CA this would be the CA cert that you generated\.
+.
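+.P
+An illustrative \fB[sslca_default]\fR section (all paths and values are examples only):
+.
+.IP "" 4
+.nf
+[sslca_default]
+config = /etc/pki/CA/openssl\.cnf
+passphrase = example
+chaincert = /etc/pki/CA/cacert\.pem
+.fi
+.
+.IP "" 0
+.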
+.SH "STATISTICS OPTIONS"
+Server\-only, specified in the \fB[statistics]\fR section\. These options control the statistics collection functionality of the server\.
+.
+.TP
+\fBdatabase_engine\fR
+The database engine used by the statistics module\. One of the following:
+.
+.IP
+\fBpostgresql\fR, \fBmysql\fR, \fBsqlite3\fR, \fBado_mssql\fR
+.
+.TP
+\fBdatabase_name\fR
+The name of the database to use for statistics data\. If ‘database_engine’ is set to ‘sqlite3’, this is the path to the sqlite file and defaults to \fB$REPOSITORY_DIR/etc/brpt\.sqlite\fR\.
+.
+.TP
+\fBdatabase_user\fR
+User for database connections\. Not used for sqlite3\.
+.
+.TP
+\fBdatabase_password\fR
+Password for database connections\. Not used for sqlite3\.
+.
+.TP
+\fBdatabase_host\fR
+Host for database connections\. Not used for sqlite3\.
+.
+.TP
+\fBdatabase_port\fR
+Port for database connections\. Not used for sqlite3\.
+.
+.TP
+\fBtime_zone\fR
+Specify a time zone other than that used on the system\. (Note that this will cause the Bcfg2 server to log messages in this time zone as well)\.
+.
+.SH "SEE ALSO"
+bcfg2(1), bcfg2\-server(8)
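For illustration, a minimal bcfg2.conf fragment exercising the sections documented above might look like the following; every path, username, and password here is a placeholder, not a shipped default.

    # illustrative values only
    [packages:yum]
    helper = /usr/sbin/bcfg2-yum-helper

    [packages:pulp]
    username = bcfg2
    password = examplepassword

    [paranoid]
    path = /var/cache/bcfg2
    max_copies = 5

    [snapshots]
    driver = sqlite
    database = /var/lib/bcfg2/etc/bcfg2.sqlite

    [sslca_default]
    config = /etc/pki/CA/openssl.cnf
    passphrase = examplepassphrase
    chaincert = /etc/pki/CA/cacert.pem

    [statistics]
    database_engine = sqlite3
    database_name = /var/lib/bcfg2/etc/brpt.sqlite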
diff --git a/misc/bcfg2.spec b/misc/bcfg2.spec
index 75c6090a0..62b94f827 100644
--- a/misc/bcfg2.spec
+++ b/misc/bcfg2.spec
@@ -1,4 +1,4 @@
-%define release 0.2
+%define release 0.1
%define __python python
%{!?py_ver: %define py_ver %(%{__python} -c 'import sys;print(sys.version[0:3])')}
%define pythonversion %{py_ver}
@@ -6,7 +6,7 @@
%{!?_initrddir: %define _initrddir %{_sysconfdir}/rc.d/init.d}
Name: bcfg2
-Version: 1.2.2
+Version: 1.2.3
Release: %{release}
Summary: Configuration management system
@@ -30,13 +30,14 @@ BuildArch: noarch
BuildRequires: python-devel
BuildRequires: python-lxml
%if 0%{?mandriva_version}
-# mandriva seems to behave differently than other distros and needs this explicitly.
+# mandriva seems to behave differently than other distros and needs
+# this explicitly.
BuildRequires: python-setuptools
%endif
%if 0%{?mandriva_version} == 201100
-# mandriva 2011 has multiple providers for libsane, so (at least when building on OBS)
-# one must be chosen explicitly:
-# "have choice for libsane.so.1 needed by python-imaging: libsane1 sane-backends-iscan"
+# mandriva 2011 has multiple providers for libsane, so (at least when
+# building on OBS) one must be chosen explicitly: "have choice for
+# libsane.so.1 needed by python-imaging: libsane1 sane-backends-iscan"
BuildRequires: libsane1
%endif
@@ -51,7 +52,12 @@ BuildRequires: python-sphinx10
BuildRequires: python-sphinx >= 0.6
%endif
-Requires: python-nose
+%if 0%{?fedora} >= 16
+# we require a sufficiently new cherrypy that it's really only
+# available in Fedora for now
+Requires: python-cherrypy >= 3.2.2
+%endif
+
Requires: python-lxml >= 0.9
%if 0%{?rhel_version}
# the debian init script needs redhat-lsb.
@@ -92,14 +98,14 @@ deployment strategies.
This package includes the Bcfg2 client software.
%package server
-Version: 1.2.2
+Version: 1.2.3
Summary: Bcfg2 Server
%if 0%{?suse_version}
Group: System/Management
%else
Group: System Tools
%endif
-Requires: bcfg2
+Requires: bcfg2 = %{version}
%if "%{py_ver}" < "2.6"
Requires: python-ssl
%endif
@@ -109,6 +115,7 @@ Requires: gamin-python
%endif
Requires: /usr/sbin/sendmail
Requires: /usr/bin/openssl
+Requires: python-nose
%description server
Bcfg2 helps system administrators produce a consistent, reproducible,
@@ -175,7 +182,7 @@ deployment strategies.
This package includes the Bcfg2 documentation.
%package web
-Version: 1.2.2
+Version: 1.2.3
Summary: Bcfg2 Web Reporting Interface
%if 0%{?suse_version}
Group: System/Management
@@ -241,6 +248,7 @@ rm -rf %{buildroot}
%{__install} -d %{buildroot}%{_sysconfdir}/cron.hourly
%{__install} -d %{buildroot}%{_prefix}/lib/bcfg2
mkdir -p %{buildroot}%{_defaultdocdir}/bcfg2-doc-%{version}
+mkdir -p %{buildroot}%{_defaultdocdir}/bcfg2-server-%{version}
%if 0%{?suse_version}
%{__install} -d %{buildroot}/var/adm/fillup-templates
%endif
@@ -260,8 +268,9 @@ ln -s %{_initrddir}/bcfg2 %{buildroot}%{_sbindir}/rcbcfg2
ln -s %{_initrddir}/bcfg2-server %{buildroot}%{_sbindir}/rcbcfg2-server
%endif
-mv build/sphinx/html/* %{buildroot}%{_defaultdocdir}/bcfg2-doc-%{version}
-mv build/dtd %{buildroot}%{_defaultdocdir}/bcfg2-doc-%{version}/
+cp -r tools/* %{buildroot}%{_defaultdocdir}/bcfg2-server-%{version}
+cp -r build/sphinx/html/* %{buildroot}%{_defaultdocdir}/bcfg2-doc-%{version}
+cp -r build/dtd %{buildroot}%{_defaultdocdir}/bcfg2-doc-%{version}/
%{__install} -d %{buildroot}%{apache_conf}/conf.d
%{__install} -m 644 misc/apache/bcfg2.conf %{buildroot}%{apache_conf}/conf.d/wsgi_bcfg2.conf
@@ -302,9 +311,7 @@ touch %{buildroot}%{_sysconfdir}/bcfg2.conf %{buildroot}%{_sysconfdir}/bcfg2-web
%dir %{python_sitelib}/Bcfg2
%{python_sitelib}/Bcfg2/Server
-%if "%{pythonversion}" >= "2.5"
%{python_sitelib}/*egg-info
-%endif
%dir %{_datadir}/bcfg2
%{_datadir}/bcfg2/Hostbase
@@ -313,8 +320,8 @@ touch %{buildroot}%{_sysconfdir}/bcfg2.conf %{buildroot}%{_sysconfdir}/bcfg2-web
%config(noreplace) %{_sysconfdir}/default/bcfg2-server
%{_sbindir}/bcfg2-admin
%{_sbindir}/bcfg2-build-reports
+%{_sbindir}/bcfg2-crypt
%{_sbindir}/bcfg2-info
-%{_sbindir}/bcfg2-ping-sweep
%{_sbindir}/bcfg2-lint
%{_sbindir}/bcfg2-repo-validate
%{_sbindir}/bcfg2-reports
@@ -331,6 +338,8 @@ touch %{buildroot}%{_sysconfdir}/bcfg2.conf %{buildroot}%{_sysconfdir}/bcfg2-web
%dir %{_prefix}/lib/bcfg2
%ghost %config(noreplace,missingok) %attr(0600,root,root) %{_sysconfdir}/bcfg2.conf
+%doc %{_defaultdocdir}/bcfg2-server-%{version}
+
%files doc
%defattr(-,root,root,-)
%doc %{_defaultdocdir}/bcfg2-doc-%{version}
@@ -385,6 +394,9 @@ fi
%endif
%changelog
+* Wed Aug 15 2012 Chris St. Pierre <chris.a.st.pierre@gmail.com> 1.2.3-0.1
+- Added tools/ as doc for bcfg2-server subpackage
+
* Sat Feb 18 2012 Christopher 'm4z' Holm <686f6c6d@googlemail.com> 1.2.1
- Added Fedora and Mandriva compatibilty (for Open Build Service).
- Added missing dependency redhat-lsb.
@@ -393,7 +405,7 @@ fi
- Added openSUSE compatibility.
- Various changes to satisfy rpmlint.
-* Thu Jan 27 2011 Chris St. Pierre <stpierreca@ornl.gov> 1.2.0pre1-0.0
+* Thu Jan 27 2011 Chris St. Pierre <chris.a.st.pierre@gmail.com> 1.2.0pre1-0.0
- Added -doc sub-package
* Mon Jun 21 2010 Fabian Affolter <fabian@bernewireless.net> - 1.1.0rc3-0.1
diff --git a/osx/Makefile b/osx/Makefile
index 72751ff32..d3a2d4b90 100644
--- a/osx/Makefile
+++ b/osx/Makefile
@@ -29,9 +29,9 @@ SITELIBDIR = /Library/Python/${PYVERSION}/site-packages
# an Info.plist file for packagemaker to look at for package creation
# and substitute the version strings. Major/Minor versions can only be
# integers (e.g. "1" and "00" for bcfg2 version 1.0.0.
-BCFGVER = 1.2.2
+BCFGVER = 1.2.3
MAJOR = 1
-MINOR = 22
+MINOR = 23
default: clean client
diff --git a/redhat/VERSION b/redhat/VERSION
index 23aa83906..0495c4a88 100644
--- a/redhat/VERSION
+++ b/redhat/VERSION
@@ -1 +1 @@
-1.2.2
+1.2.3
diff --git a/reports/reports.wsgi b/reports/reports.wsgi
index 232650485..235715854 100644
--- a/reports/reports.wsgi
+++ b/reports/reports.wsgi
@@ -1,4 +1,4 @@
import os
-os.environ['DJANGO_SETTINGS_MODULE'] = 'Bcfg2.Server.Reports.settings'
+os.environ['DJANGO_SETTINGS_MODULE'] = 'Bcfg2.settings'
import django.core.handlers.wsgi
application = django.core.handlers.wsgi.WSGIHandler()
diff --git a/reports/site_media/bcfg2_base.css b/reports/site_media/bcfg2_base.css
index d74c1b618..ae7e145f1 100644
--- a/reports/site_media/bcfg2_base.css
+++ b/reports/site_media/bcfg2_base.css
@@ -133,6 +133,16 @@ ul.menu-level2 {
color: #10324b;
text-decoration: none;
}
+/*
+ * Convenience for templating..
+ */
+.bad-lineitem {
+ background: #FF7777;
+}
+.bad-lineitem a {
+ color: #10324b;
+ text-decoration: none;
+}
.clean-lineitem {
background: #AAFFBB;
}
@@ -264,3 +274,9 @@ span.nav_bar_current {
border: 1px solid #98DBCC;
padding: 1px;
}
+#threshold_box {
+ border: 1px solid #98DBCC;
+ margin-top: 5px;
+ width: 640px;
+ padding: 5px;
+}
diff --git a/schemas/base.xsd b/schemas/base.xsd
index cca665b38..98682fdb5 100644
--- a/schemas/base.xsd
+++ b/schemas/base.xsd
@@ -10,6 +10,7 @@
<xsd:include schemaLocation="atom.xsd"/>
<xsd:include schemaLocation="pathentry.xsd"/>
<xsd:include schemaLocation="rules.xsd"/>
+ <xsd:include schemaLocation="types.xsd"/>
<xsd:group name='BaseEntries'>
<xsd:choice>
@@ -19,7 +20,7 @@
<xsd:element name='Path' type='PathEntry'/>
<xsd:element name='Service' type='StructureEntry'/>
<xsd:element name='BoundPackage' type='PackageType'/>
- <xsd:element name='BoundPath' type='BoundPathEntry'/>
+ <xsd:element name='BoundPath' type='PathType'/>
<xsd:element name='BoundService' type='ServiceType'/>
</xsd:choice>
</xsd:group>
diff --git a/schemas/bundle.xsd b/schemas/bundle.xsd
index 4e034ee3c..6306b6da4 100644
--- a/schemas/bundle.xsd
+++ b/schemas/bundle.xsd
@@ -16,10 +16,12 @@
<xsd:include schemaLocation="atom.xsd"/>
<xsd:include schemaLocation="pathentry.xsd"/>
<xsd:include schemaLocation="rules.xsd"/>
- <xsd:include schemaLocation="services.xsd"/>
+ <xsd:include schemaLocation="types.xsd"/>
+ <xsd:include schemaLocation="servicetype.xsd"/>
- <xsd:complexType name='GroupType'>
- <xsd:choice minOccurs='0' maxOccurs='unbounded'>
+ <xsd:group name="bundleElements">
+ <xsd:choice>
+ <xsd:group ref="py:genshiElements"/>
<xsd:element name='Package' type='PackageStructure'>
<xsd:annotation>
<xsd:documentation>
@@ -56,12 +58,20 @@
</xsd:documentation>
</xsd:annotation>
</xsd:element>
+ <xsd:element name='SELinux' type='SELinuxStructure'>
+ <xsd:annotation>
+ <xsd:documentation>
+ Abstract implementation of an SELinux entry. The
+ full specification will be included in Rules.
+ </xsd:documentation>
+ </xsd:annotation>
+ </xsd:element>
<xsd:element name='PostInstall' type='StructureEntry'>
<xsd:annotation>
<xsd:documentation>
PostInstall entries are deprecated in favor of Action
- entries. Actions can do everything PostInstall entries can
- do and more.
+ entries. Actions can do everything PostInstall entries can
+ do and more.
</xsd:documentation>
</xsd:annotation>
</xsd:element>
@@ -72,7 +82,7 @@
</xsd:documentation>
</xsd:annotation>
</xsd:element>
- <xsd:element name='BoundPath' type='BoundPathEntry'>
+ <xsd:element name='BoundPath' type='PathType'>
<xsd:annotation>
<xsd:documentation>
Fully bound description of a filesystem path to be handled
@@ -94,6 +104,13 @@
</xsd:documentation>
</xsd:annotation>
</xsd:element>
+ <xsd:element name='BoundSELinux' type='SELinuxType'>
+ <xsd:annotation>
+ <xsd:documentation>
+ Fully bound description of an SELinux entry.
+ </xsd:documentation>
+ </xsd:annotation>
+ </xsd:element>
<xsd:element name='Group' type='GroupType'>
<xsd:annotation>
<xsd:documentation>
@@ -107,7 +124,7 @@
<xsd:annotation>
<xsd:documentation>
Elements within Client tags only apply to the named client
- (or vice-versa; see #element_negate below)
+ (or vice-versa; see #element_negate below)
</xsd:documentation>
</xsd:annotation>
</xsd:element>
@@ -119,7 +136,12 @@
</xsd:documentation>
</xsd:annotation>
</xsd:element>
- <xsd:group ref="py:genshiElements"/>
+ </xsd:choice>
+ </xsd:group>
+
+ <xsd:complexType name='GroupType'>
+ <xsd:choice minOccurs='0' maxOccurs='unbounded'>
+ <xsd:group ref="bundleElements"/>
</xsd:choice>
<xsd:attribute type='xsd:string' name='name' use='required'>
<xsd:annotation>
@@ -140,96 +162,7 @@
<xsd:complexType name='BundleType'>
<xsd:choice minOccurs='0' maxOccurs='unbounded'>
- <xsd:element name='Package' type='PackageStructure'>
- <xsd:annotation>
- <xsd:documentation>
- Abstract implementation of a Package entry. The full
- specification will be generated by a plugin such as
- Packages.
- </xsd:documentation>
- </xsd:annotation>
- </xsd:element>
- <xsd:element name='Path' type='PathEntry'>
- <xsd:annotation>
- <xsd:documentation>
- Abstract implementation of a Path entry. The entry will
- either be handled by Cfg, TGenshi, or another
- DirectoryBacked plugin; or handled by Rules, in which case
- the full specification of this entry will be included in
- Rules.
- </xsd:documentation>
- </xsd:annotation>
- </xsd:element>
- <xsd:element name='Service' type='StructureEntry'>
- <xsd:annotation>
- <xsd:documentation>
- Abstract implementation of a Service entry. The full
- specification will be included in Rules.
- </xsd:documentation>
- </xsd:annotation>
- </xsd:element>
- <xsd:element name='Action' type='StructureEntry'>
- <xsd:annotation>
- <xsd:documentation>
- Abstract implementation of an Action entry. The full
- specification will be included in Rules.
- </xsd:documentation>
- </xsd:annotation>
- </xsd:element>
- <xsd:element name='BoundPackage' type='PackageType'>
- <xsd:annotation>
- <xsd:documentation>
- Fully bound description of a software package to be managed.
- </xsd:documentation>
- </xsd:annotation>
- </xsd:element>
- <xsd:element name='BoundPath' type='BoundPathEntry'>
- <xsd:annotation>
- <xsd:documentation>
- Fully bound description of a filesystem path to be handled
- by the POSIX driver.
- </xsd:documentation>
- </xsd:annotation>
- </xsd:element>
- <xsd:element name='BoundService' type='ServiceType'>
- <xsd:annotation>
- <xsd:documentation>
- Fully bound description of a system service to be managed.
- </xsd:documentation>
- </xsd:annotation>
- </xsd:element>
- <xsd:element name='BoundAction' type='ActionType'>
- <xsd:annotation>
- <xsd:documentation>
- Fully bound description of a command to be run.
- </xsd:documentation>
- </xsd:annotation>
- </xsd:element>
- <xsd:element name='Group' type='GroupType'>
- <xsd:annotation>
- <xsd:documentation>
- Elements within Group tags only apply to clients that are
- members of that group
- </xsd:documentation>
- </xsd:annotation>
- </xsd:element>
- <xsd:element name='Client' type='GroupType'>
- <xsd:annotation>
- <xsd:documentation>
- Elements within Client tags only apply to the named client
- (or vice-versa; see #element_negate below)
- </xsd:documentation>
- </xsd:annotation>
- </xsd:element>
- <xsd:element name='Bundle' type='BundleType'>
- <xsd:annotation>
- <xsd:documentation>
- Nesting Bundle tags is allowed in order to support
- XInclude within Bundles.
- </xsd:documentation>
- </xsd:annotation>
- </xsd:element>
- <xsd:group ref="py:genshiElements"/>
+ <xsd:group ref="bundleElements"/>
</xsd:choice>
<xsd:attribute type='xsd:string' name='description' />
<xsd:attribute type='xsd:string' name='name'/>
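As a rough sketch of what the consolidated bundleElements group allows, a bundle can now mix abstract entries, nested groups, and the new SELinux/BoundSELinux elements; all names below are invented for illustration.

    <Bundle name="web-server">
      <Package name="httpd"/>
      <Service name="httpd"/>
      <!-- abstract SELinux entry; the full specification comes from Rules -->
      <SELinux name="httpd_enable_homedirs"/>
      <Group name="rhel6">
        <!-- fully bound SELinux boolean, valid directly inside the bundle -->
        <BoundSELinux type="boolean" name="httpd_can_network_connect" value="on"/>
      </Group>
    </Bundle>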
diff --git a/schemas/clients.xsd b/schemas/clients.xsd
index 56f458a45..3b98c5fc3 100644
--- a/schemas/clients.xsd
+++ b/schemas/clients.xsd
@@ -26,9 +26,11 @@
<xsd:attribute type='xsd:string' name='uuid'/>
<xsd:attribute type='xsd:string' name='password'/>
<xsd:attribute type='xsd:string' name='location'/>
- <xsd:attribute type='xsd:string' name='secure'/>
+ <xsd:attribute type='xsd:boolean' name='floating'/>
+ <xsd:attribute type='xsd:boolean' name='secure'/>
<xsd:attribute type='xsd:string' name='pingtime' use='optional'/>
<xsd:attribute type='xsd:string' name='address'/>
+ <xsd:attribute type='xsd:string' name='version'/>
</xsd:complexType>
<xsd:complexType name='ClientsType'>
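A hypothetical clients.xml record using the newly added and retyped attributes; the hostname, profile, and version values are placeholders.

    <Client name="www1.example.com" profile="web-server"
            floating="true" secure="false" version="1.2.3"/>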
diff --git a/schemas/defaults.xsd b/schemas/defaults.xsd
index c7e2edc7e..17ae84366 100644
--- a/schemas/defaults.xsd
+++ b/schemas/defaults.xsd
@@ -11,33 +11,6 @@
<xsd:include schemaLocation="types.xsd"/>
<xsd:include schemaLocation="pkgtype.xsd"/>
- <xsd:complexType name="ActionType">
- <xsd:attribute type="ActionTimingEnum" name="timing"/>
- <xsd:attribute type="ActionWhenEnum" name="when"/>
- <xsd:attribute type="ActionStatusEnum" name="status"/>
- <xsd:attribute type="xsd:boolean" name="build"/>
- <xsd:attribute type="xsd:string" name="name" use="required"/>
- <xsd:attribute type="xsd:string" name="command"/>
- </xsd:complexType>
-
- <xsd:complexType name="PathType">
- <xsd:attribute type="PathTypeEnum" name="type"/>
- <xsd:attribute type="xsd:string" name="name" use="required"/>
- <xsd:attribute type="xsd:string" name="dev_type"/>
- <xsd:attribute type="xsd:string" name="major"/>
- <xsd:attribute type="xsd:string" name="minor"/>
- <xsd:attribute type="xsd:string" name="mode"/>
- <xsd:attribute type="xsd:string" name="perms"/>
- <xsd:attribute type="xsd:string" name="owner"/>
- <xsd:attribute type="xsd:string" name="group"/>
- <xsd:attribute type="xsd:string" name="recursive"/>
- <xsd:attribute type="xsd:string" name="prune"/>
- <xsd:attribute type="xsd:string" name="to"/>
- <xsd:attribute type="xsd:string" name="vcstype"/>
- <xsd:attribute type="xsd:string" name="revision"/>
- <xsd:attribute type="xsd:string" name="sourceurl"/>
- </xsd:complexType>
-
<xsd:complexType name="DContainerType">
<xsd:choice minOccurs="0" maxOccurs="unbounded">
<xsd:element name="Service" type="ServiceType"/>
diff --git a/schemas/info.xsd b/schemas/info.xsd
index 37232ab23..862b758b2 100644
--- a/schemas/info.xsd
+++ b/schemas/info.xsd
@@ -1,5 +1,5 @@
<xsd:schema xmlns:xsd="http://www.w3.org/2001/XMLSchema" xml:lang="en">
-
+
<xsd:annotation>
<xsd:documentation>
info.xml schema for bcfg2
@@ -7,13 +7,18 @@
</xsd:annotation>
<xsd:include schemaLocation="atom.xsd"/>
+ <xsd:include schemaLocation="types.xsd"/>
<xsd:complexType name='InfoType'>
+ <xsd:choice minOccurs='0' maxOccurs='unbounded'>
+ <xsd:element name='ACL' type='ACLType'/>
+ </xsd:choice>
<xsd:attribute name='encoding' type='xsd:string'/>
<xsd:attribute name='group' type='xsd:string'/>
<xsd:attribute name='important' type='xsd:string'/>
<xsd:attribute name='owner' type='xsd:string'/>
<xsd:attribute name='perms' type='xsd:string'/>
+ <xsd:attribute name='secontext' type='xsd:string'/>
<xsd:attribute name='paranoid' type='xsd:boolean'/>
<xsd:attribute name='sensitive' type='xsd:boolean'/>
</xsd:complexType>
diff --git a/schemas/metadata.xsd b/schemas/metadata.xsd
index f79039d25..84d7436c9 100644
--- a/schemas/metadata.xsd
+++ b/schemas/metadata.xsd
@@ -1,6 +1,6 @@
<xsd:schema xmlns:xsd="http://www.w3.org/2001/XMLSchema"
xmlns:xi="http://www.w3.org/2001/XInclude" xml:lang="en">
-
+
<xsd:annotation>
<xsd:documentation>
metadata schema for bcfg2
@@ -13,38 +13,49 @@
<xsd:import namespace="http://www.w3.org/2001/XInclude"
schemaLocation="xinclude.xsd"/>
+ <xsd:complexType name='bundleDeclaration'>
+ <xsd:attribute type='xsd:string' name='name' use='required'/>
+ </xsd:complexType>
+
<xsd:complexType name='groupType'>
<xsd:choice minOccurs='0' maxOccurs='unbounded'>
- <xsd:element name='Bundle'>
- <xsd:complexType>
- <xsd:attribute type='xsd:string' name='name' use='required'/>
- </xsd:complexType>
- </xsd:element>
- <xsd:element name='Group' >
- <xsd:complexType>
- <xsd:attribute name='name' use='required'/>
- </xsd:complexType>
- </xsd:element>
+ <xsd:element name='Bundle' type='bundleDeclaration'/>
+ <xsd:element name='Group' type='groupType'/>
+ <xsd:element name='Client' type='clientType'/>
+ <xsd:element name='Groups' type='groupsType'/>
+ </xsd:choice>
+ <xsd:attribute type='xsd:string' name='name' use='required'/>
+ <xsd:attribute type='xsd:boolean' name='profile'/>
+ <xsd:attribute type='xsd:boolean' name='public'/>
+ <xsd:attribute type='xsd:boolean' name='default'/>
+ <xsd:attribute type='xsd:string' name='auth'/>
+ <xsd:attribute type='xsd:string' name='category'/>
+ <xsd:attribute type='xsd:string' name='comment'/>
+ <xsd:attribute type='xsd:string' name='negate'/>
+ </xsd:complexType>
+
+ <xsd:complexType name='clientType'>
+ <xsd:choice minOccurs='0' maxOccurs='unbounded'>
+ <xsd:element name='Bundle' type='bundleDeclaration'/>
+ <xsd:element name='Group' type='groupType'/>
+ <xsd:element name='Client' type='clientType'/>
+ <xsd:element name='Groups' type='groupsType'/>
</xsd:choice>
- <xsd:attribute type='xsd:boolean' name='profile' use='optional'/>
- <xsd:attribute type='xsd:boolean' name='public' use='optional'/>
- <xsd:attribute type='xsd:boolean' name='default' use='optional'/>
<xsd:attribute type='xsd:string' name='name' use='required'/>
- <xsd:attribute type='xsd:string' name='auth' use='optional'/>
- <xsd:attribute type='xsd:string' name='category' use='optional'/>
- <xsd:attribute type='xsd:string' name='comment' use='optional'/>
+ <xsd:attribute type='xsd:string' name='negate'/>
</xsd:complexType>
<xsd:complexType name='groupsType'>
<xsd:choice minOccurs='0' maxOccurs='unbounded'>
<xsd:element name='Group' type='groupType'/>
+ <xsd:element name='Client' type='clientType'/>
<xsd:element name='Groups' type='groupsType'/>
<xsd:element ref="xi:include"/>
</xsd:choice>
<xsd:attribute name='version' type='xsd:string'/>
- <xsd:attribute name='origin' type='xsd:string'/>
- <xsd:attribute name='revision' type='xsd:string'/>
- <xsd:attribute ref='xml:base'/>
+ <xsd:attribute name='origin' type='xsd:string'/>
+ <xsd:attribute name='revision' type='xsd:string'/>
+ <xsd:attribute ref='xml:base'/>
</xsd:complexType>
<xsd:element name='Groups' type='groupsType'/>
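The reworked metadata schema lets Group and Client elements nest; a sketch of a groups.xml fragment using that, with invented group and host names:

    <Groups>
      <Group name="web-server" profile="true" public="true" category="role">
        <Bundle name="apache"/>
        <Group name="linux"/>
      </Group>
      <!-- Client elements are now valid here and inside Group -->
      <Client name="www1.example.com">
        <Group name="debugging"/>
      </Client>
    </Groups>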
diff --git a/schemas/packages.xsd b/schemas/packages.xsd
index c29a85ecf..c4252194f 100644
--- a/schemas/packages.xsd
+++ b/schemas/packages.xsd
@@ -18,11 +18,18 @@
</xsd:restriction>
</xsd:simpleType>
+ <xsd:complexType name="RepoOptionsType">
+ <xsd:attribute type="xsd:boolean" name="serveronly"/>
+ <xsd:attribute type="xsd:boolean" name="clientonly"/>
+ <xsd:anyAttribute processContents="lax"/>
+ </xsd:complexType>
+
<xsd:complexType name="sourceType">
<xsd:choice minOccurs="0" maxOccurs="unbounded">
<xsd:element name="Component" type="xsd:string"/>
<xsd:element name="Arch" type="xsd:string"/>
<xsd:element name="GPGKey" type="xsd:string"/>
+ <xsd:element name="Options" type="RepoOptionsType"/>
<xsd:choice>
<xsd:element name="Blacklist" type="xsd:string"/>
<xsd:element name="Whitelist" type="xsd:string"/>
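The new Options element takes serveronly/clientonly plus arbitrary attributes (processContents="lax"), so a Packages source could hypothetically pass extra repository options through like this; the mirror URL and the gpgcheck attribute are examples, not prescribed names.

    <Sources>
      <Source type="yum" rawurl="http://mirror.example.com/centos/6/os/x86_64/">
        <Arch>x86_64</Arch>
        <GPGKey>http://mirror.example.com/RPM-GPG-KEY-example</GPGKey>
        <!-- serveronly/clientonly are schema-defined; gpgcheck rides along via anyAttribute -->
        <Options serveronly="true" gpgcheck="1"/>
      </Source>
    </Sources>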
diff --git a/schemas/pathentry.xsd b/schemas/pathentry.xsd
index 080758d0b..e5d2ef6af 100644
--- a/schemas/pathentry.xsd
+++ b/schemas/pathentry.xsd
@@ -16,25 +16,4 @@
<xsd:attribute type='xsd:string' name='altsrc' use='optional'/>
<xsd:attributeGroup ref="py:genshiAttrs"/>
</xsd:complexType>
-
- <xsd:complexType name='BoundPathEntry'>
- <xsd:attribute type='xsd:string' name='name' use='required'/>
- <xsd:attribute type='xsd:string' name='group' use='optional'/>
- <xsd:attribute type='xsd:string' name='important' use='optional'/>
- <xsd:attribute type='xsd:string' name='owner' use='optional'/>
- <xsd:attribute type='xsd:string' name='paranoid' use='optional'/>
- <xsd:attribute type='xsd:string' name='perms' use='optional'/>
- <xsd:attribute type='xsd:string' name='prune' use='optional'/>
- <xsd:attribute type='xsd:string' name='recursive' use='optional'/>
- <xsd:attribute type='xsd:string' name='sensitive' use='optional'/>
- <xsd:attribute type='xsd:string' name='to' use='optional'/>
- <xsd:attribute type='xsd:string' name='type' use='optional'/>
- <!-- device attributes -->
- <xsd:attribute type='xsd:string' name='dev_type' use='optional'/>
- <xsd:attribute type='xsd:string' name='major' use='optional'/>
- <xsd:attribute type='xsd:string' name='minor' use='optional'/>
- <xsd:attribute type='xsd:string' name='mode' use='optional'/>
- <!-- end device attributes -->
- <xsd:attributeGroup ref="py:genshiAttrs"/>
- </xsd:complexType>
</xsd:schema>
diff --git a/schemas/pkgtype.xsd b/schemas/pkgtype.xsd
index 0aaea0c22..cbee6f317 100644
--- a/schemas/pkgtype.xsd
+++ b/schemas/pkgtype.xsd
@@ -42,7 +42,9 @@
</xsd:choice>
<xsd:attribute type="xsd:string" name="name"/>
<xsd:attribute type="xsd:string" name="group"/>
+ <xsd:attribute type="xsd:string" name="arch"/>
<xsd:attribute type="xsd:string" name="version"/>
+ <xsd:attribute type="xsd:string" name="release"/>
<xsd:attribute type="xsd:string" name="file"/>
<xsd:attribute type="xsd:boolean" name="verify"/>
<xsd:attribute type="xsd:string" name="simplefile"/>
diff --git a/schemas/rules.xsd b/schemas/rules.xsd
index 924792b18..2f4f805c0 100644
--- a/schemas/rules.xsd
+++ b/schemas/rules.xsd
@@ -14,66 +14,91 @@
<xsd:import namespace="http://genshi.edgewall.org/"
schemaLocation="genshi.xsd"/>
- <xsd:complexType name='ActionType'>
- <xsd:attribute type='ActionTimingEnum' name='timing'/>
- <xsd:attribute type='ActionWhenEnum' name='when'/>
- <xsd:attribute type='ActionStatusEnum' name='status'/>
- <xsd:attribute type="xsd:boolean" name="build"/>
- <xsd:attribute type='xsd:string' name='name'/>
- <xsd:attribute type='xsd:string' name='command'/>
- <xsd:attributeGroup ref="py:genshiAttrs"/>
- </xsd:complexType>
-
<xsd:complexType name='PostInstallType'>
<xsd:attribute type='xsd:string' name='name' use='required'/>
</xsd:complexType>
- <xsd:complexType name='PathType'>
- <xsd:attribute type='PathTypeEnum' name='type' use='required'/>
- <xsd:attribute type='xsd:string' name='name' use='required'/>
- <xsd:attribute type='xsd:string' name='dev_type'/>
- <xsd:attribute type='xsd:string' name='major'/>
- <xsd:attribute type='xsd:string' name='minor'/>
- <xsd:attribute type='xsd:string' name='mode'/>
- <xsd:attribute type='xsd:string' name='perms'/>
- <xsd:attribute type='xsd:string' name='owner'/>
- <xsd:attribute type='xsd:string' name='group'/>
- <xsd:attribute type='xsd:string' name='recursive'/>
- <xsd:attribute type='xsd:string' name='prune'/>
- <xsd:attribute type='xsd:string' name='to'/>
- <xsd:attribute type='xsd:string' name='vcstype'/>
- <xsd:attribute type='xsd:string' name='revision'/>
- <xsd:attribute type='xsd:string' name='sourceurl'/>
- <xsd:attributeGroup ref="py:genshiAttrs"/>
- </xsd:complexType>
+ <xsd:group name="rulesElements">
+ <xsd:choice>
+ <xsd:group ref="py:genshiElements"/>
+ <xsd:element name='Package' type='PackageType'>
+ <xsd:annotation>
+ <xsd:documentation>
+ Fully bound description of a software package to be managed.
+ </xsd:documentation>
+ </xsd:annotation>
+ </xsd:element>
+ <xsd:element name='Path' type='PathType'>
+ <xsd:annotation>
+ <xsd:documentation>
+ Fully bound description of a filesystem path to be handled
+ by the POSIX driver.
+ </xsd:documentation>
+ </xsd:annotation>
+ </xsd:element>
+ <xsd:element name='Service' type='ServiceType'>
+ <xsd:annotation>
+ <xsd:documentation>
+ Fully bound description of a system service to be managed.
+ </xsd:documentation>
+ </xsd:annotation>
+ </xsd:element>
+ <xsd:element name='Action' type='ActionType'>
+ <xsd:annotation>
+ <xsd:documentation>
+ Fully bound description of a command to be run.
+ </xsd:documentation>
+ </xsd:annotation>
+ </xsd:element>
+ <xsd:element name='SELinux' type='SELinuxType'>
+ <xsd:annotation>
+ <xsd:documentation>
+ Fully bound description of an SELinux entry.
+ </xsd:documentation>
+ </xsd:annotation>
+ </xsd:element>
+ <xsd:element name='PostInstall' type='PostInstallType'>
+ <xsd:annotation>
+ <xsd:documentation>
+ PostInstall entries are deprecated in favor of Action
+ entries. Actions can do everything PostInstall entries can
+ do and more.
+ </xsd:documentation>
+ </xsd:annotation>
+ </xsd:element>
+ <xsd:element name='Group' type='RContainerType'>
+ <xsd:annotation>
+ <xsd:documentation>
+ Elements within Group tags only apply to clients that are
+ members of that group (or vice-versa; see #element_negate
+ below)
+ </xsd:documentation>
+ </xsd:annotation>
+ </xsd:element>
+ <xsd:element name='Client' type='RContainerType'>
+ <xsd:annotation>
+ <xsd:documentation>
+ Elements within Client tags only apply to the named client
+ (or vice-versa; see #element_negate below)
+ </xsd:documentation>
+ </xsd:annotation>
+ </xsd:element>
+ </xsd:choice>
+ </xsd:group>
<xsd:complexType name='RContainerType'>
<xsd:choice minOccurs='0' maxOccurs='unbounded'>
- <xsd:element name='Service' type='ServiceType'/>
- <xsd:element name='Package' type='PackageType'/>
- <xsd:element name='Path' type='PathType'/>
- <xsd:element name='Action' type='ActionType'/>
- <xsd:element name='Group' type='RContainerType'/>
- <xsd:element name='Client' type='RContainerType'/>
- <xsd:group ref="py:genshiElements"/>
+ <xsd:group ref="rulesElements"/>
</xsd:choice>
<xsd:attribute name='name' type='xsd:string'/>
<xsd:attribute name='negate' type='xsd:boolean'/>
<xsd:attributeGroup ref="py:genshiAttrs"/>
</xsd:complexType>
-
<xsd:element name='Rules'>
<xsd:complexType>
<xsd:choice minOccurs='0' maxOccurs='unbounded'>
- <xsd:element name='Service' type='ServiceType'/>
- <xsd:element name='Package' type='PackageType'/>
- <xsd:element name='Path' type='PathType'/>
- <xsd:element name='Action' type='ActionType'/>
- <xsd:element name='PostInstall' type='PostInstallType'/>
- <xsd:element name='Group' type='RContainerType'/>
- <xsd:element name='Client' type='RContainerType'/>
- <xsd:group ref="py:genshiElements"/>
+ <xsd:group ref="rulesElements"/>
</xsd:choice>
<xsd:attribute name='priority' type='xsd:integer' use='required'/>
<xsd:attributeGroup ref="py:genshiAttrs"/>
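Putting the new rulesElements group to work, a Rules file might bind a service, an action, and one of the new SELinux entries; the entry names and the command are illustrative only.

    <Rules priority="0">
      <Service name="sshd" type="chkconfig" status="on"/>
      <Action name="rebuild-cache" timing="post" when="modified"
              status="check" command="/usr/local/sbin/rebuild-cache"/>
      <Group name="rhel6">
        <SELinux type="boolean" name="httpd_can_sendmail" value="on"/>
      </Group>
    </Rules>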
diff --git a/schemas/services.xsd b/schemas/services.xsd
deleted file mode 100644
index b91e851d2..000000000
--- a/schemas/services.xsd
+++ /dev/null
@@ -1,33 +0,0 @@
-<xsd:schema xmlns:xsd="http://www.w3.org/2001/XMLSchema" xml:lang="en">
-
- <xsd:annotation>
- <xsd:documentation>
- services schema for bcfg2
- Narayan Desai, Argonne National Laboratory
- </xsd:documentation>
- </xsd:annotation>
-
- <xsd:include schemaLocation="servicetype.xsd"/>
-
- <xsd:complexType name='ServiceContainerType'>
- <xsd:choice minOccurs='0' maxOccurs='unbounded'>
- <xsd:element name='Service' type='ServiceType'/>
- <xsd:element name='Group' type='ServiceContainerType'/>
- <xsd:element name='Client' type='ServiceContainerType'/>
- </xsd:choice>
- <xsd:attribute name='name' type='xsd:string'/>
- <xsd:attribute name='negate' type='xsd:string'/>
- </xsd:complexType>
-
- <xsd:element name='Services'>
- <xsd:complexType>
- <xsd:choice minOccurs='0' maxOccurs='unbounded'>
- <xsd:element name='Service' type='ServiceType'/>
- <xsd:element name='Group' type='ServiceContainerType'/>
- <xsd:element name='Client' type='ServiceContainerType'/>
- </xsd:choice>
- <xsd:attribute name='priority' type='xsd:integer'/>
- </xsd:complexType>
- </xsd:element>
-
-</xsd:schema>
diff --git a/schemas/servicetype.xsd b/schemas/servicetype.xsd
index af5bc64a6..7de847c7f 100644
--- a/schemas/servicetype.xsd
+++ b/schemas/servicetype.xsd
@@ -12,6 +12,16 @@
<xsd:import namespace="http://genshi.edgewall.org/"
schemaLocation="genshi.xsd"/>
+ <xsd:simpleType name='RestartEnum'>
+ <xsd:restriction base='xsd:string'>
+ <xsd:enumeration value='true'/>
+ <xsd:enumeration value='false'/>
+ <xsd:enumeration value='1'/>
+ <xsd:enumeration value='0'/>
+ <xsd:enumeration value='interactive'/>
+ </xsd:restriction>
+ </xsd:simpleType>
+
<xsd:complexType name="ServiceType">
<xsd:choice minOccurs="0" maxOccurs="unbounded">
<xsd:element name="User">
@@ -24,13 +34,13 @@
</xsd:choice>
<xsd:attribute name="name" type="xsd:string" use="required"/>
<xsd:attribute name="status" type="StatusEnum"/>
+ <xsd:attribute name="restart" type="RestartEnum"/>
+ <xsd:attribute name="install" type="xsd:boolean"/>
<xsd:attribute name="type" type="ServiceTypeEnum"/>
<xsd:attribute name="port" type="xsd:string"/>
<xsd:attribute name="protocol" type="xsd:string"/>
- <xsd:attribute name="mode" type="xsd:string"/>
<xsd:attribute name="custom" type="xsd:string"/>
<xsd:attribute name="FMRI" type="xsd:string"/>
- <xsd:attribute name="supervised" type="xsd:string"/>
<xsd:attribute name="sequence" type="xsd:string"/>
<xsd:attribute name="target" type="xsd:string"/>
<xsd:attribute name="parameters" type="xsd:string"/>
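With mode and supervised dropped in favor of restart and install, a bound service entry would be written roughly like this (the service name is just an example):

    <BoundService name="httpd" type="chkconfig" status="on"
                  restart="interactive" install="true"/>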
diff --git a/schemas/types.xsd b/schemas/types.xsd
index ead377192..edbc8ad37 100644
--- a/schemas/types.xsd
+++ b/schemas/types.xsd
@@ -1,5 +1,6 @@
-<xsd:schema xmlns:xsd="http://www.w3.org/2001/XMLSchema" xml:lang="en">
-
+<xsd:schema xmlns:xsd="http://www.w3.org/2001/XMLSchema"
+ xmlns:py="http://genshi.edgewall.org/" xml:lang="en">
+
<xsd:annotation>
<xsd:documentation>
string enumeration definitions for bcfg2
@@ -7,6 +8,9 @@
</xsd:documentation>
</xsd:annotation>
+ <xsd:import namespace="http://genshi.edgewall.org/"
+ schemaLocation="genshi.xsd"/>
+
<xsd:simpleType name='PackageTypeEnum'>
<xsd:restriction base='xsd:string'>
<xsd:enumeration value='deb' />
@@ -86,4 +90,134 @@
</xsd:restriction>
</xsd:simpleType>
+ <xsd:complexType name='ActionType'>
+ <xsd:attribute type='ActionTimingEnum' name='timing'/>
+ <xsd:attribute type='ActionWhenEnum' name='when'/>
+ <xsd:attribute type='ActionStatusEnum' name='status'/>
+ <xsd:attribute type="xsd:boolean" name="build"/>
+ <xsd:attribute type='xsd:string' name='name'/>
+ <xsd:attribute type='xsd:string' name='command'/>
+ <xsd:attributeGroup ref="py:genshiAttrs"/>
+ </xsd:complexType>
+
+ <xsd:simpleType name="DeviceTypeEnum">
+ <xsd:restriction base="xsd:string">
+ <xsd:enumeration value="block"/>
+ <xsd:enumeration value="char"/>
+ <xsd:enumeration value="fifo"/>
+ </xsd:restriction>
+ </xsd:simpleType>
+
+ <xsd:simpleType name="ACLTypeEnum">
+ <xsd:restriction base="xsd:string">
+ <xsd:enumeration value="default"/>
+ <xsd:enumeration value="access"/>
+ </xsd:restriction>
+ </xsd:simpleType>
+
+ <xsd:simpleType name="ACLScopeEnum">
+ <xsd:restriction base="xsd:string">
+ <xsd:enumeration value="user"/>
+ <xsd:enumeration value="group"/>
+ </xsd:restriction>
+ </xsd:simpleType>
+
+ <xsd:complexType name="ACLType">
+ <xsd:attribute type="ACLTypeEnum" name="type" use="required"/>
+ <xsd:attribute type="ACLScopeEnum" name="scope"/>
+ <xsd:attribute type="xsd:string" name="perms" use="required"/>
+ <xsd:attribute type="xsd:string" name="user"/>
+ <xsd:attribute type="xsd:string" name="group"/>
+ <xsd:attributeGroup ref="py:genshiAttrs"/>
+ </xsd:complexType>
+
+ <xsd:complexType name="PathType">
+ <xsd:choice minOccurs='0' maxOccurs='unbounded'>
+ <xsd:element name='ACL' type='ACLType'/>
+ </xsd:choice>
+ <xsd:attribute type="PathTypeEnum" name="type"/>
+ <xsd:attribute type="xsd:string" name="name" use="required"/>
+ <xsd:attribute type="DeviceTypeEnum" name="dev_type"/>
+ <xsd:attribute type="xsd:integer" name="major"/>
+ <xsd:attribute type="xsd:integer" name="minor"/>
+ <xsd:attribute type="xsd:string" name="perms"/>
+ <xsd:attribute type="xsd:string" name="owner"/>
+ <xsd:attribute type="xsd:string" name="group"/>
+ <xsd:attribute type="xsd:string" name="secontext"/>
+ <xsd:attribute type="xsd:string" name="recursive"/>
+ <xsd:attribute type="xsd:string" name="prune"/>
+ <xsd:attribute type="xsd:string" name="to"/>
+ <xsd:attribute type="xsd:string" name="vcstype"/>
+ <xsd:attribute type="xsd:string" name="revision"/>
+ <xsd:attribute type="xsd:string" name="sourceurl"/>
+ <xsd:attributeGroup ref="py:genshiAttrs"/>
+ </xsd:complexType>
+
+ <xsd:simpleType name='SELinuxTypeEnum'>
+ <xsd:restriction base='xsd:string'>
+ <xsd:enumeration value='boolean'/>
+ <xsd:enumeration value='module'/>
+ <xsd:enumeration value='port'/>
+ <xsd:enumeration value='fcontext'/>
+ <xsd:enumeration value='node'/>
+ <xsd:enumeration value='login'/>
+ <xsd:enumeration value='user'/>
+ <xsd:enumeration value='interface'/>
+ <xsd:enumeration value='permissive'/>
+ </xsd:restriction>
+ </xsd:simpleType>
+
+ <xsd:simpleType name='SELinuxFileTypeEnum'>
+ <xsd:restriction base='xsd:string'>
+ <xsd:enumeration value='all'/>
+ <xsd:enumeration value='regular'/>
+ <xsd:enumeration value='directory'/>
+ <xsd:enumeration value='symlink'/>
+ <xsd:enumeration value='pipe'/>
+ <xsd:enumeration value='socket'/>
+ <xsd:enumeration value='block'/>
+ <xsd:enumeration value='char'/>
+ </xsd:restriction>
+ </xsd:simpleType>
+
+ <xsd:simpleType name='SELinuxBooleanValueEnum'>
+ <xsd:restriction base='xsd:string'>
+ <xsd:enumeration value='on'/>
+ <xsd:enumeration value='off'/>
+ </xsd:restriction>
+ </xsd:simpleType>
+
+ <xsd:simpleType name='SELinuxEntryTypeEnum'>
+ <xsd:restriction base='xsd:string'>
+ <xsd:enumeration value='boolean'/>
+ <xsd:enumeration value='module'/>
+ <xsd:enumeration value='port'/>
+ <xsd:enumeration value='fcontext'/>
+ <xsd:enumeration value='node'/>
+ <xsd:enumeration value='login'/>
+ <xsd:enumeration value='user'/>
+ <xsd:enumeration value='interface'/>
+ <xsd:enumeration value='permissive'/>
+ </xsd:restriction>
+ </xsd:simpleType>
+
+ <xsd:complexType name="SELinuxStructure">
+ <xsd:attribute type='xsd:string' name='name' use='required'/>
+ <xsd:attribute type="xsd:boolean" name="disabled"/>
+ <xsd:attributeGroup ref="py:genshiAttrs"/>
+ </xsd:complexType>
+
+ <xsd:complexType name="SELinuxType">
+ <xsd:attribute type="xsd:string" name="name" use="required"/>
+ <xsd:attribute type="SELinuxEntryTypeEnum" name="type" use="required"/>
+ <xsd:attribute type="SELinuxBooleanValueEnum" name="value"/>
+ <xsd:attribute type="xsd:boolean" name="disabled"/>
+ <xsd:attribute type="xsd:string" name="selinuxtype"/>
+ <xsd:attribute type="SELinuxFileTypeEnum" name="filetype"/>
+ <xsd:attribute type="xsd:string" name="proto"/>
+ <xsd:attribute type="xsd:string" name="roles"/>
+ <xsd:attribute type="xsd:string" name="prefix"/>
+ <xsd:attribute type="xsd:string" name="selinuxuser"/>
+ <xsd:attributeGroup ref="py:genshiAttrs"/>
+ </xsd:complexType>
</xsd:schema>
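A sketch of a path entry that the new PathType/ACLType definitions are meant to validate; the directory, group names, and SELinux context are invented.

    <BoundPath type="directory" name="/srv/www" owner="root" group="root"
               perms="0775" secontext="system_u:object_r:httpd_sys_content_t:s0">
      <!-- ACL children supplement the base owner/group/perms -->
      <ACL type="access" scope="group" group="webteam" perms="rwx"/>
      <ACL type="default" scope="user" user="deploy" perms="rw"/>
    </BoundPath>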
diff --git a/setup.py b/setup.py
index 13d8dee89..928556598 100755
--- a/setup.py
+++ b/setup.py
@@ -8,6 +8,14 @@ import os
import os.path
import sys
+vfile = 'src/lib/Bcfg2/version.py'
+try:
+ # python 2
+ execfile(vfile)
+except NameError:
+ # py3k
+ exec(compile(open(vfile).read(), vfile, 'exec'))
+
# we only need m2crypto on < python2.6
need_m2crypto = False
version = sys.version_info[:2]
@@ -122,7 +130,7 @@ if need_m2crypto:
setup(cmdclass=cmdclass,
name="Bcfg2",
- version="1.2.2",
+ version="1.2.3",
description="Bcfg2 Server",
author="Narayan Desai",
author_email="desai@mcs.anl.gov",
diff --git a/solaris/Makefile b/solaris/Makefile
index 77d9019eb..39a279bb8 100644
--- a/solaris/Makefile
+++ b/solaris/Makefile
@@ -1,7 +1,7 @@
#!/usr/sfw/bin/gmake
PYTHON="/usr/local/bin/python"
-VERS=1.2.2-1
+VERS=1.2.3-1
PYVERSION := $(shell $(PYTHON) -c "import sys; print sys.version[0:3]")
default: clean package
@@ -12,8 +12,9 @@ package:
-cd ../ && PYTHONPATH=$(PYTHONPATH):$(PWD)/build/lib/python2.6/site-packages/ $(PYTHON) setup.py install --single-version-externally-managed --record=/dev/null --prefix=$(PWD)/build
#setuptools appears to use a restictive umask
-chmod -R o+r build/
- -cat bin/bcfg2 | sed -e 's!/usr/bin/python!$(PYTHON)!' > bin/bcfg2.new && mv bin/bcfg2.new bin/bcfg2
- -./gen-prototypes.sh
+ -cat build/bin/bcfg2 | sed -e 's!/usr/bin/python!$(PYTHON)!' > build/bin/bcfg2.new && mv build/bin/bcfg2.new build/bin/bcfg2
+ -chmod +x build/bin/bcfg2
+ -sh ./gen-prototypes.sh
-pkgmk -o -a `uname -m` -f prototype.bcfg2 -d $(PWD)/tmp -r $(PWD)/build
-pkgmk -o -a `uname -m` -f prototype.bcfg2-server -d $(PWD)/tmp -r $(PWD)/build
-pkgtrans -o -s $(PWD)/tmp $(PWD)/bcfg2-$(VERS) SCbcfg2
diff --git a/solaris/gen-prototypes.sh b/solaris/gen-prototypes.sh
index ea0b4bb13..64aff9edb 100644
--- a/solaris/gen-prototypes.sh
+++ b/solaris/gen-prototypes.sh
@@ -1,6 +1,6 @@
#!/bin/sh
cd build
-PP="./"`ls -1d lib/*`"/site-packages/"
+PP="./lib/python/site-packages/"
#bcfg2
echo "i pkginfo=./pkginfo.bcfg2" > ../prototype.tmp
diff --git a/solaris/pkginfo.bcfg2 b/solaris/pkginfo.bcfg2
index 0ff18516d..73696651c 100644
--- a/solaris/pkginfo.bcfg2
+++ b/solaris/pkginfo.bcfg2
@@ -1,7 +1,7 @@
PKG="SCbcfg2"
NAME="bcfg2"
ARCH="sparc"
-VERSION="1.2.2"
+VERSION="1.2.3"
CATEGORY="application"
VENDOR="Argonne National Labratory"
EMAIL="bcfg-dev@mcs.anl.gov"
diff --git a/solaris/pkginfo.bcfg2-server b/solaris/pkginfo.bcfg2-server
index a0958f9e4..af2f016b0 100644
--- a/solaris/pkginfo.bcfg2-server
+++ b/solaris/pkginfo.bcfg2-server
@@ -1,7 +1,7 @@
PKG="SCbcfg2-server"
NAME="bcfg2-server"
ARCH="sparc"
-VERSION="1.2.2"
+VERSION="1.2.3"
CATEGORY="application"
VENDOR="Argonne National Labratory"
EMAIL="bcfg-dev@mcs.anl.gov"
diff --git a/solaris/prototype.bcfg2-server b/solaris/prototype.bcfg2-server
index ee9e3a3a7..590175329 100644
--- a/solaris/prototype.bcfg2-server
+++ b/solaris/prototype.bcfg2-server
@@ -24,7 +24,6 @@ f none lib/PYVERSION/site-packages/Bcfg2/Server/Plugins/Probes.py 0644 bin bin
f none lib/PYVERSION/site-packages/Bcfg2/Server/Plugins/Decisions.py 0644 bin bin
f none lib/PYVERSION/site-packages/Bcfg2/Server/Plugins/Rules.py 0644 bin bin
f none lib/PYVERSION/site-packages/Bcfg2/Server/Plugins/Packages.py 0644 bin bin
-f none lib/PYVERSION/site-packages/Bcfg2/Server/Plugins/SGenshi.py 0644 bin bin
f none lib/PYVERSION/site-packages/Bcfg2/Server/Plugins/GroupPatterns.py 0644 bin bin
f none lib/PYVERSION/site-packages/Bcfg2/Server/Plugins/SSHbase.py 0644 bin bin
f none lib/PYVERSION/site-packages/Bcfg2/Server/Plugins/Trigger.py 0644 bin bin
@@ -32,7 +31,6 @@ f none lib/PYVERSION/site-packages/Bcfg2/Server/Plugins/__init__.py 0644 bin bin
f none lib/PYVERSION/site-packages/Bcfg2/Server/Plugins/Snapshots.py 0644 bin bin
f none lib/PYVERSION/site-packages/Bcfg2/Server/Plugins/TCheetah.py 0644 bin bin
f none lib/PYVERSION/site-packages/Bcfg2/Server/Plugins/Account.py 0644 bin bin
-f none lib/PYVERSION/site-packages/Bcfg2/Server/Plugins/BB.py 0644 bin bin
f none lib/PYVERSION/site-packages/Bcfg2/Server/Plugins/Cfg.py 0644 bin bin
f none lib/PYVERSION/site-packages/Bcfg2/Server/Plugins/Statistics.py 0644 bin bin
f none lib/PYVERSION/site-packages/Bcfg2/Server/Plugins/Metadata.py 0644 bin bin
@@ -41,7 +39,6 @@ f none lib/PYVERSION/site-packages/Bcfg2/Server/Plugins/Pkgmgr.py 0644 bin bin
f none lib/PYVERSION/site-packages/Bcfg2/Server/Plugins/Ohai.py 0644 bin bin
f none lib/PYVERSION/site-packages/Bcfg2/Server/Plugins/Properties.py 0644 bin bin
f none lib/PYVERSION/site-packages/Bcfg2/Server/Plugins/Editor.py 0644 bin bin
-f none lib/PYVERSION/site-packages/Bcfg2/Server/Plugins/Svcmgr.py 0644 bin bin
f none lib/PYVERSION/site-packages/Bcfg2/Server/Plugins/Bundler.py 0644 bin bin
f none lib/PYVERSION/site-packages/Bcfg2/Server/Plugins/NagiosGen.py 0644 bin bin
f none lib/PYVERSION/site-packages/Bcfg2/Server/Plugins/Deps.py 0644 bin bin
diff --git a/src/lib/Bcfg2/Bcfg2Py3k.py b/src/lib/Bcfg2/Bcfg2Py3k.py
index 6af8b3e5c..be5175e62 100644
--- a/src/lib/Bcfg2/Bcfg2Py3k.py
+++ b/src/lib/Bcfg2/Bcfg2Py3k.py
@@ -14,6 +14,7 @@ try:
from urllib2 import install_opener
from urllib2 import urlopen
from urllib2 import HTTPError
+ from urllib2 import URLError
except ImportError:
from urllib.parse import urljoin, urlparse
from urllib.request import HTTPBasicAuthHandler
@@ -22,6 +23,7 @@ except ImportError:
from urllib.request import install_opener
from urllib.request import urlopen
from urllib.error import HTTPError
+ from urllib.error import URLError
try:
from cStringIO import StringIO
@@ -62,6 +64,12 @@ try:
except ImportError:
import http.client as httplib
+# py3k compatibility
+if sys.hexversion >= 0x03000000:
+ unicode = str
+else:
+ unicode = unicode
+
# print to file compatibility
def u_str(string, encoding=None):
if sys.hexversion >= 0x03000000:
@@ -75,7 +83,49 @@ def u_str(string, encoding=None):
else:
return unicode(string)
-if sys.hexversion >= 0x03000000:
- from io import FileIO as file
-else:
- file = file
+try:
+ unicode = unicode
+except:
+ unicode = str
+
+# base64 compat
+from base64 import b64encode as _b64encode, b64decode as _b64decode
+b64encode = lambda s: _b64encode(s.encode('ascii')).decode('ascii')
+b64decode = lambda s: _b64decode(s.encode('ascii')).decode('ascii')
+
+try:
+ input = raw_input
+except:
+ input = input
+
+try:
+ reduce = reduce
+except NameError:
+ from functools import reduce
+
+try:
+ from collections import MutableMapping
+except ImportError:
+ from UserDict import DictMixin as MutableMapping
+
+
+# in py3k __cmp__ is no longer magical, so we define a mixin that can
+# be used to define the rich comparison operators from __cmp__
+class CmpMixin(object):
+ def __lt__(self, other):
+ return self.__cmp__(other) < 0
+
+ def __gt__(self, other):
+ return self.__cmp__(other) > 0
+
+ def __eq__(self, other):
+ return self.__cmp__(other) == 0
+
+ def __ne__(self, other):
+ return not self.__eq__(other)
+
+ def __ge__(self, other):
+ return self.__gt__(other) or self.__eq__(other)
+
+ def __le__(self, other):
+ return self.__lt__(other) or self.__eq__(other)
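To show how CmpMixin is meant to be consumed, here is a toy subclass (the Release class is hypothetical and assumes Bcfg2 is importable); it defines only __cmp__ and inherits the rich comparison operators.

    from Bcfg2.Bcfg2Py3k import CmpMixin

    class Release(CmpMixin):
        """Toy value object ordered by (major, minor)."""
        def __init__(self, major, minor):
            self.major = major
            self.minor = minor

        def __cmp__(self, other):
            # classic cmp() protocol: negative, zero, or positive
            return (self.major - other.major) or (self.minor - other.minor)

    assert Release(1, 2) < Release(1, 3)    # __lt__ derived from __cmp__
    assert Release(2, 0) >= Release(1, 9)   # likewise __ge__, __eq__, ...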
diff --git a/src/lib/Bcfg2/Client/Frame.py b/src/lib/Bcfg2/Client/Frame.py
index 9ad669ad6..2218b890b 100644
--- a/src/lib/Bcfg2/Client/Frame.py
+++ b/src/lib/Bcfg2/Client/Frame.py
@@ -17,32 +17,6 @@ def cmpent(ent1, ent2):
return cmp(ent1.get('name'), ent2.get('name'))
-def promptFilter(prompt, entries):
- """Filter a supplied list based on user input."""
- ret = []
- entries.sort(cmpent)
- for entry in entries[:]:
- if 'qtext' in entry.attrib:
- iprompt = entry.get('qtext')
- else:
- iprompt = prompt % (entry.tag, entry.get('name'))
- try:
- # py3k compatibility
- try:
- ans = raw_input(iprompt.encode(sys.stdout.encoding, 'replace'))
- except NameError:
- ans = input(iprompt)
- if ans in ['y', 'Y']:
- ret.append(entry)
- except EOFError:
- # python 2.4.3 on CentOS doesn't like ^C for some reason
- break
- except:
- print("Error while reading input")
- continue
- return ret
-
-
def matches_entry(entryspec, entry):
# both are (tag, name)
if entryspec == entry:
@@ -71,7 +45,7 @@ def passes_black_list(entry, blacklist):
for be in blacklist]
-class Frame:
+class Frame(object):
"""Frame is the container for all Tool objects and state information."""
def __init__(self, config, setup, times, drivers, dryrun):
self.config = config
@@ -84,7 +58,7 @@ class Frame:
self.whitelist = []
self.blacklist = []
self.removal = []
- self.logger = logging.getLogger("Bcfg2.Client.Frame")
+ self.logger = logging.getLogger(__name__)
for driver in drivers[:]:
if driver not in Bcfg2.Client.Tools.drivers and \
isinstance(driver, str):
@@ -124,33 +98,74 @@ class Frame:
self.logger.info([tool.name for tool in self.tools])
# find entries not handled by any tools
- problems = [entry for struct in config for \
- entry in struct if entry not in self.handled]
+ self.unhandled = [entry for struct in config
+ for entry in struct
+ if entry not in self.handled]
- if problems:
+ if self.unhandled:
self.logger.error("The following entries are not handled by any tool:")
- self.logger.error(["%s:%s:%s" % (entry.tag, entry.get('type'), \
- entry.get('name')) for entry in problems])
- self.logger.error("")
- entries = [(entry.tag, entry.get('name'))
- for struct in config for entry in struct]
+ for entry in self.unhandled:
+ self.logger.error("%s:%s:%s" % (entry.tag, entry.get('type'),
+ entry.get('name')))
+
+ self.find_dups(config)
+
pkgs = [(entry.get('name'), entry.get('origin'))
- for struct in config for entry in struct if entry.tag == 'Package']
- multi = []
- for entry in entries[:]:
- if entries.count(entry) > 1:
- multi.append(entry)
- entries.remove(entry)
- if multi:
- self.logger.debug("The following entries are included multiple times:")
- self.logger.debug(["%s:%s" % entry for entry in multi])
- self.logger.debug("")
+ for struct in config
+ for entry in struct
+ if entry.tag == 'Package']
if pkgs:
self.logger.debug("The following packages are specified in bcfg2:")
self.logger.debug([pkg[0] for pkg in pkgs if pkg[1] == None])
self.logger.debug("The following packages are prereqs added by Packages:")
self.logger.debug([pkg[0] for pkg in pkgs if pkg[1] == 'Packages'])
+ def find_dups(self, config):
+ entries = dict()
+ for struct in config:
+ for entry in struct:
+ for tool in self.tools:
+ if tool.handlesEntry(entry):
+ pkey = tool.primarykey(entry)
+ if pkey in entries:
+ entries[pkey] += 1
+ else:
+ entries[pkey] = 1
+ multi = [e for e, c in entries.items() if c > 1]
+ if multi:
+ self.logger.debug("The following entries are included multiple times:")
+ for entry in multi:
+ self.logger.debug(entry)
+
+ def promptFilter(self, prompt, entries):
+ """Filter a supplied list based on user input."""
+ ret = []
+ entries.sort(cmpent)
+ for entry in entries[:]:
+ if entry in self.unhandled:
+ # don't prompt for entries that can't be installed
+ continue
+ if 'qtext' in entry.attrib:
+ iprompt = entry.get('qtext')
+ else:
+ iprompt = prompt % (entry.tag, entry.get('name'))
+ try:
+ # py3k compatibility
+ try:
+ ans = raw_input(iprompt.encode(sys.stdout.encoding,
+ 'replace'))
+ except NameError:
+ ans = input(iprompt)
+ if ans in ['y', 'Y']:
+ ret.append(entry)
+ except EOFError:
+ # python 2.4.3 on CentOS doesn't like ^C for some reason
+ break
+ except:
+ print("Error while reading input")
+ continue
+ return ret
+
def __getattr__(self, name):
if name in ['extra', 'handled', 'modified', '__important__']:
ret = []
@@ -190,17 +205,26 @@ class Frame:
self.whitelist = [x for x in self.whitelist if x not in b_to_rem]
# take care of important entries first
- if not self.dryrun and not self.setup['bundle']:
- for cfile in [cfl for cfl in self.config.findall(".//Path") \
- if cfl.get('name') in self.__important__ and \
- cfl.get('type') == 'file']:
- if cfile not in self.whitelist:
+ if not self.dryrun:
+ for cfile in self.config.findall(".//Path"):
+ if (cfile.get('name') not in self.__important__ or
+ cfile.get('type') != 'file' or
+ cfile not in self.whitelist):
+ continue
+ parent = cfile.getparent()
+ if ((parent.tag == "Bundle" and
+ ((self.setup['bundle'] and
+ parent.get("name") not in self.setup['bundle']) or
+ (self.setup['skipbundle'] and
+ parent.get("name") in self.setup['skipbundle']))) or
+ (parent.tag == "Independent" and
+ (self.setup['bundle'] or self.setup['skipindep']))):
continue
- tl = [t for t in self.tools if t.handlesEntry(cfile) \
- and t.canVerify(cfile)]
+ tl = [t for t in self.tools
+ if t.handlesEntry(cfile) and t.canVerify(cfile)]
if tl:
if self.setup['interactive'] and not \
- promptFilter("Install %s: %s? (y/N):", [cfile]):
+ self.promptFilter("Install %s: %s? (y/N):", [cfile]):
self.whitelist.remove(cfile)
continue
try:
@@ -262,22 +286,33 @@ class Frame:
return
# Here is where most of the work goes
# first perform bundle filtering
+ all_bundle_names = [b.get('name')
+ for b in self.config.findall('./Bundle')]
+ bundles = self.config.getchildren()
if self.setup['bundle']:
- all_bundle_names = [b.get('name') for b in
- self.config.findall('./Bundle')]
# warn if non-existent bundle given
for bundle in self.setup['bundle']:
if bundle not in all_bundle_names:
self.logger.info("Warning: Bundle %s not found" % bundle)
- bundles = [b for b in self.config.findall('./Bundle')
- if b.get('name') in self.setup['bundle']]
- self.whitelist = [e for e in self.whitelist
- if True in [e in b for b in bundles]]
+ bundles = filter(lambda b: b.get('name') in self.setup['bundle'],
+ bundles)
elif self.setup['indep']:
- bundles = [nb for nb in self.config.getchildren()
- if nb.tag != 'Bundle']
- else:
- bundles = self.config.getchildren()
+ bundles = filter(lambda b: b.tag != 'Bundle', bundles)
+ if self.setup['skipbundle']:
+ # warn if non-existent bundle given
+ if not self.setup['bundle_quick']:
+ for bundle in self.setup['skipbundle']:
+ if bundle not in all_bundle_names:
+ self.logger.info("Warning: Bundle %s not found" %
+ bundle)
+ bundles = filter(lambda b: \
+ b.get('name') not in self.setup['skipbundle'],
+ bundles)
+ if self.setup['skipindep']:
+ bundles = filter(lambda b: b.tag == 'Bundle', bundles)
+
+ self.whitelist = [e for e in self.whitelist
+ if True in [e in b for b in bundles]]
# first process prereq actions
for bundle in bundles[:]:
@@ -289,7 +324,7 @@ class Frame:
(bmodified or a.get('when') == 'always'))]
# now we process all "always actions"
if self.setup['interactive']:
- promptFilter(prompt, actions)
+ self.promptFilter(prompt, actions)
self.DispatchInstallCalls(actions)
# need to test to fail entries in whitelist
@@ -307,8 +342,8 @@ class Frame:
[self.whitelist.remove(ent) for ent in b_to_remv]
if self.setup['interactive']:
- self.whitelist = promptFilter(prompt, self.whitelist)
- self.removal = promptFilter(rprompt, self.removal)
+ self.whitelist = self.promptFilter(prompt, self.whitelist)
+ self.removal = self.promptFilter(rprompt, self.removal)
for entry in candidates:
if entry not in self.whitelist:
@@ -337,7 +372,6 @@ class Frame:
if mbundles:
self.logger.info("The Following Bundles have been modified:")
self.logger.info([mbun.get('name') for mbun in mbundles])
- self.logger.info("")
tbm = [(t, b) for t in self.tools for b in mbundles]
for tool, bundle in tbm:
try:
@@ -380,19 +414,33 @@ class Frame:
def CondDisplayState(self, phase):
"""Conditionally print tracing information."""
- self.logger.info('\nPhase: %s' % phase)
- self.logger.info('Correct entries:\t%d' % list(self.states.values()).count(True))
- self.logger.info('Incorrect entries:\t%d' % list(self.states.values()).count(False))
+ self.logger.info('Phase: %s' % phase)
+ self.logger.info('Correct entries: %d' %
+ list(self.states.values()).count(True))
+ self.logger.info('Incorrect entries: %d' %
+ list(self.states.values()).count(False))
if phase == 'final' and list(self.states.values()).count(False):
- self.logger.info(["%s:%s" % (entry.tag, entry.get('name')) for \
- entry in self.states if not self.states[entry]])
- self.logger.info('Total managed entries:\t%d' % len(list(self.states.values())))
- self.logger.info('Unmanaged entries:\t%d' % len(self.extra))
+ for entry in self.states.keys():
+ if not self.states[entry]:
+ etype = entry.get('type')
+ if etype:
+ self.logger.info( "%s:%s:%s" % (entry.tag, etype,
+ entry.get('name')))
+ else:
+ self.logger.info(" %s:%s" % (entry.tag,
+ entry.get('name')))
+ self.logger.info('Total managed entries: %d' %
+ len(list(self.states.values())))
+ self.logger.info('Unmanaged entries: %d' % len(self.extra))
if phase == 'final' and self.setup['extra']:
- self.logger.info(["%s:%s" % (entry.tag, entry.get('name')) \
- for entry in self.extra])
-
- self.logger.info("")
+ for entry in self.extra:
+ etype = entry.get('type')
+ if etype:
+ self.logger.info( "%s:%s:%s" % (entry.tag, etype,
+ entry.get('name')))
+ else:
+ self.logger.info(" %s:%s" % (entry.tag,
+ entry.get('name')))
if ((list(self.states.values()).count(False) == 0) and not self.extra):
self.logger.info('All entries correct.')
@@ -428,7 +476,8 @@ class Frame:
total=str(len(self.states)),
version='2.0',
revision=self.config.get('revision', '-1'))
- good = len([key for key, val in list(self.states.items()) if val])
+ good_entries = [key for key, val in list(self.states.items()) if val]
+ good = len(good_entries)
stats.set('good', str(good))
if len([key for key, val in list(self.states.items()) if not val]) == 0:
stats.set('state', 'clean')
@@ -437,6 +486,7 @@ class Frame:
# List bad elements of the configuration
for (data, ename) in [(self.modified, 'Modified'), (self.extra, "Extra"), \
+ (good_entries, "Good"),
([entry for entry in self.states if not \
self.states[entry]], "Bad")]:
container = Bcfg2.Client.XML.SubElement(stats, ename)
diff --git a/src/lib/Bcfg2/Client/Tools/APK.py b/src/lib/Bcfg2/Client/Tools/APK.py
index aaaf2472f..d70916792 100644
--- a/src/lib/Bcfg2/Client/Tools/APK.py
+++ b/src/lib/Bcfg2/Client/Tools/APK.py
@@ -24,8 +24,8 @@ class APK(Bcfg2.Client.Tools.PkgTool):
for pkg in zip(names, nameversions):
pkgname = pkg[0]
version = pkg[1][len(pkgname) + 1:]
- self.logger.debug(" pkgname: %s\n version: %s" %
- (pkgname, version))
+ self.logger.debug(" pkgname: %s" % pkgname)
+ self.logger.debug(" version: %s" % version)
self.installed[pkgname] = version
def VerifyPackage(self, entry, modlist):
diff --git a/src/lib/Bcfg2/Client/Tools/APT.py b/src/lib/Bcfg2/Client/Tools/APT.py
index 6b839ffbc..ce7e9701f 100644
--- a/src/lib/Bcfg2/Client/Tools/APT.py
+++ b/src/lib/Bcfg2/Client/Tools/APT.py
@@ -6,22 +6,7 @@ warnings.filterwarnings("ignore", "apt API not stable yet",
FutureWarning)
import apt.cache
import os
-
import Bcfg2.Client.Tools
-import Bcfg2.Options
-
-# Options for tool locations
-opts = {'install_path': Bcfg2.Options.CLIENT_APT_TOOLS_INSTALL_PATH,
- 'var_path': Bcfg2.Options.CLIENT_APT_TOOLS_VAR_PATH,
- 'etc_path': Bcfg2.Options.CLIENT_SYSTEM_ETC_PATH}
-setup = Bcfg2.Options.OptionParser(opts)
-setup.parse([])
-install_path = setup['install_path']
-var_path = setup['var_path']
-etc_path = setup['etc_path']
-DEBSUMS = '%s/bin/debsums' % install_path
-APTGET = '%s/bin/apt-get' % install_path
-DPKG = '%s/bin/dpkg' % install_path
class APT(Bcfg2.Client.Tools.Tool):
"""The Debian toolset implements package and service operations and inherits
@@ -29,18 +14,26 @@ class APT(Bcfg2.Client.Tools.Tool):
"""
name = 'APT'
- __execs__ = [DEBSUMS, APTGET, DPKG]
+ __execs__ = []
__handles__ = [('Package', 'deb'), ('Path', 'ignore')]
__req__ = {'Package': ['name', 'version'], 'Path': ['type']}
def __init__(self, logger, setup, config):
Bcfg2.Client.Tools.Tool.__init__(self, logger, setup, config)
+
+ self.install_path = setup.get('apt_install_path', '/usr')
+ self.var_path = setup.get('apt_var_path', '/var')
+ self.etc_path = setup.get('apt_etc_path', '/etc')
+ self.debsums = '%s/bin/debsums' % self.install_path
+ self.aptget = '%s/bin/apt-get' % self.install_path
+ self.dpkg = '%s/bin/dpkg' % self.install_path
+ self.__execs__ = [self.debsums, self.aptget, self.dpkg]
+
path_entries = os.environ['PATH'].split(':')
for reqdir in ['/sbin', '/usr/sbin']:
if reqdir not in path_entries:
os.environ['PATH'] = os.environ['PATH'] + ':' + reqdir
- self.pkgcmd = '%s ' % APTGET + \
- '-o DPkg::Options::=--force-overwrite ' + \
+ self.pkgcmd = '%s ' % self.aptget + \
'-o DPkg::Options::=--force-confold ' + \
'-o DPkg::Options::=--force-confmiss ' + \
'--reinstall ' + \
@@ -53,21 +46,21 @@ class APT(Bcfg2.Client.Tools.Tool):
if entry.tag == 'Path' and \
entry.get('type') == 'ignore']
self.__important__ = self.__important__ + \
- ["%s/cache/debconf/config.dat" % var_path,
- "%s/cache/debconf/templates.dat" % var_path,
+ ["%s/cache/debconf/config.dat" % self.var_path,
+ "%s/cache/debconf/templates.dat" % self.var_path,
'/etc/passwd', '/etc/group',
- '%s/apt/apt.conf' % etc_path,
- '%s/dpkg/dpkg.cfg' % etc_path] + \
+ '%s/apt/apt.conf' % self.etc_path,
+ '%s/dpkg/dpkg.cfg' % self.etc_path] + \
[entry.get('name') for struct in config for entry in struct \
if entry.tag == 'Path' and \
- entry.get('name').startswith('%s/apt/sources.list' % etc_path)]
+ entry.get('name').startswith('%s/apt/sources.list' % self.etc_path)]
self.nonexistent = [entry.get('name') for struct in config for entry in struct \
if entry.tag == 'Path' and entry.get('type') == 'nonexistent']
os.environ["DEBIAN_FRONTEND"] = 'noninteractive'
self.actions = {}
if self.setup['kevlar'] and not self.setup['dryrun']:
- self.cmd.run("%s --force-confold --configure --pending" % DPKG)
- self.cmd.run("%s clean" % APTGET)
+ self.cmd.run("%s --force-confold --configure --pending" % self.dpkg)
+ self.cmd.run("%s clean" % self.aptget)
try:
self.pkg_cache = apt.cache.Cache()
except SystemError:
@@ -95,7 +88,8 @@ class APT(Bcfg2.Client.Tools.Tool):
for (name, version) in extras]
def VerifyDebsums(self, entry, modlist):
- output = self.cmd.run("%s -as %s" % (DEBSUMS, entry.get('name')))[1]
+ output = self.cmd.run("%s -as %s" % (self.debsums,
+ entry.get('name')))[1]
if len(output) == 1 and "no md5sums for" in output[0]:
self.logger.info("Package %s has no md5sums. Cannot verify" % \
entry.get('name'))
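
The rewritten constructor reads its tool locations from the per-run setup object with sane defaults, replacing the module-level Bcfg2.Options parsing removed above. A minimal sketch of that lookup pattern, assuming setup behaves like a dict (the apt_* keys and the /usr default are taken from the hunk above):

    # Sketch: deriving APT tool paths from a dict-like setup, with the same
    # fallbacks the new APT.__init__ uses.
    def apt_paths(setup):
        install_path = setup.get('apt_install_path', '/usr')
        return dict(debsums='%s/bin/debsums' % install_path,
                    aptget='%s/bin/apt-get' % install_path,
                    dpkg='%s/bin/dpkg' % install_path)

    print(apt_paths({}))                                # /usr/bin/...
    print(apt_paths({'apt_install_path': '/opt/apt'}))  # /opt/apt/bin/...
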
diff --git a/src/lib/Bcfg2/Client/Tools/Action.py b/src/lib/Bcfg2/Client/Tools/Action.py
index dc49347e9..52d4e6a3f 100644
--- a/src/lib/Bcfg2/Client/Tools/Action.py
+++ b/src/lib/Bcfg2/Client/Tools/Action.py
@@ -2,6 +2,7 @@
import Bcfg2.Client.Tools
from Bcfg2.Client.Frame import matches_white_list, passes_black_list
+from Bcfg2.Bcfg2Py3k import input
"""
<Action timing='pre|post|both'
@@ -44,11 +45,7 @@ class Action(Bcfg2.Client.Tools.Tool):
if self.setup['interactive']:
prompt = ('Run Action %s, %s: (y/N): ' %
(entry.get('name'), entry.get('command')))
- # py3k compatibility
- try:
- ans = raw_input(prompt)
- except NameError:
- ans = input(prompt)
+ ans = input(prompt)
if ans not in ['y', 'Y']:
return False
if self.setup['servicemode'] == 'build':
@@ -64,7 +61,7 @@ class Action(Bcfg2.Client.Tools.Tool):
else:
return rc == 0
else:
- self.logger.debug("In dryrun mode: not running action:\n %s" %
+ self.logger.debug("In dryrun mode: not running action: %s" %
(entry.get('name')))
return False
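
The per-call raw_input/input fallback is replaced by a single input name imported from Bcfg2.Bcfg2Py3k. The exact shim is not shown in this diff, but a compatibility helper of that kind typically looks like this sketch:

    # Sketch of a py2/py3 input shim: Python 2's safe equivalent of
    # input() is raw_input(); Python 3's input() already returns a string.
    import sys

    if sys.hexversion >= 0x03000000:
        input = input
    else:
        input = raw_input

    # ans = input('Run Action foo, /bin/true: (y/N): ')
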
diff --git a/src/lib/Bcfg2/Client/Tools/Chkconfig.py b/src/lib/Bcfg2/Client/Tools/Chkconfig.py
index 12ea5f132..0169b12da 100644
--- a/src/lib/Bcfg2/Client/Tools/Chkconfig.py
+++ b/src/lib/Bcfg2/Client/Tools/Chkconfig.py
@@ -45,30 +45,14 @@ class Chkconfig(Bcfg2.Client.Tools.SvcTool):
except IndexError:
onlevels = []
+ pstatus = self.check_service(entry)
if entry.get('status') == 'on':
- status = (len(onlevels) > 0)
+ status = (len(onlevels) > 0 and pstatus)
command = 'start'
else:
- status = (len(onlevels) == 0)
+ status = (len(onlevels) == 0 and not pstatus)
command = 'stop'
- if entry.get('mode', 'default') == 'supervised':
- # turn on or off the service in supervised mode
- pstatus = self.cmd.run('/sbin/service %s status' % \
- entry.get('name'))[0]
- needs_modification = ((command == 'start' and pstatus) or \
- (command == 'stop' and not pstatus))
- if (not self.setup.get('dryrun') and
- self.setup['servicemode'] != 'disabled' and
- needs_modification):
- self.cmd.run(self.get_svc_command(entry, command))
- # service was modified, so it failed
- pstatus = False
-
- # chkconfig/init.d service
- if entry.get('status') == 'on':
- status = status and not pstatus
-
if not status:
if entry.get('status') == 'on':
entry.set('current_status', 'off')
@@ -78,22 +62,22 @@ class Chkconfig(Bcfg2.Client.Tools.SvcTool):
def InstallService(self, entry):
"""Install Service entry."""
- # don't take any actions for mode='manual'
- if entry.get('mode', 'default') == 'manual':
- self.logger.info("Service %s mode set to manual. Skipping "
- "installation." % (entry.get('name')))
- return False
rcmd = "/sbin/chkconfig %s %s"
self.cmd.run("/sbin/chkconfig --add %s" % (entry.attrib['name']))
self.logger.info("Installing Service %s" % (entry.get('name')))
- pass1 = True
+ rv = True
if entry.get('status') == 'off':
- rc = self.cmd.run(rcmd % (entry.get('name'),
- entry.get('status')) + \
- " --level 0123456")[0]
- pass1 = rc == 0
- rc = self.cmd.run(rcmd % (entry.get('name'), entry.get('status')))[0]
- return pass1 and rc == 0
+            rv &= self.cmd.run((rcmd % (entry.get('name'),
+                                        entry.get('status'))) +
+                               " --level 0123456")[0] == 0
+ if entry.get("current_status") == "on":
+ rv &= self.stop_service(entry)
+ else:
+ rv &= self.cmd.run(rcmd % (entry.get('name'),
+ entry.get('status')))[0] == 0
+ if entry.get("current_status") == "off":
+ rv &= self.start_service(entry)
+ return rv
def FindExtra(self):
"""Locate extra chkconfig Services."""
diff --git a/src/lib/Bcfg2/Client/Tools/DebInit.py b/src/lib/Bcfg2/Client/Tools/DebInit.py
index ca6fc439e..7d5af1127 100644
--- a/src/lib/Bcfg2/Client/Tools/DebInit.py
+++ b/src/lib/Bcfg2/Client/Tools/DebInit.py
@@ -76,11 +76,6 @@ class DebInit(Bcfg2.Client.Tools.SvcTool):
def InstallService(self, entry):
"""Install Service for entry."""
- # don't take any actions for mode='manual'
- if entry.get('mode', 'default') == 'manual':
- self.logger.info("Service %s mode set to manual. Skipping "
- "installation." % (entry.get('name')))
- return False
self.logger.info("Installing Service %s" % (entry.get('name')))
try:
os.stat('/etc/init.d/%s' % entry.get('name'))
diff --git a/src/lib/Bcfg2/Client/Tools/OpenCSW.py b/src/lib/Bcfg2/Client/Tools/OpenCSW.py
new file mode 100644
index 000000000..6aafe316f
--- /dev/null
+++ b/src/lib/Bcfg2/Client/Tools/OpenCSW.py
@@ -0,0 +1,33 @@
+# This is the bcfg2 support for opencsw packages (pkgutil)
+"""This provides Bcfg2 support for OpenCSW packages."""
+
+import tempfile
+import Bcfg2.Client.Tools.SYSV
+
+
+class OpenCSW(Bcfg2.Client.Tools.SYSV.SYSV):
+ """Support for OpenCSW packages."""
+ pkgtype = 'opencsw'
+ pkgtool = ("/opt/csw/bin/pkgutil -y -i %s", ("%s", ["bname"]))
+ name = 'OpenCSW'
+ __execs__ = ['/opt/csw/bin/pkgutil', "/usr/bin/pkginfo"]
+ __handles__ = [('Package', 'opencsw')]
+ __ireq__ = {'Package': ['name', 'version', 'bname']}
+
+ def __init__(self, logger, setup, config):
+        # don't use the SYSV constructor
+ Bcfg2.Client.Tools.PkgTool.__init__(self, logger, setup, config)
+ noaskfile = tempfile.NamedTemporaryFile()
+ self.noaskname = noaskfile.name
+ try:
+ noaskfile.write(Bcfg2.Client.Tools.SYSV.noask)
+ except:
+ pass
+
+ # VerifyPackage comes from Bcfg2.Client.Tools.SYSV
+ # Install comes from Bcfg2.Client.Tools.PkgTool
+ # Extra comes from Bcfg2.Client.Tools.Tool
+ # Remove comes from Bcfg2.Client.Tools.SYSV
+ def FindExtraPackages(self):
+ """Pass through to null FindExtra call."""
+ return []
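
OpenCSW reuses the SYSV verify/remove code and only swaps the install command via the pkgtool tuple. Assuming the usual PkgTool semantics (not shown in this diff), the inner format is applied to each package and the joined result is substituted into the outer template, roughly:

    # Sketch of how the pkgtool tuple above could expand for one package;
    # the CSWvim entry is purely illustrative.
    template, (fmt, attrs) = ("/opt/csw/bin/pkgutil -y -i %s",
                              ("%s", ["bname"]))
    pkg = {"bname": "CSWvim"}
    cmd = template % (fmt % tuple(pkg[a] for a in attrs))
    print(cmd)  # /opt/csw/bin/pkgutil -y -i CSWvim
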
diff --git a/src/lib/Bcfg2/Client/Tools/POSIX.py b/src/lib/Bcfg2/Client/Tools/POSIX.py
deleted file mode 100644
index 0d67dbbab..000000000
--- a/src/lib/Bcfg2/Client/Tools/POSIX.py
+++ /dev/null
@@ -1,943 +0,0 @@
-"""All POSIX Type client support for Bcfg2."""
-
-import binascii
-from datetime import datetime
-import difflib
-import errno
-import grp
-import logging
-import os
-import pwd
-import shutil
-import stat
-import sys
-import time
-# py3k compatibility
-if sys.hexversion >= 0x03000000:
- unicode = str
-
-import Bcfg2.Client.Tools
-import Bcfg2.Options
-from Bcfg2.Client import XML
-
-log = logging.getLogger('POSIX')
-
-# map between dev_type attribute and stat constants
-device_map = {'block': stat.S_IFBLK,
- 'char': stat.S_IFCHR,
- 'fifo': stat.S_IFIFO}
-
-
-def calcPerms(initial, perms):
- """This compares ondisk permissions with specified ones."""
- pdisp = [{1:stat.S_ISVTX, 2:stat.S_ISGID, 4:stat.S_ISUID},
- {1:stat.S_IXUSR, 2:stat.S_IWUSR, 4:stat.S_IRUSR},
- {1:stat.S_IXGRP, 2:stat.S_IWGRP, 4:stat.S_IRGRP},
- {1:stat.S_IXOTH, 2:stat.S_IWOTH, 4:stat.S_IROTH}]
- tempperms = initial
- if len(perms) == 3:
- perms = '0%s' % (perms)
- pdigits = [int(perms[digit]) for digit in range(4)]
- for index in range(4):
- for (num, perm) in list(pdisp[index].items()):
- if pdigits[index] & num:
- tempperms |= perm
- return tempperms
-
-
-def normGid(entry):
- """
- This takes a group name or gid and
- returns the corresponding gid or False.
- """
- try:
- try:
- return int(entry.get('group'))
- except:
- return int(grp.getgrnam(entry.get('group'))[2])
- except (OSError, KeyError):
- log.error('GID normalization failed for %s. Does group %s exist?'
- % (entry.get('name'), entry.get('group')))
- return False
-
-
-def normUid(entry):
- """
- This takes a user name or uid and
- returns the corresponding uid or False.
- """
- try:
- try:
- return int(entry.get('owner'))
- except:
- return int(pwd.getpwnam(entry.get('owner'))[2])
- except (OSError, KeyError):
- log.error('UID normalization failed for %s. Does owner %s exist?'
- % (entry.get('name'), entry.get('owner')))
- return False
-
-
-def isString(strng, encoding):
- """
- Returns true if the string contains no ASCII control characters
- and can be decoded from the specified encoding.
- """
- for char in strng:
- if ord(char) < 9 or ord(char) > 13 and ord(char) < 32:
- return False
- try:
- strng.decode(encoding)
- return True
- except:
- return False
-
-
-class POSIX(Bcfg2.Client.Tools.Tool):
- """POSIX File support code."""
- name = 'POSIX'
- __handles__ = [('Path', 'device'),
- ('Path', 'directory'),
- ('Path', 'file'),
- ('Path', 'hardlink'),
- ('Path', 'nonexistent'),
- ('Path', 'permissions'),
- ('Path', 'symlink')]
- __req__ = {'Path': ['name', 'type']}
-
- # grab paranoid options from /etc/bcfg2.conf
- opts = {'ppath': Bcfg2.Options.PARANOID_PATH,
- 'max_copies': Bcfg2.Options.PARANOID_MAX_COPIES}
- setup = Bcfg2.Options.OptionParser(opts)
- setup.parse([])
- ppath = setup['ppath']
- max_copies = setup['max_copies']
-
- def canInstall(self, entry):
- """Check if entry is complete for installation."""
- if Bcfg2.Client.Tools.Tool.canInstall(self, entry):
- if (entry.tag,
- entry.get('type'),
- entry.text,
- entry.get('empty', 'false')) == ('Path',
- 'file',
- None,
- 'false'):
- return False
- return True
- else:
- return False
-
- def gatherCurrentData(self, entry):
- if entry.tag == 'Path' and entry.get('type') == 'file':
- try:
- ondisk = os.stat(entry.get('name'))
- except OSError:
- entry.set('current_exists', 'false')
- self.logger.debug("%s %s does not exist" %
- (entry.tag, entry.get('name')))
- return False
- try:
- entry.set('current_owner', str(ondisk[stat.ST_UID]))
- entry.set('current_group', str(ondisk[stat.ST_GID]))
- except (OSError, KeyError):
- pass
- entry.set('perms', str(oct(ondisk[stat.ST_MODE])[-4:]))
-
- def Verifydevice(self, entry, _):
- """Verify device entry."""
- if entry.get('dev_type') == None or \
- entry.get('owner') == None or \
- entry.get('group') == None:
- self.logger.error('Entry %s not completely specified. '
- 'Try running bcfg2-lint.' % (entry.get('name')))
- return False
- if entry.get('dev_type') in ['block', 'char']:
- # check if major/minor are properly specified
- if entry.get('major') == None or \
- entry.get('minor') == None:
- self.logger.error('Entry %s not completely specified. '
- 'Try running bcfg2-lint.' % (entry.get('name')))
- return False
- try:
- # check for file existence
- filestat = os.stat(entry.get('name'))
- except OSError:
- entry.set('current_exists', 'false')
- self.logger.debug("%s %s does not exist" %
- (entry.tag, entry.get('name')))
- return False
-
- try:
- # attempt to verify device properties as specified in config
- dev_type = entry.get('dev_type')
- mode = calcPerms(device_map[dev_type],
- entry.get('mode', '0600'))
- owner = normUid(entry)
- group = normGid(entry)
- if dev_type in ['block', 'char']:
- # check for incompletely specified entries
- if entry.get('major') == None or \
- entry.get('minor') == None:
- self.logger.error('Entry %s not completely specified. '
- 'Try running bcfg2-lint.' % (entry.get('name')))
- return False
- major = int(entry.get('major'))
- minor = int(entry.get('minor'))
- if major == os.major(filestat.st_rdev) and \
- minor == os.minor(filestat.st_rdev) and \
- mode == filestat.st_mode and \
- owner == filestat.st_uid and \
- group == filestat.st_gid:
- return True
- else:
- return False
- elif dev_type == 'fifo' and \
- mode == filestat.st_mode and \
- owner == filestat.st_uid and \
- group == filestat.st_gid:
- return True
- else:
- self.logger.info('Device properties for %s incorrect' % \
- entry.get('name'))
- return False
- except OSError:
- self.logger.debug("%s %s failed to verify" %
- (entry.tag, entry.get('name')))
- return False
-
- def Installdevice(self, entry):
- """Install device entries."""
- try:
- # check for existing paths and remove them
- os.lstat(entry.get('name'))
- try:
- os.unlink(entry.get('name'))
- exists = False
- except OSError:
- self.logger.info('Failed to unlink %s' % \
- entry.get('name'))
- return False
- except OSError:
- exists = False
-
- if not exists:
- try:
- dev_type = entry.get('dev_type')
- mode = calcPerms(device_map[dev_type],
- entry.get('mode', '0600'))
- if dev_type in ['block', 'char']:
- # check if major/minor are properly specified
- if entry.get('major') == None or \
- entry.get('minor') == None:
- self.logger.error('Entry %s not completely specified. '
- 'Try running bcfg2-lint.' % (entry.get('name')))
- return False
- major = int(entry.get('major'))
- minor = int(entry.get('minor'))
- device = os.makedev(major, minor)
- os.mknod(entry.get('name'), mode, device)
- else:
- os.mknod(entry.get('name'), mode)
- """
- Python uses the OS mknod(2) implementation which modifies the
- mode based on the umask of the running process. Therefore, the
- following chmod(2) call is needed to make sure the permissions
- are set as specified by the user.
- """
- os.chmod(entry.get('name'), mode)
- os.chown(entry.get('name'), normUid(entry), normGid(entry))
- return True
- except KeyError:
- self.logger.error('Failed to install %s' % entry.get('name'))
- except OSError:
- self.logger.error('Failed to install %s' % entry.get('name'))
- return False
-
- def Verifydirectory(self, entry, modlist):
- """Verify Path type='directory' entry."""
- if entry.get('perms') == None or \
- entry.get('owner') == None or \
- entry.get('group') == None:
- self.logger.error("POSIX: Entry %s not completely specified. "
- "Try running bcfg2-lint." % (entry.get('name')))
- return False
- while len(entry.get('perms', '')) < 4:
- entry.set('perms', '0' + entry.get('perms', ''))
- try:
- ondisk = os.stat(entry.get('name'))
- except OSError:
- entry.set('current_exists', 'false')
- self.logger.info("POSIX: %s %s does not exist" %
- (entry.tag, entry.get('name')))
- return False
- try:
- owner = str(ondisk[stat.ST_UID])
- group = str(ondisk[stat.ST_GID])
- except (OSError, KeyError):
- self.logger.info("POSIX: User/Group resolution failed "
- "for path %s" % entry.get('name'))
- owner = 'root'
- group = '0'
- finfo = os.stat(entry.get('name'))
- perms = oct(finfo[stat.ST_MODE])[-4:]
- if entry.get('mtime', '-1') != '-1':
- mtime = str(finfo[stat.ST_MTIME])
- else:
- mtime = '-1'
- pTrue = ((owner == str(normUid(entry))) and
- (group == str(normGid(entry))) and
- (perms == entry.get('perms')) and
- (mtime == entry.get('mtime', '-1')))
-
- pruneTrue = True
- ex_ents = []
- if entry.get('prune', 'false') == 'true' \
- and (entry.tag == 'Path' and entry.get('type') == 'directory'):
- # check for any extra entries when prune='true' attribute is set
- try:
- entries = ['/'.join([entry.get('name'), ent]) \
- for ent in os.listdir(entry.get('name'))]
- ex_ents = [e for e in entries if e not in modlist]
- if ex_ents:
- pruneTrue = False
- self.logger.info("POSIX: Directory %s contains "
- "extra entries:" % entry.get('name'))
- self.logger.info(ex_ents)
- nqtext = entry.get('qtext', '') + '\n'
- nqtext += "Directory %s contains extra entries: " % \
- entry.get('name')
- nqtext += ":".join(ex_ents)
- entry.set('qtest', nqtext)
- [entry.append(XML.Element('Prune', path=x)) \
- for x in ex_ents]
- except OSError:
- ex_ents = []
- pruneTrue = True
-
- if not pTrue:
- if owner != str(normUid(entry)):
- entry.set('current_owner', owner)
- self.logger.debug("%s %s ownership wrong" % \
- (entry.tag, entry.get('name')))
- nqtext = entry.get('qtext', '') + '\n'
- nqtext += "%s owner wrong. is %s should be %s" % \
- (entry.get('name'), owner, entry.get('owner'))
- entry.set('qtext', nqtext)
- if group != str(normGid(entry)):
- entry.set('current_group', group)
- self.logger.debug("%s %s group wrong" % \
- (entry.tag, entry.get('name')))
- nqtext = entry.get('qtext', '') + '\n'
- nqtext += "%s group is %s should be %s" % \
- (entry.get('name'), group, entry.get('group'))
- entry.set('qtext', nqtext)
- if perms != entry.get('perms'):
- entry.set('current_perms', perms)
- self.logger.debug("%s %s permissions are %s should be %s" %
- (entry.tag,
- entry.get('name'),
- perms,
- entry.get('perms')))
- nqtext = entry.get('qtext', '') + '\n'
- nqtext += "%s %s perms are %s should be %s" % \
- (entry.tag,
- entry.get('name'),
- perms,
- entry.get('perms'))
- entry.set('qtext', nqtext)
- if mtime != entry.get('mtime', '-1'):
- entry.set('current_mtime', mtime)
- self.logger.debug("%s %s mtime is %s should be %s" \
- % (entry.tag, entry.get('name'), mtime,
- entry.get('mtime')))
- nqtext = entry.get('qtext', '') + '\n'
- nqtext += "%s mtime is %s should be %s" % \
- (entry.get('name'), mtime, entry.get('mtime'))
- entry.set('qtext', nqtext)
- if entry.get('type') != 'file':
- nnqtext = entry.get('qtext')
- nnqtext += '\nInstall %s %s: (y/N) ' % (entry.get('type'),
- entry.get('name'))
- entry.set('qtext', nnqtext)
- return pTrue and pruneTrue
-
- def Installdirectory(self, entry):
- """Install Path type='directory' entry."""
- if entry.get('perms') == None or \
- entry.get('owner') == None or \
- entry.get('group') == None:
- self.logger.error('Entry %s not completely specified. '
- 'Try running bcfg2-lint.' % \
- (entry.get('name')))
- return False
- self.logger.info("Installing directory %s" % (entry.get('name')))
- try:
- fmode = os.lstat(entry.get('name'))
- if not stat.S_ISDIR(fmode[stat.ST_MODE]):
- self.logger.debug("Found a non-directory entry at %s" % \
- (entry.get('name')))
- try:
- os.unlink(entry.get('name'))
- exists = False
- except OSError:
- self.logger.info("Failed to unlink %s" % \
- (entry.get('name')))
- return False
- else:
- self.logger.debug("Found a pre-existing directory at %s" % \
- (entry.get('name')))
- exists = True
- except OSError:
- # stat failed
- exists = False
-
- if not exists:
- parent = "/".join(entry.get('name').split('/')[:-1])
- if parent:
- try:
- os.stat(parent)
- except:
- self.logger.debug('Creating parent path for directory %s' % (entry.get('name')))
- for idx in range(len(parent.split('/')[:-1])):
- current = '/'+'/'.join(parent.split('/')[1:2+idx])
- try:
- sloc = os.stat(current)
- except OSError:
- try:
- os.mkdir(current)
- continue
- except OSError:
- return False
- if not stat.S_ISDIR(sloc[stat.ST_MODE]):
- try:
- os.unlink(current)
- os.mkdir(current)
- except OSError:
- return False
-
- try:
- os.mkdir(entry.get('name'))
- except OSError:
- self.logger.error('Failed to create directory %s' % \
- (entry.get('name')))
- return False
- if entry.get('prune', 'false') == 'true' and entry.get("qtest"):
- for pent in entry.findall('Prune'):
- pname = pent.get('path')
- ulfailed = False
- if os.path.isdir(pname):
- self.logger.info("Not removing extra directory %s, "
- "please check and remove manually" % pname)
- continue
- try:
- self.logger.debug("Unlinking file %s" % pname)
- os.unlink(pname)
- except OSError:
- self.logger.error("Failed to unlink path %s" % pname)
- ulfailed = True
- if ulfailed:
- return False
- return self.Installpermissions(entry)
-
- def Verifyfile(self, entry, _):
- """Verify Path type='file' entry."""
- # permissions check + content check
- permissionStatus = self.Verifydirectory(entry, _)
- tbin = False
- if entry.text == None and entry.get('empty', 'false') == 'false':
- self.logger.error("Cannot verify incomplete Path type='%s' %s" %
- (entry.get('type'), entry.get('name')))
- return False
- if entry.get('encoding', 'ascii') == 'base64':
- tempdata = binascii.a2b_base64(entry.text)
- tbin = True
- elif entry.get('empty', 'false') == 'true':
- tempdata = ''
- else:
- tempdata = entry.text
- if type(tempdata) == unicode:
- try:
- tempdata = tempdata.encode(self.setup['encoding'])
- except UnicodeEncodeError:
- e = sys.exc_info()[1]
- self.logger.error("Error encoding file %s:\n %s" % \
- (entry.get('name'), e))
-
- different = False
- content = None
- if not os.path.exists(entry.get("name")):
- # first, see if the target file exists at all; if not,
- # they're clearly different
- different = True
- content = ""
- else:
- # next, see if the size of the target file is different
- # from the size of the desired content
- try:
- estat = os.stat(entry.get('name'))
- except OSError:
- err = sys.exc_info()[1]
- self.logger.error("Failed to stat %s: %s" %
- (err.filename, err))
- return False
- if len(tempdata) != estat[stat.ST_SIZE]:
- different = True
- else:
- # finally, read in the target file and compare them
- # directly. comparison could be done with a checksum,
- # which might be faster for big binary files, but
- # slower for everything else
- try:
- content = open(entry.get('name')).read()
- except IOError:
- err = sys.exc_info()[1]
- self.logger.error("Failed to read %s: %s" %
- (err.filename, err))
- return False
- different = content != tempdata
-
- if different:
- if self.setup['interactive']:
- prompt = [entry.get('qtext', '')]
- if not tbin and content is None:
- # it's possible that we figured out the files are
- # different without reading in the local file. if
- # the supplied version of the file is not binary,
- # we now have to read in the local file to figure
- # out if _it_ is binary, and either include that
- # fact or the diff in our prompts for -I
- try:
- content = open(entry.get('name')).read()
- except IOError:
- err = sys.exc_info()[1]
- self.logger.error("Failed to read %s: %s" %
- (err.filename, err))
- return False
- if tbin or not isString(content, self.setup['encoding']):
- # don't compute diffs if the file is binary
- prompt.append('Binary file, no printable diff')
- else:
- diff = self._diff(content, tempdata,
- difflib.unified_diff,
- filename=entry.get("name"))
- if diff:
- udiff = '\n'.join(diff)
- try:
- prompt.append(udiff.decode(self.setup['encoding']))
- except UnicodeDecodeError:
- prompt.append("Binary file, no printable diff")
- else:
- prompt.append("Diff took too long to compute, no "
- "printable diff")
- prompt.append("Install %s %s: (y/N): " % (entry.tag,
- entry.get('name')))
- entry.set("qtext", "\n".join(prompt))
-
- if entry.get('sensitive', 'false').lower() != 'true':
- if content is None:
- # it's possible that we figured out the files are
- # different without reading in the local file. we
- # now have to read in the local file to figure out
- # if _it_ is binary, and either include the whole
- # file or the diff for reports
- try:
- content = open(entry.get('name')).read()
- except IOError:
- err = sys.exc_info()[1]
- self.logger.error("Failed to read %s: %s" %
- (err.filename, err))
- return False
-
- if tbin or not isString(content, self.setup['encoding']):
- # don't compute diffs if the file is binary
- entry.set('current_bfile', binascii.b2a_base64(content))
- else:
- diff = self._diff(content, tempdata, difflib.ndiff,
- filename=entry.get("name"))
- if diff:
- entry.set("current_bdiff",
- binascii.b2a_base64("\n".join(diff)))
- elif not tbin and isString(content, self.setup['encoding']):
- entry.set('current_bfile', binascii.b2a_base64(content))
- elif permissionStatus == False and self.setup['interactive']:
- prompt = [entry.get('qtext', '')]
- prompt.append("Install %s %s: (y/N): " % (entry.tag,
- entry.get('name')))
- entry.set("qtext", "\n".join(prompt))
-
-
- return permissionStatus and not different
-
- def Installfile(self, entry):
- """Install Path type='file' entry."""
- self.logger.info("Installing file %s" % (entry.get('name')))
-
- parent = "/".join(entry.get('name').split('/')[:-1])
- if parent:
- try:
- os.stat(parent)
- except:
- self.logger.debug('Creating parent path for config file %s' % \
- (entry.get('name')))
- current = '/'
- for next in parent.split('/')[1:]:
- current += next + '/'
- try:
- sloc = os.stat(current)
- try:
- if not stat.S_ISDIR(sloc[stat.ST_MODE]):
- self.logger.debug('%s is not a directory; recreating' \
- % (current))
- os.unlink(current)
- os.mkdir(current)
- except OSError:
- return False
- except OSError:
- try:
- self.logger.debug("Creating non-existent path %s" % current)
- os.mkdir(current)
- except OSError:
- return False
-
- # If we get here, then the parent directory should exist
- if (entry.get("paranoid", False) in ['true', 'True']) and \
- self.setup.get("paranoid", False) and not \
- (entry.get('current_exists', 'true') == 'false'):
- bkupnam = entry.get('name').replace('/', '_')
- # current list of backups for this file
- try:
- bkuplist = [f for f in os.listdir(self.ppath) if
- f.startswith(bkupnam)]
- except OSError:
- e = sys.exc_info()[1]
- self.logger.error("Failed to create backup list in %s: %s" %
- (self.ppath, e.strerror))
- return False
- bkuplist.sort()
- while len(bkuplist) >= int(self.max_copies):
- # remove the oldest backup available
- oldest = bkuplist.pop(0)
- self.logger.info("Removing %s" % oldest)
- try:
- os.remove("%s/%s" % (self.ppath, oldest))
- except:
- self.logger.error("Failed to remove %s/%s" % \
- (self.ppath, oldest))
- return False
- try:
- # backup existing file
- shutil.copy(entry.get('name'),
- "%s/%s_%s" % (self.ppath, bkupnam,
- datetime.isoformat(datetime.now())))
- self.logger.info("Backup of %s saved to %s" %
- (entry.get('name'), self.ppath))
- except IOError:
- e = sys.exc_info()[1]
- self.logger.error("Failed to create backup file for %s" % \
- (entry.get('name')))
- self.logger.error(e)
- return False
- try:
- newfile = open("%s.new"%(entry.get('name')), 'w')
- if entry.get('encoding', 'ascii') == 'base64':
- filedata = binascii.a2b_base64(entry.text)
- elif entry.get('empty', 'false') == 'true':
- filedata = ''
- else:
- if type(entry.text) == unicode:
- filedata = entry.text.encode(self.setup['encoding'])
- else:
- filedata = entry.text
- newfile.write(filedata)
- newfile.close()
- try:
- os.chown(newfile.name, normUid(entry), normGid(entry))
- except KeyError:
- self.logger.error("Failed to chown %s to %s:%s" %
- (newfile.name, entry.get('owner'),
- entry.get('group')))
- os.chown(newfile.name, 0, 0)
- except OSError:
- err = sys.exc_info()[1]
- self.logger.error("Could not chown %s: %s" % (newfile.name,
- err))
- os.chmod(newfile.name, calcPerms(stat.S_IFREG, entry.get('perms')))
- os.rename(newfile.name, entry.get('name'))
- if entry.get('mtime', '-1') != '-1':
- try:
- os.utime(entry.get('name'), (int(entry.get('mtime')),
- int(entry.get('mtime'))))
- except:
- self.logger.error("File %s mtime fix failed" \
- % (entry.get('name')))
- return False
- return True
- except (OSError, IOError):
- err = sys.exc_info()[1]
- if err.errno == errno.EACCES:
- self.logger.info("Failed to open %s for writing" % (entry.get('name')))
- else:
- print(err)
- return False
-
- def Verifyhardlink(self, entry, _):
- """Verify HardLink entry."""
- if entry.get('to') == None:
- self.logger.error('Entry %s not completely specified. '
- 'Try running bcfg2-lint.' % \
- (entry.get('name')))
- return False
- try:
- if os.path.samefile(entry.get('name'), entry.get('to')):
- return True
- self.logger.debug("Hardlink %s is incorrect" % \
- entry.get('name'))
- entry.set('qtext', "Link %s to %s? [y/N] " % \
- (entry.get('name'),
- entry.get('to')))
- return False
- except OSError:
- entry.set('current_exists', 'false')
- entry.set('qtext', "Link %s to %s? [y/N] " % \
- (entry.get('name'),
- entry.get('to')))
- return False
-
- def Installhardlink(self, entry):
- """Install HardLink entry."""
- if entry.get('to') == None:
- self.logger.error('Entry %s not completely specified. '
- 'Try running bcfg2-lint.' % \
- (entry.get('name')))
- return False
- self.logger.info("Installing Hardlink %s" % (entry.get('name')))
- if os.path.lexists(entry.get('name')):
- try:
- fmode = os.lstat(entry.get('name'))[stat.ST_MODE]
- if stat.S_ISREG(fmode) or stat.S_ISLNK(fmode):
- self.logger.debug("Non-directory entry already exists at "
- "%s. Unlinking entry." % (entry.get('name')))
- os.unlink(entry.get('name'))
- elif stat.S_ISDIR(fmode):
- self.logger.debug("Directory already exists at %s" % \
- (entry.get('name')))
- self.cmd.run("mv %s/ %s.bak" % \
- (entry.get('name'),
- entry.get('name')))
- else:
- os.unlink(entry.get('name'))
- except OSError:
- self.logger.info("Hardlink %s cleanup failed" % \
- (entry.get('name')))
- try:
- os.link(entry.get('to'), entry.get('name'))
- return True
- except OSError:
- return False
-
- def Verifynonexistent(self, entry, _):
- """Verify nonexistent entry."""
- # return true if path does _not_ exist
- return not os.path.lexists(entry.get('name'))
-
- def Installnonexistent(self, entry):
- '''Remove nonexistent entries'''
- ename = entry.get('name')
- if entry.get('recursive') in ['True', 'true']:
- # ensure that configuration spec is consistent first
- if [e for e in self.buildModlist() \
- if e.startswith(ename) and e != ename]:
- self.logger.error('Not installing %s. One or more files '
- 'in this directory are specified in '
- 'your configuration.' % ename)
- return False
- try:
- shutil.rmtree(ename)
- except OSError:
- e = sys.exc_info()[1]
- self.logger.error('Failed to remove %s: %s' % (ename,
- e.strerror))
- else:
- if os.path.isdir(ename):
- try:
- os.rmdir(ename)
- return True
- except OSError:
- e = sys.exc_info()[1]
- self.logger.error('Failed to remove %s: %s' % (ename,
- e.strerror))
- return False
- try:
- os.remove(ename)
- return True
- except OSError:
- e = sys.exc_info()[1]
- self.logger.error('Failed to remove %s: %s' % (ename,
- e.strerror))
- return False
-
- def Verifypermissions(self, entry, _):
- """Verify Path type='permissions' entry"""
- if entry.get('perms') == None or \
- entry.get('owner') == None or \
- entry.get('group') == None:
- self.logger.error('Entry %s not completely specified. '
- 'Try running bcfg2-lint.' % (entry.get('name')))
- return False
- if entry.get('recursive') in ['True', 'true']:
- # verify ownership information recursively
- owner = normUid(entry)
- group = normGid(entry)
-
- for root, dirs, files in os.walk(entry.get('name')):
- for p in dirs + files:
- path = os.path.join(root, p)
- pstat = os.stat(path)
- if owner != pstat.st_uid:
- # owner mismatch for path
- entry.set('current_owner', str(pstat.st_uid))
- self.logger.debug("%s %s ownership wrong" % \
- (entry.tag, path))
- nqtext = entry.get('qtext', '') + '\n'
- nqtext += ("Owner for path %s is incorrect. "
- "Current owner is %s but should be %s\n" % \
- (path, pstat.st_uid, entry.get('owner')))
- nqtext += ("\nInstall %s %s: (y/N): " %
- (entry.tag, entry.get('name')))
- entry.set('qtext', nqtext)
- return False
- if group != pstat.st_gid:
- # group mismatch for path
- entry.set('current_group', str(pstat.st_gid))
- self.logger.debug("%s %s group wrong" % \
- (entry.tag, path))
- nqtext = entry.get('qtext', '') + '\n'
- nqtext += ("Group for path %s is incorrect. "
- "Current group is %s but should be %s\n" % \
- (path, pstat.st_gid, entry.get('group')))
- nqtext += ("\nInstall %s %s: (y/N): " %
- (entry.tag, entry.get('name')))
- entry.set('qtext', nqtext)
- return False
- return self.Verifydirectory(entry, _)
-
- def _diff(self, content1, content2, difffunc, filename=None):
- rv = []
- start = time.time()
- longtime = False
- for diffline in difffunc(content1.split('\n'),
- content2.split('\n')):
- now = time.time()
- rv.append(diffline)
- if now - start > 5 and not longtime:
- if filename:
- self.logger.info("Diff of %s taking a long time" %
- filename)
- else:
- self.logger.info("Diff taking a long time")
- longtime = True
- elif now - start > 30:
- if filename:
- self.logger.error("Diff of %s took too long; giving up" %
- filename)
- else:
- self.logger.error("Diff took too long; giving up")
- return False
- return rv
-
- def Installpermissions(self, entry):
- """Install POSIX permissions"""
- if entry.get('perms') == None or \
- entry.get('owner') == None or \
- entry.get('group') == None:
- self.logger.error('Entry %s not completely specified. '
- 'Try running bcfg2-lint.' % (entry.get('name')))
- return False
- plist = [entry.get('name')]
- if entry.get('recursive') in ['True', 'true']:
- # verify ownership information recursively
- owner = normUid(entry)
- group = normGid(entry)
-
- for root, dirs, files in os.walk(entry.get('name')):
- for p in dirs + files:
- path = os.path.join(root, p)
- pstat = os.stat(path)
- if owner != pstat.st_uid or group != pstat.st_gid:
- # owner mismatch for path
- plist.append(path)
- try:
- for p in plist:
- os.chown(p, normUid(entry), normGid(entry))
- os.chmod(p, calcPerms(stat.S_IFDIR, entry.get('perms')))
- return True
- except (OSError, KeyError):
- self.logger.error('Permission fixup failed for %s' % \
- (entry.get('name')))
- return False
-
- def Verifysymlink(self, entry, _):
- """Verify Path type='symlink' entry."""
- if entry.get('to') == None:
- self.logger.error('Entry %s not completely specified. '
- 'Try running bcfg2-lint.' % \
- (entry.get('name')))
- return False
- try:
- sloc = os.readlink(entry.get('name'))
- if sloc == entry.get('to'):
- return True
- self.logger.debug("Symlink %s points to %s, should be %s" % \
- (entry.get('name'), sloc, entry.get('to')))
- entry.set('current_to', sloc)
- entry.set('qtext', "Link %s to %s? [y/N] " % (entry.get('name'),
- entry.get('to')))
- return False
- except OSError:
- entry.set('current_exists', 'false')
- entry.set('qtext', "Link %s to %s? [y/N] " % (entry.get('name'),
- entry.get('to')))
- return False
-
- def Installsymlink(self, entry):
- """Install Path type='symlink' entry."""
- if entry.get('to') == None:
- self.logger.error('Entry %s not completely specified. '
- 'Try running bcfg2-lint.' % \
- (entry.get('name')))
- return False
- self.logger.info("Installing symlink %s" % (entry.get('name')))
- if os.path.lexists(entry.get('name')):
- try:
- fmode = os.lstat(entry.get('name'))[stat.ST_MODE]
- if stat.S_ISREG(fmode) or stat.S_ISLNK(fmode):
- self.logger.debug("Non-directory entry already exists at "
- "%s. Unlinking entry." % \
- (entry.get('name')))
- os.unlink(entry.get('name'))
- elif stat.S_ISDIR(fmode):
- self.logger.debug("Directory already exists at %s" %\
- (entry.get('name')))
- self.cmd.run("mv %s/ %s.bak" % \
- (entry.get('name'),
- entry.get('name')))
- else:
- os.unlink(entry.get('name'))
- except OSError:
- self.logger.info("Symlink %s cleanup failed" %\
- (entry.get('name')))
- try:
- os.symlink(entry.get('to'), entry.get('name'))
- return True
- except OSError:
- return False
-
- def InstallPath(self, entry):
- """Dispatch install to the proper method according to type"""
- ret = getattr(self, 'Install%s' % entry.get('type'))
- return ret(entry)
-
- def VerifyPath(self, entry, _):
- """Dispatch verify to the proper method according to type"""
- ret = getattr(self, 'Verify%s' % entry.get('type'))
- return ret(entry, _)
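
For reference, the calcPerms() helper deleted above maps each octal digit of the perms string onto stat constants; for well-formed input that is equivalent to OR-ing the file-type bits with int(perms, 8), which is how the new handlers below compute modes:

    # Sketch: the digit-table approach and a plain octal conversion agree.
    import stat

    def calc_perms(initial, perms):
        if len(perms) == 3:
            perms = '0' + perms
        return initial | int(perms, 8)

    assert calc_perms(stat.S_IFREG, '0644') == stat.S_IFREG | int('0644', 8)
    assert calc_perms(stat.S_IFDIR, '2755') == stat.S_IFDIR | int('2755', 8)
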
diff --git a/src/lib/Bcfg2/Client/Tools/POSIX/Device.py b/src/lib/Bcfg2/Client/Tools/POSIX/Device.py
new file mode 100644
index 000000000..0ea4128f7
--- /dev/null
+++ b/src/lib/Bcfg2/Client/Tools/POSIX/Device.py
@@ -0,0 +1,66 @@
+import os
+import sys
+try:
+ from base import POSIXTool, device_map
+except ImportError:
+ # py3k, incompatible syntax with py2.4
+ exec("from .base import POSIXTool, device_map")
+
+class POSIXDevice(POSIXTool):
+ __req__ = ['name', 'dev_type', 'perms', 'owner', 'group']
+
+ def fully_specified(self, entry):
+ if entry.get('dev_type') in ['block', 'char']:
+ # check if major/minor are properly specified
+ if (entry.get('major') == None or
+ entry.get('minor') == None):
+ return False
+ return True
+
+ def verify(self, entry, modlist):
+ """Verify device entry."""
+ ondisk = self._exists(entry)
+ if not ondisk:
+ return False
+
+ # attempt to verify device properties as specified in config
+ rv = True
+ dev_type = entry.get('dev_type')
+ if dev_type in ['block', 'char']:
+ major = int(entry.get('major'))
+ minor = int(entry.get('minor'))
+ if major != os.major(ondisk.st_rdev):
+ msg = ("Major number for device %s is incorrect. "
+ "Current major is %s but should be %s" %
+ (entry.get("name"), os.major(ondisk.st_rdev), major))
+ self.logger.debug('POSIX: ' + msg)
+ entry.set('qtext', entry.get('qtext', '') + "\n" + msg)
+ rv = False
+
+ if minor != os.minor(ondisk.st_rdev):
+ msg = ("Minor number for device %s is incorrect. "
+ "Current minor is %s but should be %s" %
+ (entry.get("name"), os.minor(ondisk.st_rdev), minor))
+ self.logger.debug('POSIX: ' + msg)
+ entry.set('qtext', entry.get('qtext', '') + "\n" + msg)
+ rv = False
+ return POSIXTool.verify(self, entry, modlist) and rv
+
+ def install(self, entry):
+ if not self._exists(entry, remove=True):
+ try:
+ dev_type = entry.get('dev_type')
+ mode = device_map[dev_type] | int(entry.get('perms'), 8)
+ if dev_type in ['block', 'char']:
+ major = int(entry.get('major'))
+ minor = int(entry.get('minor'))
+ device = os.makedev(major, minor)
+ os.mknod(entry.get('name'), mode, device)
+ else:
+ os.mknod(entry.get('name'), mode)
+ except (KeyError, OSError, ValueError):
+ err = sys.exc_info()[1]
+ self.logger.error('POSIX: Failed to install %s: %s' %
+ (entry.get('name'), err))
+ return False
+ return POSIXTool.install(self, entry)
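
The install path above composes the mknod mode directly from device_map and the octal perms string. A short sketch; the device path and major/minor numbers are illustrative, and mknod itself needs root, so the call is left commented out:

    # Sketch: building a character-device mode as POSIXDevice.install does.
    import os
    import stat

    device_map = {'block': stat.S_IFBLK,
                  'char': stat.S_IFCHR,
                  'fifo': stat.S_IFIFO}

    mode = device_map['char'] | int('0600', 8)
    # mknod(2) applies the process umask, so the later chmod performed by
    # POSIXTool._set_perms() is what guarantees the final permission bits.
    # os.mknod('/dev/fake0', mode, os.makedev(180, 0))
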
diff --git a/src/lib/Bcfg2/Client/Tools/POSIX/Directory.py b/src/lib/Bcfg2/Client/Tools/POSIX/Directory.py
new file mode 100644
index 000000000..d2d383f66
--- /dev/null
+++ b/src/lib/Bcfg2/Client/Tools/POSIX/Directory.py
@@ -0,0 +1,90 @@
+import os
+import sys
+import stat
+import shutil
+import Bcfg2.Client.XML
+try:
+ from base import POSIXTool
+except ImportError:
+ # py3k, incompatible syntax with py2.4
+ exec("from .base import POSIXTool")
+
+class POSIXDirectory(POSIXTool):
+ __req__ = ['name', 'perms', 'owner', 'group']
+
+ def verify(self, entry, modlist):
+ ondisk = self._exists(entry)
+ if not ondisk:
+ return False
+
+ if not stat.S_ISDIR(ondisk[stat.ST_MODE]):
+ self.logger.info("POSIX: %s is not a directory" % entry.get('name'))
+ return False
+
+ pruneTrue = True
+ if entry.get('prune', 'false').lower() == 'true':
+ # check for any extra entries when prune='true' attribute is set
+ try:
+ extras = [os.path.join(entry.get('name'), ent)
+ for ent in os.listdir(entry.get('name'))
+ if os.path.join(entry.get('name'),
+ ent) not in modlist]
+ if extras:
+ pruneTrue = False
+ msg = "Directory %s contains extra entries: %s" % \
+ (entry.get('name'), "; ".join(extras))
+ self.logger.info("POSIX: " + msg)
+ entry.set('qtext', entry.get('qtext', '') + '\n' + msg)
+ for extra in extras:
+ Bcfg2.Client.XML.SubElement(entry, 'Prune', path=extra)
+ except OSError:
+ pruneTrue = True
+
+ return POSIXTool.verify(self, entry, modlist) and pruneTrue
+
+ def install(self, entry):
+        """Install directory entries."""
+ fmode = self._exists(entry)
+
+ if fmode and not stat.S_ISDIR(fmode[stat.ST_MODE]):
+ self.logger.info("POSIX: Found a non-directory entry at %s, "
+ "removing" % entry.get('name'))
+ try:
+ os.unlink(entry.get('name'))
+ fmode = False
+ except OSError:
+ err = sys.exc_info()[1]
+ self.logger.error("POSIX: Failed to unlink %s: %s" %
+ (entry.get('name'), err))
+ return False
+ elif fmode:
+ self.logger.debug("POSIX: Found a pre-existing directory at %s" %
+ entry.get('name'))
+
+ rv = True
+ if not fmode:
+ rv &= self._makedirs(entry)
+
+ if entry.get('prune', 'false') == 'true':
+ ulfailed = False
+ for pent in entry.findall('Prune'):
+ pname = pent.get('path')
+ if os.path.isdir(pname):
+ rm = shutil.rmtree
+ else:
+ rm = os.unlink
+ try:
+ self.logger.debug("POSIX: Removing %s" % pname)
+ rm(pname)
+ except OSError:
+ err = sys.exc_info()[1]
+ self.logger.error("POSIX: Failed to unlink %s: %s" %
+ (pname, err))
+ ulfailed = True
+ if ulfailed:
+ # even if prune failed, we still want to install the
+ # entry to make sure that we get permissions and
+ # whatnot set
+ rv = False
+ return POSIXTool.install(self, entry) and rv
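
When prune='true', verify() flags every path directly under the directory that the configuration does not account for; stripped of the XML bookkeeping, the check is just a listdir/modlist comparison:

    # Sketch: finding unmanaged entries in a pruned directory.
    import os

    def find_extras(dirname, modlist):
        return [os.path.join(dirname, ent)
                for ent in os.listdir(dirname)
                if os.path.join(dirname, ent) not in modlist]

    # find_extras('/etc/cron.d', ['/etc/cron.d/bcfg2']) would return every
    # path under /etc/cron.d except the single managed entry.
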
diff --git a/src/lib/Bcfg2/Client/Tools/POSIX/File.py b/src/lib/Bcfg2/Client/Tools/POSIX/File.py
new file mode 100644
index 000000000..26550078e
--- /dev/null
+++ b/src/lib/Bcfg2/Client/Tools/POSIX/File.py
@@ -0,0 +1,225 @@
+import os
+import sys
+import stat
+import time
+import difflib
+import tempfile
+try:
+ from base import POSIXTool
+except ImportError:
+ # py3k, incompatible syntax with py2.4
+ exec("from .base import POSIXTool")
+from Bcfg2.Bcfg2Py3k import unicode, b64encode, b64decode
+
+class POSIXFile(POSIXTool):
+ __req__ = ['name', 'perms', 'owner', 'group']
+
+ def fully_specified(self, entry):
+ return entry.text is not None or entry.get('empty', 'false') == 'true'
+
+ def _is_string(self, strng, encoding):
+ """ Returns true if the string contains no ASCII control
+ characters and can be decoded from the specified encoding. """
+ for char in strng:
+ if ord(char) < 9 or ord(char) > 13 and ord(char) < 32:
+ return False
+ if not hasattr(strng, "decode"):
+ # py3k
+ return True
+ try:
+ strng.decode(encoding)
+ return True
+ except:
+ return False
+
+ def _get_data(self, entry):
+ is_binary = False
+ if entry.get('encoding', 'ascii') == 'base64':
+ tempdata = b64decode(entry.text)
+ is_binary = True
+
+ elif entry.get('empty', 'false') == 'true':
+ tempdata = ''
+ else:
+ tempdata = entry.text
+ if isinstance(tempdata, unicode) and unicode != str:
+ try:
+ tempdata = tempdata.encode(self.setup['encoding'])
+ except UnicodeEncodeError:
+ err = sys.exc_info()[1]
+ self.logger.error("POSIX: Error encoding file %s: %s" %
+ (entry.get('name'), err))
+ return (tempdata, is_binary)
+
+ def verify(self, entry, modlist):
+ ondisk = self._exists(entry)
+ tempdata, is_binary = self._get_data(entry)
+
+ different = False
+ content = None
+ if not ondisk:
+ # first, see if the target file exists at all; if not,
+ # they're clearly different
+ different = True
+ content = ""
+ elif len(tempdata) != ondisk[stat.ST_SIZE]:
+ # next, see if the size of the target file is different
+ # from the size of the desired content
+ different = True
+ else:
+ # finally, read in the target file and compare them
+ # directly. comparison could be done with a checksum,
+ # which might be faster for big binary files, but slower
+ # for everything else
+ try:
+ content = open(entry.get('name')).read()
+ except IOError:
+ self.logger.error("POSIX: Failed to read %s: %s" %
+ (entry.get("name"), sys.exc_info()[1]))
+ return False
+ different = content != tempdata
+
+ if different:
+ self.logger.debug("POSIX: %s has incorrect contents" %
+ entry.get("name"))
+ self._get_diffs(
+ entry, interactive=self.setup['interactive'],
+ sensitive=entry.get('sensitive', 'false').lower() == 'true',
+ is_binary=is_binary, content=content)
+ return POSIXTool.verify(self, entry, modlist) and not different
+
+ def _write_tmpfile(self, entry):
+ filedata, _ = self._get_data(entry)
+ # get a temp file to write to that is in the same directory as
+ # the existing file in order to preserve any permissions
+ # protections on that directory, and also to avoid issues with
+ # /tmp set nosetuid while creating files that are supposed to
+ # be setuid
+ try:
+ (newfd, newfile) = \
+ tempfile.mkstemp(prefix=os.path.basename(entry.get("name")),
+ dir=os.path.dirname(entry.get("name")))
+ except OSError:
+ err = sys.exc_info()[1]
+ self.logger.error("POSIX: Failed to create temp file in %s: %s" %
+ (os.path.dirname(entry.get('name')), err))
+ return False
+ try:
+ os.fdopen(newfd, 'w').write(filedata)
+ except (OSError, IOError):
+ err = sys.exc_info()[1]
+ self.logger.error("POSIX: Failed to open temp file %s for writing "
+ "%s: %s" %
+ (newfile, entry.get("name"), err))
+ return False
+ return newfile
+
+ def _rename_tmpfile(self, newfile, entry):
+ try:
+ os.rename(newfile, entry.get('name'))
+ return True
+ except OSError:
+ err = sys.exc_info()[1]
+ self.logger.error("POSIX: Failed to rename temp file %s to %s: %s" %
+ (newfile, entry.get('name'), err))
+ try:
+ os.unlink(newfile)
+ except:
+ err = sys.exc_info()[1]
+ self.logger.error("POSIX: Could not remove temp file %s: %s" %
+ (newfile, err))
+ return False
+
+ def install(self, entry):
+        """Install file entries."""
+ if not os.path.exists(os.path.dirname(entry.get('name'))):
+ if not self._makedirs(entry,
+ path=os.path.dirname(entry.get('name'))):
+ return False
+ newfile = self._write_tmpfile(entry)
+ if not newfile:
+ return False
+ rv = self._set_perms(entry, path=newfile)
+ if not self._rename_tmpfile(newfile, entry):
+ return False
+
+ return POSIXTool.install(self, entry) and rv
+
+ def _get_diffs(self, entry, interactive=False, sensitive=False,
+ is_binary=False, content=None):
+ if not interactive and sensitive:
+ return
+
+ prompt = [entry.get('qtext', '')]
+ attrs = dict()
+ if content is None:
+ # it's possible that we figured out the files are
+ # different without reading in the local file. if the
+ # supplied version of the file is not binary, we now have
+ # to read in the local file to figure out if _it_ is
+ # binary, and either include that fact or the diff in our
+ # prompts for -I and the reports
+ try:
+ content = open(entry.get('name')).read()
+ except IOError:
+ self.logger.error("POSIX: Failed to read %s: %s" %
+ (entry.get("name"), sys.exc_info()[1]))
+ return False
+ if not is_binary:
+ is_binary |= not self._is_string(content, self.setup['encoding'])
+ if is_binary:
+ # don't compute diffs if the file is binary
+ prompt.append('Binary file, no printable diff')
+ attrs['current_bfile'] = b64encode(content)
+ else:
+ if interactive:
+ diff = self._diff(content, self._get_data(entry)[0],
+ difflib.unified_diff,
+ filename=entry.get("name"))
+ if diff:
+                    udiff = '\n'.join(diff)
+ if hasattr(udiff, "decode"):
+ udiff = udiff.decode(self.setup['encoding'])
+ try:
+ prompt.append(udiff)
+ except UnicodeEncodeError:
+ prompt.append("Could not encode diff")
+ else:
+ prompt.append("Diff took too long to compute, no "
+ "printable diff")
+ if not sensitive:
+ diff = self._diff(content, self._get_data(entry)[0],
+ difflib.ndiff, filename=entry.get("name"))
+ if diff:
+ attrs["current_bdiff"] = b64encode("\n".join(diff))
+ else:
+ attrs['current_bfile'] = b64encode(content)
+ if interactive:
+ entry.set("qtext", "\n".join(prompt))
+ if not sensitive:
+ for attr, val in attrs.items():
+ entry.set(attr, val)
+
+ def _diff(self, content1, content2, difffunc, filename=None):
+ rv = []
+ start = time.time()
+ longtime = False
+ for diffline in difffunc(content1.split('\n'),
+ content2.split('\n')):
+ now = time.time()
+ rv.append(diffline)
+ if now - start > 5 and not longtime:
+ if filename:
+ self.logger.info("POSIX: Diff of %s taking a long time" %
+ filename)
+ else:
+ self.logger.info("POSIX: Diff taking a long time")
+ longtime = True
+ elif now - start > 30:
+ if filename:
+ self.logger.error("POSIX: Diff of %s took too long; giving "
+ "up" % filename)
+ else:
+ self.logger.error("POSIX: Diff took too long; giving up")
+ return False
+ return rv
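
File installation above follows a write-to-temp-then-rename pattern: the temp file is created in the target's own directory (preserving that directory's protections and avoiding a restricted /tmp), and the final rename is atomic on the same filesystem. A condensed sketch:

    # Sketch: atomic replacement via a sibling temp file.
    import os
    import tempfile

    def atomic_write(path, data):
        fd, tmp = tempfile.mkstemp(prefix=os.path.basename(path),
                                   dir=os.path.dirname(path))
        try:
            tmpfile = os.fdopen(fd, 'w')
            tmpfile.write(data)
            tmpfile.close()
            os.rename(tmp, path)   # atomic when tmp and path share a filesystem
        except (OSError, IOError):
            os.unlink(tmp)
            raise

    # atomic_write('/tmp/example.conf', 'managed by bcfg2\n')
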
diff --git a/src/lib/Bcfg2/Client/Tools/POSIX/Hardlink.py b/src/lib/Bcfg2/Client/Tools/POSIX/Hardlink.py
new file mode 100644
index 000000000..ca7a23717
--- /dev/null
+++ b/src/lib/Bcfg2/Client/Tools/POSIX/Hardlink.py
@@ -0,0 +1,43 @@
+import os
+import sys
+try:
+ from base import POSIXTool
+except ImportError:
+ # py3k, incompatible syntax with py2.4
+ exec("from .base import POSIXTool")
+
+class POSIXHardlink(POSIXTool):
+ __req__ = ['name', 'to']
+
+ def verify(self, entry, modlist):
+ rv = True
+
+ try:
+ if not os.path.samefile(entry.get('name'), entry.get('to')):
+ msg = "Hardlink %s is incorrect" % entry.get('name')
+ self.logger.debug("POSIX: " + msg)
+ entry.set('qtext', "\n".join([entry.get('qtext', ''), msg]))
+ rv = False
+ except OSError:
+ self.logger.debug("POSIX: %s %s does not exist" %
+ (entry.tag, entry.get("name")))
+ entry.set('current_exists', 'false')
+ return False
+
+ return POSIXTool.verify(self, entry, modlist) and rv
+
+ def install(self, entry):
+ ondisk = self._exists(entry, remove=True)
+ if ondisk:
+ self.logger.info("POSIX: Hardlink %s cleanup failed" %
+ entry.get('name'))
+ try:
+ os.link(entry.get('to'), entry.get('name'))
+ rv = True
+ except OSError:
+ err = sys.exc_info()[1]
+ self.logger.error("POSIX: Failed to create hardlink %s to %s: %s" %
+ (entry.get('name'), entry.get('to'), err))
+ rv = False
+ return POSIXTool.install(self, entry) and rv
+
diff --git a/src/lib/Bcfg2/Client/Tools/POSIX/Nonexistent.py b/src/lib/Bcfg2/Client/Tools/POSIX/Nonexistent.py
new file mode 100644
index 000000000..c870ca0ed
--- /dev/null
+++ b/src/lib/Bcfg2/Client/Tools/POSIX/Nonexistent.py
@@ -0,0 +1,45 @@
+import os
+import sys
+import shutil
+try:
+ from base import POSIXTool
+except ImportError:
+ # py3k, incompatible syntax with py2.4
+ exec("from .base import POSIXTool")
+
+class POSIXNonexistent(POSIXTool):
+ __req__ = ['name']
+
+ def verify(self, entry, _):
+ if os.path.lexists(entry.get('name')):
+ self.logger.debug("POSIX: %s exists but should not" %
+ entry.get("name"))
+ return False
+ return True
+
+ def install(self, entry):
+ ename = entry.get('name')
+ if entry.get('recursive', '').lower() == 'true':
+ # ensure that configuration spec is consistent first
+ for struct in self.config.getchildren():
+ for entry in struct.getchildren():
+ if (entry.tag == 'Path' and
+ entry.get('type') != 'nonexistent' and
+ entry.get('name').startswith(ename)):
+ self.logger.error('POSIX: Not removing %s. One or '
+ 'more files in this directory are '
+ 'specified in your configuration.' %
+ ename)
+ return False
+ rm = shutil.rmtree
+ elif os.path.isdir(ename):
+ rm = os.rmdir
+ else:
+ rm = os.remove
+ try:
+ rm(ename)
+ return True
+ except OSError:
+ err = sys.exc_info()[1]
+ self.logger.error('POSIX: Failed to remove %s: %s' % (ename, err))
+ return False
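
The recursive branch above refuses to rmtree a directory when any other managed Path lies beneath it; stripped of the XML walking, the guard reduces to:

    # Sketch: only allow recursive removal when nothing else managed lives
    # under the target path.
    def safe_to_remove(ename, managed_paths):
        return not any(p.startswith(ename) and p != ename
                       for p in managed_paths)

    assert not safe_to_remove('/etc/foo', ['/etc/foo/bar.conf'])
    assert safe_to_remove('/etc/foo', ['/etc/issue'])
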
diff --git a/src/lib/Bcfg2/Client/Tools/POSIX/Permissions.py b/src/lib/Bcfg2/Client/Tools/POSIX/Permissions.py
new file mode 100644
index 000000000..321376b98
--- /dev/null
+++ b/src/lib/Bcfg2/Client/Tools/POSIX/Permissions.py
@@ -0,0 +1,11 @@
+import os
+import sys
+try:
+ from base import POSIXTool
+except ImportError:
+ # py3k, incompatible syntax with py2.4
+ exec("from .base import POSIXTool")
+
+class POSIXPermissions(POSIXTool):
+ __req__ = ['name', 'perms', 'owner', 'group']
+
diff --git a/src/lib/Bcfg2/Client/Tools/POSIX/Symlink.py b/src/lib/Bcfg2/Client/Tools/POSIX/Symlink.py
new file mode 100644
index 000000000..fb303bdbe
--- /dev/null
+++ b/src/lib/Bcfg2/Client/Tools/POSIX/Symlink.py
@@ -0,0 +1,46 @@
+import os
+import sys
+try:
+ from base import POSIXTool
+except ImportError:
+ # py3k, incompatible syntax with py2.4
+ exec("from .base import POSIXTool")
+
+class POSIXSymlink(POSIXTool):
+ __req__ = ['name', 'to']
+
+ def verify(self, entry, modlist):
+ rv = True
+
+ try:
+ sloc = os.readlink(entry.get('name'))
+ if sloc != entry.get('to'):
+ entry.set('current_to', sloc)
+ msg = ("Symlink %s points to %s, should be %s" %
+ (entry.get('name'), sloc, entry.get('to')))
+ self.logger.debug("POSIX: " + msg)
+ entry.set('qtext', "\n".join([entry.get('qtext', ''), msg]))
+ rv = False
+ except OSError:
+ self.logger.debug("POSIX: %s %s does not exist" %
+ (entry.tag, entry.get("name")))
+ entry.set('current_exists', 'false')
+ return False
+
+ return POSIXTool.verify(self, entry, modlist) and rv
+
+ def install(self, entry):
+ ondisk = self._exists(entry, remove=True)
+ if ondisk:
+ self.logger.info("POSIX: Symlink %s cleanup failed" %
+ entry.get('name'))
+ try:
+ os.symlink(entry.get('to'), entry.get('name'))
+ rv = True
+ except OSError:
+ err = sys.exc_info()[1]
+ self.logger.error("POSIX: Failed to create symlink %s to %s: %s" %
+ (entry.get('name'), entry.get('to'), err))
+ rv = False
+ return POSIXTool.install(self, entry) and rv
+
diff --git a/src/lib/Bcfg2/Client/Tools/POSIX/__init__.py b/src/lib/Bcfg2/Client/Tools/POSIX/__init__.py
new file mode 100644
index 000000000..46631eb06
--- /dev/null
+++ b/src/lib/Bcfg2/Client/Tools/POSIX/__init__.py
@@ -0,0 +1,151 @@
+"""All POSIX Type client support for Bcfg2."""
+
+import os
+import re
+import sys
+import shutil
+import pkgutil
+from datetime import datetime
+import Bcfg2.Client.Tools
+try:
+ from base import POSIXTool
+except ImportError:
+ # py3k, incompatible syntax with py2.4
+ exec("from .base import POSIXTool")
+
+class POSIX(Bcfg2.Client.Tools.Tool):
+ """POSIX File support code."""
+ name = 'POSIX'
+
+ def __init__(self, logger, setup, config):
+ Bcfg2.Client.Tools.Tool.__init__(self, logger, setup, config)
+ self.ppath = setup['ppath']
+ self.max_copies = setup['max_copies']
+ self._load_handlers()
+ self.logger.debug("POSIX: Handlers loaded: %s" %
+ (", ".join(self._handlers.keys())))
+ self.__req__ = dict(Path=dict())
+ for etype, hdlr in self._handlers.items():
+ self.__req__['Path'][etype] = hdlr.__req__
+ self.__handles__.append(('Path', etype))
+ # Tool.__init__() sets up the list of handled entries, but we
+ # need to do it again after __handles__ has been populated. we
+ # can't populate __handles__ when the class is created because
+ # _load_handlers() _must_ be called at run-time, not at
+ # compile-time.
+        self.handled = [e for struct in config
+                        for e in struct
+                        if self.handlesEntry(e)]
+
+ def _load_handlers(self):
+ # this must be called at run-time, not at compile-time, or we
+        # get weird circular import issues.
+ self._handlers = dict()
+ if hasattr(pkgutil, 'walk_packages'):
+ submodules = pkgutil.walk_packages(path=__path__)
+ else:
+ # python 2.4
+ import glob
+ submodules = []
+ for path in __path__:
+ for submodule in glob.glob(os.path.join(path, "*.py")):
+ mod = os.path.splitext(os.path.basename(submodule))[0]
+ if mod not in ['__init__']:
+ submodules.append((None, mod, True))
+
+ for submodule in submodules:
+ if submodule[1] == 'base':
+ continue
+ module = getattr(__import__("%s.%s" %
+ (__name__,
+ submodule[1])).Client.Tools.POSIX,
+ submodule[1])
+ hdlr = getattr(module, "POSIX" + submodule[1])
+ if POSIXTool in hdlr.__mro__:
+ # figure out what entry type this handler handles
+ etype = hdlr.__name__[5:].lower()
+ self._handlers[etype] = hdlr(self.logger,
+ self.setup,
+ self.config)
+
+ def canVerify(self, entry):
+ if not Bcfg2.Client.Tools.Tool.canVerify(self, entry):
+ return False
+ if not self._handlers[entry.get("type")].fully_specified(entry):
+ self.logger.error('POSIX: Cannot verify incomplete entry %s. '
+ 'Try running bcfg2-lint.' %
+ entry.get('name'))
+ return False
+ return True
+
+ def canInstall(self, entry):
+ """Check if entry is complete for installation."""
+ if not Bcfg2.Client.Tools.Tool.canInstall(self, entry):
+ return False
+ if not self._handlers[entry.get("type")].fully_specified(entry):
+ self.logger.error('POSIX: Cannot install incomplete entry %s. '
+ 'Try running bcfg2-lint.' %
+ entry.get('name'))
+ return False
+ return True
+
+ def InstallPath(self, entry):
+ """Dispatch install to the proper method according to type"""
+ self.logger.debug("POSIX: Installing entry %s:%s:%s" %
+ (entry.tag, entry.get("type"), entry.get("name")))
+ self._paranoid_backup(entry)
+ return self._handlers[entry.get("type")].install(entry)
+
+ def VerifyPath(self, entry, modlist):
+ """Dispatch verify to the proper method according to type"""
+ self.logger.debug("POSIX: Verifying entry %s:%s:%s" %
+ (entry.tag, entry.get("type"), entry.get("name")))
+ ret = self._handlers[entry.get("type")].verify(entry, modlist)
+ if self.setup['interactive'] and not ret:
+ entry.set('qtext',
+ '%s\nInstall %s %s: (y/N) ' %
+ (entry.get('qtext', ''),
+ entry.get('type'), entry.get('name')))
+ return ret
+
+ def _prune_old_backups(self, entry):
+ bkupnam = entry.get('name').replace('/', '_')
+ bkup_re = re.compile(bkupnam + \
+ r'_\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}.\d{6}$')
+ # current list of backups for this file
+ try:
+ bkuplist = [f for f in os.listdir(self.ppath) if
+ bkup_re.match(f)]
+ except OSError:
+ err = sys.exc_info()[1]
+ self.logger.error("POSIX: Failed to create backup list in %s: %s" %
+ (self.ppath, err))
+ return
+ bkuplist.sort()
+ while len(bkuplist) >= int(self.max_copies):
+ # remove the oldest backup available
+ oldest = bkuplist.pop(0)
+ self.logger.info("POSIX: Removing old backup %s" % oldest)
+ try:
+ os.remove(os.path.join(self.ppath, oldest))
+ except OSError:
+ err = sys.exc_info()[1]
+ self.logger.error("POSIX: Failed to remove old backup %s: %s" %
+ (os.path.join(self.ppath, oldest), err))
+
+ def _paranoid_backup(self, entry):
+ if (entry.get("paranoid", 'false').lower() == 'true' and
+ self.setup.get("paranoid", False) and
+ entry.get('current_exists', 'true') == 'true' and
+ not os.path.isdir(entry.get("name"))):
+ self._prune_old_backups(entry)
+ bkupnam = "%s_%s" % (entry.get('name').replace('/', '_'),
+ datetime.isoformat(datetime.now()))
+ bfile = os.path.join(self.ppath, bkupnam)
+ try:
+ shutil.copy(entry.get('name'), bfile)
+ self.logger.info("POSIX: Backup of %s saved to %s" %
+ (entry.get('name'), bfile))
+ except IOError:
+ err = sys.exc_info()[1]
+ self.logger.error("POSIX: Failed to create backup file for %s: "
+ "%s" % (entry.get('name'), err))
diff --git a/src/lib/Bcfg2/Client/Tools/POSIX/base.py b/src/lib/Bcfg2/Client/Tools/POSIX/base.py
new file mode 100644
index 000000000..6952d0f7b
--- /dev/null
+++ b/src/lib/Bcfg2/Client/Tools/POSIX/base.py
@@ -0,0 +1,642 @@
+import os
+import sys
+import pwd
+import grp
+import stat
+import shutil
+import Bcfg2.Client.Tools
+import Bcfg2.Client.XML
+
+try:
+ import selinux
+ has_selinux = True
+except ImportError:
+ has_selinux = False
+
+try:
+ import posix1e
+ has_acls = True
+
+ # map between permissions characters and numeric ACL constants
+ acl_map = dict(r=posix1e.ACL_READ,
+ w=posix1e.ACL_WRITE,
+ x=posix1e.ACL_EXECUTE)
+except ImportError:
+ has_acls = False
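+    # fall back to the plain octal rwx bit values (r=4, w=2, x=1) so
+    # acl_map is always defined; actual ACL handling is skipped below
+    # whenever has_acls is False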
+ acl_map = dict(r=4, w=2, x=1)
+
+# map between dev_type attribute and stat constants
+device_map = dict(block=stat.S_IFBLK,
+ char=stat.S_IFCHR,
+ fifo=stat.S_IFIFO)
+
+
+class POSIXTool(Bcfg2.Client.Tools.Tool):
+ def fully_specified(self, entry):
+ # checking is done by __req__
+ return True
+
+ def verify(self, entry, modlist):
+ if not self._verify_metadata(entry):
+ return False
+ if entry.get('recursive', 'false').lower() == 'true':
+ # verify ownership information recursively
+ for root, dirs, files in os.walk(entry.get('name')):
+ for p in dirs + files:
+ if not self._verify_metadata(entry,
+ path=os.path.join(root, p)):
+ return False
+ return True
+
+ def install(self, entry):
+ plist = [entry.get('name')]
+ rv = True
+ rv &= self._set_perms(entry)
+ if entry.get('recursive', 'false').lower() == 'true':
+ # set metadata recursively
+ for root, dirs, files in os.walk(entry.get('name')):
+ for path in dirs + files:
+ rv &= self._set_perms(entry, path=os.path.join(root, path))
+ return rv
+
+ def _exists(self, entry, remove=False):
+ try:
+ # check for existing paths and optionally remove them
+ ondisk = os.lstat(entry.get('name'))
+ if remove:
+ if os.path.isdir(entry.get('name')):
+ rm = shutil.rmtree
+ else:
+ rm = os.unlink
+ try:
+ rm(entry.get('name'))
+ return False
+ except OSError:
+ err = sys.exc_info()[1]
+ self.logger.warning('POSIX: Failed to unlink %s: %s' %
+ (entry.get('name'), err))
+ return ondisk # probably still exists
+ else:
+ return ondisk
+ except OSError:
+ return False
+
+ def _set_perms(self, entry, path=None):
+ if path is None:
+ path = entry.get("name")
+
+ rv = True
+ if entry.get("owner") and entry.get("group"):
+ try:
+ self.logger.debug("POSIX: Setting ownership of %s to %s:%s" %
+ (path,
+ self._norm_entry_uid(entry),
+ self._norm_entry_gid(entry)))
+ os.chown(path, self._norm_entry_uid(entry),
+ self._norm_entry_gid(entry))
+ except KeyError:
+ self.logger.error('POSIX: Failed to change ownership of %s' %
+ path)
+ rv = False
+ os.chown(path, 0, 0)
+ except OSError:
+ self.logger.error('POSIX: Failed to change ownership of %s' %
+ path)
+ rv = False
+
+ if entry.get("perms"):
+ configPerms = int(entry.get('perms'), 8)
+ if entry.get('dev_type'):
+ configPerms |= device_map[entry.get('dev_type')]
+ try:
+ self.logger.debug("POSIX: Setting permissions on %s to %s" %
+ (path, oct(configPerms)))
+ os.chmod(path, configPerms)
+ except (OSError, KeyError):
+ self.logger.error('POSIX: Failed to change permissions on %s' %
+ path)
+ rv = False
+
+ if entry.get('mtime'):
+ try:
+ os.utime(entry.get('name'), (int(entry.get('mtime')),
+ int(entry.get('mtime'))))
+ except OSError:
+ self.logger.error("POSIX: Failed to set mtime of %s" % path)
+ rv = False
+
+ rv &= self._set_secontext(entry, path=path)
+ rv &= self._set_acls(entry, path=path)
+ return rv
+
+
+ def _set_acls(self, entry, path=None):
+ """ set POSIX ACLs on the file on disk according to the config """
+ if not has_acls:
+ if entry.findall("ACL"):
+ self.logger.debug("POSIX: ACLs listed for %s but no pylibacl "
+ "library installed" % entry.get('name'))
+ return True
+
+ if path is None:
+ path = entry.get("name")
+
+ try:
+ acl = posix1e.ACL(file=path)
+ except IOError:
+ err = sys.exc_info()[1]
+ if err.errno == 95:
+ # fs is mounted noacl
+ self.logger.error("POSIX: Cannot set ACLs on filesystem "
+ "mounted without ACL support: %s" % path)
+ else:
+ self.logger.error("POSIX: Error getting current ACLS on %s: %s"
+ % (path, err))
+ return False
+ # clear ACLs out so we start fresh -- way easier than trying
+ # to add/remove/modify ACLs
+ for aclentry in acl:
+ if aclentry.tag_type in [posix1e.ACL_USER, posix1e.ACL_GROUP]:
+ acl.delete_entry(aclentry)
+ if os.path.isdir(path):
+ defacl = posix1e.ACL(filedef=path)
+ if not defacl.valid():
+ # when a default ACL is queried on a directory that
+ # has no default ACL entries at all, you get an empty
+ # ACL, which is not valid. in this circumstance, we
+ # just copy the access ACL to get a base valid ACL
+ # that we can add things to.
+ defacl = posix1e.ACL(acl=acl)
+ else:
+ for aclentry in defacl:
+ if aclentry.tag_type in [posix1e.ACL_USER,
+ posix1e.ACL_GROUP]:
+ defacl.delete_entry(aclentry)
+ else:
+ defacl = None
+
+ for aclkey, perms in self._list_entry_acls(entry).items():
+ atype, scope, qualifier = aclkey
+ if atype == "default":
+ if defacl is None:
+ self.logger.warning("POSIX: Cannot set default ACLs on "
+ "non-directory %s" % path)
+ continue
+ entry = posix1e.Entry(defacl)
+ else:
+ entry = posix1e.Entry(acl)
+ for perm in acl_map.values():
+ if perm & perms:
+ entry.permset.add(perm)
+ entry.tag_type = scope
+ try:
+ if scope == posix1e.ACL_USER:
+ scopename = "user"
+ entry.qualifier = self._norm_uid(qualifier)
+ elif scope == posix1e.ACL_GROUP:
+ scopename = "group"
+ entry.qualifier = self._norm_gid(qualifier)
+ except (OSError, KeyError):
+ err = sys.exc_info()[1]
+ self.logger.error("POSIX: Could not resolve %s %s: %s" %
+ (scopename, qualifier, err))
+ continue
+ acl.calc_mask()
+
+ def _apply_acl(acl, path, atype=posix1e.ACL_TYPE_ACCESS):
+ if atype == posix1e.ACL_TYPE_ACCESS:
+ atype_str = "access"
+ else:
+ atype_str = "default"
+ if acl.valid():
+ self.logger.debug("POSIX: Applying %s ACL to %s:" % (atype_str,
+ path))
+ for line in str(acl).splitlines():
+ self.logger.debug(" " + line)
+ try:
+ acl.applyto(path, atype)
+ return True
+ except:
+ err = sys.exc_info()[1]
+ self.logger.error("POSIX: Failed to set ACLs on %s: %s" %
+ (path, err))
+ return False
+ else:
+ self.logger.warning("POSIX: %s ACL created for %s was invalid:"
+ % (atype_str.title(), path))
+ for line in str(acl).splitlines():
+ self.logger.warning(" " + line)
+ return False
+
+ rv = _apply_acl(acl, path)
+ if defacl:
+ defacl.calc_mask()
+ rv &= _apply_acl(defacl, path, posix1e.ACL_TYPE_DEFAULT)
+ return rv
+
+ def _set_secontext(self, entry, path=None):
+ """ set the SELinux context of the file on disk according to the
+ config"""
+ if not has_selinux:
+ return True
+
+ if path is None:
+ path = entry.get("name")
+ context = entry.get("secontext")
+ if context is None:
+ # no context listed
+ return True
+
+ if context == '__default__':
+ try:
+ selinux.restorecon(path)
+ rv = True
+ except:
+ err = sys.exc_info()[1]
+ self.logger.error("POSIX: Failed to restore SELinux context "
+ "for %s: %s" % (path, err))
+ rv = False
+ else:
+ try:
+ rv = selinux.lsetfilecon(path, context) == 0
+ except:
+ err = sys.exc_info()[1]
+ self.logger.error("POSIX: Failed to restore SELinux context "
+ "for %s: %s" % (path, err))
+ rv = False
+ return rv
+
+ def _norm_gid(self, gid):
+ """ This takes a group name or gid and returns the
+ corresponding gid. """
+ try:
+ return int(gid)
+ except ValueError:
+ return int(grp.getgrnam(gid)[2])
+
+ def _norm_entry_gid(self, entry):
+ try:
+ return self._norm_gid(entry.get('group'))
+ except (OSError, KeyError):
+ err = sys.exc_info()[1]
+ self.logger.error('POSIX: GID normalization failed for %s on %s: %s'
+ % (entry.get('group'), entry.get('name'), err))
+ return 0
+
+ def _norm_uid(self, uid):
+ """ This takes a username or uid and returns the
+ corresponding uid. """
+ try:
+ return int(uid)
+ except ValueError:
+ return int(pwd.getpwnam(uid)[2])
+
+ def _norm_entry_uid(self, entry):
+ try:
+ return self._norm_uid(entry.get("owner"))
+ except (OSError, KeyError):
+ err = sys.exc_info()[1]
+ self.logger.error('POSIX: UID normalization failed for %s on %s: %s'
+ % (entry.get('owner'), entry.get('name'), err))
+ return 0
+
+ def _norm_acl_perms(self, perms):
+ """ takes a representation of an ACL permset and returns a digit
+ representing the permissions entailed by it. representations can
+ either be a single octal digit, a string of up to three 'r',
+ 'w', 'x', or '-' characters, or a posix1e.Permset object"""
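+        # e.g., '6', 'rw', 'rw-', and a Permset with read and write
+        # set all normalize to 6 (ACL_READ | ACL_WRITE)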
+ if hasattr(perms, 'test'):
+ # Permset object
+ return sum([p for p in acl_map.values()
+ if perms.test(p)])
+
+ try:
+ # single octal digit
+ rv = int(perms)
+ if rv > 0 and rv < 8:
+ return rv
+ else:
+ self.logger.error("POSIX: Permissions digit out of range in "
+ "ACL: %s" % perms)
+ return 0
+ except ValueError:
+ # couldn't be converted to an int; process as a string
+ if len(perms) > 3:
+ self.logger.error("POSIX: Permissions string too long in ACL: "
+ "%s" % perms)
+ return 0
+ rv = 0
+ for char in perms:
+ if char == '-':
+ continue
+ elif char not in acl_map:
+ self.logger.warning("POSIX: Unknown permissions character "
+ "in ACL: %s" % char)
+ elif rv & acl_map[char]:
+ self.logger.warning("POSIX: Duplicate permissions "
+ "character in ACL: %s" % perms)
+ else:
+ rv |= acl_map[char]
+ return rv
+
+ def _acl2string(self, aclkey, perms):
+ atype, scope, qualifier = aclkey
+ acl_str = []
+ if atype == 'default':
+ acl_str.append(atype)
+ if scope == posix1e.ACL_USER:
+ acl_str.append("user")
+ elif scope == posix1e.ACL_GROUP:
+ acl_str.append("group")
+ acl_str.append(qualifier)
+ acl_str.append(self._acl_perm2string(perms))
+ return ":".join(acl_str)
+
+ def _acl_perm2string(self, perm):
+ rv = []
+ for char in 'rwx':
+ if acl_map[char] & perm:
+ rv.append(char)
+ else:
+ rv.append('-')
+ return ''.join(rv)
+
+ def _gather_data(self, path):
+ try:
+ ondisk = os.stat(path)
+ except OSError:
+ self.logger.debug("POSIX: %s does not exist" % path)
+ return (False, None, None, None, None, None)
+
+ try:
+ owner = str(ondisk[stat.ST_UID])
+ except OSError:
+ err = sys.exc_info()[1]
+ self.logger.debug("POSIX: Could not get current owner of %s: %s" %
+ (path, err))
+ owner = None
+ except KeyError:
+ self.logger.error('POSIX: User resolution failed for %s' % path)
+ owner = None
+
+ try:
+ group = str(ondisk[stat.ST_GID])
+        except OSError:
+ err = sys.exc_info()[1]
+ self.logger.debug("POSIX: Could not get current group of %s: %s" %
+ (path, err))
+ group = None
+ except KeyError:
+ self.logger.error('POSIX: Group resolution failed for %s' % path)
+ group = None
+
+ try:
+ perms = oct(ondisk[stat.ST_MODE])[-4:]
+ except (OSError, KeyError, TypeError):
+ err = sys.exc_info()[1]
+ self.logger.debug("POSIX: Could not get current permissions of %s: "
+ "%s" % (path, err))
+ perms = None
+
+ if has_selinux:
+ try:
+ secontext = selinux.getfilecon(path)[1].split(":")[2]
+ except (OSError, KeyError):
+ err = sys.exc_info()[1]
+ self.logger.debug("POSIX: Could not get current SELinux "
+ "context of %s: %s" % (path, err))
+ secontext = None
+ else:
+ secontext = None
+
+ if has_acls:
+ acls = self._list_file_acls(path)
+ else:
+ acls = None
+ return (ondisk, owner, group, perms, secontext, acls)
+
+ def _verify_metadata(self, entry, path=None):
+ """ generic method to verify perms, owner, group, secontext, acls,
+ and mtime """
+ # allow setting an alternate path for recursive permissions checking
+ if path is None:
+ path = entry.get('name')
+ attrib = dict()
+ ondisk, attrib['current_owner'], attrib['current_group'], \
+ attrib['current_perms'], attrib['current_secontext'], acls = \
+ self._gather_data(path)
+
+ if not ondisk:
+ entry.set('current_exists', 'false')
+ return False
+
+ # we conditionally verify every bit of metadata only if it's
+ # specified on the entry. consequently, canVerify() and
+ # fully_specified() are preconditions of _verify_metadata(),
+ # since they will ensure that everything that needs to be
+ # specified actually is. this lets us gracefully handle
+ # symlink and hardlink entries, which have SELinux contexts
+ # but not other permissions, optional secontext and mtime
+ # attrs, and so on.
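+        # e.g., a symlink entry carries no "perms" attribute, so
+        # configPerms stays None and the permissions comparison below
+        # is skipped for it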
+ configOwner, configGroup, configPerms, mtime = None, None, None, -1
+ if entry.get('mtime', '-1') != '-1':
+ mtime = str(ondisk[stat.ST_MTIME])
+ if entry.get("owner"):
+ configOwner = str(self._norm_entry_uid(entry))
+ if entry.get("group"):
+ configGroup = str(self._norm_entry_gid(entry))
+ if entry.get("perms"):
+ while len(entry.get('perms', '')) < 4:
+ entry.set('perms', '0' + entry.get('perms', ''))
+ configPerms = int(entry.get('perms'), 8)
+
+ errors = []
+ if configOwner and attrib['current_owner'] != configOwner:
+ errors.append("Owner for path %s is incorrect. "
+ "Current owner is %s but should be %s" %
+ (path, attrib['current_owner'], entry.get('owner')))
+
+ if configGroup and attrib['current_group'] != configGroup:
+ errors.append("Group for path %s is incorrect. "
+ "Current group is %s but should be %s" %
+ (path, attrib['current_group'], entry.get('group')))
+
+ if (configPerms and
+ oct(int(attrib['current_perms'], 8)) != oct(configPerms)):
+ errors.append("Permissions for path %s are incorrect. "
+ "Current permissions are %s but should be %s" %
+ (path, attrib['current_perms'], entry.get('perms')))
+
+ if entry.get('mtime'):
+ attrib['current_mtime'] = mtime
+ if mtime != entry.get('mtime', '-1'):
+ errors.append("mtime for path %s is incorrect. "
+ "Current mtime is %s but should be %s" %
+ (path, mtime, entry.get('mtime')))
+
+ if has_selinux and entry.get("secontext"):
+ if entry.get("secontext") == "__default__":
+ configContext = selinux.matchpathcon(path, 0)[1].split(":")[2]
+ else:
+ configContext = entry.get("secontext")
+ if attrib['current_secontext'] != configContext:
+ errors.append("SELinux context for path %s is incorrect. "
+ "Current context is %s but should be %s" %
+ (path, attrib['current_secontext'],
+ configContext))
+
+ if errors:
+ for error in errors:
+ self.logger.debug("POSIX: " + error)
+ entry.set('qtext', "\n".join([entry.get('qtext', '')] + errors))
+ if path == entry.get("name"):
+ for attr, val in attrib.items():
+ if val is not None:
+ entry.set(attr, str(val))
+
+ aclVerifies = self._verify_acls(entry, path=path)
+ return aclVerifies and len(errors) == 0
+
+ def _list_entry_acls(self, entry):
+ wanted = dict()
+ for acl in entry.findall("ACL"):
+ if acl.get("scope") == "user":
+ scope = posix1e.ACL_USER
+ elif acl.get("scope") == "group":
+ scope = posix1e.ACL_GROUP
+ else:
+ self.logger.error("POSIX: Unknown ACL scope %s" %
+ acl.get("scope"))
+ continue
+ wanted[(acl.get("type"), scope, acl.get(acl.get("scope")))] = \
+ self._norm_acl_perms(acl.get('perms'))
+ return wanted
+
+ def _list_file_acls(self, path):
+ def _process_acl(acl, atype):
+ try:
+                if acl.tag_type == posix1e.ACL_USER:
+                    scope = "user"
+                    qual = pwd.getpwuid(acl.qualifier)[0]
+                elif acl.tag_type == posix1e.ACL_GROUP:
+                    scope = "group"
+                    qual = grp.getgrgid(acl.qualifier)[0]
+ else:
+ return
+ except (OSError, KeyError):
+ err = sys.exc_info()[1]
+ self.logger.error("POSIX: Lookup of %s %s failed: %s" %
+ (scope, acl.qualifier, err))
+ qual = acl.qualifier
+ existing[(atype, acl.tag_type, qual)] = \
+ self._norm_acl_perms(acl.permset)
+
+ existing = dict()
+ try:
+ for acl in posix1e.ACL(file=path):
+ _process_acl(acl, "access")
+ except IOError:
+ err = sys.exc_info()[1]
+ if err.errno == 95:
+ # fs is mounted noacl
+ self.logger.debug("POSIX: Filesystem mounted without ACL "
+ "support: %s" % path)
+ else:
+                self.logger.error("POSIX: Error getting current ACLs on %s: %s"
+ % (path, err))
+ return existing
+
+ if os.path.isdir(path):
+ for acl in posix1e.ACL(filedef=path):
+ _process_acl(acl, "default")
+ return existing
+
+ def _verify_acls(self, entry, path=None):
+ if not has_acls:
+ if entry.findall("ACL"):
+ self.logger.debug("POSIX: ACLs listed for %s but no pylibacl "
+ "library installed" % entry.get('name'))
+ return True
+
+ if path is None:
+ path = entry.get("name")
+
+ # create lists of normalized representations of the ACLs we want
+ # and the ACLs we have. this will make them easier to compare
+ # than trying to mine that data out of the ACL objects and XML
+ # objects and compare it at the same time.
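+        # both dicts map (type, scope, qualifier) to a numeric permset,
+        # e.g. ('access', posix1e.ACL_USER, 'apache') -> 6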
+ wanted = self._list_entry_acls(entry)
+ existing = self._list_file_acls(path)
+
+ missing = []
+ extra = []
+ wrong = []
+ for aclkey, perms in wanted.items():
+ if aclkey not in existing:
+ missing.append(self._acl2string(aclkey, perms))
+ elif existing[aclkey] != perms:
+ wrong.append((self._acl2string(aclkey, perms),
+ self._acl2string(aclkey, existing[aclkey])))
+ if path == entry.get("name"):
+ atype, scope, qual = aclkey
+ aclentry = Bcfg2.Client.XML.Element("ACL", type=atype,
+ perms=str(perms))
+ if scope == posix1e.ACL_USER:
+ aclentry.set("scope", "user")
+ elif scope == posix1e.ACL_GROUP:
+ aclentry.set("scope", "group")
+ else:
+ self.logger.debug("POSIX: Unknown ACL scope %s on %s" %
+ (scope, path))
+ continue
+ aclentry.set(aclentry.get("scope"), qual)
+ entry.append(aclentry)
+
+ for aclkey, perms in existing.items():
+ if aclkey not in wanted:
+ extra.append(self._acl2string(aclkey, perms))
+
+ msg = []
+ if missing:
+ msg.append("%s ACLs are missing: %s" % (len(missing),
+ ", ".join(missing)))
+ if wrong:
+ msg.append("%s ACLs are wrong: %s" %
+ (len(wrong),
+ "; ".join(["%s should be %s" % (e, w)
+ for w, e in wrong])))
+ if extra:
+ msg.append("%s extra ACLs: %s" % (len(extra), ", ".join(extra)))
+
+ if msg:
+ msg.insert(0, "POSIX: ACLs for %s are incorrect." % path)
+ self.logger.debug(msg[0])
+ for line in msg[1:]:
+ self.logger.debug(" " + line)
+ entry.set('qtext', "\n".join([entry.get("qtext", '')] + msg))
+ return False
+ return True
+
+ def _makedirs(self, entry, path=None):
+ """ os.makedirs helpfully creates all parent directories for
+ us, but it sets permissions according to umask, which is
+ probably wrong. we need to find out which directories were
+ created and set permissions on those
+ (http://trac.mcs.anl.gov/projects/bcfg2/ticket/1125) """
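+        # e.g., for /foo/bar/baz with only /foo existing, "created"
+        # ends up as ['/foo/bar/baz', '/foo/bar'] and each new
+        # directory gets the entry's ownership and permissions applied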
+ created = []
+ if path is None:
+ path = entry.get("name")
+ cur = path
+ while cur != '/':
+ if not os.path.exists(cur):
+ created.append(cur)
+ cur = os.path.dirname(cur)
+ rv = True
+ try:
+ os.makedirs(path)
+ except OSError:
+ err = sys.exc_info()[1]
+ self.logger.error('POSIX: Failed to create directory %s: %s' %
+ (path, err))
+ rv = False
+ for cpath in created:
+ rv &= self._set_perms(entry, path=cpath)
+ return rv
diff --git a/src/lib/Bcfg2/Client/Tools/Portage.py b/src/lib/Bcfg2/Client/Tools/Portage.py
index 4516f419d..36d48b8d3 100644
--- a/src/lib/Bcfg2/Client/Tools/Portage.py
+++ b/src/lib/Bcfg2/Client/Tools/Portage.py
@@ -2,8 +2,6 @@
import re
import Bcfg2.Client.Tools
-from Bcfg2.Bcfg2Py3k import ConfigParser
-
class Portage(Bcfg2.Client.Tools.PkgTool):
"""The Gentoo toolset implements package and service operations and
@@ -27,30 +25,11 @@ class Portage(Bcfg2.Client.Tools.PkgTool):
self._ebuild_pattern = re.compile('(ebuild|binary)')
self.cfg = cfg
self.installed = {}
- self._binpkgonly = True
-
- # Used to get options from configuration file
- parser = ConfigParser.ConfigParser()
- parser.read(self.setup.get('setup'))
- for opt in ['binpkgonly']:
- if parser.has_option(self.name, opt):
- setattr(self, ('_%s' % opt),
- self._StrToBoolIfBool(parser.get(self.name, opt)))
-
+ self._binpkgonly = self.setup.get('portage_binpkgonly', False)
if self._binpkgonly:
self.pkgtool = self._binpkgtool
self.RefreshPackages()
- def _StrToBoolIfBool(self, s):
- """Returns a boolean if the string specifies a boolean value.
- Returns a string otherwise"""
- if s.lower() in ('true', 'yes', 't', 'y', '1'):
- return True
- elif s.lower() in ('false', 'no', 'f', 'n', '0'):
- return False
- else:
- return s
-
def RefreshPackages(self):
"""Refresh memory hashes of packages."""
if not self._initialised:
@@ -83,8 +62,8 @@ class Portage(Bcfg2.Client.Tools.PkgTool):
entry.set('current_version', version)
if not self.setup['quick']:
- if ('verify' not in entry.attrib) or \
- self._StrToBoolIfBool(entry.get('verify')):
+ if ('verify' not in entry.attrib or
+                        entry.get('verify').lower() == 'true'):
# Check the package if:
# - Not running in quick mode
diff --git a/src/lib/Bcfg2/Client/Tools/RPMng.py b/src/lib/Bcfg2/Client/Tools/RPMng.py
index 00dd00d71..91e2180ae 100644
--- a/src/lib/Bcfg2/Client/Tools/RPMng.py
+++ b/src/lib/Bcfg2/Client/Tools/RPMng.py
@@ -4,8 +4,6 @@ import os.path
import rpm
import rpmtools
import Bcfg2.Client.Tools
-# Compatibility import
-from Bcfg2.Bcfg2Py3k import ConfigParser
class RPMng(Bcfg2.Client.Tools.PkgTool):
"""Support for RPM packages."""
@@ -44,82 +42,42 @@ class RPMng(Bcfg2.Client.Tools.PkgTool):
self.modlists = {}
self.gpg_keyids = self.getinstalledgpg()
- # Process thee RPMng section from the config file.
- RPMng_CP = ConfigParser.ConfigParser()
- RPMng_CP.read(self.setup.get('setup'))
-
- # installonlypackages
- self.installOnlyPkgs = []
- if RPMng_CP.has_option(self.name, 'installonlypackages'):
- for i in RPMng_CP.get(self.name, 'installonlypackages').split(','):
- self.installOnlyPkgs.append(i.strip())
- if self.installOnlyPkgs == []:
- self.installOnlyPkgs = ['kernel', 'kernel-bigmem', 'kernel-enterprise', 'kernel-smp',
- 'kernel-modules', 'kernel-debug', 'kernel-unsupported',
- 'kernel-source', 'kernel-devel', 'kernel-default',
- 'kernel-largesmp-devel', 'kernel-largesmp', 'kernel-xen',
- 'gpg-pubkey']
+ opt_prefix = self.name.lower()
+ self.installOnlyPkgs = self.setup["%s_installonly" % opt_prefix]
if 'gpg-pubkey' not in self.installOnlyPkgs:
self.installOnlyPkgs.append('gpg-pubkey')
- self.logger.debug('installOnlyPackages = %s' % self.installOnlyPkgs)
-
- # erase_flags
- self.erase_flags = []
- if RPMng_CP.has_option(self.name, 'erase_flags'):
- for i in RPMng_CP.get(self.name, 'erase_flags').split(','):
- self.erase_flags.append(i.strip())
- if self.erase_flags == []:
- self.erase_flags = ['allmatches']
- self.logger.debug('erase_flags = %s' % self.erase_flags)
-
- # pkg_checks
- if RPMng_CP.has_option(self.name, 'pkg_checks'):
- self.pkg_checks = RPMng_CP.get(self.name, 'pkg_checks').lower()
- else:
- self.pkg_checks = 'true'
- self.logger.debug('pkg_checks = %s' % self.pkg_checks)
-
- # pkg_verify
- if RPMng_CP.has_option(self.name, 'pkg_verify'):
- self.pkg_verify = RPMng_CP.get(self.name, 'pkg_verify').lower()
- else:
- self.pkg_verify = 'true'
- self.logger.debug('pkg_verify = %s' % self.pkg_verify)
-
- # installed_action
- if RPMng_CP.has_option(self.name, 'installed_action'):
- self.installed_action = RPMng_CP.get(self.name, 'installed_action').lower()
- else:
- self.installed_action = 'install'
- self.logger.debug('installed_action = %s' % self.installed_action)
-
- # version_fail_action
- if RPMng_CP.has_option(self.name, 'version_fail_action'):
- self.version_fail_action = RPMng_CP.get(self.name, 'version_fail_action').lower()
- else:
- self.version_fail_action = 'upgrade'
- self.logger.debug('version_fail_action = %s' % self.version_fail_action)
-
- # verify_fail_action
- if self.name == "RPMng":
- if RPMng_CP.has_option(self.name, 'verify_fail_action'):
- self.verify_fail_action = RPMng_CP.get(self.name, 'verify_fail_action').lower()
- else:
- self.verify_fail_action = 'reinstall'
- else: # yum can't reinstall packages.
- self.verify_fail_action = 'none'
- self.logger.debug('verify_fail_action = %s' % self.verify_fail_action)
-
- # version_fail_action
- if RPMng_CP.has_option(self.name, 'verify_flags'):
- self.verify_flags = RPMng_CP.get(self.name, 'verify_flags').lower().split(',')
- else:
- self.verify_flags = []
+ self.erase_flags = self.setup['%s_erase_flags' % opt_prefix]
+ self.pkg_checks = self.setup['%s_pkg_checks' % opt_prefix]
+ self.pkg_verify = self.setup['%s_pkg_verify' % opt_prefix]
+ self.installed_action = self.setup['%s_installed_action' % opt_prefix]
+ self.version_fail_action = self.setup['%s_version_fail_action' %
+ opt_prefix]
+ self.verify_fail_action = self.setup['%s_verify_fail_action' %
+ opt_prefix]
+ self.verify_flags = self.setup['%s_verify_flags' % opt_prefix]
if '' in self.verify_flags:
self.verify_flags.remove('')
- self.logger.debug('version_fail_action = %s' % self.version_fail_action)
+
+ self.logger.debug('%s: installOnlyPackages = %s' %
+ (self.name, self.installOnlyPkgs))
+ self.logger.debug('%s: erase_flags = %s' %
+ (self.name, self.erase_flags))
+ self.logger.debug('%s: pkg_checks = %s' %
+ (self.name, self.pkg_checks))
+ self.logger.debug('%s: pkg_verify = %s' %
+ (self.name, self.pkg_verify))
+ self.logger.debug('%s: installed_action = %s' %
+ (self.name, self.installed_action))
+ self.logger.debug('%s: version_fail_action = %s' %
+ (self.name, self.version_fail_action))
+ self.logger.debug('%s: verify_fail_action = %s' %
+ (self.name, self.verify_fail_action))
+ self.logger.debug('%s: verify_flags = %s' %
+ (self.name, self.verify_flags))
+
# Force a re- prelink of all packages if prelink exists.
- # Many, if not most package verifies can be caused by out of date prelinking.
+        # Many, if not most, package verify failures are caused by
+        # out-of-date prelinking.
if os.path.isfile('/usr/sbin/prelink') and not self.setup['dryrun']:
cmdrc, output = self.cmd.run('/usr/sbin/prelink -a -mR')
if cmdrc == 0:
@@ -193,7 +151,8 @@ class RPMng(Bcfg2.Client.Tools.PkgTool):
instance = Bcfg2.Client.XML.SubElement(entry, 'Package')
for attrib in list(entry.attrib.keys()):
instance.attrib[attrib] = entry.attrib[attrib]
- if self.pkg_checks == 'true' and entry.get('pkg_checks', 'true') == 'true':
+ if (self.pkg_checks and
+ entry.get('pkg_checks', 'true').lower() == 'true'):
if 'any' in [entry.get('version'), pinned_version]:
version, release = 'any', 'any'
elif entry.get('version') == 'auto':
@@ -215,7 +174,8 @@ class RPMng(Bcfg2.Client.Tools.PkgTool):
if entry.get('name') in self.installed:
# There is at least one instance installed.
- if self.pkg_checks == 'true' and entry.get('pkg_checks', 'true') == 'true':
+ if (self.pkg_checks and
+ entry.get('pkg_checks', 'true').lower() == 'true'):
rpmTs = rpm.TransactionSet()
rpmHeader = None
for h in rpmTs.dbMatch(rpm.RPMTAG_NAME, entry.get('name')):
@@ -243,8 +203,8 @@ class RPMng(Bcfg2.Client.Tools.PkgTool):
self.logger.debug(" %s" % self.str_evra(inst))
self.instance_status[inst]['installed'] = True
- if self.pkg_verify == 'true' and \
- inst.get('pkg_verify', 'true') == 'true':
+ if (self.pkg_verify and
+ inst.get('pkg_verify', 'true').lower() == 'true'):
flags = inst.get('verify_flags', '').split(',') + self.verify_flags
if pkg.get('gpgkeyid', '')[-8:] not in self.gpg_keyids and \
entry.get('name') != 'gpg-pubkey':
@@ -302,8 +262,8 @@ class RPMng(Bcfg2.Client.Tools.PkgTool):
self.logger.debug(" %s" % self.str_evra(inst))
self.instance_status[inst]['installed'] = True
- if self.pkg_verify == 'true' and \
- inst.get('pkg_verify', 'true') == 'true':
+ if (self.pkg_verify and
+ inst.get('pkg_verify', 'true').lower() == 'true'):
flags = inst.get('verify_flags', '').split(',') + self.verify_flags
if pkg.get('gpgkeyid', '')[-8:] not in self.gpg_keyids and \
'nosignature' not in flags:
@@ -520,7 +480,7 @@ class RPMng(Bcfg2.Client.Tools.PkgTool):
self.extra = self.FindExtraPackages()
def FixInstance(self, instance, inst_status):
- """"
+ """
Control if a reinstall of a package happens or not based on the
results from RPMng.VerifyPackage().
@@ -824,8 +784,8 @@ class RPMng(Bcfg2.Client.Tools.PkgTool):
return False
# We don't want to do any checks so we don't care what the entry has in it.
- if self.pkg_checks == 'false' or \
- entry.get('pkg_checks', 'true').lower() == 'false':
+ if (not self.pkg_checks or
+ entry.get('pkg_checks', 'true').lower() == 'false'):
return True
instances = entry.findall('Instance')
diff --git a/src/lib/Bcfg2/Client/Tools/RcUpdate.py b/src/lib/Bcfg2/Client/Tools/RcUpdate.py
index 1b9a29478..ddf9c1f2d 100644
--- a/src/lib/Bcfg2/Client/Tools/RcUpdate.py
+++ b/src/lib/Bcfg2/Client/Tools/RcUpdate.py
@@ -23,22 +23,18 @@ class RcUpdate(Bcfg2.Client.Tools.SvcTool):
rc = self.cmd.run(cmd % entry.get('name'))[0]
is_enabled = (rc == 0)
- if entry.get('mode', 'default') == 'supervised':
- # check if init script exists
- try:
- os.stat('/etc/init.d/%s' % entry.get('name'))
- except OSError:
- self.logger.debug('Init script for service %s does not exist' %
- entry.get('name'))
- return False
+ # check if init script exists
+ try:
+ os.stat('/etc/init.d/%s' % entry.get('name'))
+ except OSError:
+ self.logger.debug('Init script for service %s does not exist' %
+ entry.get('name'))
+ return False
- # check if service is enabled
- cmd = '/etc/init.d/%s status | grep started'
- rc = self.cmd.run(cmd % entry.attrib['name'])[0]
- is_running = (rc == 0)
- else:
- # we don't care
- is_running = is_enabled
+ # check if service is enabled
+ cmd = '/etc/init.d/%s status | grep started'
+ rc = self.cmd.run(cmd % entry.attrib['name'])[0]
+ is_running = (rc == 0)
if entry.get('status') == 'on' and not (is_enabled and is_running):
entry.set('current_status', 'off')
@@ -53,19 +49,11 @@ class RcUpdate(Bcfg2.Client.Tools.SvcTool):
def InstallService(self, entry):
"""
Install Service entry
- In supervised mode we also take care it's (not) running.
"""
- # don't take any actions for mode='manual'
- if entry.get('mode', 'default') == 'manual':
- self.logger.info("Service %s mode set to manual. Skipping "
- "installation." % (entry.get('name')))
- return False
self.logger.info('Installing Service %s' % entry.get('name'))
if entry.get('status') == 'on':
- # make sure it's running if in supervised mode
- if entry.get('mode', 'default') == 'supervised' \
- and entry.get('current_status') == 'off':
+ if entry.get('current_status') == 'off':
self.start_service(entry)
# make sure it's enabled
cmd = '/sbin/rc-update add %s default'
@@ -73,9 +61,7 @@ class RcUpdate(Bcfg2.Client.Tools.SvcTool):
return (rc == 0)
elif entry.get('status') == 'off':
- # make sure it's not running if in supervised mode
- if entry.get('mode', 'default') == 'supervised' \
- and entry.get('current_status') == 'on':
+ if entry.get('current_status') == 'on':
self.stop_service(entry)
# make sure it's disabled
cmd = '/sbin/rc-update del %s default'
diff --git a/src/lib/Bcfg2/Client/Tools/SELinux.py b/src/lib/Bcfg2/Client/Tools/SELinux.py
new file mode 100644
index 000000000..1c0db904b
--- /dev/null
+++ b/src/lib/Bcfg2/Client/Tools/SELinux.py
@@ -0,0 +1,716 @@
+import os
+import re
+import sys
+import copy
+import glob
+import struct
+import socket
+import selinux
+import seobject
+import Bcfg2.Client.XML
+import Bcfg2.Client.Tools
+import Bcfg2.Client.Tools.POSIX
+
+def pack128(int_val):
+ """ pack a 128-bit integer in big-endian format """
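+    # values that fit in 32 bits pack to 4 bytes; anything larger is
+    # split into four 32-bit words and packed to 16 bytes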
+ max_int = 2 ** (128) - 1
+ max_word_size = 2 ** 32 - 1
+
+ if int_val <= max_word_size:
+ return struct.pack('>L', int_val)
+
+ words = []
+ for i in range(4):
+ word = int_val & max_word_size
+ words.append(int(word))
+ int_val >>= 32
+ words.reverse()
+ return struct.pack('>4I', *words)
+
+def netmask_itoa(netmask, proto="ipv4"):
+ """ convert an integer netmask (e.g., /16) to dotted-quad
+ notation (255.255.0.0) or IPv6 prefix notation (ffff::) """
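+    # e.g., netmask_itoa(16) => '255.255.0.0'
+    #       netmask_itoa(64, proto='ipv6') => 'ffff:ffff:ffff:ffff::'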
+ if proto == "ipv4":
+ size = 32
+ family = socket.AF_INET
+ else: # ipv6
+ size = 128
+ family = socket.AF_INET6
+ try:
+        netmask = int(netmask)
+    except ValueError:
+        # not an integer prefix length; assume it's already in
+        # dotted-quad or IPv6 notation
+        return netmask
+
+ if netmask > size:
+ raise ValueError("Netmask too large: %s" % netmask)
+
+ res = 0L
+ for n in range(netmask):
+ res |= 1 << (size - n - 1)
+ netmask = socket.inet_ntop(family, pack128(res))
+ return netmask
+
+
+class SELinux(Bcfg2.Client.Tools.Tool):
+ """ SELinux boolean and module support """
+ name = 'SELinux'
+ __handles__ = [('SELinux', 'boolean'),
+ ('SELinux', 'port'),
+ ('SELinux', 'fcontext'),
+ ('SELinux', 'node'),
+ ('SELinux', 'login'),
+ ('SELinux', 'user'),
+ ('SELinux', 'interface'),
+ ('SELinux', 'permissive'),
+ ('SELinux', 'module')]
+ __req__ = dict(SELinux=dict(boolean=['name', 'value'],
+ module=['name'],
+ port=['name', 'selinuxtype'],
+ fcontext=['name', 'selinuxtype'],
+ node=['name', 'selinuxtype', 'proto'],
+ login=['name', 'selinuxuser'],
+ user=['name', 'roles', 'prefix'],
+ interface=['name', 'selinuxtype'],
+ permissive=['name']))
+
+ def __init__(self, logger, setup, config):
+ Bcfg2.Client.Tools.Tool.__init__(self, logger, setup, config)
+ self.handlers = {}
+ for handles in self.__handles__:
+ etype = handles[1]
+ self.handlers[etype] = \
+ globals()["SELinux%sHandler" % etype.title()](self, logger,
+ setup, config)
+
+ def BundleUpdated(self, _, states):
+ for handler in self.handlers.values():
+ handler.BundleUpdated(states)
+
+ def FindExtra(self):
+ extra = []
+ for handler in self.handlers.values():
+ extra.extend(handler.FindExtra())
+ return extra
+
+ def canInstall(self, entry):
+ return (Bcfg2.Client.Tools.Tool.canInstall(self, entry) and
+ self.handlers[entry.get('type')].canInstall(entry))
+
+ def primarykey(self, entry):
+ """ return a string that should be unique amongst all entries
+ in the specification """
+ return self.handlers[entry.get('type')].primarykey(entry)
+
+ def Install(self, entries, states):
+ # start a transaction
+ sr = seobject.semanageRecords("")
+ if hasattr(sr, "start"):
+ self.logger.debug("Starting SELinux transaction")
+ sr.start()
+ else:
+ self.logger.debug("SELinux transactions not supported; this may "
+ "slow things down considerably")
+ Bcfg2.Client.Tools.Tool.Install(self, entries, states)
+ if hasattr(sr, "finish"):
+ self.logger.debug("Committing SELinux transaction")
+ sr.finish()
+
+ def InstallSELinux(self, entry):
+ """Dispatch install to the proper method according to type"""
+ return self.handlers[entry.get('type')].Install(entry)
+
+ def VerifySELinux(self, entry, _):
+ """Dispatch verify to the proper method according to type"""
+ rv = self.handlers[entry.get('type')].Verify(entry)
+ if entry.get('qtext') and self.setup['interactive']:
+ entry.set('qtext',
+ '%s\nInstall SELinux %s %s: (y/N) ' %
+ (entry.get('qtext'),
+ entry.get('type'),
+ self.handlers[entry.get('type')].tostring(entry)))
+ return rv
+
+ def Remove(self, entries):
+ """Dispatch verify to the proper removal method according to type"""
+ # sort by type
+ types = list()
+ for entry in entries:
+ if entry.get('type') not in types:
+ types.append(entry.get('type'))
+
+ for etype in types:
+            self.handlers[etype].Remove([e for e in entries
+                                         if e.get('type') == etype])
+
+
+class SELinuxEntryHandler(object):
+ etype = None
+ key_format = ("name",)
+ value_format = ()
+ str_format = '%(name)s'
+ custom_re = re.compile(' (?P<name>\S+)$')
+ custom_format = None
+
+ def __init__(self, tool, logger, setup, config):
+ self.tool = tool
+ self.logger = logger
+ self._records = None
+ self._all = None
+ if not self.custom_format:
+ self.custom_format = self.key_format
+
+ @property
+ def records(self):
+ if self._records is None:
+ self._records = getattr(seobject, "%sRecords" % self.etype)("")
+ return self._records
+
+ @property
+ def all_records(self):
+ if self._all is None:
+ self._all = self.records.get_all()
+ return self._all
+
+ @property
+ def custom_records(self):
+ if hasattr(self.records, "customized") and self.custom_re:
+ return dict([(k, self.all_records[k]) for k in self.custom_keys])
+ else:
+ # ValueError is really a pretty dumb exception to raise,
+ # but that's what the seobject customized() method raises
+ # if it's defined but not implemented. yeah, i know, wtf.
+ raise ValueError("custom_records")
+
+ @property
+ def custom_keys(self):
+ keys = []
+ for cmd in self.records.customized():
+ match = self.custom_re.search(cmd)
+ if match:
+ if (len(self.custom_format) == 1 and
+ self.custom_format[0] == "name"):
+ keys.append(match.group("name"))
+ else:
+ keys.append(tuple([match.group(k)
+ for k in self.custom_format]))
+ return keys
+
+ def tostring(self, entry):
+ return self.str_format % entry.attrib
+
+ def keytostring(self, key):
+ return self.str_format % self._key2attrs(key)
+
+ def _key(self, entry):
+ if len(self.key_format) == 1 and self.key_format[0] == "name":
+ return entry.get("name")
+ else:
+ rv = []
+                self.logger.error("POSIX: Error getting current ACLs on %s: %s"
+ rv.append(entry.get(key))
+ return tuple(rv)
+
+ def _key2attrs(self, key):
+ if isinstance(key, tuple):
+ rv = dict((self.key_format[i], key[i])
+ for i in range(len(self.key_format))
+ if self.key_format[i])
+ else:
+ rv = dict(name=key)
+ if self.value_format:
+ vals = self.all_records[key]
+ rv.update(dict((self.value_format[i], vals[i])
+ for i in range(len(self.value_format))
+ if self.value_format[i]))
+ return rv
+
+ def key2entry(self, key):
+ attrs = self._key2attrs(key)
+ attrs["type"] = self.etype
+ return Bcfg2.Client.XML.Element("SELinux", **attrs)
+
+ def _args(self, entry, method):
+ if hasattr(self, "_%sargs" % method):
+ return getattr(self, "_%sargs" % method)(entry)
+ elif hasattr(self, "_defaultargs"):
+ # default args
+ return self._defaultargs(entry)
+ else:
+ raise NotImplementedError
+
+ def _deleteargs(self, entry):
+ return (self._key(entry))
+
+ def canInstall(self, entry):
+ return bool(self._key(entry))
+
+ def primarykey(self, entry):
+ return ":".join([entry.tag, entry.get("type"), entry.get("name")])
+
+ def exists(self, entry):
+ if self._key(entry) not in self.all_records:
+ self.logger.debug("SELinux %s %s does not exist" %
+ (self.etype, self.tostring(entry)))
+ return False
+ return True
+
+ def Verify(self, entry):
+ if not self.exists(entry):
+ entry.set('current_exists', 'false')
+ return False
+
+ errors = []
+ current_attrs = self._key2attrs(self._key(entry))
+ desired_attrs = entry.attrib
+ for attr in self.value_format:
+ if not attr:
+ continue
+ if current_attrs[attr] != desired_attrs[attr]:
+ entry.set('current_%s' % attr, current_attrs[attr])
+ errors.append("SELinux %s %s has wrong %s: %s, should be %s" %
+ (self.etype, self.tostring(entry), attr,
+ current_attrs[attr], desired_attrs[attr]))
+
+ if errors:
+ for error in errors:
+ self.logger.debug(error)
+ entry.set('qtext', "\n".join([entry.get('qtext', '')] + errors))
+ return False
+ else:
+ return True
+
+ def Install(self, entry, method=None):
+ if not method:
+ if self.exists(entry):
+ method = "modify"
+ else:
+ method = "add"
+ self.logger.debug("%s SELinux %s %s" %
+ (method.title(), self.etype, self.tostring(entry)))
+
+ try:
+ getattr(self.records, method)(*self._args(entry, method))
+ self._all = None
+ return True
+ except ValueError:
+ err = sys.exc_info()[1]
+ self.logger.debug("Failed to %s SELinux %s %s: %s" %
+ (method, self.etype, self.tostring(entry), err))
+ return False
+
+ def Remove(self, entries):
+ for entry in entries:
+ try:
+ self.records.delete(*self._args(entry, "delete"))
+ self._all = None
+ except ValueError:
+ err = sys.exc_info()[1]
+ self.logger.info("Failed to remove SELinux %s %s: %s" %
+ (self.etype, self.tostring(entry), err))
+
+ def FindExtra(self):
+ specified = [self._key(e)
+ for e in self.tool.getSupportedEntries()
+ if e.get("type") == self.etype]
+ try:
+ records = self.custom_records
+ except ValueError:
+ records = self.all_records
+ return [self.key2entry(key)
+ for key in records.keys()
+ if key not in specified]
+
+ def BundleUpdated(self, states):
+ pass
+
+
+class SELinuxBooleanHandler(SELinuxEntryHandler):
+ etype = "boolean"
+ value_format = ("value",)
+
+ @property
+ def all_records(self):
+ # older versions of selinux return a single 0/1 value for each
+ # bool, while newer versions return a list of three 0/1 values
+ # representing various states. we don't care about the latter
+ # two values, but it's easier to coerce the older format into
+ # the newer format as far as interoperation with the rest of
+ # SELinuxEntryHandler goes
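+        # e.g., {'httpd_enable_cgi': 1} is coerced to
+        # {'httpd_enable_cgi': [1, 1, 1]}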
+ rv = SELinuxEntryHandler.all_records.fget(self)
+ if rv.values()[0] in [0, 1]:
+ for key, val in rv.items():
+ rv[key] = [val, val, val]
+ return rv
+
+ def _key2attrs(self, key):
+ rv = SELinuxEntryHandler._key2attrs(self, key)
+ status = self.all_records[key][0]
+ if status:
+ rv['value'] = "on"
+ else:
+ rv['value'] = "off"
+ return rv
+
+ def _defaultargs(self, entry):
+ # the only values recognized by both new and old versions of
+ # selinux are the strings "0" and "1". old selinux accepts
+ # ints or bools as well, new selinux accepts "on"/"off"
+ if entry.get("value").lower() == "on":
+ value = "1"
+ else:
+ value = "0"
+ return (entry.get("name"), value)
+
+ def canInstall(self, entry):
+ if entry.get("value").lower() not in ["on", "off"]:
+ self.logger.debug("SELinux %s %s has a bad value: %s" %
+ (self.etype, self.tostring(entry),
+ entry.get("value")))
+ return False
+ return (self.exists(entry) and
+ SELinuxEntryHandler.canInstall(self, entry))
+
+
+class SELinuxPortHandler(SELinuxEntryHandler):
+ etype = "port"
+ value_format = ('selinuxtype', None)
+ custom_re = re.compile(r'-p (?P<proto>tcp|udp).*? (?P<start>\d+)(?:-(?P<end>\d+))?$')
+
+ @property
+ def custom_keys(self):
+ keys = []
+ for cmd in self.records.customized():
+ match = self.custom_re.search(cmd)
+ if match:
+ if match.group('end'):
+ keys.append((int(match.group('start')),
+ int(match.group('end')),
+ match.group('proto')))
+ else:
+ keys.append((int(match.group('start')),
+ int(match.group('start')),
+ match.group('proto')))
+ return keys
+
+ @property
+ def all_records(self):
+ if self._all is None:
+ # older versions of selinux use (startport, endport) as
+ # they key for the ports.get_all() dict, and (type, proto,
+ # level) as the value; this is obviously broken, so newer
+ # versions use (startport, endport, proto) as the key, and
+ # (type, level) as the value. abstracting around this
+ # sucks.
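+            # e.g., old: {(80, 80): ('http_port_t', 'tcp', 's0')}
+            #       new: {(80, 80, 'tcp'): ('http_port_t', 's0')}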
+ ports = self.records.get_all()
+ if len(ports.keys()[0]) == 3:
+ self._all = ports
+ else:
+                # ugliest list comprehension ever?
+ self._all = dict([((k[0], k[1], v[1]), (v[0], v[2]))
+ for k, v in ports.items()])
+ return self._all
+
+ def _key(self, entry):
+ try:
+ (port, proto) = entry.get("name").split("/")
+ except ValueError:
+            self.logger.error("Invalid SELinux port %s: no protocol specified" %
+ entry.get("name"))
+ return
+ if "-" in port:
+ start, end = port.split("-")
+ else:
+ start = port
+ end = port
+ return (int(start), int(end), proto)
+
+ def _key2attrs(self, key):
+ if key[0] == key[1]:
+ port = str(key[0])
+ else:
+ port = "%s-%s" % (key[0], key[1])
+ vals = self.all_records[key]
+ return dict(name="%s/%s" % (port, key[2]), selinuxtype=vals[0])
+
+ def _defaultargs(self, entry):
+ (port, proto) = entry.get("name").split("/")
+ return (port, proto, '', entry.get("selinuxtype"))
+
+ def _deleteargs(self, entry):
+ return tuple(entry.get("name").split("/"))
+
+
+class SELinuxFcontextHandler(SELinuxEntryHandler):
+ etype = "fcontext"
+ key_format = ("name", "filetype")
+ value_format = (None, None, "selinuxtype", None)
+ filetypeargs = dict(all="",
+ regular="--",
+ directory="-d",
+ symlink="-l",
+ pipe="-p",
+ socket="-s",
+ block="-b",
+ char="-c",
+ door="-D")
+ filetypenames = dict(all="all files",
+ regular="regular file",
+ directory="directory",
+ symlink="symbolic link",
+ pipe="named pipe",
+ socket="socket",
+ block="block device",
+ char="character device",
+ door="door")
+ filetypeattrs = dict([v, k] for k, v in filetypenames.iteritems())
+ custom_re = re.compile(r'-f \'(?P<filetype>[a-z ]+)\'.*? \'(?P<name>.*)\'')
+
+ @property
+ def all_records(self):
+ if self._all is None:
+ # on older selinux, fcontextRecords.get_all() returns a
+ # list of tuples of (filespec, filetype, seuser, serole,
+ # setype, level); on newer selinux, get_all() returns a
+ # dict of (filespec, filetype) => (seuser, serole, setype,
+ # level).
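+            # e.g., a tuple like ('/srv(/.*)?', 'all files', 'system_u',
+            # 'object_r', 'httpd_sys_content_t', 's0') from the old API
+            # becomes {('/srv(/.*)?', 'all files'): ('system_u',
+            # 'object_r', 'httpd_sys_content_t', 's0')}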
+ fcontexts = self.records.get_all()
+ if isinstance(fcontexts, dict):
+ self._all = fcontexts
+ else:
+ self._all = dict([(f[0:2], f[2:]) for f in fcontexts])
+ return self._all
+
+ def _key(self, entry):
+ ftype = entry.get("filetype", "all")
+ return (entry.get("name"),
+ self.filetypenames.get(ftype, ftype))
+
+ def _key2attrs(self, key):
+ rv = dict(name=key[0], filetype=self.filetypeattrs[key[1]])
+ vals = self.all_records[key]
+ # in older versions of selinux, an fcontext with no selinux
+ # type is the single value None; in newer versions, it's a
+ # tuple whose 0th (and only) value is None.
+ if vals and vals[0]:
+ rv["selinuxtype"] = vals[2]
+ else:
+ rv["selinuxtype"] = "<<none>>"
+ return rv
+
+ def canInstall(self, entry):
+ return (entry.get("filetype", "all") in self.filetypeargs and
+ SELinuxEntryHandler.canInstall(self, entry))
+
+ def _defaultargs(self, entry):
+ return (entry.get("name"), entry.get("selinuxtype"),
+ self.filetypeargs[entry.get("filetype", "all")],
+ '', '')
+
+ def primarykey(self, entry):
+ return ":".join([entry.tag, entry.get("type"), entry.get("name"),
+ entry.get("filetype", "all")])
+
+
+class SELinuxNodeHandler(SELinuxEntryHandler):
+ etype = "node"
+ value_format = (None, None, "selinuxtype", None)
+ str_format = '%(name)s (%(proto)s)'
+ custom_re = re.compile(r'-M (?P<netmask>\S+).*?-p (?P<proto>ipv\d).*? (?P<addr>\S+)$')
+ custom_format = ('addr', 'netmask', 'proto')
+
+ def _key(self, entry):
+ try:
+ (addr, netmask) = entry.get("name").split("/")
+ except ValueError:
+ self.logger.error("Invalid SELinux node %s: no netmask specified" %
+ entry.get("name"))
+ return
+ netmask = netmask_itoa(netmask, proto=entry.get("proto"))
+ return (addr, netmask, entry.get("proto"))
+
+ def _key2attrs(self, key):
+ vals = self.all_records[key]
+ return dict(name="%s/%s" % (key[0], key[1]), proto=key[2],
+ selinuxtype=vals[2])
+
+ def _defaultargs(self, entry):
+ (addr, netmask) = entry.get("name").split("/")
+ return (addr, netmask, entry.get("proto"), "", entry.get("selinuxtype"))
+
+
+class SELinuxLoginHandler(SELinuxEntryHandler):
+ etype = "login"
+ value_format = ("selinuxuser", None)
+
+ def _defaultargs(self, entry):
+ return (entry.get("name"), entry.get("selinuxuser"), "")
+
+
+class SELinuxUserHandler(SELinuxEntryHandler):
+ etype = "user"
+ value_format = ("prefix", None, None, "roles")
+
+ def __init__(self, tool, logger, setup, config):
+ SELinuxEntryHandler.__init__(self, tool, logger, setup, config)
+ self.needs_prefix = False
+
+ @property
+ def records(self):
+ if self._records is None:
+ self._records = seobject.seluserRecords()
+ return self._records
+
+ def Install(self, entry):
+ # in older versions of selinux, modify() is broken if you
+ # provide a prefix _at all_, so we try to avoid giving the
+ # prefix. however, in newer versions, prefix is _required_,
+ # so we a) try without a prefix; b) catch TypeError, which
+ # indicates that we had the wrong number of args (ValueError
+ # is thrown by the bug in older versions of selinux); and c)
+ # try with prefix.
+ try:
+ SELinuxEntryHandler.Install(self, entry)
+ except TypeError:
+ self.needs_prefix = True
+ SELinuxEntryHandler.Install(self, entry)
+
+ def _defaultargs(self, entry):
+ # in older versions of selinux, modify() is broken if you
+ # provide a prefix _at all_, so we try to avoid giving the
+ # prefix. see the comment in Install() above for more
+ # details.
+ rv = [entry.get("name"),
+ entry.get("roles", "").replace(" ", ",").split(",")]
+ if self.needs_prefix:
+ rv.extend(['', '', entry.get("prefix")])
+ else:
+ key = self._key(entry)
+ if key in self.all_records:
+ attrs = self._key2attrs(key)
+ if attrs['prefix'] != entry.get("prefix"):
+ rv.extend(['', '', entry.get("prefix")])
+ return tuple(rv)
+
+
+class SELinuxInterfaceHandler(SELinuxEntryHandler):
+ etype = "interface"
+ value_format = (None, None, "selinuxtype", None)
+
+ def _defaultargs(self, entry):
+ return (entry.get("name"), '', entry.get("selinuxtype"))
+
+
+class SELinuxPermissiveHandler(SELinuxEntryHandler):
+ etype = "permissive"
+
+ @property
+ def records(self):
+ try:
+ return SELinuxEntryHandler.records.fget(self)
+ except AttributeError:
+ self.logger.info("Permissive domains not supported by this version "
+ "of SELinux")
+ self._records = False
+ return self._records
+
+ @property
+ def all_records(self):
+ if self._all is None:
+ if self.records == False:
+ self._all = dict()
+ else:
+ # permissiveRecords.get_all() returns a list, so we just
+ # make it into a dict so that the rest of
+ # SELinuxEntryHandler works
+ self._all = dict([(d, d) for d in self.records.get_all()])
+ return self._all
+
+ def _defaultargs(self, entry):
+ return (entry.get("name"),)
+
+
+class SELinuxModuleHandler(SELinuxEntryHandler):
+ etype = "module"
+ value_format = (None, "disabled")
+
+ def __init__(self, tool, logger, setup, config):
+ SELinuxEntryHandler.__init__(self, tool, logger, setup, config)
+ self.posixtool = Bcfg2.Client.Tools.POSIX.POSIX(logger, setup, config)
+ try:
+ self.setype = selinux.selinux_getpolicytype()[1]
+ except IndexError:
+ self.logger.error("Unable to determine SELinux policy type")
+ self.setype = None
+
+ @property
+ def all_records(self):
+ if self._all is None:
+ # we get a list of tuples back; coerce it into a dict
+ self._all = dict([(m[0], (m[1], m[2]))
+ for m in self.records.get_all()])
+ return self._all
+
+ def _key2attrs(self, key):
+ rv = SELinuxEntryHandler._key2attrs(self, key)
+ status = self.all_records[key][1]
+ if status:
+ rv['disabled'] = "false"
+ else:
+ rv['disabled'] = "true"
+ return rv
+
+ def _filepath(self, entry):
+ return os.path.join("/usr/share/selinux", self.setype,
+ "%s.pp" % entry.get("name"))
+
+ def _pathentry(self, entry):
+ pathentry = copy.deepcopy(entry)
+ pathentry.set("name", self._filepath(pathentry))
+ pathentry.set("perms", "0644")
+ pathentry.set("owner", "root")
+ pathentry.set("group", "root")
+ pathentry.set("secontext", "__default__")
+ return pathentry
+
+ def Verify(self, entry):
+ if not entry.get("disabled"):
+ entry.set("disabled", "false")
+ return (SELinuxEntryHandler.Verify(self, entry) and
+ self.posixtool.Verifyfile(self._pathentry(entry), None))
+
+ def canInstall(self, entry):
+ return (entry.text and self.setype and
+ SELinuxEntryHandler.canInstall(self, entry))
+
+ def Install(self, entry):
+ rv = self.posixtool.Installfile(self._pathentry(entry))
+ try:
+ rv = rv and SELinuxEntryHandler.Install(self, entry)
+ except NameError:
+ # some versions of selinux have a bug in seobject that
+ # makes modify() calls fail. add() seems to have the same
+ # effect as modify, but without the bug
+ if self.exists(entry):
+ rv = rv and SELinuxEntryHandler.Install(self, entry,
+ method="add")
+
+ if entry.get("disabled", "false").lower() == "true":
+ method = "disable"
+ else:
+ method = "enable"
+ return rv and SELinuxEntryHandler.Install(self, entry, method=method)
+
+ def _addargs(self, entry):
+ return (self._filepath(entry),)
+
+ def _defaultargs(self, entry):
+ return (entry.get("name"),)
+
+ def FindExtra(self):
+ specified = [self._key(e)
+ for e in self.tool.getSupportedEntries()
+ if e.get("type") == self.etype]
+        return [self.key2entry(os.path.basename(f)[:-3])
+                for f in glob.glob(os.path.join("/usr/share/selinux",
+                                                self.setype, "*.pp"))
+                if os.path.basename(f)[:-3] not in specified]
diff --git a/src/lib/Bcfg2/Client/Tools/SMF.py b/src/lib/Bcfg2/Client/Tools/SMF.py
index f824410ad..3e0a9da13 100644
--- a/src/lib/Bcfg2/Client/Tools/SMF.py
+++ b/src/lib/Bcfg2/Client/Tools/SMF.py
@@ -73,11 +73,6 @@ class SMF(Bcfg2.Client.Tools.SvcTool):
def InstallService(self, entry):
"""Install SMF Service entry."""
- # don't take any actions for mode='manual'
- if entry.get('mode', 'default') == 'manual':
- self.logger.info("Service %s mode set to manual. Skipping "
- "installation." % (entry.get('name')))
- return False
self.logger.info("Installing Service %s" % (entry.get('name')))
if entry.get('status') == 'off':
if entry.get("FMRI").startswith('lrc'):
diff --git a/src/lib/Bcfg2/Client/Tools/Systemd.py b/src/lib/Bcfg2/Client/Tools/Systemd.py
index e3f6a4169..a295bc608 100644
--- a/src/lib/Bcfg2/Client/Tools/Systemd.py
+++ b/src/lib/Bcfg2/Client/Tools/Systemd.py
@@ -42,18 +42,11 @@ class Systemd(Bcfg2.Client.Tools.SvcTool):
def InstallService(self, entry):
"""Install Service entry."""
- # don't take any actions for mode = 'manual'
- if entry.get('mode', 'default') == 'manual':
- self.logger.info("Service %s mode set to manual. Skipping "
- "installation." % (entry.get('name')))
- return True
-
if entry.get('status') == 'on':
- pstatus = self.cmd.run(self.get_svc_command(entry, 'enable'))[0]
- pstatus = self.cmd.run(self.get_svc_command(entry, 'start'))[0]
-
+ rv = self.cmd.run(self.get_svc_command(entry, 'enable'))[0] == 0
+ rv &= self.cmd.run(self.get_svc_command(entry, 'start'))[0] == 0
else:
- pstatus = self.cmd.run(self.get_svc_command(entry, 'stop'))[0]
- pstatus = self.cmd.run(self.get_svc_command(entry, 'disable'))[0]
+ rv = self.cmd.run(self.get_svc_command(entry, 'stop'))[0] == 0
+ rv &= self.cmd.run(self.get_svc_command(entry, 'disable'))[0] == 0
- return not pstatus
+ return rv
diff --git a/src/lib/Bcfg2/Client/Tools/Upstart.py b/src/lib/Bcfg2/Client/Tools/Upstart.py
index 7afc8edd7..aa5a921a6 100644
--- a/src/lib/Bcfg2/Client/Tools/Upstart.py
+++ b/src/lib/Bcfg2/Client/Tools/Upstart.py
@@ -69,11 +69,6 @@ class Upstart(Bcfg2.Client.Tools.SvcTool):
def InstallService(self, entry):
"""Install Service for entry."""
- # don't take any actions for mode='manual'
- if entry.get('mode', 'default') == 'manual':
- self.logger.info("Service %s mode set to manual. Skipping "
- "installation." % (entry.get('name')))
- return False
if entry.get('status') == 'on':
pstatus = self.cmd.run(self.get_svc_command(entry, 'start'))[0]
elif entry.get('status') == 'off':
diff --git a/src/lib/Bcfg2/Client/Tools/YUM24.py b/src/lib/Bcfg2/Client/Tools/YUM24.py
index 4e488b9da..2bc821db3 100644
--- a/src/lib/Bcfg2/Client/Tools/YUM24.py
+++ b/src/lib/Bcfg2/Client/Tools/YUM24.py
@@ -6,20 +6,6 @@ import sys
import yum
import Bcfg2.Client.XML
import Bcfg2.Client.Tools.RPMng
-# Compatibility import
-from Bcfg2.Bcfg2Py3k import ConfigParser
-
-YAD = True
-CP = ConfigParser.ConfigParser()
-try:
- if '-C' in sys.argv:
- CP.read([sys.argv[sys.argv.index('-C') + 1]])
- else:
- CP.read(['/etc/bcfg2.conf'])
- if CP.get('YUMng', 'autodep').lower() == 'false':
- YAD = False
-except:
- pass
if not hasattr(Bcfg2.Client.Tools.RPMng, 'RPMng'):
raise ImportError
@@ -79,6 +65,7 @@ class YUM24(Bcfg2.Client.Tools.RPMng.RPMng):
(entry.get('name').startswith('/etc/yum.d') \
or entry.get('name').startswith('/etc/yum.repos.d')) \
or entry.get('name') == '/etc/yum.conf']
+ self.autodep = setup.get("yum24_autodep")
self.yum_avail = dict()
self.yum_installed = dict()
self.yb = yum.YumBase()
@@ -273,7 +260,7 @@ class YUM24(Bcfg2.Client.Tools.RPMng.RPMng):
if len(install_pkgs) > 0:
self.logger.info("Attempting to install packages")
- if YAD:
+ if self.autodep:
pkgtool = "/usr/bin/yum -d0 -y install %s"
else:
pkgtool = "/usr/bin/yum -d0 install %s"
@@ -309,7 +296,7 @@ class YUM24(Bcfg2.Client.Tools.RPMng.RPMng):
if len(upgrade_pkgs) > 0:
self.logger.info("Attempting to upgrade packages")
- if YAD:
+ if self.autodep:
pkgtool = "/usr/bin/yum -d0 -y update %s"
else:
pkgtool = "/usr/bin/yum -d0 update %s"
@@ -359,7 +346,7 @@ class YUM24(Bcfg2.Client.Tools.RPMng.RPMng):
"""
self.logger.debug('Running YUMng.RemovePackages()')
- if YAD:
+ if self.autodep:
pkgtool = "/usr/bin/yum -d0 -y erase %s"
else:
pkgtool = "/usr/bin/yum -d0 erase %s"
diff --git a/src/lib/Bcfg2/Client/Tools/YUMng.py b/src/lib/Bcfg2/Client/Tools/YUMng.py
index 244b66cf4..34029b9fe 100644
--- a/src/lib/Bcfg2/Client/Tools/YUMng.py
+++ b/src/lib/Bcfg2/Client/Tools/YUMng.py
@@ -12,9 +12,6 @@ import yum.misc
import rpmUtils.arch
import Bcfg2.Client.XML
import Bcfg2.Client.Tools
-# Compatibility import
-from Bcfg2.Bcfg2Py3k import ConfigParser
-
def build_yname(pkgname, inst):
"""Build yum appropriate package name."""
@@ -58,20 +55,6 @@ def nevraString(p):
return ret
-class Parser(ConfigParser.ConfigParser):
-
- def get(self, section, option, default):
- """
- Override ConfigParser.get: If the request option is not in the
- config file then return the value of default rather than raise
- an exception. We still raise exceptions on missing sections.
- """
- try:
- return ConfigParser.ConfigParser.get(self, section, option)
- except (ConfigParser.NoOptionError, ConfigParser.NoSectionError):
- return default
-
-
class RPMDisplay(yum.rpmtrans.RPMBaseCallback):
"""We subclass the default RPM transaction callback so that we
can control Yum's verbosity and pipe it through the right logger."""
@@ -224,38 +207,24 @@ class YUMng(Bcfg2.Client.Tools.PkgTool):
def _loadConfig(self):
# Process the YUMng section from the config file.
- CP = Parser()
- CP.read(self.setup.get('setup'))
- truth = ['true', 'yes', '1']
-
# These are all boolean flags, either we do stuff or we don't
- self.pkg_checks = CP.get(self.name, "pkg_checks", "true").lower() \
- in truth
- self.pkg_verify = CP.get(self.name, "pkg_verify", "true").lower() \
- in truth
- self.doInstall = CP.get(self.name, "installed_action",
- "install").lower() == "install"
- self.doUpgrade = CP.get(self.name,
- "version_fail_action", "upgrade").lower() == "upgrade"
- self.doReinst = CP.get(self.name, "verify_fail_action",
- "reinstall").lower() == "reinstall"
- self.verifyFlags = CP.get(self.name, "verify_flags",
- "").lower().replace(' ', ',')
+ self.pkg_checks = self.setup["yumng_pkg_checks"]
+ self.pkg_verify = self.setup["yumng_pkg_verify"]
+ self.doInstall = self.setup["yumng_installed_action"] == "install"
+ self.doUpgrade = self.setup["yumng_version_fail_action"] == "upgrade"
+ self.doReinst = self.setup["yumng_verify_fail_action"] == "reinstall"
+ self.verifyFlags = self.setup["yumng_verify_flags"]
self.installOnlyPkgs = self.yb.conf.installonlypkgs
if 'gpg-pubkey' not in self.installOnlyPkgs:
self.installOnlyPkgs.append('gpg-pubkey')
- self.logger.debug("YUMng: Install missing: %s" \
- % self.doInstall)
+ self.logger.debug("YUMng: Install missing: %s" % self.doInstall)
self.logger.debug("YUMng: pkg_checks: %s" % self.pkg_checks)
self.logger.debug("YUMng: pkg_verify: %s" % self.pkg_verify)
- self.logger.debug("YUMng: Upgrade on version fail: %s" \
- % self.doUpgrade)
- self.logger.debug("YUMng: Reinstall on verify fail: %s" \
- % self.doReinst)
- self.logger.debug("YUMng: installOnlyPkgs: %s" \
- % str(self.installOnlyPkgs))
+ self.logger.debug("YUMng: Upgrade on version fail: %s" % self.doUpgrade)
+ self.logger.debug("YUMng: Reinstall on verify fail: %s" % self.doReinst)
+ self.logger.debug("YUMng: installOnlyPkgs: %s" % self.installOnlyPkgs)
self.logger.debug("YUMng: verify_flags: %s" % self.verifyFlags)
def _fixAutoVersion(self, entry):
@@ -425,8 +394,8 @@ class YUMng(Bcfg2.Client.Tools.PkgTool):
if entry.get('version', False) == 'auto':
self._fixAutoVersion(entry)
- self.logger.debug("Verifying package instances for %s" \
- % entry.get('name'))
+ self.logger.debug("Verifying package instances for %s" %
+ entry.get('name'))
self.verifyCache = {} # Used for checking multilib packages
self.modlists[entry] = modlist
@@ -450,8 +419,8 @@ class YUMng(Bcfg2.Client.Tools.PkgTool):
POs = self.yb.rpmdb.searchProvides(entry.get('name'))
if len(POs) > 0:
virtPkg = True
- self.logger.info("%s appears to be provided by:" \
- % entry.get('name'))
+ self.logger.info("%s appears to be provided by:" %
+ entry.get('name'))
for p in POs:
self.logger.info(" %s" % p)
@@ -473,8 +442,13 @@ class YUMng(Bcfg2.Client.Tools.PkgTool):
stat['verify_fail'] = False
stat['pkg'] = entry
stat['modlist'] = modlist
- verify_flags = inst.get('verify_flags', self.verifyFlags)
- verify_flags = verify_flags.lower().replace(' ', ',').split(',')
+ if inst.get('verify_flags'):
+ # this splits on either space or comma
+ verify_flags = \
+ inst.get('verify_flags').lower().replace(' ',
+ ',').split(',')
+ else:
+ verify_flags = self.verifyFlags
if 'arch' in nevra:
# If arch is specified use it to select the package
@@ -483,6 +457,7 @@ class YUMng(Bcfg2.Client.Tools.PkgTool):
_POs = POs
if len(_POs) == 0:
# Package (name, arch) not installed
+ entry.set('current_exists', 'false')
self.logger.debug(" %s is not installed" % nevraString(nevra))
stat['installed'] = False
package_fail = True
@@ -494,8 +469,23 @@ class YUMng(Bcfg2.Client.Tools.PkgTool):
# Check EVR
if virtPkg:
- self.logger.debug(" Not checking version for virtual package")
- _POs = [po for po in POs] # Make a copy
+ # we need to make sure that the version of the symbol
+ # provided matches the one required in the
+ # configuration
+ vlist = []
+ for attr in ["epoch", "version", "release"]:
+ vlist.append(nevra.get(attr))
+ if tuple(vlist) == (None, None, None):
+ # we just require the package name, no particular
+ # version, so just make a copy of POs since every
+ # package that provides this symbol satisfies the
+ # requirement
+ _POs = [po for po in POs]
+ else:
+ _POs = [po for po in POs
+ if po.checkPrco('provides',
+ (nevra["name"], 'EQ',
+ tuple(vlist)))]
elif entry.get('name') == 'gpg-pubkey':
if 'version' not in nevra:
m = "Skipping verify: gpg-pubkey without an RPM version."
@@ -513,10 +503,33 @@ class YUMng(Bcfg2.Client.Tools.PkgTool):
package_fail = True
stat['version_fail'] = True
# Just chose the first pkg for the error message
- self.logger.info(" %s: Wrong version installed. "
- "Want %s, but have %s" % (entry.get("name"),
- nevraString(nevra),
- nevraString(POs[0])))
+ if virtPkg:
+ provTuple = \
+ [p for p in POs[0].provides
+ if p[0] == entry.get("name")][0]
+ entry.set('current_version', "%s:%s-%s" % provTuple[2])
+ self.logger.info(" %s: Wrong version installed. "
+ "Want %s, but %s provides %s" %
+ (entry.get("name"),
+ nevraString(nevra),
+ nevraString(POs[0]),
+ yum.misc.prco_tuple_to_string(provTuple)))
+ else:
+ entry.set('current_version', "%s:%s-%s.%s" %
+ (POs[0].epoch,
+ POs[0].version,
+ POs[0].release,
+ POs[0].arch))
+ self.logger.info(" %s: Wrong version installed. "
+ "Want %s, but have %s" %
+ (entry.get("name"),
+ nevraString(nevra),
+ nevraString(POs[0])))
+ entry.set('version', "%s:%s-%s.%s" %
+ (nevra.get('epoch', 'any'),
+ nevra.get('version', 'any'),
+ nevra.get('release', 'any'),
+ nevra.get('arch', 'any')))
qtext_versions.append("U(%s)" % str(POs[0]))
continue
@@ -547,7 +560,7 @@ class YUMng(Bcfg2.Client.Tools.PkgTool):
package_fail = True
continue
- # Now take out the Yum specific objects / modlists / unproblmes
+ # Now take out the Yum specific objects / modlists / unproblems
ignores = [ig.get('name') for ig in entry.findall('Ignore')] + \
[ig.get('name') for ig in inst.findall('Ignore')] + \
self.ignores
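The per-instance verify_flags handling above lets an Instance's verify_flags attribute (space- or comma-separated) override the driver-wide default, which now comes from setup["yumng_verify_flags"] rather than a private ConfigParser. A small sketch of that precedence; the flag values are made up:

    def effective_verify_flags(inst_flags, default_flags):
        # the instance attribute wins; it splits on spaces or commas
        if inst_flags:
            return inst_flags.lower().replace(' ', ',').split(',')
        return default_flags

    print(effective_verify_flags("MD5 mtime", ["nosize"]))  # ['md5', 'mtime']
    print(effective_verify_flags(None, ["nosize"]))         # ['nosize']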
diff --git a/src/lib/Bcfg2/Client/Tools/__init__.py b/src/lib/Bcfg2/Client/Tools/__init__.py
index c6cb6e239..026c7ade0 100644
--- a/src/lib/Bcfg2/Client/Tools/__init__.py
+++ b/src/lib/Bcfg2/Client/Tools/__init__.py
@@ -1,16 +1,27 @@
"""This contains all Bcfg2 Tool modules"""
import os
-import stat
import sys
-from subprocess import Popen, PIPE
+import stat
import time
+import pkgutil
+from subprocess import Popen, PIPE
import Bcfg2.Client.XML
-
-__all__ = [tool.split('.')[0] \
- for tool in os.listdir(os.path.dirname(__file__)) \
- if tool.endswith(".py") and tool != "__init__.py"]
-
+from Bcfg2.Bcfg2Py3k import input
+
+if hasattr(pkgutil, 'walk_packages'):
+ submodules = pkgutil.walk_packages(path=__path__)
+else:
+ # python 2.4
+ import glob
+ submodules = []
+ for path in __path__:
+ for submodule in glob.glob(os.path.join(path, "*.py")):
+ mod = os.path.splitext(os.path.basename(submodule))[0]
+ if mod not in ['__init__']:
+ submodules.append((None, mod, True))
+
+__all__ = [m[1] for m in submodules]
drivers = [item for item in __all__ if item not in ['rpmtools']]
default = [item for item in drivers if item not in ['RPM', 'Yum']]
@@ -36,7 +47,7 @@ class executor:
return (p.returncode, output.splitlines())
-class Tool:
+class Tool(object):
"""
All tools subclass this. It defines all interfaces that need to be defined.
"""
@@ -47,10 +58,6 @@ class Tool:
__important__ = []
def __init__(self, logger, setup, config):
- self.__important__ = [entry.get('name') \
- for struct in config for entry in struct \
- if entry.tag == 'Path' and \
- entry.get('important') in ['true', 'True']]
self.setup = setup
self.logger = logger
if not hasattr(self, '__ireq__'):
@@ -59,8 +66,15 @@ class Tool:
self.cmd = executor(logger)
self.modified = []
self.extra = []
- self.handled = [entry for struct in self.config for entry in struct \
- if self.handlesEntry(entry)]
+ self.__important__ = []
+ self.handled = []
+ for struct in config:
+ for entry in struct:
+ if (entry.tag == 'Path' and
+ entry.get('important', 'false').lower() == 'true'):
+ self.__important__.append(entry.get('name'))
+ if self.handlesEntry(entry):
+ self.handled.append(entry)
for filename in self.__execs__:
try:
mode = stat.S_IMODE(os.stat(filename)[stat.ST_MODE])
@@ -130,12 +144,24 @@ class Tool:
'''Build a list of potentially modified POSIX paths for this entry'''
return [entry.get('name') for struct in self.config.getchildren() \
for entry in struct.getchildren() \
- if entry.tag in ['Ignore', 'Path']]
+ if entry.tag == 'Path']
def gatherCurrentData(self, entry):
"""Default implementation of the information gathering routines."""
pass
+ def missing_attrs(self, entry):
+ required = self.__req__[entry.tag]
+ if isinstance(required, dict):
+ required = ["type"]
+ try:
+ required.extend(self.__req__[entry.tag][entry.get("type")])
+ except KeyError:
+ pass
+
+ return [attr for attr in required
+ if attr not in entry.attrib or not entry.attrib[attr]]
+
def canVerify(self, entry):
"""Test if entry has enough information to be verified."""
if not self.handlesEntry(entry):
@@ -148,13 +174,12 @@ class Tool:
entry.get('failure')))
return False
- missing = [attr for attr in self.__req__[entry.tag] \
- if attr not in entry.attrib]
+ missing = self.missing_attrs(entry)
if missing:
- self.logger.error("Incomplete information for entry %s:%s; cannot verify" \
- % (entry.tag, entry.get('name')))
- self.logger.error("\t... due to absence of %s attribute(s)" % \
- (":".join(missing)))
+ self.logger.error("Cannot verify entry %s:%s due to missing "
+ "required attribute(s): %s" %
+ (entry.tag, entry.get('name'),
+ ", ".join(missing)))
try:
self.gatherCurrentData(entry)
except:
@@ -167,6 +192,11 @@ class Tool:
"""Return a list of extra entries."""
return []
+ def primarykey(self, entry):
+ """ return a string that should be unique amongst all entries
+ in the specification """
+ return "%s:%s" % (entry.tag, entry.get("name"))
+
def canInstall(self, entry):
"""Test if entry has enough information to be installed."""
if not self.handlesEntry(entry):
@@ -177,13 +207,12 @@ class Tool:
(entry.tag, entry.get('name')))
return False
- missing = [attr for attr in self.__ireq__[entry.tag] \
- if attr not in entry.attrib or not entry.attrib[attr]]
+ missing = self.missing_attrs(entry)
if missing:
- self.logger.error("Incomplete information for entry %s:%s; cannot install" \
- % (entry.tag, entry.get('name')))
- self.logger.error("\t... due to absence of %s attribute" % \
- (":".join(missing)))
+ self.logger.error("Incomplete information for entry %s:%s; cannot "
+ "install due to absence of attribute(s): %s" %
+ (entry.tag, entry.get('name'),
+ ", ".join(missing)))
return False
return True
@@ -305,8 +334,7 @@ class SvcTool(Tool):
return self.cmd.run(self.get_svc_command(service, restart_target))[0]
def check_service(self, service):
- # not supported for this driver
- return 0
+ return self.cmd.run(self.get_svc_command(service, 'status'))[0] == 0
def Remove(self, services):
""" Dummy implementation of service removal method """
@@ -321,13 +349,12 @@ class SvcTool(Tool):
return
for entry in [ent for ent in bundle if self.handlesEntry(ent)]:
- mode = entry.get('mode', 'default')
- if (mode == 'manual' or
- (mode == 'interactive_only' and
+ restart = entry.get("restart", "true")
+ if (restart.lower() == "false" or
+ (restart.lower() == "interactive" and
not self.setup['interactive'])):
continue
- # need to handle servicemode = (build|default)
- # need to handle mode = (default|supervised)
+
rc = None
if entry.get('status') == 'on':
if self.setup['servicemode'] == 'build':
@@ -336,11 +363,7 @@ class SvcTool(Tool):
if self.setup['interactive']:
prompt = ('Restart service %s?: (y/N): ' %
entry.get('name'))
- # py3k compatibility
- try:
- ans = raw_input(prompt)
- except NameError:
- ans = input(prompt)
+ ans = input(prompt)
if ans not in ['y', 'Y']:
continue
rc = self.restart_service(entry)
@@ -351,3 +374,19 @@ class SvcTool(Tool):
if rc:
self.logger.error("Failed to manipulate service %s" %
(entry.get('name')))
+
+ def Install(self, entries, states):
+ """Install all entries in sublist."""
+ for entry in entries:
+ if entry.get('install', 'true').lower() == 'false':
+ self.logger.info("Service %s: install set to false; skipping "
+ "installation." % entry.get('name'))
+ continue
+ try:
+ func = getattr(self, "Install%s" % (entry.tag))
+ states[entry] = func(entry)
+ if states[entry]:
+ self.modified.append(entry)
+ except:
+ self.logger.error("Unexpected failure of install method for entry type %s"
+ % (entry.tag), exc_info=1)
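The new Tool.missing_attrs() above allows __req__ to be either a flat list of required attribute names or, for typed entries, a dict keyed by the entry's type attribute (with "type" itself always required). A standalone sketch of that lookup, using xml.etree in place of Bcfg2's lxml entries; the REQ table below is illustrative, not Bcfg2's real requirement set:

    import xml.etree.ElementTree as ET

    REQ = {'Path': {'symlink': ['name', 'to'],
                    'device': ['name', 'dev_type']}}

    def missing_attrs(entry):
        required = REQ[entry.tag]
        if isinstance(required, dict):
            # "type" is always required; add the per-type attributes on top
            required = ['type'] + required.get(entry.get('type'), [])
        return [attr for attr in required if not entry.get(attr)]

    entry = ET.Element('Path', type='symlink', name='/etc/motd')
    print(missing_attrs(entry))   # ['to'] -- the link target is missing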
diff --git a/src/lib/Bcfg2/Client/Tools/launchd.py b/src/lib/Bcfg2/Client/Tools/launchd.py
index c022d32ae..6f08559a2 100644
--- a/src/lib/Bcfg2/Client/Tools/launchd.py
+++ b/src/lib/Bcfg2/Client/Tools/launchd.py
@@ -88,11 +88,6 @@ class launchd(Bcfg2.Client.Tools.Tool):
def InstallService(self, entry):
"""Enable or disable launchd item."""
- # don't take any actions for mode='manual'
- if entry.get('mode', 'default') == 'manual':
- self.logger.info("Service %s mode set to manual. Skipping "
- "installation." % (entry.get('name')))
- return False
name = entry.get('name')
if entry.get('status') == 'on':
self.logger.error("Installing service %s" % name)
diff --git a/src/lib/Bcfg2/Client/Tools/rpmtools.py b/src/lib/Bcfg2/Client/Tools/rpmtools.py
index 7441b2c06..32a04262d 100755
--- a/src/lib/Bcfg2/Client/Tools/rpmtools.py
+++ b/src/lib/Bcfg2/Client/Tools/rpmtools.py
@@ -43,7 +43,6 @@ try:
isprelink_imported = True
except ImportError:
isprelink_imported = False
- #print '*********************** isprelink not loaded ***********************'
# If the prelink command is installed on the system then we need to do
# prelink -y on files.
@@ -333,7 +332,6 @@ def prelink_size_check(filename):
fsize += len(data)
elif whitelist_re.search(filename) and not blacklist_re.search(filename):
- # print "***** Warning isprelink extension failed to import ******"
plf.close()
cmd = '/usr/sbin/prelink -y %s 2> /dev/null' \
% (re.escape(filename))
@@ -601,7 +599,6 @@ def rpm_verify_package(vp_ts, header, verify_options):
omitmask |= VERIFY_RDEV
omitmask = ((~omitmask & VERIFY_ATTRS) ^ VERIFY_ATTRS)
- #print 'omitmask =', omitmask
package_results = {}
@@ -754,58 +751,41 @@ class Rpmtscallback(object):
"""
if reason == rpm.RPMCALLBACK_INST_OPEN_FILE:
pass
- #print 'rpm.RPMCALLBACK_INST_OPEN_FILE'
elif reason == rpm.RPMCALLBACK_INST_CLOSE_FILE:
pass
- #print 'rpm.RPMCALLBACK_INST_CLOSE_FILE'
elif reason == rpm.RPMCALLBACK_INST_START:
pass
- #print 'rpm.RPMCALLBACK_INST_START'
elif reason == rpm.RPMCALLBACK_TRANS_PROGRESS or \
reason == rpm.RPMCALLBACK_INST_PROGRESS:
pass
- #print 'rpm.RPMCALLBACK_TRANS_PROGRESS or \
# rpm.RPMCALLBACK_INST_PROGRESS'
elif reason == rpm.RPMCALLBACK_TRANS_START:
pass
- #print 'rpm.RPMCALLBACK_TRANS_START'
elif reason == rpm.RPMCALLBACK_TRANS_STOP:
pass
- #print 'rpm.RPMCALLBACK_TRANS_STOP'
elif reason == rpm.RPMCALLBACK_REPACKAGE_START:
pass
- #print 'rpm.RPMCALLBACK_REPACKAGE_START'
elif reason == rpm.RPMCALLBACK_REPACKAGE_PROGRESS:
pass
- #print 'rpm.RPMCALLBACK_REPACKAGE_PROGRESS'
elif reason == rpm.RPMCALLBACK_REPACKAGE_STOP:
pass
- #print 'rpm.RPMCALLBACK_REPACKAGE_STOP'
elif reason == rpm.RPMCALLBACK_UNINST_PROGRESS:
pass
- #print 'rpm.RPMCALLBACK_UNINST_PROGRESS'
elif reason == rpm.RPMCALLBACK_UNINST_START:
pass
- #print 'rpm.RPMCALLBACK_UNINST_START'
elif reason == rpm.RPMCALLBACK_UNINST_STOP:
pass
- #print 'rpm.RPMCALLBACK_UNINST_STOP'
- #print '***Package ', key, ' deleted ***'
# How do we get at this?
# RPM.modified += key
elif reason == rpm.RPMCALLBACK_UNPACK_ERROR:
pass
- #print 'rpm.RPMCALLBACK_UNPACK_ERROR'
elif reason == rpm.RPMCALLBACK_CPIO_ERROR:
pass
- #print 'rpm.RPMCALLBACK_CPIO_ERROR'
elif reason == rpm.RPMCALLBACK_UNKNOWN:
pass
- #print 'rpm.RPMCALLBACK_UNKNOWN'
else:
print('ERROR - Fell through callBack')
- #print reason, amount, total, key, client_data
def rpm_erase(erase_pkgspecs, erase_flags):
"""
@@ -836,7 +816,6 @@ def rpm_erase(erase_pkgspecs, erase_flags):
erase_ts.addErase(idx)
#for te in erase_ts:
- # print "%s %s:%s-%s.%s" % (te.N(), te.E(), te.V(), te.R(), te.A())
erase_problems = []
if 'nodeps' not in erase_flags:
@@ -847,8 +826,6 @@ def rpm_erase(erase_pkgspecs, erase_flags):
erase_callback = Rpmtscallback()
erase_ts.run(erase_callback.callback, 'Erase')
#else:
- # print 'ERROR - Dependency failures on package erase'
- # print erase_problems
erase_ts.closeDB()
del erase_ts
diff --git a/src/lib/Bcfg2/Component.py b/src/lib/Bcfg2/Component.py
index eb9ea166a..bb0e64102 100644
--- a/src/lib/Bcfg2/Component.py
+++ b/src/lib/Bcfg2/Component.py
@@ -6,6 +6,7 @@ import inspect
import logging
import os
import pydoc
+import socket
import sys
import time
import threading
@@ -59,12 +60,14 @@ def run_component(component_cls, listen_all, location, daemon, pidfile_name,
pidfile.close()
component = component_cls(cfile=cfile, **cls_kwargs)
- up = urlparse(location)
- port = tuple(up[1].split(':'))
- port = (port[0], int(port[1]))
+ hostname, port = urlparse(location)[1].split(':')
+ server_address = socket.getaddrinfo(hostname,
+ port,
+ socket.AF_UNSPEC,
+ socket.SOCK_STREAM)[0][4]
try:
server = XMLRPCServer(listen_all,
- port,
+ server_address,
keyfile=keyfile,
certfile=certfile,
register=register,
@@ -82,23 +85,6 @@ def run_component(component_cls, listen_all, location, daemon, pidfile_name,
server.server_close()
component.shutdown()
-def exposed(func):
- """Mark a method to be exposed publically.
-
- Examples:
- class MyComponent (Component):
- @expose
- def my_method (self, param1, param2):
- do_stuff()
-
- class MyComponent (Component):
- def my_method (self, param1, param2):
- do_stuff()
- my_method = expose(my_method)
-
- """
- func.exposed = True
- return func
def automatic(func, period=10):
"""Mark a method to be run periodically."""
@@ -150,6 +136,11 @@ class Component (object):
self.lock = threading.Lock()
self.instance_statistics = Statistics()
+ def critical_error(self, operation):
+ """Log an error (with traceback) and raise an XML-RPC fault to the client."""
+ logger.error(operation, exc_info=1)
+ raise xmlrpclib.Fault(xmlrpclib.APPLICATION_ERROR,
+ "Critical unexpected failure: %s" % operation)
+
def do_tasks(self):
"""Perform automatic tasks for the component.
@@ -213,7 +204,8 @@ class Component (object):
method_func = self._resolve_exposed_method(method)
except NoExposedMethod:
self.logger.error("Unknown method %s" % (method))
- raise xmlrpclib.Fault(7, "Unknown method %s" % method)
+ raise xmlrpclib.Fault(xmlrpclib.METHOD_NOT_FOUND,
+ "Unknown method %s" % method)
except Exception:
e = sys.exc_info()[1]
if getattr(e, "log", True):
@@ -246,14 +238,7 @@ class Component (object):
raise xmlrpclib.Fault(getattr(e, "fault_code", 1), str(e))
return result
- def listMethods(self):
- """Custom XML-RPC introspective method list."""
- return [
- name for name, func in inspect.getmembers(self, callable)
- if getattr(func, "exposed", False)
- ]
- listMethods = exposed(listMethods)
-
+ @exposed
def methodHelp(self, method_name):
"""Custom XML-RPC introspective method help.
@@ -266,19 +251,18 @@ class Component (object):
except NoExposedMethod:
return ""
return pydoc.getdoc(func)
- methodHelp = exposed(methodHelp)
+ @exposed
def get_name(self):
"""The name of the component."""
return self.name
- get_name = exposed(get_name)
+ @exposed
def get_implementation(self):
"""The implementation of the component."""
return self.implementation
- get_implementation = exposed(get_implementation)
+ @exposed
def get_statistics(self, _):
"""Get current statistics about component execution"""
return self.instance_statistics.display()
- get_statistics = exposed(get_statistics)
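The run_component() change above replaces the naive host/port split with socket.getaddrinfo(), so the server binds to whatever address family (IPv4 or IPv6) the configured hostname resolves to. A small, Bcfg2-independent sketch of that resolution; the location URL is just an example:

    import socket
    try:
        from urlparse import urlparse      # Python 2
    except ImportError:
        from urllib.parse import urlparse  # Python 3

    location = "https://localhost:6789"
    hostname, port = urlparse(location)[1].split(':')
    # AF_UNSPEC lets getaddrinfo choose the family; [0][4] is the sockaddr
    server_address = socket.getaddrinfo(hostname, port, socket.AF_UNSPEC,
                                        socket.SOCK_STREAM)[0][4]
    print(server_address)   # e.g. ('127.0.0.1', 6789) or ('::1', 6789, 0, 0)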
diff --git a/src/lib/Bcfg2/Encryption.py b/src/lib/Bcfg2/Encryption.py
new file mode 100755
index 000000000..62b22d7de
--- /dev/null
+++ b/src/lib/Bcfg2/Encryption.py
@@ -0,0 +1,75 @@
+#!/usr/bin/python -Ott
+
+import os
+import base64
+from M2Crypto import Rand
+from M2Crypto.EVP import Cipher, EVPError
+from Bcfg2.Bcfg2Py3k import StringIO
+
+try:
+ from hashlib import md5
+except ImportError:
+ from md5 import md5
+
+ENCRYPT = 1
+DECRYPT = 0
+ALGORITHM = "aes_256_cbc"
+IV = '\0' * 16
+
+Rand.rand_seed(os.urandom(1024))
+
+def _cipher_filter(cipher, instr):
+ inbuf = StringIO(instr)
+ outbuf = StringIO()
+ while 1:
+ buf = inbuf.read()
+ if not buf:
+ break
+ outbuf.write(cipher.update(buf))
+ outbuf.write(cipher.final())
+ rv = outbuf.getvalue()
+ inbuf.close()
+ outbuf.close()
+ return rv
+
+def str_encrypt(plaintext, key, iv=IV, algorithm=ALGORITHM, salt=None):
+ """ encrypt a string """
+ cipher = Cipher(alg=algorithm, key=key, iv=iv, op=ENCRYPT, salt=salt)
+ return _cipher_filter(cipher, plaintext)
+
+def str_decrypt(crypted, key, iv=IV, algorithm=ALGORITHM):
+ """ decrypt a string """
+ cipher = Cipher(alg=algorithm, key=key, iv=iv, op=DECRYPT)
+ return _cipher_filter(cipher, crypted)
+
+def ssl_decrypt(data, passwd, algorithm=ALGORITHM):
+ """ decrypt openssl-encrypted data """
+ # base64-decode the data if necessary
+ try:
+ data = base64.b64decode(data)
+ except TypeError:
+ # already decoded
+ pass
+
+ salt = data[8:16]
+ hashes = [md5(passwd + salt).digest()]
+ for i in range(1,3):
+ hashes.append(md5(hashes[i-1] + passwd + salt).digest())
+ key = hashes[0] + hashes[1]
+ iv = hashes[2]
+
+ return str_decrypt(data[16:], key=key, iv=iv)
+
+def ssl_encrypt(plaintext, passwd, algorithm=ALGORITHM, salt=None):
+ """ encrypt data in a format that is openssl compatible """
+ if salt is None:
+ salt = Rand.rand_bytes(8)
+
+ hashes = [md5(passwd + salt).digest()]
+ for i in range(1,3):
+ hashes.append(md5(hashes[i-1] + passwd + salt).digest())
+ key = hashes[0] + hashes[1]
+ iv = hashes[2]
+
+ crypted = str_encrypt(plaintext, key=key, salt=salt, iv=iv)
+ return base64.b64encode("Salted__" + salt + crypted) + "\n"
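The new Encryption module above produces OpenSSL-compatible output: the key and IV are derived from the passphrase and an 8-byte salt using the classic MD5-based EVP_BytesToKey scheme, and the result is base64 text beginning with the "Salted__" magic. A hedged usage sketch (assumes a Python 2 interpreter with M2Crypto installed and Bcfg2.Encryption on the path; the passphrase and plaintext are placeholders):

    from Bcfg2 import Encryption

    passphrase = "s3cret"
    crypted = Encryption.ssl_encrypt("hello world", passphrase)
    # round-trips within the module...
    assert Encryption.ssl_decrypt(crypted, passphrase) == "hello world"
    # ...and should also decrypt with OpenSSL builds that default to MD5
    # key derivation (or pass "-md md5" to newer releases):
    #   openssl enc -d -aes-256-cbc -a -k s3cret -in ciphertext.b64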
diff --git a/src/lib/Bcfg2/Logger.py b/src/lib/Bcfg2/Logger.py
index 81b45550f..26c1d52f6 100644
--- a/src/lib/Bcfg2/Logger.py
+++ b/src/lib/Bcfg2/Logger.py
@@ -57,9 +57,11 @@ class TermiosFormatter(logging.Formatter):
lines = int(math.ceil(float(len(record.msg)) / columns))
for lineNumber in range(lines):
indices = [idx for idx in [(colNum * lines) + lineNumber
- for colNum in range(columns)] if idx < len(record.msg)]
- format = (len(indices) * (" %%-%ds " % columnWidth))
- returns.append(format % tuple([record.msg[idx] for idx in indices]))
+ for colNum in range(columns)]
+ if idx < len(record.msg)]
+ retformat = (len(indices) * (" %%-%ds " % columnWidth))
+ returns.append(retformat % tuple([record.msg[idx]
+ for idx in indices]))
else:
returns.append(str(record.msg))
if record.exc_info:
@@ -86,6 +88,8 @@ class FragmentingSysLogHandler(logging.handlers.SysLogHandler):
error = record.exc_info
record.exc_info = None
msgdata = record.msg
+ if len(msgdata) == 0:
+ return
while msgdata:
newrec = copy.copy(record)
newrec.msg = msgdata[:250]
@@ -122,20 +126,15 @@ class FragmentingSysLogHandler(logging.handlers.SysLogHandler):
"""
pass
-
-def add_console_handler(level):
+def add_console_handler(level=logging.DEBUG):
"""Add a logging handler that logs at a level to sys.stdout."""
console = logging.StreamHandler(sys.stdout)
- if level is True:
- console.setLevel(logging.DEBUG)
- else:
- console.setLevel(level)
+ console.setLevel(level)
# tell the handler to use this format
console.setFormatter(TermiosFormatter())
logging.root.addHandler(console)
-
-def add_syslog_handler(procname, syslog_facility):
+def add_syslog_handler(procname, syslog_facility, level=logging.DEBUG):
"""Add a logging handler that logs as procname to syslog_facility."""
try:
try:
@@ -146,7 +145,7 @@ def add_syslog_handler(procname, syslog_facility):
syslog = FragmentingSysLogHandler(procname,
('localhost', 514),
syslog_facility)
- syslog.setLevel(logging.DEBUG)
+ syslog.setLevel(level)
syslog.setFormatter(logging.Formatter('%(name)s[%(process)d]: %(message)s'))
logging.root.addHandler(syslog)
except socket.error:
@@ -154,15 +153,13 @@ def add_syslog_handler(procname, syslog_facility):
except:
print("Failed to activate syslogging")
-
-def add_file_handler(to_file):
+def add_file_handler(to_file, level=logging.DEBUG):
"""Add a logging handler that logs to to_file."""
filelog = logging.FileHandler(to_file)
- filelog.setLevel(logging.DEBUG)
+ filelog.setLevel(level)
filelog.setFormatter(logging.Formatter('%(asctime)s %(name)s[%(process)d]: %(message)s'))
logging.root.addHandler(filelog)
-
def setup_logging(procname, to_console=True, to_syslog=True,
syslog_facility='daemon', level=0, to_file=None):
"""Setup logging for Bcfg2 software."""
@@ -170,11 +167,16 @@ def setup_logging(procname, to_console=True, to_syslog=True,
return
if to_console:
- add_console_handler(to_console)
+ if to_console is True:
+ clvl = min(logging.WARNING, level)
+ else:
+ clvl = min(to_console, level)
+ add_console_handler(clvl)
if to_syslog:
- add_syslog_handler(procname, syslog_facility)
+ slvl = min(level, logging.INFO)
+ add_syslog_handler(procname, syslog_facility, level=slvl)
if to_file is not None:
- add_file_handler(to_file)
+ add_file_handler(to_file, level=level)
- logging.root.setLevel(level)
+ logging.root.setLevel(logging.DEBUG)
logging.already_setup = True
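With the setup_logging() rework above, the root logger is pinned at DEBUG and filtering moves into the individual handlers: the console gets min(WARNING, level) (or min(to_console, level) when a numeric level is passed), syslog gets min(level, INFO), and the file handler gets level directly. A tiny sketch of that arithmetic; remember that lower logging constants mean more verbose:

    import logging

    def handler_levels(level, to_console=True):
        console = min(logging.WARNING, level) if to_console is True \
            else min(to_console, level)
        return {'console': console,
                'syslog': min(level, logging.INFO),
                'file': level}

    print(handler_levels(logging.WARNING))  # console 30, syslog 20, file 30
    print(handler_levels(logging.DEBUG))    # everything at 10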
diff --git a/src/lib/Bcfg2/Options.py b/src/lib/Bcfg2/Options.py
index dfb062341..1883bc222 100644
--- a/src/lib/Bcfg2/Options.py
+++ b/src/lib/Bcfg2/Options.py
@@ -1,25 +1,24 @@
"""Option parsing library for utilities."""
+import copy
import getopt
-import re
+import inspect
import os
-import sys
+import re
import shlex
+import sys
import Bcfg2.Client.Tools
# Compatibility imports
from Bcfg2.Bcfg2Py3k import ConfigParser
+from Bcfg2.version import __version__
-def bool_cook(x):
- if x:
- return True
- else:
- return False
class OptionFailure(Exception):
pass
-DEFAULT_CONFIG_LOCATION = '/etc/bcfg2.conf' #/etc/bcfg2.conf
-DEFAULT_INSTALL_PREFIX = '/usr' #/usr
+DEFAULT_CONFIG_LOCATION = '/etc/bcfg2.conf'
+DEFAULT_INSTALL_PREFIX = '/usr'
+
class DefaultConfigParser(ConfigParser.ConfigParser):
def get(self, section, option, **kwargs):
@@ -55,16 +54,9 @@ class DefaultConfigParser(ConfigParser.ConfigParser):
class Option(object):
- def get_cooked_value(self, value):
- if self.boolean:
- return True
- if self.cook:
- return self.cook(value)
- else:
- return value
-
def __init__(self, desc, default, cmd=False, odesc=False,
- env=False, cf=False, cook=False, long_arg=False):
+ env=False, cf=False, cook=False, long_arg=False,
+ deprecated_cf=None):
self.desc = desc
self.default = default
self.cmd = cmd
@@ -72,33 +64,38 @@ class Option(object):
if not self.long:
if cmd and (cmd[0] != '-' or len(cmd) != 2):
raise OptionFailure("Poorly formed command %s" % cmd)
- else:
- if cmd and (not cmd.startswith('--')):
- raise OptionFailure("Poorly formed command %s" % cmd)
+ elif cmd and (not cmd.startswith('--')):
+ raise OptionFailure("Poorly formed command %s" % cmd)
self.odesc = odesc
self.env = env
self.cf = cf
+ self.deprecated_cf = deprecated_cf
self.boolean = False
- if not odesc and not cook:
+ if not odesc and not cook and isinstance(self.default, bool):
self.boolean = True
self.cook = cook
+ def get_cooked_value(self, value):
+ if self.boolean:
+ return True
+ if self.cook:
+ return self.cook(value)
+ else:
+ return value
+
def buildHelpMessage(self):
- msg = ''
- if self.cmd:
- if not self.long:
- msg = self.cmd.ljust(3)
- else:
- msg = self.cmd
- if self.odesc:
- if self.long:
- msg = "%-28s" % ("%s=%s" % (self.cmd, self.odesc))
- else:
- msg += '%-25s' % (self.odesc)
+ vals = []
+ if not self.cmd:
+ return ''
+ if self.odesc:
+ if self.long:
+ vals.append("%s=%s" % (self.cmd, self.odesc))
else:
- msg += '%-25s' % ('')
- msg += "%s\n" % self.desc
- return msg
+ vals.append("%s %s" % (self.cmd, self.odesc))
+ else:
+ vals.append(self.cmd)
+ vals.append(self.desc)
+ return " %-28s %s\n" % tuple(vals)
def buildGetopt(self):
gstr = ''
@@ -112,7 +109,7 @@ class Option(object):
def buildLongGetopt(self):
if self.odesc:
- return self.cmd[2:]+'='
+ return self.cmd[2:] + '='
else:
return self.cmd[2:]
@@ -127,7 +124,10 @@ class Option(object):
self.value = True
return
if self.cmd and self.cmd in rawopts:
- data = rawopts[rawopts.index(self.cmd) + 1]
+ if self.odesc:
+ data = rawopts[rawopts.index(self.cmd) + 1]
+ else:
+ data = True
self.value = self.get_cooked_value(data)
return
# No command line option found
@@ -140,9 +140,20 @@ class Option(object):
return
except (ConfigParser.NoSectionError, ConfigParser.NoOptionError):
pass
+ if self.deprecated_cf:
+ try:
+ self.value = self.get_cooked_value(configparser.get(*self.deprecated_cf))
+ print("Warning: [%s] %s is deprecated, use [%s] %s instead"
+ % (self.deprecated_cf[0], self.deprecated_cf[1],
+ self.cf[0], self.cf[1]))
+ return
+ except (ConfigParser.NoSectionError, ConfigParser.NoOptionError):
+ pass
+
# Default value not cooked
self.value = self.default
+
class OptionSet(dict):
def __init__(self, *args, **kwargs):
dict.__init__(self, *args)
@@ -154,8 +165,14 @@ class OptionSet(dict):
self.cfp = DefaultConfigParser()
if (len(self.cfp.read(self.cfile)) == 0 and
('quiet' not in kwargs or not kwargs['quiet'])):
- print("Warning! Unable to read specified configuration file: %s" %
- self.cfile)
+ # suppress warnings if called from bcfg2-admin init
+ caller = inspect.stack()[-1][1].split('/')[-1]
+ if caller == 'bcfg2-admin' and len(sys.argv) > 1:
+ if sys.argv[1] == 'init':
+ return
+ else:
+ print("Warning! Unable to read specified configuration file: %s" %
+ self.cfile)
def buildGetopt(self):
return ''.join([opt.buildGetopt() for opt in list(self.values())])
@@ -170,18 +187,28 @@ class OptionSet(dict):
hlist = [] # list of _non-empty_ help messages
for opt in list(self.values()):
hm = opt.buildHelpMessage()
- if hm != '':
+ if hm:
hlist.append(hm)
- return ' '.join(hlist)
+ return ''.join(hlist)
def helpExit(self, msg='', code=1):
if msg:
print(msg)
- print("Usage:\n %s" % self.buildHelpMessage())
+ print("Usage:")
+ print(self.buildHelpMessage())
+ raise SystemExit(code)
+
+ def versionExit(self, code=0):
+ print("%s %s on Python %s" %
+ (os.path.basename(sys.argv[0]),
+ __version__,
+ ".".join(str(v) for v in sys.version_info[0:3])))
raise SystemExit(code)
def parse(self, argv, do_getopt=True):
'''Parse options from command line.'''
+ if VERSION not in self.values():
+ self['__version__'] = VERSION
if do_getopt:
try:
opts, args = getopt.getopt(argv, self.buildGetopt(),
@@ -191,6 +218,8 @@ class OptionSet(dict):
self.helpExit(err)
if '-h' in argv:
self.helpExit('', 0)
+ if '--version' in argv:
+ self.versionExit()
self['args'] = args
for key in list(self.keys()):
if key == 'args':
@@ -203,17 +232,22 @@ class OptionSet(dict):
if hasattr(option, 'value'):
val = option.value
self[key] = val
+ if "__version__" in self:
+ del self['__version__']
+
def list_split(c_string):
if c_string:
return re.split("\s*,\s*", c_string)
return []
+
def colon_split(c_string):
if c_string:
return c_string.split(':')
return []
+
def get_bool(s):
# these values copied from ConfigParser.RawConfigParser.getboolean
# with the addition of True and False
@@ -225,210 +259,784 @@ def get_bool(s):
return False
else:
raise ValueError
+
+
+"""
+Options:
+
+ The Option constructor accepts the following keyword arguments:
+ default: default value for the option
+ cmd: command line switch
+ odesc: option description
+ env: environment variable to read the value from
+ cf: tuple containing the config file (section, option)
+ deprecated_cf: deprecated config file (section, option), read as a
+ fallback with a warning
+ cook: callable used to parse the raw option value
+ long_arg: (True|False) specifies whether cmd is a long argument
+"""
# General options
-CFILE = Option('Specify configuration file', DEFAULT_CONFIG_LOCATION, cmd='-C',
- odesc='<conffile>')
-LOCKFILE = Option('Specify lockfile',
- "/var/lock/bcfg2.run",
- cf=('components', 'lockfile'),
- odesc='<Path to lockfile>')
-HELP = Option('Print this usage message', False, cmd='-h')
-DEBUG = Option("Enable debugging output", False, cmd='-d')
-VERBOSE = Option("Enable verbose output", False, cmd='-v')
-DAEMON = Option("Daemonize process, storing pid", False,
- cmd='-D', odesc="<pidfile>")
-INSTALL_PREFIX = Option('Installation location', cf=('server', 'prefix'),
- default=DEFAULT_INSTALL_PREFIX, odesc='</path>')
-SENDMAIL_PATH = Option('Path to sendmail', cf=('reports', 'sendmailpath'),
- default='/usr/lib/sendmail')
-INTERACTIVE = Option('Run interactively, prompting the user for each change',
- default=False,
- cmd='-I', )
-ENCODING = Option('Encoding of cfg files',
- default='UTF-8',
- cmd='-E',
- odesc='<encoding>',
- cf=('components', 'encoding'))
-PARANOID_PATH = Option('Specify path for paranoid file backups',
- default='/var/cache/bcfg2', cf=('paranoid', 'path'),
- odesc='<paranoid backup path>')
-PARANOID_MAX_COPIES = Option('Specify the number of paranoid copies you want',
- default=1, cf=('paranoid', 'max_copies'),
- odesc='<max paranoid copies>')
-OMIT_LOCK_CHECK = Option('Omit lock check', default=False, cmd='-O')
-CORE_PROFILE = Option('profile',
- default=False, cmd='-p', )
-FILES_ON_STDIN = Option('Operate on a list of files supplied on stdin',
- cmd='--stdin', default=False, long_arg=True)
-SCHEMA_PATH = Option('Path to XML Schema files', cmd='--schema',
- odesc='<schema path>',
- default="%s/share/bcfg2/schemas" % DEFAULT_INSTALL_PREFIX,
- long_arg=True)
-REQUIRE_SCHEMA = Option("Require property files to have matching schema files",
- cmd="--require-schema", default=False, long_arg=True)
-
-# Metadata options
-MDATA_OWNER = Option('Default Path owner',
- default='root', cf=('mdata', 'owner'),
- odesc='owner permissions')
-MDATA_GROUP = Option('Default Path group',
- default='root', cf=('mdata', 'group'),
- odesc='group permissions')
-MDATA_IMPORTANT = Option('Default Path priority (importance)',
- default='False', cf=('mdata', 'important'),
- odesc='Important entries are installed first')
-MDATA_PERMS = Option('Default Path permissions',
- '644', cf=('mdata', 'perms'),
- odesc='octal permissions')
-MDATA_PARANOID = Option('Default Path paranoid setting',
- 'true', cf=('mdata', 'paranoid'),
- odesc='Path paranoid setting')
-MDATA_SENSITIVE = Option('Default Path sensitive setting',
- 'false', cf=('mdata', 'sensitive'),
- odesc='Path sensitive setting')
+CFILE = \
+ Option('Specify configuration file',
+ default=DEFAULT_CONFIG_LOCATION,
+ cmd='-C',
+ odesc='<conffile>')
+LOCKFILE = \
+ Option('Specify lockfile',
+ default='/var/lock/bcfg2.run',
+ odesc='<Path to lockfile>',
+ cf=('components', 'lockfile'))
+HELP = \
+ Option('Print this usage message',
+ default=False,
+ cmd='-h')
+VERSION = \
+ Option('Print the version and exit',
+ default=False,
+ cmd='--version', long_arg=True)
+DAEMON = \
+ Option("Daemonize process, storing pid",
+ default=None,
+ cmd='-D',
+ odesc='<pidfile>')
+INSTALL_PREFIX = \
+ Option('Installation location',
+ default=DEFAULT_INSTALL_PREFIX,
+ odesc='</path>',
+ cf=('server', 'prefix'))
+SENDMAIL_PATH = \
+ Option('Path to sendmail',
+ default='/usr/lib/sendmail',
+ cf=('reports', 'sendmailpath'))
+INTERACTIVE = \
+ Option('Run interactively, prompting the user for each change',
+ default=False,
+ cmd='-I', )
+ENCODING = \
+ Option('Encoding of cfg files',
+ default='UTF-8',
+ cmd='-E',
+ odesc='<encoding>',
+ cf=('components', 'encoding'))
+PARANOID_PATH = \
+ Option('Specify path for paranoid file backups',
+ default='/var/cache/bcfg2',
+ odesc='<paranoid backup path>',
+ cf=('paranoid', 'path'))
+PARANOID_MAX_COPIES = \
+ Option('Specify the number of paranoid copies you want',
+ default=1,
+ odesc='<max paranoid copies>',
+ cf=('paranoid', 'max_copies'))
+OMIT_LOCK_CHECK = \
+ Option('Omit lock check',
+ default=False,
+ cmd='-O')
+CORE_PROFILE = \
+ Option('profile',
+ default=False,
+ cmd='-p')
+SCHEMA_PATH = \
+ Option('Path to XML Schema files',
+ default='%s/share/bcfg2/schemas' % DEFAULT_INSTALL_PREFIX,
+ cmd='--schema',
+ odesc='<schema path>',
+ cf=('lint', 'schema'),
+ long_arg=True)
+INTERPRETER = \
+ Option("Python interpreter to use",
+ default='best',
+ cmd="--interpreter",
+ odesc='<python|bpython|ipython|best>',
+ cf=('bcfg2-info', 'interpreter'),
+ long_arg=True)
+
+# Metadata options (mdata section)
+MDATA_OWNER = \
+ Option('Default Path owner',
+ default='root',
+ odesc='owner permissions',
+ cf=('mdata', 'owner'))
+MDATA_GROUP = \
+ Option('Default Path group',
+ default='root',
+ odesc='group permissions',
+ cf=('mdata', 'group'))
+MDATA_IMPORTANT = \
+ Option('Default Path priority (importance)',
+ default='False',
+ odesc='Important entries are installed first',
+ cf=('mdata', 'important'))
+MDATA_PERMS = \
+ Option('Default Path permissions',
+ default='644',
+ odesc='octal permissions',
+ cf=('mdata', 'perms'))
+MDATA_SECONTEXT = \
+ Option('Default SELinux context',
+ default='__default__',
+ odesc='SELinux context',
+ cf=('mdata', 'secontext'))
+MDATA_PARANOID = \
+ Option('Default Path paranoid setting',
+ default='true',
+ odesc='Path paranoid setting',
+ cf=('mdata', 'paranoid'))
+MDATA_SENSITIVE = \
+ Option('Default Path sensitive setting',
+ default='false',
+ odesc='Path sensitive setting',
+ cf=('mdata', 'sensitive'))
# Server options
-SERVER_REPOSITORY = Option('Server repository path', '/var/lib/bcfg2',
- cf=('server', 'repository'), cmd='-Q',
- odesc='<repository path>')
-SERVER_PLUGINS = Option('Server plugin list', cf=('server', 'plugins'),
- # default server plugins
- default=[
- 'Bundler',
- 'Cfg',
- 'Metadata',
- 'Pkgmgr',
- 'Rules',
- 'SSHbase',
- ],
- cook=list_split)
-SERVER_MCONNECT = Option('Server Metadata Connector list', cook=list_split,
- cf=('server', 'connectors'), default=['Probes'], )
-SERVER_FILEMONITOR = Option('Server file monitor', cf=('server', 'filemonitor'),
- default='default', odesc='File monitoring driver')
-SERVER_LISTEN_ALL = Option('Listen on all interfaces',
- cf=('server', 'listen_all'),
- cmd='--listen-all',
- default=False,
- long_arg=True,
- cook=get_bool,
- odesc='True|False')
-SERVER_LOCATION = Option('Server Location', cf=('components', 'bcfg2'),
- default='https://localhost:6789', cmd='-S',
- odesc='https://server:port')
-SERVER_STATIC = Option('Server runs on static port', cf=('components', 'bcfg2'),
- default=False, cook=bool_cook)
-SERVER_KEY = Option('Path to SSL key', cf=('communication', 'key'),
- default=False, cmd='--ssl-key', odesc='<ssl key>',
- long_arg=True)
-SERVER_CERT = Option('Path to SSL certificate', default='/etc/bcfg2.key',
- cf=('communication', 'certificate'), odesc='<ssl cert>')
-SERVER_CA = Option('Path to SSL CA Cert', default=None,
- cf=('communication', 'ca'), odesc='<ca cert>')
-SERVER_PASSWORD = Option('Communication Password', cmd='-x', odesc='<password>',
- cf=('communication', 'password'), default=False)
-SERVER_PROTOCOL = Option('Server Protocol', cf=('communication', 'procotol'),
- default='xmlrpc/ssl')
+SERVER_REPOSITORY = \
+ Option('Server repository path',
+ default='/var/lib/bcfg2',
+ cmd='-Q',
+ odesc='<repository path>',
+ cf=('server', 'repository'))
+SERVER_PLUGINS = \
+ Option('Server plugin list',
+ # default server plugins
+ default=['Bundler', 'Cfg', 'Metadata', 'Pkgmgr', 'Rules', 'SSHbase'],
+ cf=('server', 'plugins'),
+ cook=list_split)
+SERVER_MCONNECT = \
+ Option('Server Metadata Connector list',
+ default=['Probes'],
+ cf=('server', 'connectors'),
+ cook=list_split)
+SERVER_FILEMONITOR = \
+ Option('Server file monitor',
+ default='default',
+ odesc='File monitoring driver',
+ cf=('server', 'filemonitor'))
+SERVER_FAM_IGNORE = \
+ Option('File globs to ignore',
+ default=['*~', '*#', '.#*', '*.swp', '.*.swx', 'SCCS', '.svn',
+ '4913', '.gitignore',],
+ cf=('server', 'ignore_files'),
+ cook=list_split)
+SERVER_LISTEN_ALL = \
+ Option('Listen on all interfaces',
+ default=False,
+ cmd='--listen-all',
+ cf=('server', 'listen_all'),
+ cook=get_bool,
+ long_arg=True)
+SERVER_LOCATION = \
+ Option('Server Location',
+ default='https://localhost:6789',
+ cmd='-S',
+ odesc='https://server:port',
+ cf=('components', 'bcfg2'))
+SERVER_STATIC = \
+ Option('Server runs on static port',
+ default=False,
+ cf=('components', 'bcfg2'))
+SERVER_KEY = \
+ Option('Path to SSL key',
+ default=None,
+ cmd='--ssl-key',
+ odesc='<ssl key>',
+ cf=('communication', 'key'),
+ long_arg=True)
+SERVER_CERT = \
+ Option('Path to SSL certificate',
+ default='/etc/bcfg2.key',
+ odesc='<ssl cert>',
+ cf=('communication', 'certificate'))
+SERVER_CA = \
+ Option('Path to SSL CA Cert',
+ default=None,
+ odesc='<ca cert>',
+ cf=('communication', 'ca'))
+SERVER_PASSWORD = \
+ Option('Communication Password',
+ default=None,
+ cmd='-x',
+ odesc='<password>',
+ cf=('communication', 'password'))
+SERVER_PROTOCOL = \
+ Option('Server Protocol',
+ default='xmlrpc/ssl',
+ cf=('communication', 'procotol'))
+SERVER_BACKEND = \
+ Option('Server Backend',
+ default='best',
+ cf=('server', 'backend'))
+
+# database options
+DB_ENGINE = \
+ Option('Database engine',
+ default='sqlite3',
+ cf=('database', 'engine'),
+ deprecated_cf=('statistics', 'database_engine'))
+DB_NAME = \
+ Option('Database name',
+ default=os.path.join(SERVER_REPOSITORY.default, "bcfg2.sqlite"),
+ cf=('database', 'name'),
+ deprecated_cf=('statistics', 'database_name'))
+DB_USER = \
+ Option('Database username',
+ default=None,
+ cf=('database', 'user'),
+ deprecated_cf=('statistics', 'database_user'))
+DB_PASSWORD = \
+ Option('Database password',
+ default=None,
+ cf=('database', 'password'),
+ deprecated_cf=('statistics', 'database_password'))
+DB_HOST = \
+ Option('Database host',
+ default='localhost',
+ cf=('database', 'host'),
+ deprecated_cf=('statistics', 'database_host'))
+DB_PORT = \
+ Option('Database port',
+ default='',
+ cf=('database', 'port'),
+ deprecated_cf=('statistics', 'database_port'))
+
+# Django options
+WEB_CFILE = \
+ Option('Web interface configuration file',
+ default="/etc/bcfg2-web.conf",
+ cmd='-W',
+ cf=('statistics', 'config'),)
+DJANGO_TIME_ZONE = \
+ Option('Django timezone',
+ default=None,
+ cf=('statistics', 'time_zone'),)
+DJANGO_DEBUG = \
+ Option('Django debug',
+ default=None,
+ cf=('statistics', 'web_debug'),
+ cook=get_bool,)
+# Django options
+DJANGO_WEB_PREFIX = \
+ Option('Web prefix',
+ default=None,
+ cf=('statistics', 'web_prefix'),)
+
+
# Client options
-CLIENT_KEY = Option('Path to SSL key', cf=('communication', 'key'),
- default=None, cmd="--ssl-key", odesc='<ssl key>',
- long_arg=True)
-CLIENT_CERT = Option('Path to SSL certificate', default=None, cmd="--ssl-cert",
- cf=('communication', 'certificate'), odesc='<ssl cert>',
- long_arg=True)
-CLIENT_CA = Option('Path to SSL CA Cert', default=None, cmd="--ca-cert",
- cf=('communication', 'ca'), odesc='<ca cert>',
- long_arg=True)
-CLIENT_SCNS = Option('List of server commonNames', default=None, cmd="--ssl-cns",
- cf=('communication', 'serverCommonNames'),
- odesc='<commonName1:commonName2>', cook=list_split,
- long_arg=True)
-CLIENT_PROFILE = Option('Assert the given profile for the host',
- default=False, cmd='-p', odesc="<profile>")
-CLIENT_RETRIES = Option('The number of times to retry network communication',
- default='3', cmd='-R', cf=('communication', 'retries'),
- odesc="<retry count>")
-CLIENT_DRYRUN = Option('Do not actually change the system',
- default=False, cmd='-n', )
-CLIENT_EXTRA_DISPLAY = Option('enable extra entry output',
- default=False, cmd='-e', )
-CLIENT_PARANOID = Option('Make automatic backups of config files',
- default=False,
- cmd='-P',
- cook=get_bool,
- cf=('client', 'paranoid'))
-CLIENT_DRIVERS = Option('Specify tool driver set', cmd='-D',
- cf=('client', 'drivers'),
- odesc="<driver1,driver2>", cook=list_split,
- default=Bcfg2.Client.Tools.default)
-CLIENT_CACHE = Option('Store the configuration in a file',
- default=False, cmd='-c', odesc="<cache path>")
-CLIENT_REMOVE = Option('Force removal of additional configuration items',
- default=False, cmd='-r', odesc="<entry type|all>")
-CLIENT_BUNDLE = Option('Only configure the given bundle(s)', default=[],
- cmd='-b', odesc='<bundle:bundle>', cook=colon_split)
-CLIENT_BUNDLEQUICK = Option('only verify/configure the given bundle(s)', default=False,
- cmd='-Q')
-CLIENT_INDEP = Option('Only configure independent entries, ignore bundles', default=False,
- cmd='-z')
-CLIENT_KEVLAR = Option('Run in kevlar (bulletproof) mode', default=False,
- cmd='-k', )
-CLIENT_DLIST = Option('Run client in server decision list mode', default='none',
- cf=('client', 'decision'),
- cmd='-l', odesc='<whitelist|blacklist|none>')
-CLIENT_FILE = Option('Configure from a file rather than querying the server',
- default=False, cmd='-f', odesc='<specification path>')
-CLIENT_QUICK = Option('Disable some checksum verification', default=False,
- cmd='-q', )
-CLIENT_USER = Option('The user to provide for authentication', default='root',
- cmd='-u', cf=('communication', 'user'), odesc='<user>')
-CLIENT_SERVICE_MODE = Option('Set client service mode', default='default',
- cmd='-s', odesc='<default|disabled|build>')
-CLIENT_TIMEOUT = Option('Set the client XML-RPC timeout', default=90,
- cmd='-t', cf=('communication', 'timeout'),
- odesc='<timeout>')
-
-# bcfg2-test options
-TEST_NOSEOPTS = Option('Options to pass to nosetests', default=[],
- cmd='--nose-options', cf=('bcfg2_test', 'nose_options'),
- odesc='<opts>', long_arg=True, cook=shlex.split)
-TEST_IGNORE = Option('Ignore these entries if they fail to build.', default=[],
- cmd='--ignore',
- cf=('bcfg2_test', 'ignore_entries'), long_arg=True,
- odesc='<Type>:<name>,<Type>:<name>', cook=list_split)
-
-# APT client tool options
-CLIENT_APT_TOOLS_INSTALL_PATH = Option('Apt tools install path',
- cf=('APT', 'install_path'),
- default='/usr')
-CLIENT_APT_TOOLS_VAR_PATH = Option('Apt tools var path',
- cf=('APT', 'var_path'), default='/var')
-CLIENT_SYSTEM_ETC_PATH = Option('System etc path', cf=('APT', 'etc_path'),
- default='/etc')
+CLIENT_KEY = \
+ Option('Path to SSL key',
+ default=None,
+ cmd='--ssl-key',
+ odesc='<ssl key>',
+ cf=('communication', 'key'),
+ long_arg=True)
+CLIENT_CERT = \
+ Option('Path to SSL certificate',
+ default=None,
+ cmd='--ssl-cert',
+ odesc='<ssl cert>',
+ cf=('communication', 'certificate'),
+ long_arg=True)
+CLIENT_CA = \
+ Option('Path to SSL CA Cert',
+ default=None,
+ cmd='--ca-cert',
+ odesc='<ca cert>',
+ cf=('communication', 'ca'),
+ long_arg=True)
+CLIENT_SCNS = \
+ Option('List of server commonNames',
+ default=None,
+ cmd='--ssl-cns',
+ odesc='<CN1:CN2>',
+ cf=('communication', 'serverCommonNames'),
+ cook=list_split,
+ long_arg=True)
+CLIENT_PROFILE = \
+ Option('Assert the given profile for the host',
+ default=None,
+ cmd='-p',
+ odesc='<profile>')
+CLIENT_RETRIES = \
+ Option('The number of times to retry network communication',
+ default='3',
+ cmd='-R',
+ odesc='<retry count>',
+ cf=('communication', 'retries'))
+CLIENT_RETRY_DELAY = \
+ Option('The time in seconds to wait between retries',
+ default='1',
+ cmd='-y',
+ odesc='<retry delay>',
+ cf=('communication', 'retry_delay'))
+CLIENT_DRYRUN = \
+ Option('Do not actually change the system',
+ default=False,
+ cmd='-n')
+CLIENT_EXTRA_DISPLAY = \
+ Option('Enable extra entry output',
+ default=False,
+ cmd='-e')
+CLIENT_PARANOID = \
+ Option('Make automatic backups of config files',
+ default=False,
+ cmd='-P',
+ cf=('client', 'paranoid'),
+ cook=get_bool)
+CLIENT_DRIVERS = \
+ Option('Specify tool driver set',
+ default=Bcfg2.Client.Tools.default,
+ cmd='-D',
+ odesc='<driver1,driver2>',
+ cf=('client', 'drivers'),
+ cook=list_split)
+CLIENT_CACHE = \
+ Option('Store the configuration in a file',
+ default=None,
+ cmd='-c',
+ odesc='<cache path>')
+CLIENT_REMOVE = \
+ Option('Force removal of additional configuration items',
+ default=None,
+ cmd='-r',
+ odesc='<entry type|all>')
+CLIENT_BUNDLE = \
+ Option('Only configure the given bundle(s)',
+ default=[],
+ cmd='-b',
+ odesc='<bundle:bundle>',
+ cook=colon_split)
+CLIENT_SKIPBUNDLE = \
+ Option('Configure everything except the given bundle(s)',
+ default=[],
+ cmd='-B',
+ odesc='<bundle:bundle>',
+ cook=colon_split)
+CLIENT_BUNDLEQUICK = \
+ Option('Only verify/configure the given bundle(s)',
+ default=False,
+ cmd='-Q')
+CLIENT_INDEP = \
+ Option('Only configure independent entries, ignore bundles',
+ default=False,
+ cmd='-z')
+CLIENT_SKIPINDEP = \
+ Option('Do not configure independent entries',
+ default=False,
+ cmd='-Z')
+CLIENT_KEVLAR = \
+ Option('Run in kevlar (bulletproof) mode',
+ default=False,
+ cmd='-k', )
+CLIENT_FILE = \
+ Option('Configure from a file rather than querying the server',
+ default=None,
+ cmd='-f',
+ odesc='<specification path>')
+CLIENT_QUICK = \
+ Option('Disable some checksum verification',
+ default=False,
+ cmd='-q')
+CLIENT_USER = \
+ Option('The user to provide for authentication',
+ default='root',
+ cmd='-u',
+ odesc='<user>',
+ cf=('communication', 'user'))
+CLIENT_SERVICE_MODE = \
+ Option('Set client service mode',
+ default='default',
+ cmd='-s',
+ odesc='<default|disabled|build>')
+CLIENT_TIMEOUT = \
+ Option('Set the client XML-RPC timeout',
+ default=90,
+ cmd='-t',
+ odesc='<timeout>',
+ cf=('communication', 'timeout'))
+CLIENT_DLIST = \
+ Option('Run client in server decision list mode',
+ default='none',
+ cmd='-l',
+ odesc='<whitelist|blacklist|none>',
+ cf=('client', 'decision'))
+CLIENT_DECISION_LIST = \
+ Option('Decision List',
+ default=False,
+ cmd='--decision-list',
+ odesc='<file>',
+ long_arg=True)
+
+# bcfg2-test and bcfg2-lint options
+TEST_NOSEOPTS = \
+ Option('Options to pass to nosetests',
+ default=[],
+ cmd='--nose-options',
+ odesc='<opts>',
+ cf=('bcfg2_test', 'nose_options'),
+ cook=shlex.split,
+ long_arg=True)
+TEST_IGNORE = \
+ Option('Ignore these entries if they fail to build.',
+ default=[],
+ cmd='--ignore',
+ odesc='<Type>:<name>,<Type>:<name>',
+ cf=('bcfg2_test', 'ignore_entries'),
+ cook=list_split,
+ long_arg=True)
+LINT_CONFIG = \
+ Option('Specify bcfg2-lint configuration file',
+ default='/etc/bcfg2-lint.conf',
+ cmd='--lint-config',
+ odesc='<conffile>',
+ long_arg=True)
+LINT_SHOW_ERRORS = \
+ Option('Show error handling',
+ default=False,
+ cmd='--list-errors',
+ long_arg=True)
+LINT_FILES_ON_STDIN = \
+ Option('Operate on a list of files supplied on stdin',
+ default=False,
+ cmd='--stdin',
+ long_arg=True)
+
+# individual client tool options
+CLIENT_APT_TOOLS_INSTALL_PATH = \
+ Option('Apt tools install path',
+ default='/usr',
+ cf=('APT', 'install_path'))
+CLIENT_APT_TOOLS_VAR_PATH = \
+ Option('Apt tools var path',
+ default='/var',
+ cf=('APT', 'var_path'))
+CLIENT_SYSTEM_ETC_PATH = \
+ Option('System etc path',
+ default='/etc',
+ cf=('APT', 'etc_path'))
+CLIENT_PORTAGE_BINPKGONLY = \
+ Option('Portage binary packages only',
+ default=False,
+ cf=('Portage', 'binpkgonly'),
+ cook=get_bool)
+CLIENT_RPMNG_INSTALLONLY = \
+ Option('RPMng install-only packages',
+ default=['kernel', 'kernel-bigmem', 'kernel-enterprise',
+ 'kernel-smp', 'kernel-modules', 'kernel-debug',
+ 'kernel-unsupported', 'kernel-devel', 'kernel-source',
+ 'kernel-default', 'kernel-largesmp-devel',
+ 'kernel-largesmp', 'kernel-xen', 'gpg-pubkey'],
+ cf=('RPMng', 'installonlypackages'),
+ cook=list_split)
+CLIENT_RPMNG_PKG_CHECKS = \
+ Option("Perform RPMng package checks",
+ default=True,
+ cf=('RPMng', 'pkg_checks'),
+ cook=get_bool)
+CLIENT_RPMNG_PKG_VERIFY = \
+ Option("Perform RPMng package verify",
+ default=True,
+ cf=('RPMng', 'pkg_verify'),
+ cook=get_bool)
+CLIENT_RPMNG_INSTALLED_ACTION = \
+ Option("RPMng installed action",
+ default="install",
+ cf=('RPMng', 'installed_action'))
+CLIENT_RPMNG_ERASE_FLAGS = \
+ Option("RPMng erase flags",
+ default=["allmatches"],
+ cf=('RPMng', 'erase_flags'),
+ cook=list_split)
+CLIENT_RPMNG_VERSION_FAIL_ACTION = \
+ Option("RPMng version fail action",
+ default="upgrade",
+ cf=('RPMng', 'version_fail_action'))
+CLIENT_RPMNG_VERIFY_FAIL_ACTION = \
+ Option("RPMng verify fail action",
+ default="reinstall",
+ cf=('RPMng', 'verify_fail_action'))
+CLIENT_RPMNG_VERIFY_FLAGS = \
+ Option("RPMng verify flags",
+ default=[],
+ cf=('RPMng', 'verify_flags'),
+ cook=list_split)
+CLIENT_YUM24_INSTALLONLY = \
+ Option('YUM24 install-only packages',
+ default=['kernel', 'kernel-bigmem', 'kernel-enterprise',
+ 'kernel-smp', 'kernel-modules', 'kernel-debug',
+ 'kernel-unsupported', 'kernel-devel', 'kernel-source',
+ 'kernel-default', 'kernel-largesmp-devel',
+ 'kernel-largesmp', 'kernel-xen', 'gpg-pubkey'],
+ cf=('RPMng', 'installonlypackages'),
+ cook=list_split)
+CLIENT_YUM24_PKG_CHECKS = \
+ Option("Perform YUM24 package checks",
+ default=True,
+ cf=('YUM24', 'pkg_checks'),
+ cook=get_bool)
+CLIENT_YUM24_PKG_VERIFY = \
+ Option("Perform YUM24 package verify",
+ default=True,
+ cf=('YUM24', 'pkg_verify'),
+ cook=get_bool)
+CLIENT_YUM24_INSTALLED_ACTION = \
+ Option("YUM24 installed action",
+ default="install",
+ cf=('YUM24', 'installed_action'))
+CLIENT_YUM24_ERASE_FLAGS = \
+ Option("YUM24 erase flags",
+ default=["allmatches"],
+ cf=('YUM24', 'erase_flags'),
+ cook=list_split)
+CLIENT_YUM24_VERSION_FAIL_ACTION = \
+ Option("YUM24 version fail action",
+ cf=('YUM24', 'version_fail_action'),
+ default="upgrade")
+CLIENT_YUM24_VERIFY_FAIL_ACTION = \
+ Option("YUM24 verify fail action",
+ default="reinstall",
+ cf=('YUM24', 'verify_fail_action'))
+CLIENT_YUM24_VERIFY_FLAGS = \
+ Option("YUM24 verify flags",
+ default=[],
+ cf=('YUM24', 'verify_flags'),
+ cook=list_split)
+CLIENT_YUM24_AUTODEP = \
+ Option("YUM24 autodependency processing",
+ default=True,
+ cf=('YUM24', 'autodep'),
+ cook=get_bool)
+CLIENT_YUMNG_PKG_CHECKS = \
+ Option("Perform YUMng package checks",
+ default=True,
+ cf=('YUMng', 'pkg_checks'),
+ cook=get_bool)
+CLIENT_YUMNG_PKG_VERIFY = \
+ Option("Perform YUMng package verify",
+ default=True,
+ cf=('YUMng', 'pkg_verify'),
+ cook=get_bool)
+CLIENT_YUMNG_INSTALLED_ACTION = \
+ Option("YUMng installed action",
+ default="install",
+ cf=('YUMng', 'installed_action'))
+CLIENT_YUMNG_VERSION_FAIL_ACTION = \
+ Option("YUMng version fail action",
+ default="upgrade",
+ cf=('YUMng', 'version_fail_action'))
+CLIENT_YUMNG_VERIFY_FAIL_ACTION = \
+ Option("YUMng verify fail action",
+ default="reinstall",
+ cf=('YUMng', 'verify_fail_action'))
+CLIENT_YUMNG_VERIFY_FLAGS = \
+ Option("YUMng verify flags",
+ default=[],
+ cf=('YUMng', 'verify_flags'),
+ cook=list_split)
# Logging options
-LOGGING_FILE_PATH = Option('Set path of file log', default=None,
- cmd='-o', odesc='<path>', cf=('logging', 'path'))
+LOGGING_FILE_PATH = \
+ Option('Set path of file log',
+ default=None,
+ cmd='-o',
+ odesc='<path>',
+ cf=('logging', 'path'))
+LOGGING_SYSLOG = \
+ Option('Log to syslog',
+ default=True,
+ cook=get_bool,
+ cf=('logging', 'syslog'))
+DEBUG = \
+ Option("Enable debugging output",
+ default=False,
+ cmd='-d',
+ cook=get_bool,
+ cf=('logging', 'debug'))
+VERBOSE = \
+ Option("Enable verbose output",
+ default=False,
+ cmd='-v',
+ cook=get_bool,
+ cf=('logging', 'verbose'))
# Plugin-specific options
-CFG_VALIDATION = Option('Run validation on Cfg files', default=True,
- cf=('cfg', 'validation'), cmd='--cfg-validation',
- long_arg=True, cook=get_bool)
+CFG_VALIDATION = \
+ Option('Run validation on Cfg files',
+ default=True,
+ cmd='--cfg-validation',
+ cf=('cfg', 'validation'),
+ long_arg=True,
+ cook=get_bool)
+
+# bcfg2-crypt options
+ENCRYPT = \
+ Option('Encrypt the specified file',
+ default=False,
+ cmd='--encrypt',
+ long_arg=True)
+DECRYPT = \
+ Option('Decrypt the specified file',
+ default=False,
+ cmd='--decrypt',
+ long_arg=True)
+CRYPT_PASSPHRASE = \
+ Option('Encryption passphrase (name or passphrase)',
+ default=None,
+ cmd='-p',
+ odesc='<passphrase>')
+CRYPT_XPATH = \
+ Option('XPath expression to select elements to encrypt',
+ default=None,
+ cmd='--xpath',
+ odesc='<xpath>',
+ long_arg=True)
+CRYPT_PROPERTIES = \
+ Option('Encrypt the specified file as a Properties file',
+ default=False,
+ cmd="--properties",
+ long_arg=True)
+CRYPT_CFG = \
+ Option('Encrypt the specified file as a Cfg file',
+ default=False,
+ cmd="--cfg",
+ long_arg=True)
+CRYPT_REMOVE = \
+ Option('Remove the plaintext file after encrypting',
+ default=False,
+ cmd="--remove",
+ long_arg=True)
+
+# Option groups
+CLI_COMMON_OPTIONS = dict(configfile=CFILE,
+ debug=DEBUG,
+ help=HELP,
+ version=VERSION,
+ verbose=VERBOSE,
+ encoding=ENCODING,
+ logging=LOGGING_FILE_PATH,
+ syslog=LOGGING_SYSLOG)
+
+DAEMON_COMMON_OPTIONS = dict(daemon=DAEMON,
+ listen_all=SERVER_LISTEN_ALL)
+
+SERVER_COMMON_OPTIONS = dict(repo=SERVER_REPOSITORY,
+ plugins=SERVER_PLUGINS,
+ password=SERVER_PASSWORD,
+ filemonitor=SERVER_FILEMONITOR,
+ ignore=SERVER_FAM_IGNORE,
+ location=SERVER_LOCATION,
+ static=SERVER_STATIC,
+ key=SERVER_KEY,
+ cert=SERVER_CERT,
+ ca=SERVER_CA,
+ protocol=SERVER_PROTOCOL,
+ web_configfile=WEB_CFILE,
+ backend=SERVER_BACKEND)
+
+CRYPT_OPTIONS = dict(encrypt=ENCRYPT,
+ decrypt=DECRYPT,
+ passphrase=CRYPT_PASSPHRASE,
+ xpath=CRYPT_XPATH,
+ properties=CRYPT_PROPERTIES,
+ cfg=CRYPT_CFG,
+ remove=CRYPT_REMOVE)
+
+DRIVER_OPTIONS = \
+ dict(apt_install_path=CLIENT_APT_TOOLS_INSTALL_PATH,
+ apt_var_path=CLIENT_APT_TOOLS_VAR_PATH,
+ apt_etc_path=CLIENT_SYSTEM_ETC_PATH,
+ portage_binpkgonly=CLIENT_PORTAGE_BINPKGONLY,
+ rpmng_installonly=CLIENT_RPMNG_INSTALLONLY,
+ rpmng_pkg_checks=CLIENT_RPMNG_PKG_CHECKS,
+ rpmng_pkg_verify=CLIENT_RPMNG_PKG_VERIFY,
+ rpmng_installed_action=CLIENT_RPMNG_INSTALLED_ACTION,
+ rpmng_erase_flags=CLIENT_RPMNG_ERASE_FLAGS,
+ rpmng_version_fail_action=CLIENT_RPMNG_VERSION_FAIL_ACTION,
+ rpmng_verify_fail_action=CLIENT_RPMNG_VERIFY_FAIL_ACTION,
+ rpmng_verify_flags=CLIENT_RPMNG_VERIFY_FLAGS,
+ yum24_installonly=CLIENT_YUM24_INSTALLONLY,
+ yum24_pkg_checks=CLIENT_YUM24_PKG_CHECKS,
+ yum24_pkg_verify=CLIENT_YUM24_PKG_VERIFY,
+ yum24_installed_action=CLIENT_YUM24_INSTALLED_ACTION,
+ yum24_erase_flags=CLIENT_YUM24_ERASE_FLAGS,
+ yum24_version_fail_action=CLIENT_YUM24_VERSION_FAIL_ACTION,
+ yum24_verify_fail_action=CLIENT_YUM24_VERIFY_FAIL_ACTION,
+ yum24_verify_flags=CLIENT_YUM24_VERIFY_FLAGS,
+ yum24_autodep=CLIENT_YUM24_AUTODEP,
+ yumng_pkg_checks=CLIENT_YUMNG_PKG_CHECKS,
+ yumng_pkg_verify=CLIENT_YUMNG_PKG_VERIFY,
+ yumng_installed_action=CLIENT_YUMNG_INSTALLED_ACTION,
+ yumng_version_fail_action=CLIENT_YUMNG_VERSION_FAIL_ACTION,
+ yumng_verify_fail_action=CLIENT_YUMNG_VERIFY_FAIL_ACTION,
+ yumng_verify_flags=CLIENT_YUMNG_VERIFY_FLAGS)
+
+CLIENT_COMMON_OPTIONS = \
+ dict(extra=CLIENT_EXTRA_DISPLAY,
+ quick=CLIENT_QUICK,
+ lockfile=LOCKFILE,
+ drivers=CLIENT_DRIVERS,
+ dryrun=CLIENT_DRYRUN,
+ paranoid=CLIENT_PARANOID,
+ ppath=PARANOID_PATH,
+ max_copies=PARANOID_MAX_COPIES,
+ bundle=CLIENT_BUNDLE,
+ skipbundle=CLIENT_SKIPBUNDLE,
+ bundle_quick=CLIENT_BUNDLEQUICK,
+ indep=CLIENT_INDEP,
+ skipindep=CLIENT_SKIPINDEP,
+ file=CLIENT_FILE,
+ interactive=INTERACTIVE,
+ cache=CLIENT_CACHE,
+ profile=CLIENT_PROFILE,
+ remove=CLIENT_REMOVE,
+ server=SERVER_LOCATION,
+ user=CLIENT_USER,
+ password=SERVER_PASSWORD,
+ retries=CLIENT_RETRIES,
+ retry_delay=CLIENT_RETRY_DELAY,
+ kevlar=CLIENT_KEVLAR,
+ omit_lock_check=OMIT_LOCK_CHECK,
+ decision=CLIENT_DLIST,
+ servicemode=CLIENT_SERVICE_MODE,
+ key=CLIENT_KEY,
+ certificate=CLIENT_CERT,
+ ca=CLIENT_CA,
+ serverCN=CLIENT_SCNS,
+ timeout=CLIENT_TIMEOUT,
+ decision_list=CLIENT_DECISION_LIST)
+CLIENT_COMMON_OPTIONS.update(DRIVER_OPTIONS)
+CLIENT_COMMON_OPTIONS.update(CLI_COMMON_OPTIONS)
+
+DATABASE_COMMON_OPTIONS = dict(web_configfile=WEB_CFILE,
+ db_engine=DB_ENGINE,
+ db_name=DB_NAME,
+ db_user=DB_USER,
+ db_password=DB_PASSWORD,
+ db_host=DB_HOST,
+ db_port=DB_PORT,
+ time_zone=DJANGO_TIME_ZONE,
+ django_debug=DJANGO_DEBUG,
+ web_prefix=DJANGO_WEB_PREFIX)
+
class OptionParser(OptionSet):
"""
OptionParser bootstraps option parsing,
getting the value of the config file
"""
- def __init__(self, args):
+ def __init__(self, args, argv=None, quiet=False):
+ if argv is None:
+ argv = sys.argv[1:]
+ # the bootstrap is always quiet, since it's running with a
+ # default config file and so might produce warnings otherwise
self.Bootstrap = OptionSet([('configfile', CFILE)], quiet=True)
- self.Bootstrap.parse(sys.argv[1:], do_getopt=False)
- OptionSet.__init__(self, args, configfile=self.Bootstrap['configfile'])
- self.optinfo = args
+ self.Bootstrap.parse(argv, do_getopt=False)
+ OptionSet.__init__(self, args, configfile=self.Bootstrap['configfile'],
+ quiet=quiet)
+ self.optinfo = copy.copy(args)
def HandleEvent(self, event):
- if not self['configfile'].endswith(event.filename):
+ if 'configfile' not in self or not isinstance(self['configfile'], str):
+ # we haven't parsed options yet, or CFILE wasn't included
+ # in the options
+ return
+ if event.filename != self['configfile']:
print("Got event for unknown file: %s" % event.filename)
return
if event.code2str() == 'deleted':
@@ -447,3 +1055,10 @@ class OptionParser(OptionSet):
self.do_getopt = do_getopt
OptionSet.parse(self, self.argv, do_getopt=self.do_getopt)
+ def add_option(self, name, opt):
+ self[name] = opt
+ self.optinfo[name] = opt
+
+ def update(self, optdict):
+ dict.update(self, optdict)
+ self.optinfo.update(optdict)
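
The option groups and the new add_option()/update() hooks above let each CLI entry point assemble its option dictionary from shared pieces. A minimal sketch of that pattern; the tool-specific option and its -x flag are invented for illustration:

    import sys
    import Bcfg2.Options

    # build an option dict from the shared groups plus one local option
    optinfo = dict(myflag=Bcfg2.Options.Option("Tool-specific flag (illustrative)",
                                               default=False,
                                               cmd='-x'))
    optinfo.update(Bcfg2.Options.CLI_COMMON_OPTIONS)
    optinfo.update(Bcfg2.Options.SERVER_COMMON_OPTIONS)

    setup = Bcfg2.Options.OptionParser(optinfo)
    setup.parse(sys.argv[1:])
    print(setup['repo'])   # SERVER_REPOSITORY after config file and CLI parsing
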
diff --git a/src/lib/Bcfg2/Proxy.py b/src/lib/Bcfg2/Proxy.py
index 422d642db..220b89b5f 100644
--- a/src/lib/Bcfg2/Proxy.py
+++ b/src/lib/Bcfg2/Proxy.py
@@ -1,13 +1,3 @@
-"""RPC client access to cobalt components.
-
-Classes:
-ComponentProxy -- an RPC client proxy to Cobalt components
-
-Functions:
-load_config -- read configuration files
-
-"""
-
import logging
import re
import socket
@@ -34,7 +24,6 @@ import time
from Bcfg2.Bcfg2Py3k import httplib, xmlrpclib, urlparse
version = sys.version_info[:2]
-has_py23 = version >= (2, 3)
has_py26 = version >= (2, 6)
__all__ = ["ComponentProxy",
@@ -70,50 +59,58 @@ class CertificateError(Exception):
return ("Got unallowed commonName %s from server"
% self.commonName)
+_orig_Method = xmlrpclib._Method
class RetryMethod(xmlrpclib._Method):
"""Method with error handling and retries built in."""
log = logging.getLogger('xmlrpc')
- max_retries = 4
+ max_retries = 3
+ retry_delay = 1
def __call__(self, *args):
for retry in range(self.max_retries):
+ if retry >= self.max_retries - 1:
+ final = True
+ else:
+ final = False
+ msg = None
try:
- return xmlrpclib._Method.__call__(self, *args)
+ return _orig_Method.__call__(self, *args)
except xmlrpclib.ProtocolError:
err = sys.exc_info()[1]
- self.log.error("Server failure: Protocol Error: %s %s" % \
- (err.errcode, err.errmsg))
- raise xmlrpclib.Fault(20, "Server Failure")
+ msg = "Server failure: Protocol Error: %s %s" % \
+ (err.errcode, err.errmsg)
except xmlrpclib.Fault:
- raise
+ msg = sys.exc_info()[1]
except socket.error:
err = sys.exc_info()[1]
if hasattr(err, 'errno') and err.errno == 336265218:
- self.log.error("SSL Key error")
- break
- if hasattr(err, 'errno') and err.errno == 185090050:
- self.log.error("SSL CA error")
- break
- if retry == 3:
- self.log.error("Server failure: %s" % err)
- raise xmlrpclib.Fault(20, err)
+ msg = "SSL Key error: %s" % err
+ elif hasattr(err, 'errno') and err.errno == 185090050:
+ msg = "SSL CA error: %s" % err
+ elif final:
+ msg = "Server failure: %s" % err
except CertificateError:
- ce = sys.exc_info()[1]
- self.log.error("Got unallowed commonName %s from server" \
- % ce.commonName)
- break
+ err = sys.exc_info()[1]
+ msg = "Got unallowed commonName %s from server" % err.commonName
except KeyError:
- self.log.error("Server disallowed connection")
- break
+ err = sys.exc_info()[1]
+ msg = "Server disallowed connection: %s" % err
+ except ProxyError:
+ err = sys.exc_info()[1]
+ msg = err
except:
- self.log.error("Unknown failure", exc_info=1)
- break
- time.sleep(0.5)
- raise xmlrpclib.Fault(20, "Server Failure")
+ err = sys.exc_info()[1]
+ msg = "Unknown failure: %s" % err
+ if msg:
+ if final:
+ self.log.error(msg)
+ raise ProxyError(msg)
+ else:
+ self.log.info(msg)
+ time.sleep(self.retry_delay)
-# sorry jon
-_Method = RetryMethod
+xmlrpclib._Method = RetryMethod
class SSLHTTPConnection(httplib.HTTPConnection):
@@ -192,7 +189,15 @@ class SSLHTTPConnection(httplib.HTTPConnection):
def _connect_py26ssl(self):
"""Initiates a connection using the ssl module."""
- rawsock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+ # check for IPv6
+ hostip = socket.getaddrinfo(self.host,
+ self.port,
+ socket.AF_UNSPEC,
+ socket.SOCK_STREAM)[0][4][0]
+ if ':' in hostip:
+ rawsock = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
+ else:
+ rawsock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
if self.protocol == 'xmlrpc/ssl':
ssl_protocol_ver = ssl.PROTOCOL_SSLv23
elif self.protocol == 'xmlrpc/tlsv1':
@@ -212,8 +217,7 @@ class SSLHTTPConnection(httplib.HTTPConnection):
self.logger.warning("SSL key specified, but no cert. Cannot authenticate this client with SSL.")
self.key = None
- if has_py23:
- rawsock.settimeout(self.timeout)
+ rawsock.settimeout(self.timeout)
self.sock = ssl.SSLSocket(rawsock, cert_reqs=other_side_required,
ca_certs=self.ca, suppress_ragged_eofs=True,
keyfile=self.key, certfile=self.cert,
@@ -286,26 +290,21 @@ class XMLRPCTransport(xmlrpclib.Transport):
def make_connection(self, host):
host, self._extra_headers = self.get_host_info(host)[0:2]
- http = SSLHTTPConnection(host,
+ return SSLHTTPConnection(host,
key=self.key,
cert=self.cert,
ca=self.ca,
scns=self.scns,
timeout=self.timeout)
- https = httplib.HTTP()
- https._setup(http)
- return https
def request(self, host, handler, request_body, verbose=0):
"""Send request to server and return response."""
- h = self.make_connection(host)
-
try:
- self.send_request(h, handler, request_body)
- self.send_host(h, host)
- self.send_user_agent(h)
- self.send_content(h, request_body)
- errcode, errmsg, headers = h.getreply()
+ conn = self.send_request(host, handler, request_body, False)
+ response = conn.getresponse()
+ errcode = response.status
+ errmsg = response.reason
+ headers = response.msg
except (socket.error, SSL_ERROR):
err = sys.exc_info()[1]
raise ProxyError(xmlrpclib.ProtocolError(host + handler,
@@ -320,8 +319,17 @@ class XMLRPCTransport(xmlrpclib.Transport):
headers))
self.verbose = verbose
- msglen = int(headers.dict['content-length'])
- return self._get_response(h.getfile(), msglen)
+ return self.parse_response(response)
+
+ if sys.hexversion < 0x03000000:
+ def send_request(self, host, handler, request_body, debug):
+ """ send_request() changed significantly in py3k."""
+ conn = self.make_connection(host)
+ xmlrpclib.Transport.send_request(self, conn, handler, request_body)
+ self.send_host(conn, host)
+ self.send_user_agent(conn)
+ self.send_content(conn, request_body)
+ return conn
def _get_response(self, fd, length):
# read response from input file/socket, and parse it
@@ -345,9 +353,8 @@ class XMLRPCTransport(xmlrpclib.Transport):
return u.close()
-def ComponentProxy(url, user=None, password=None,
- key=None, cert=None, ca=None,
- allowedServerCNs=None, timeout=90):
+def ComponentProxy(url, user=None, password=None, key=None, cert=None, ca=None,
+ allowedServerCNs=None, timeout=90, retries=3, delay=1):
"""Constructs proxies to components.
@@ -357,6 +364,8 @@ def ComponentProxy(url, user=None, password=None,
Additional arguments are passed to the ServerProxy constructor.
"""
+ xmlrpclib._Method.max_retries = retries
+ xmlrpclib._Method.retry_delay = delay
if user and password:
method, path = urlparse(url)[:2]
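
Because RetryMethod now replaces xmlrpclib._Method globally, the retries/delay arguments to ComponentProxy() become class attributes that every proxied call honors. A hedged usage sketch; the URL, credentials, and RPC name below are placeholders:

    from Bcfg2.Proxy import ComponentProxy, ProxyError

    proxy = ComponentProxy("https://bcfg2.example.com:6789",
                           user="root", password="secret",
                           retries=5, delay=2)
    try:
        # each RPC made through the proxy retries up to 5 times, sleeping
        # 2 seconds between attempts, before raising ProxyError
        probes = proxy.GetProbes()
    except ProxyError:
        pass
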
diff --git a/src/lib/Bcfg2/SSLServer.py b/src/lib/Bcfg2/SSLServer.py
index 6aa46ea58..aef44e419 100644
--- a/src/lib/Bcfg2/SSLServer.py
+++ b/src/lib/Bcfg2/SSLServer.py
@@ -45,7 +45,7 @@ class XMLRPCDispatcher (SimpleXMLRPCServer.SimpleXMLRPCDispatcher):
params = (address, ) + params
response = self.instance._dispatch(method, params, self.funcs)
# py3k compatibility
- if type(response) not in [bool, str, list, dict] or response is None:
+ if type(response) not in [bool, str, list, dict]:
response = (response.decode('utf-8'), )
else:
response = (response, )
@@ -98,14 +98,23 @@ class SSLServer (SocketServer.TCPServer, object):
timeout -- timeout for non-blocking request handling
"""
-
+ # check whether or not we should listen on all interfaces
if listen_all:
listen_address = ('', server_address[1])
else:
listen_address = (server_address[0], server_address[1])
+
+ # check for IPv6 address
+ if ':' in server_address[0]:
+ self.address_family = socket.AF_INET6
+
try:
SocketServer.TCPServer.__init__(self, listen_address,
RequestHandlerClass)
+ except socket.gaierror:
+ e = sys.exc_info()[1]
+ self.logger.error("Failed to bind to socket: %s" % e)
+ raise
except socket.error:
self.logger.error("Failed to bind to socket")
raise
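
Both the Proxy.py and SSLServer.py hunks above use the same resolve-then-pick-family pattern to support IPv6; a standalone sketch of that logic:

    import socket

    def make_stream_socket(host, port):
        # resolve first, then choose AF_INET6 when the address contains ':'
        hostip = socket.getaddrinfo(host, port,
                                    socket.AF_UNSPEC,
                                    socket.SOCK_STREAM)[0][4][0]
        family = socket.AF_INET6 if ':' in hostip else socket.AF_INET
        return socket.socket(family, socket.SOCK_STREAM)
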
diff --git a/src/lib/Bcfg2/Server/Admin/Bundle.py b/src/lib/Bcfg2/Server/Admin/Bundle.py
index 89c099602..e5e4eadf3 100644
--- a/src/lib/Bcfg2/Server/Admin/Bundle.py
+++ b/src/lib/Bcfg2/Server/Admin/Bundle.py
@@ -8,12 +8,11 @@ from Bcfg2.Server.Plugins.Metadata import MetadataConsistencyError
class Bundle(Bcfg2.Server.Admin.MetadataCore):
- __shorthelp__ = "Create or delete bundle entries"
- # TODO: add/del functions
+ __shorthelp__ = "List and view bundle entries"
__longhelp__ = (__shorthelp__ + "\n\nbcfg2-admin bundle list-xml"
"\nbcfg2-admin bundle list-genshi"
"\nbcfg2-admin bundle show\n")
- __usage__ = ("bcfg2-admin bundle [options] [add|del] [group]")
+ __usage__ = ("bcfg2-admin bundle [options] [list-xml|list-genshi|show]")
def __call__(self, args):
Bcfg2.Server.Admin.MetadataCore.__call__(self, args)
@@ -28,18 +27,6 @@ class Bundle(Bcfg2.Server.Admin.MetadataCore):
if len(args) == 0:
self.errExit("No argument specified.\n"
"Please see bcfg2-admin bundle help for usage.")
-# if args[0] == 'add':
-# try:
-# self.metadata.add_bundle(args[1])
-# except MetadataConsistencyError:
-# print("Error in adding bundle.")
-# raise SystemExit(1)
-# elif args[0] in ['delete', 'remove', 'del', 'rm']:
-# try:
-# self.metadata.remove_bundle(args[1])
-# except MetadataConsistencyError:
-# print("Error in deleting bundle.")
-# raise SystemExit(1)
# Lists all available xml bundles
elif args[0] in ['list-xml', 'ls-xml']:
bundle_name = []
@@ -63,7 +50,6 @@ class Bundle(Bcfg2.Server.Admin.MetadataCore):
bundle_name = []
bundle_list = xml_list + genshi_list
for bundle_path in bundle_list:
- print "matching %s" % bundle_path
bundle_name.append(rg.search(bundle_path).group(1))
text = "Available bundles (Number of bundles: %s)" % \
(len(bundle_list))
@@ -85,8 +71,6 @@ class Bundle(Bcfg2.Server.Admin.MetadataCore):
print('Details for the "%s" bundle:' % \
(bundle_name[int(lineno)].split('.')[0]))
tree = lxml.etree.parse(bundle_list[int(lineno)])
- #Prints bundle content
- #print(lxml.etree.tostring(tree))
names = ['Action', 'Package', 'Path', 'Service']
for name in names:
for node in tree.findall("//" + name):
diff --git a/src/lib/Bcfg2/Server/Admin/Client.py b/src/lib/Bcfg2/Server/Admin/Client.py
index 4d580c54c..34dfd7550 100644
--- a/src/lib/Bcfg2/Server/Admin/Client.py
+++ b/src/lib/Bcfg2/Server/Admin/Client.py
@@ -4,50 +4,23 @@ from Bcfg2.Server.Plugins.Metadata import MetadataConsistencyError
class Client(Bcfg2.Server.Admin.MetadataCore):
- __shorthelp__ = "Create, delete, or modify client entries"
+ __shorthelp__ = "Create, delete, or list client entries"
__longhelp__ = (__shorthelp__ + "\n\nbcfg2-admin client add <client> "
- "attr1=val1 attr2=val2"
- "\nbcfg2-admin client update <client> "
- "attr1=val1 attr2=val2"
"\nbcfg2-admin client list"
"\nbcfg2-admin client del <client>\n")
- __usage__ = ("bcfg2-admin client [options] [add|del|update|list] [attr=val]")
+ __usage__ = ("bcfg2-admin client [options] [add|del|list] [attr=val]")
def __call__(self, args):
Bcfg2.Server.Admin.MetadataCore.__call__(self, args)
if len(args) == 0:
self.errExit("No argument specified.\n"
- "Please see bcfg2-admin client help for usage.")
+ "Usage: %s" % self.usage)
if args[0] == 'add':
- attr_d = {}
- for i in args[2:]:
- attr, val = i.split('=', 1)
- if attr not in ['profile', 'uuid', 'password',
- 'location', 'secure', 'address',
- 'auth']:
- print("Attribute %s unknown" % attr)
- raise SystemExit(1)
- attr_d[attr] = val
try:
- self.metadata.add_client(args[1], attr_d)
+ self.metadata.add_client(args[1])
except MetadataConsistencyError:
print("Error in adding client")
raise SystemExit(1)
- elif args[0] in ['update', 'up']:
- attr_d = {}
- for i in args[2:]:
- attr, val = i.split('=', 1)
- if attr not in ['profile', 'uuid', 'password',
- 'location', 'secure', 'address',
- 'auth']:
- print("Attribute %s unknown" % attr)
- raise SystemExit(1)
- attr_d[attr] = val
- try:
- self.metadata.update_client(args[1], attr_d)
- except MetadataConsistencyError:
- print("Error in updating client")
- raise SystemExit(1)
elif args[0] in ['delete', 'remove', 'del', 'rm']:
try:
self.metadata.remove_client(args[1])
@@ -55,10 +28,9 @@ class Client(Bcfg2.Server.Admin.MetadataCore):
print("Error in deleting client")
raise SystemExit(1)
elif args[0] in ['list', 'ls']:
- tree = lxml.etree.parse(self.metadata.data + "/clients.xml")
- tree.xinclude()
- for node in tree.findall("//Client"):
- print(node.attrib["name"])
+ for client in self.metadata.list_clients():
+ print(client.hostname)
else:
print("No command specified")
raise SystemExit(1)
+
diff --git a/src/lib/Bcfg2/Server/Admin/Compare.py b/src/lib/Bcfg2/Server/Admin/Compare.py
index 050dd69f8..78b30120a 100644
--- a/src/lib/Bcfg2/Server/Admin/Compare.py
+++ b/src/lib/Bcfg2/Server/Admin/Compare.py
@@ -18,7 +18,8 @@ class Compare(Bcfg2.Server.Admin.Mode):
'important', 'paranoid', 'sensitive',
'dev_type', 'major', 'minor', 'prune',
'encoding', 'empty', 'to', 'recursive',
- 'vcstype', 'sourceurl', 'revision'],
+ 'vcstype', 'sourceurl', 'revision',
+ 'secontext'],
'Package': ['name', 'type', 'version', 'simplefile',
'verify'],
'Service': ['name', 'type', 'status', 'mode',
diff --git a/src/lib/Bcfg2/Server/Admin/Group.py b/src/lib/Bcfg2/Server/Admin/Group.py
deleted file mode 100644
index 16a773d6f..000000000
--- a/src/lib/Bcfg2/Server/Admin/Group.py
+++ /dev/null
@@ -1,63 +0,0 @@
-import lxml.etree
-import Bcfg2.Server.Admin
-from Bcfg2.Server.Plugins.Metadata import MetadataConsistencyError
-
-
-class Group(Bcfg2.Server.Admin.MetadataCore):
- __shorthelp__ = "Create, delete, or modify group entries"
- __longhelp__ = (__shorthelp__ + "\n\nbcfg2-admin group add <group> "
- "attr1=val1 attr2=val2"
- "\nbcfg2-admin group update <group> "
- "attr1=val1 attr2=val2"
- "\nbcfg2-admin group list"
- "\nbcfg2-admin group del <group>\n")
- __usage__ = ("bcfg2-admin group [options] [add|del|update|list] [attr=val]")
-
- def __call__(self, args):
- Bcfg2.Server.Admin.MetadataCore.__call__(self, args)
- if len(args) == 0:
- self.errExit("No argument specified.\n"
- "Please see bcfg2-admin group help for usage.")
- if args[0] == 'add':
- attr_d = {}
- for i in args[2:]:
- attr, val = i.split('=', 1)
- if attr not in ['profile', 'public', 'default',
- 'name', 'auth', 'toolset', 'category',
- 'comment']:
- print("Attribute %s unknown" % attr)
- raise SystemExit(1)
- attr_d[attr] = val
- try:
- self.metadata.add_group(args[1], attr_d)
- except MetadataConsistencyError:
- print("Error in adding group")
- raise SystemExit(1)
- elif args[0] in ['update', 'up']:
- attr_d = {}
- for i in args[2:]:
- attr, val = i.split('=', 1)
- if attr not in ['profile', 'public', 'default',
- 'name', 'auth', 'toolset', 'category',
- 'comment']:
- print("Attribute %s unknown" % attr)
- raise SystemExit(1)
- attr_d[attr] = val
- try:
- self.metadata.update_group(args[1], attr_d)
- except MetadataConsistencyError:
- print("Error in updating group")
- raise SystemExit(1)
- elif args[0] in ['delete', 'remove', 'del', 'rm']:
- try:
- self.metadata.remove_group(args[1])
- except MetadataConsistencyError:
- print("Error in deleting group")
- raise SystemExit(1)
- elif args[0] in ['list', 'ls']:
- tree = lxml.etree.parse(self.metadata.data + "/groups.xml")
- for node in tree.findall("//Group"):
- print(node.attrib["name"])
- else:
- print("No command specified")
- raise SystemExit(1)
diff --git a/src/lib/Bcfg2/Server/Admin/Init.py b/src/lib/Bcfg2/Server/Admin/Init.py
index c1f9ed484..fefd17d6a 100644
--- a/src/lib/Bcfg2/Server/Admin/Init.py
+++ b/src/lib/Bcfg2/Server/Admin/Init.py
@@ -6,9 +6,11 @@ import stat
import string
import sys
import subprocess
+
import Bcfg2.Server.Admin
import Bcfg2.Server.Plugin
import Bcfg2.Options
+from Bcfg2.Bcfg2Py3k import input
# default config file
config = '''
@@ -18,18 +20,22 @@ plugins = %s
[statistics]
sendmailpath = %s
-database_engine = sqlite3
+#web_debug = False
+#time_zone =
+
+[database]
+#engine = sqlite3
# 'postgresql', 'mysql', 'mysql_old', 'sqlite3' or 'ado_mssql'.
-database_name =
+#name =
# Or path to database file if using sqlite3.
-#<repository>/etc/brpt.sqlite is default path if left empty
-database_user =
+#<repository>/bcfg2.sqlite is default path if left empty
+#user =
# Not used with sqlite3.
-database_password =
+#password =
# Not used with sqlite3.
-database_host =
+#host =
# Not used with sqlite3.
-database_port =
+#port =
[communication]
protocol = %s
@@ -61,7 +67,7 @@ groups = '''<Groups version='3.0'>
# Default contents of clients.xml
clients = '''<Clients version="3.0">
- <Client profile="basic" pingable="Y" pingtime="0" name="%s"/>
+ <Client profile="basic" name="%s"/>
</Clients>
'''
@@ -98,7 +104,6 @@ plugin_list = ['Account',
'SSHbase',
'SSLCA',
'Statistics',
- 'Svcmgr',
'TCheetah',
'TGenshi']
@@ -106,14 +111,6 @@ plugin_list = ['Account',
default_plugins = Bcfg2.Options.SERVER_PLUGINS.default
-def get_input(prompt):
- """py3k compatible function to get input"""
- try:
- return raw_input(prompt)
- except NameError:
- return input(prompt)
-
-
def gen_password(length):
"""Generates a random alphanumeric password with length characters."""
chars = string.letters + string.digits
@@ -147,8 +144,8 @@ def create_key(hostname, keypath, certpath, country, state, location):
def create_conf(confpath, confdata, keypath):
# Don't overwrite existing bcfg2.conf file
if os.path.exists(confpath):
- result = get_input("\nWarning: %s already exists. "
- "Overwrite? [y/N]: " % confpath)
+ result = input("\nWarning: %s already exists. "
+ "Overwrite? [y/N]: " % confpath)
if result not in ['Y', 'y']:
print("Leaving %s unchanged" % confpath)
return
@@ -206,8 +203,8 @@ class Init(Bcfg2.Server.Admin.Mode):
def _prompt_hostname(self):
"""Ask for the server hostname."""
- data = get_input("What is the server's hostname [%s]: " %
- socket.getfqdn())
+ data = input("What is the server's hostname [%s]: " %
+ socket.getfqdn())
if data != '':
self.shostname = data
else:
@@ -215,21 +212,21 @@ class Init(Bcfg2.Server.Admin.Mode):
def _prompt_config(self):
"""Ask for the configuration file path."""
- newconfig = get_input("Store Bcfg2 configuration in [%s]: " %
- self.configfile)
+ newconfig = input("Store Bcfg2 configuration in [%s]: " %
+ self.configfile)
if newconfig != '':
self.configfile = os.path.abspath(newconfig)
def _prompt_repopath(self):
"""Ask for the repository path."""
while True:
- newrepo = get_input("Location of Bcfg2 repository [%s]: " %
- self.repopath)
+ newrepo = input("Location of Bcfg2 repository [%s]: " %
+ self.repopath)
if newrepo != '':
self.repopath = os.path.abspath(newrepo)
if os.path.isdir(self.repopath):
- response = get_input("Directory %s exists. Overwrite? [y/N]:" \
- % self.repopath)
+ response = input("Directory %s exists. Overwrite? [y/N]:" \
+ % self.repopath)
if response.lower().strip() == 'y':
break
else:
@@ -245,8 +242,8 @@ class Init(Bcfg2.Server.Admin.Mode):
def _prompt_server(self):
"""Ask for the server name."""
- newserver = get_input("Input the server location [%s]: " %
- self.server_uri)
+ newserver = input("Input the server location [%s]: " %
+ self.server_uri)
if newserver != '':
self.server_uri = newserver
@@ -258,19 +255,19 @@ class Init(Bcfg2.Server.Admin.Mode):
prompt += ': '
while True:
try:
- osidx = int(get_input(prompt))
+ osidx = int(input(prompt))
self.os_sel = os_list[osidx - 1][1]
break
except ValueError:
continue
def _prompt_plugins(self):
- default = get_input("Use default plugins? (%s) [Y/n]: " %
- ''.join(default_plugins)).lower()
+ default = input("Use default plugins? (%s) [Y/n]: " %
+ ''.join(default_plugins)).lower()
if default != 'y' or default != '':
while True:
plugins_are_valid = True
- plug_str = get_input("Specify plugins: ")
+ plug_str = input("Specify plugins: ")
plugins = plug_str.split(',')
for plugin in plugins:
plugin = plugin.strip()
@@ -284,26 +281,26 @@ class Init(Bcfg2.Server.Admin.Mode):
"""Ask for the key details (country, state, and location)."""
print("The following questions affect SSL certificate generation.")
print("If no data is provided, the default values are used.")
- newcountry = get_input("Country name (2 letter code) for certificate: ")
+ newcountry = input("Country name (2 letter code) for certificate: ")
if newcountry != '':
if len(newcountry) == 2:
self.country = newcountry
else:
while len(newcountry) != 2:
- newcountry = get_input("2 letter country code (eg. US): ")
+ newcountry = input("2 letter country code (eg. US): ")
if len(newcountry) == 2:
self.country = newcountry
break
else:
self.country = 'US'
- newstate = get_input("State or Province Name (full name) for certificate: ")
+ newstate = input("State or Province Name (full name) for certificate: ")
if newstate != '':
self.state = newstate
else:
self.state = 'Illinois'
- newlocation = get_input("Locality Name (eg, city) for certificate: ")
+ newlocation = input("Locality Name (eg, city) for certificate: ")
if newlocation != '':
self.location = newlocation
else:
@@ -313,10 +310,10 @@ class Init(Bcfg2.Server.Admin.Mode):
"""Initialize each plugin-specific portion of the repository."""
for plugin in self.plugins:
if plugin == 'Metadata':
- Bcfg2.Server.Plugins.Metadata.Metadata.init_repo(self.repopath,
- groups,
- self.os_sel,
- clients)
+ Bcfg2.Server.Plugins.Metadata.Metadata.init_repo(
+ self.repopath,
+ groups_xml=groups % self.os_sel,
+ clients_xml=clients % socket.getfqdn())
else:
try:
module = __import__("Bcfg2.Server.Plugins.%s" % plugin, '',
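
The local get_input() helper goes away in favor of an input name imported from Bcfg2.Bcfg2Py3k; a plausible sketch of such a shim, inferred from the removed helper (the real module may differ):

    import sys

    if sys.hexversion < 0x03000000:
        # py2: raw_input() returns the typed string, while the py2 builtin
        # input() would eval() it, so rebind the name (assumption -- the
        # actual Bcfg2Py3k implementation may differ)
        input = raw_input
    # on py3 the builtin input() already behaves correctly
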
diff --git a/src/lib/Bcfg2/Server/Admin/Pull.py b/src/lib/Bcfg2/Server/Admin/Pull.py
index daf353107..64327e018 100644
--- a/src/lib/Bcfg2/Server/Admin/Pull.py
+++ b/src/lib/Bcfg2/Server/Admin/Pull.py
@@ -2,6 +2,7 @@ import getopt
import sys
import Bcfg2.Server.Admin
+from Bcfg2.Bcfg2Py3k import input
class Pull(Bcfg2.Server.Admin.MetadataCore):
@@ -26,7 +27,7 @@ class Pull(Bcfg2.Server.Admin.MetadataCore):
"interactive",
"-s",
"stdin"))
- allowed = ['Metadata', 'BB', "DBStats", "Statistics", "Cfg", "SSHbase"]
+ allowed = ['Metadata', "DBStats", "Statistics", "Cfg", "SSHbase"]
def __init__(self, setup):
Bcfg2.Server.Admin.MetadataCore.__init__(self, setup)
@@ -92,7 +93,6 @@ class Pull(Bcfg2.Server.Admin.MetadataCore):
for k, v in list(data.items()):
if v:
new_entry[k] = v
- #print new_entry
return new_entry
def Choose(self, choices):
@@ -109,11 +109,8 @@ class Pull(Bcfg2.Server.Admin.MetadataCore):
(choice.group, choice.prio))
else:
print(" => host entry: %s" % (choice.hostname))
- # py3k compatibility
- try:
- ans = raw_input("Use this entry? [yN]: ") in ['y', 'Y']
- except NameError:
- ans = input("Use this entry? [yN]: ") in ['y', 'Y']
+
+ ans = input("Use this entry? [yN]: ") in ['y', 'Y']
if ans:
return choice
return False
diff --git a/src/lib/Bcfg2/Server/Admin/Query.py b/src/lib/Bcfg2/Server/Admin/Query.py
index 3dd326645..f81ec41d2 100644
--- a/src/lib/Bcfg2/Server/Admin/Query.py
+++ b/src/lib/Bcfg2/Server/Admin/Query.py
@@ -7,8 +7,8 @@ import Bcfg2.Server.Admin
class Query(Bcfg2.Server.Admin.MetadataCore):
__shorthelp__ = "Query clients"
__longhelp__ = (__shorthelp__ + "\n\nbcfg2-admin query [-n] [-c] "
- "[-f filename] g=group p=profile")
- __usage__ = ("bcfg2-admin query [options] <g=group> <p=profile>\n\n"
+ "[-f filename] g=group p=profile b=bundle")
+ __usage__ = ("bcfg2-admin query [options] <g=group> <p=profile> <b=bundle>\n\n"
" %-25s%s\n"
" %-25s%s\n"
" %-25s%s\n" %
@@ -22,7 +22,8 @@ class Query(Bcfg2.Server.Admin.MetadataCore):
def __init__(self, setup):
Bcfg2.Server.Admin.MetadataCore.__init__(self, setup)
logging.root.setLevel(100)
- Bcfg2.Logger.setup_logging(100, to_console=False, to_syslog=False)
+ Bcfg2.Logger.setup_logging(100, to_console=False,
+ to_syslog=setup['syslog'])
def __call__(self, args):
Bcfg2.Server.Admin.MetadataCore.__call__(self, args)
@@ -55,8 +56,10 @@ class Query(Bcfg2.Server.Admin.MetadataCore):
for g in glist:
if g in v.split(','):
nc.append(c)
+ elif k == 'b':
+ nc = self.metadata.get_client_names_by_bundles(v.split(','))
else:
- print("One of g= or p= must be specified")
+ print("One of g=, p= or b= must be specified")
raise SystemExit(1)
clients = [c for c in clients if c in nc]
if '-n' in args:
diff --git a/src/lib/Bcfg2/Server/Admin/Reports.py b/src/lib/Bcfg2/Server/Admin/Reports.py
index 974cdff9d..335d6a1e7 100644
--- a/src/lib/Bcfg2/Server/Admin/Reports.py
+++ b/src/lib/Bcfg2/Server/Admin/Reports.py
@@ -19,17 +19,15 @@ if sys.version_info >= (2, 5):
else:
from md5 import md5
-# Prereq issues can be signaled with ImportError, so no try needed
-# FIXME - settings file uses a hardcoded path for /etc/bcfg2.conf
-import Bcfg2.Server.Reports.settings
+import Bcfg2.settings
# Load django and reports stuff _after_ we know we can load settings
import django.core.management
from Bcfg2.Server.Reports.importscript import load_stats
-from Bcfg2.Server.Reports.updatefix import update_database
+from Bcfg2.Server.SchemaUpdater import update_database, UpdaterError
from Bcfg2.Server.Reports.utils import *
-project_directory = os.path.dirname(Bcfg2.Server.Reports.settings.__file__)
+project_directory = os.path.dirname(Bcfg2.settings.__file__)
project_name = os.path.basename(project_directory)
sys.path.append(os.path.join(project_directory, '..'))
project_module = __import__(project_name, '', '', [''])
@@ -41,7 +39,7 @@ from django.db import connection, transaction
from Bcfg2.Server.Reports.reports.models import Client, Interaction, Entries, \
Entries_interactions, Performance, \
- Reason, Ping
+ Reason
def printStats(fn):
@@ -55,7 +53,6 @@ def printStats(fn):
start_i = Interaction.objects.count()
start_ei = Entries_interactions.objects.count()
start_perf = Performance.objects.count()
- start_ping = Ping.objects.count()
fn(self, *data)
@@ -67,8 +64,6 @@ def printStats(fn):
(start_ei - Entries_interactions.objects.count()))
self.log.info("Metrics removed: %s" %
(start_perf - Performance.objects.count()))
- self.log.info("Ping metrics removed: %s" %
- (start_ping - Ping.objects.count()))
return print_stats
@@ -77,16 +72,13 @@ class Reports(Bcfg2.Server.Admin.Mode):
'''Admin interface for dynamic reports'''
__shorthelp__ = "Manage dynamic reports"
__longhelp__ = (__shorthelp__)
- django_commands = ['syncdb', 'sqlall', 'validate']
+ django_commands = ['dbshell', 'shell', 'syncdb', 'sqlall', 'validate']
__usage__ = ("bcfg2-admin reports [command] [options]\n"
- " -v|--verbose Be verbose\n"
- " -q|--quiet Print only errors\n"
"\n"
" Commands:\n"
" init Initialize the database\n"
" load_stats Load statistics data\n"
" -s|--stats Path to statistics.xml file\n"
- " -c|--clients-file Path to clients.xml file\n"
" -O3 Fast mode. Duplicates data!\n"
" purge Purge records\n"
" --client [n] Client to operate on\n"
@@ -95,12 +87,11 @@ class Reports(Bcfg2.Server.Admin.Mode):
" scrub Scrub the database for duplicate reasons and orphaned entries\n"
" update Apply any updates to the reporting database\n"
"\n"
- " Django commands:\n "
- "\n ".join(django_commands))
+ " Django commands:\n " \
+ + "\n ".join(django_commands))
def __init__(self, setup):
Bcfg2.Server.Admin.Mode.__init__(self, setup)
- self.log.setLevel(logging.INFO)
def __call__(self, args):
Bcfg2.Server.Admin.Mode.__call__(self, args)
@@ -108,28 +99,21 @@ class Reports(Bcfg2.Server.Admin.Mode):
print(self.__usage__)
raise SystemExit(0)
- verb = 0
-
- if '-v' in args or '--verbose' in args:
- self.log.setLevel(logging.DEBUG)
- verb = 1
- if '-q' in args or '--quiet' in args:
- self.log.setLevel(logging.WARNING)
-
# FIXME - dry run
if args[0] in self.django_commands:
self.django_command_proxy(args[0])
elif args[0] == 'scrub':
self.scrub()
- elif args[0] == 'init':
- update_database()
- elif args[0] == 'update':
- update_database()
+ elif args[0] in ['init', 'update']:
+ try:
+ update_database()
+ except UpdaterError:
+ print("Update failed")
+ raise SystemExit(-1)
elif args[0] == 'load_stats':
quick = '-O3' in args
stats_file = None
- clients_file = None
i = 1
while i < len(args):
if args[i] == '-s' or args[i] == '--stats':
@@ -137,11 +121,9 @@ class Reports(Bcfg2.Server.Admin.Mode):
if stats_file[0] == '-':
self.errExit("Invalid statistics file: %s" % stats_file)
elif args[i] == '-c' or args[i] == '--clients-file':
- clients_file = args[i + 1]
- if clients_file[0] == '-':
- self.errExit("Invalid clients file: %s" % clients_file)
+ print("DeprecationWarning: %s is no longer used" % args[i])
i = i + 1
- self.load_stats(stats_file, clients_file, verb, quick)
+ self.load_stats(stats_file, self.log.getEffectiveLevel() > logging.WARNING, quick)
elif args[0] == 'purge':
expired = False
client = None
@@ -239,7 +221,7 @@ class Reports(Bcfg2.Server.Admin.Mode):
else:
django.core.management.call_command(command)
- def load_stats(self, stats_file=None, clientspath=None, verb=0, quick=False):
+ def load_stats(self, stats_file=None, verb=0, quick=False):
'''Load statistics data into the database'''
location = ''
@@ -258,27 +240,18 @@ class Reports(Bcfg2.Server.Admin.Mode):
except:
encoding = 'UTF-8'
- if not clientspath:
- try:
- clientspath = "%s/Metadata/clients.xml" % \
- self.cfp.get('server', 'repository')
- except (ConfigParser.NoSectionError, ConfigParser.NoOptionError):
- self.errExit("Could not read bcfg2.conf; exiting")
- try:
- clientsdata = XML(open(clientspath).read())
- except (IOError, XMLSyntaxError):
- self.errExit("StatReports: Failed to parse %s" % (clientspath))
-
try:
- load_stats(clientsdata,
- statsdata,
+ load_stats(statsdata,
encoding,
verb,
self.log,
quick=quick,
location=platform.node())
+ except UpdaterError:
+ self.errExit("StatReports: Database updater failed")
except:
- pass
+ self.errExit("failed to import stats: %s"
+ % traceback.format_exc().splitlines()[-1])
@printStats
def purge(self, client=None, maxdate=None, state=None):
@@ -306,12 +279,10 @@ class Reports(Bcfg2.Server.Admin.Mode):
self.log.debug("Filtering by maxdate: %s" % maxdate)
ipurge = ipurge.filter(timestamp__lt=maxdate)
- # Handle ping data as well
- ping = Ping.objects.filter(endtime__lt=maxdate)
- if client:
- ping = ping.filter(client=cobj)
- ping.delete()
-
+ if Bcfg2.settings.DATABASES['default']['ENGINE'] == 'django.db.backends.sqlite3':
+ grp_limit = 100
+ else:
+ grp_limit = 1000
if state:
filtered = True
if state not in ('dirty', 'clean', 'modified'):
@@ -324,7 +295,7 @@ class Reports(Bcfg2.Server.Admin.Mode):
rnum = 0
try:
while rnum < count:
- grp = list(ipurge[:1000].values("id"))
+ grp = list(ipurge[:grp_limit].values("id"))
# just in case...
if not grp:
break
diff --git a/src/lib/Bcfg2/Server/Admin/Syncdb.py b/src/lib/Bcfg2/Server/Admin/Syncdb.py
new file mode 100644
index 000000000..bff232b05
--- /dev/null
+++ b/src/lib/Bcfg2/Server/Admin/Syncdb.py
@@ -0,0 +1,36 @@
+import Bcfg2.settings
+import Bcfg2.Options
+import Bcfg2.Server.Admin
+from Bcfg2.Server.SchemaUpdater import update_database, UpdaterError
+from django.core.management import setup_environ
+
+class Syncdb(Bcfg2.Server.Admin.Mode):
+ __shorthelp__ = ("Sync the Django ORM with the configured database")
+ __longhelp__ = __shorthelp__ + "\n\nbcfg2-admin syncdb"
+ __usage__ = "bcfg2-admin syncdb"
+ options = {'web_configfile': Bcfg2.Options.WEB_CFILE,
+ 'repo': Bcfg2.Options.SERVER_REPOSITORY}
+
+ def __call__(self, args):
+ import Bcfg2.Server.Admin
+ Bcfg2.Server.Admin.Mode.__call__(self, args)
+
+ # Parse options
+ self.opts = Bcfg2.Options.OptionParser(self.options)
+ self.opts.parse(args)
+
+ # we have to set up the django environment before we import
+ # the syncdb command, but we have to wait to set up the
+ # environment until we've read the config, which has to wait
+ # until we've parsed options. it's a windy, twisting road.
+ Bcfg2.settings.read_config(cfile=self.opts['web_configfile'],
+ repo=self.opts['repo'])
+ setup_environ(Bcfg2.settings)
+ import Bcfg2.Server.models
+ Bcfg2.Server.models.load_models(cfile=self.opts['configfile'])
+
+ try:
+ update_database()
+ except UpdaterError:
+ print("Update failed")
+ raise SystemExit(-1)
diff --git a/src/lib/Bcfg2/Server/Admin/Tidy.py b/src/lib/Bcfg2/Server/Admin/Tidy.py
index 82319b93e..65aa955b4 100644
--- a/src/lib/Bcfg2/Server/Admin/Tidy.py
+++ b/src/lib/Bcfg2/Server/Admin/Tidy.py
@@ -3,6 +3,7 @@ import re
import socket
import Bcfg2.Server.Admin
+from Bcfg2.Bcfg2Py3k import input
class Tidy(Bcfg2.Server.Admin.Mode):
@@ -22,11 +23,7 @@ class Tidy(Bcfg2.Server.Admin.Mode):
if '-f' in args or '-I' in args:
if '-I' in args:
for name in badfiles[:]:
- # py3k compatibility
- try:
- answer = raw_input("Unlink file %s? [yN] " % name)
- except NameError:
- answer = input("Unlink file %s? [yN] " % name)
+ answer = input("Unlink file %s? [yN] " % name)
if answer not in ['y', 'Y']:
badfiles.remove(name)
for name in badfiles:
diff --git a/src/lib/Bcfg2/Server/Admin/Viz.py b/src/lib/Bcfg2/Server/Admin/Viz.py
index 2faa423c1..b190dd62a 100644
--- a/src/lib/Bcfg2/Server/Admin/Viz.py
+++ b/src/lib/Bcfg2/Server/Admin/Viz.py
@@ -35,7 +35,7 @@ class Viz(Bcfg2.Server.Admin.MetadataCore):
__plugin_blacklist__ = ['DBStats', 'Snapshots', 'Cfg', 'Pkgmgr', 'Packages',
'Rules', 'Account', 'Decisions', 'Deps', 'Git',
'Svn', 'Fossil', 'Bzr', 'Bundler', 'TGenshi',
- 'SGenshi', 'Base']
+ 'Base']
def __call__(self, args):
Bcfg2.Server.Admin.MetadataCore.__call__(self, args)
diff --git a/src/lib/Bcfg2/Server/Admin/__init__.py b/src/lib/Bcfg2/Server/Admin/__init__.py
index 618fa450e..3a7ba45cf 100644
--- a/src/lib/Bcfg2/Server/Admin/__init__.py
+++ b/src/lib/Bcfg2/Server/Admin/__init__.py
@@ -11,6 +11,7 @@ __all__ = [
'Query',
'Reports',
'Snapshots',
+ 'Syncdb',
'Tidy',
'Viz',
'Xcmd'
@@ -117,15 +118,7 @@ class MetadataCore(Mode):
if p not in self.__plugin_blacklist__]
try:
- self.bcore = \
- Bcfg2.Server.Core.Core(setup['repo'],
- setup['plugins'],
- setup['password'],
- setup['encoding'],
- filemonitor=setup['filemonitor'],
- setup=setup)
- if setup['event debug']:
- self.bcore.fam.debug = True
+ self.bcore = Bcfg2.Server.Core.BaseCore(setup)
except Bcfg2.Server.Core.CoreInitError:
msg = sys.exc_info()[1]
self.errExit("Core load failed: %s" % msg)
diff --git a/src/lib/Bcfg2/Server/BuiltinCore.py b/src/lib/Bcfg2/Server/BuiltinCore.py
new file mode 100644
index 000000000..c52c49931
--- /dev/null
+++ b/src/lib/Bcfg2/Server/BuiltinCore.py
@@ -0,0 +1,103 @@
+""" the core of the builtin bcfg2 server """
+
+import os
+import sys
+import time
+import socket
+import logging
+from Bcfg2.Server.Core import BaseCore
+from Bcfg2.Bcfg2Py3k import xmlrpclib, urlparse
+from Bcfg2.SSLServer import XMLRPCServer
+
+logger = logging.getLogger()
+
+class NoExposedMethod (Exception):
+ """There is no method exposed with the given name."""
+
+
+class Core(BaseCore):
+ name = 'bcfg2-server'
+
+ def _resolve_exposed_method(self, method_name):
+ """Resolve an exposed method.
+
+ Arguments:
+ method_name -- name of the method to resolve
+
+ """
+ try:
+ func = getattr(self, method_name)
+ except AttributeError:
+ raise NoExposedMethod(method_name)
+ if not getattr(func, "exposed", False):
+ raise NoExposedMethod(method_name)
+ return func
+
+ def _dispatch(self, method, args, dispatch_dict):
+ """Custom XML-RPC dispatcher for components.
+
+ method -- XML-RPC method name
+ args -- tuple of parameters to method
+
+ """
+ if method in dispatch_dict:
+ method_func = dispatch_dict[method]
+ else:
+ try:
+ method_func = self._resolve_exposed_method(method)
+ except NoExposedMethod:
+ self.logger.error("Unknown method %s" % (method))
+ raise xmlrpclib.Fault(xmlrpclib.METHOD_NOT_FOUND,
+ "Unknown method %s" % method)
+
+ try:
+ method_start = time.time()
+ try:
+ result = method_func(*args)
+ finally:
+ method_done = time.time()
+ except xmlrpclib.Fault:
+ raise
+ except Exception:
+ e = sys.exc_info()[1]
+ if getattr(e, "log", True):
+ self.logger.error(e, exc_info=True)
+ raise xmlrpclib.Fault(getattr(e, "fault_code", 1), str(e))
+ return result
+
+ def run(self):
+ if self.setup['daemon']:
+ self._daemonize()
+
+ hostname, port = urlparse(self.setup['location'])[1].split(':')
+ server_address = socket.getaddrinfo(hostname,
+ port,
+ socket.AF_UNSPEC,
+ socket.SOCK_STREAM)[0][4]
+ try:
+ server = XMLRPCServer(self.setup['listen_all'],
+ server_address,
+ keyfile=self.setup['key'],
+ certfile=self.setup['cert'],
+ register=False,
+ timeout=1,
+ ca=self.setup['ca'],
+ protocol=self.setup['protocol'])
+ except:
+ err = sys.exc_info()[1]
+ self.logger.error("Server startup failed: %s" % err)
+ os._exit(1)
+ server.register_instance(self)
+
+ try:
+ server.serve_forever()
+ finally:
+ server.server_close()
+ self.shutdown()
+
+ def methodHelp(self, method_name):
+ try:
+ func = self._resolve_exposed_method(method_name)
+ except NoExposedMethod:
+ return ""
+ return func.__doc__
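
The dispatcher above only reaches callables whose exposed attribute is true (the exposed() decorator added to Core.py later in this patch sets it). A toy illustration of that contract, separate from the patch itself:

    def exposed(func):
        func.exposed = True
        return func

    class ToyComponent(object):
        @exposed
        def ping(self, address):
            """Reachable over XML-RPC; the client address is prepended."""
            return True

        def internal(self):
            # no .exposed attribute, so _resolve_exposed_method() raises
            # NoExposedMethod and the RPC fails with METHOD_NOT_FOUND
            return "not reachable"
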
diff --git a/src/lib/Bcfg2/Server/CherryPyCore.py b/src/lib/Bcfg2/Server/CherryPyCore.py
new file mode 100644
index 000000000..91e7f89bd
--- /dev/null
+++ b/src/lib/Bcfg2/Server/CherryPyCore.py
@@ -0,0 +1,131 @@
+""" the core of the CherryPy-powered server """
+
+import sys
+import base64
+import atexit
+import cherrypy
+import Bcfg2.Options
+from Bcfg2.Bcfg2Py3k import urlparse, xmlrpclib
+from Bcfg2.Server.Core import BaseCore
+from cherrypy.lib import xmlrpcutil
+from cherrypy._cptools import ErrorTool
+
+if cherrypy.engine.state == 0:
+ cherrypy.engine.start(blocking=False)
+ atexit.register(cherrypy.engine.stop)
+
+# define our own error handler that handles xmlrpclib.Fault objects
+# and so allows for the possibility of returning proper error
+# codes. this obviates the need to use the builtin CherryPy xmlrpc
+# tool
+def on_error(*args, **kwargs):
+ err = sys.exc_info()[1]
+ if not isinstance(err, xmlrpclib.Fault):
+ err = xmlrpclib.Fault(xmlrpclib.INTERNAL_ERROR, str(err))
+ xmlrpcutil._set_response(xmlrpclib.dumps(err))
+cherrypy.tools.xmlrpc_error = ErrorTool(on_error)
+
+
+class Core(BaseCore):
+ _cp_config = {'tools.xmlrpc_error.on': True,
+ 'tools.bcfg2_authn.on': True}
+
+ def __init__(self, *args, **kwargs):
+ BaseCore.__init__(self, *args, **kwargs)
+
+ cherrypy.tools.bcfg2_authn = cherrypy.Tool('on_start_resource',
+ self.do_authn)
+
+ self.rmi = self._get_rmi()
+
+ def do_authn(self):
+ try:
+ header = cherrypy.request.headers['Authorization']
+ except KeyError:
+ self.critical_error("No authentication data presented")
+ auth_type, auth_content = header.split()
+ try:
+ # py3k compatibility
+ auth_content = base64.standard_b64decode(auth_content)
+ except TypeError:
+ auth_content = \
+ base64.standard_b64decode(bytes(auth_content.encode('ascii')))
+ try:
+ # py3k compatibility
+ try:
+ username, password = auth_content.split(":")
+ except TypeError:
+ username, pw = auth_content.split(bytes(":", encoding='utf-8'))
+ password = pw.decode('utf-8')
+ except ValueError:
+ username = auth_content
+ password = ""
+
+ # FIXME: Get client cert
+ cert = None
+ address = (cherrypy.request.remote.ip, cherrypy.request.remote.name)
+ return self.authenticate(cert, username, password, address)
+
+ @cherrypy.expose
+ def default(self, *vpath, **params):
+ # we needed to make enough changes to the stock XMLRPCController
+ # (to support plugin.__rmi__ and prepending the client address)
+ # that we just rewrote it. it clearly wasn't written with
+ # inheritance in mind :(
+ rpcparams, rpcmethod = xmlrpcutil.process_body()
+ if "." not in rpcmethod:
+ address = (cherrypy.request.remote.ip, cherrypy.request.remote.name)
+ rpcparams = (address, ) + rpcparams
+
+ handler = getattr(self, rpcmethod)
+ if not handler or not getattr(handler, "exposed", False):
+ raise Exception('method "%s" is not supported' % rpcmethod)
+ else:
+ try:
+ handler = self.rmi[rpcmethod]
+ except:
+ raise Exception('method "%s" is not supported' % rpcmethod)
+
+ body = handler(*rpcparams, **params)
+
+ xmlrpcutil.respond(body, 'utf-8', True)
+ return cherrypy.serving.response.body
+
+ def run(self):
+ hostname, port = urlparse(self.setup['location'])[1].split(':')
+ if self.setup['listen_all']:
+ hostname = '0.0.0.0'
+
+ config = {'engine.autoreload.on': False,
+ 'server.socket_port': int(port)}
+ if self.setup['cert'] and self.setup['key']:
+ config.update({'server.ssl_module': 'pyopenssl',
+ 'server.ssl_certificate': self.setup['cert'],
+ 'server.ssl_private_key': self.setup['key']})
+ if self.setup['debug']:
+ config['log.screen'] = True
+ cherrypy.config.update(config)
+ cherrypy.quickstart(self, config={'/': self.setup})
+
+
+def parse_opts(argv=None):
+ if argv is None:
+ argv = sys.argv[1:]
+ optinfo = dict()
+ optinfo.update(Bcfg2.Options.CLI_COMMON_OPTIONS)
+ optinfo.update(Bcfg2.Options.SERVER_COMMON_OPTIONS)
+ optinfo.update(Bcfg2.Options.DAEMON_COMMON_OPTIONS)
+ setup = Bcfg2.Options.OptionParser(optinfo, argv=argv)
+ setup.parse(argv)
+ return setup
+
+def application(environ, start_response):
+ """ running behind Apache as a WSGI app is not currently
+ supported, but I'm keeping this code here because I hope for it to
+ be supported some day. we'll need to set up an AMQP task queue
+ and related magic for that to happen, though. """
+ cherrypy.config.update({'environment': 'embedded'})
+ setup = parse_opts(argv=['-C', environ['config']])
+ root = Core(setup, start_fam_thread=True)
+ cherrypy.tree.mount(root)
+ return cherrypy.tree(environ, start_response)
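
parse_opts() and the Core constructor above imply a standalone startup along these lines, mirroring what application() does for the WSGI case; a hedged sketch, not an officially supported entry point:

    import sys
    from Bcfg2.Server.CherryPyCore import Core, parse_opts

    # sketch only: parse the common/server/daemon option groups and run
    # the CherryPy-backed core in the foreground
    setup = parse_opts(argv=sys.argv[1:])
    core = Core(setup, start_fam_thread=True)
    core.run()
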
diff --git a/src/lib/Bcfg2/Server/Core.py b/src/lib/Bcfg2/Server/Core.py
index 8482925b7..f39453edd 100644
--- a/src/lib/Bcfg2/Server/Core.py
+++ b/src/lib/Bcfg2/Server/Core.py
@@ -1,35 +1,21 @@
"""Bcfg2.Server.Core provides the runtime support for Bcfg2 modules."""
+import os
import atexit
import logging
import select
import sys
import threading
import time
+import inspect
+import lxml.etree
from traceback import format_exc
-
-try:
- import lxml.etree
-except ImportError:
- print("Failed to import lxml dependency. Shutting down server.")
- raise SystemExit(1)
-
-from Bcfg2.Component import Component, exposed
-from Bcfg2.Server.Plugin import PluginInitError, PluginExecutionError
+import Bcfg2.settings
+import Bcfg2.Server
+import Bcfg2.Logger
import Bcfg2.Server.FileMonitor
-import Bcfg2.Server.Plugins.Metadata
-# Compatibility imports
-from Bcfg2.Bcfg2Py3k import xmlrpclib
-if sys.hexversion >= 0x03000000:
- from functools import reduce
-
-logger = logging.getLogger('Bcfg2.Server.Core')
-
-
-def critical_error(operation):
- """Log and err, traceback and return an xmlrpc fault to client."""
- logger.error(operation, exc_info=1)
- raise xmlrpclib.Fault(7, "Critical unexpected failure: %s" % (operation))
+from Bcfg2.Bcfg2Py3k import xmlrpclib, reduce
+from Bcfg2.Server.Plugin import PluginInitError, PluginExecutionError
try:
import psyco
@@ -37,6 +23,11 @@ try:
except:
pass
+os.environ['DJANGO_SETTINGS_MODULE'] = 'Bcfg2.settings'
+
+def exposed(func):
+ func.exposed = True
+ return func
def sort_xml(node, key=None):
for child in node:
@@ -54,88 +45,134 @@ class CoreInitError(Exception):
pass
-class Core(Component):
+class BaseCore(object):
"""The Core object is the container for all
Bcfg2 Server logic and modules.
"""
- name = 'bcfg2-server'
- implementation = 'bcfg2-server'
-
- def __init__(self, repo, plugins, password, encoding,
- cfile='/etc/bcfg2.conf', ca=None, setup=None,
- filemonitor='default', start_fam_thread=False):
- Component.__init__(self)
- self.datastore = repo
- if filemonitor not in Bcfg2.Server.FileMonitor.available:
- logger.error("File monitor driver %s not available; "
- "forcing to default" % filemonitor)
- filemonitor = 'default'
+
+ def __init__(self, setup, start_fam_thread=False):
+ self.datastore = setup['repo']
+
+ if setup['debug']:
+ level = logging.DEBUG
+ elif setup['verbose']:
+ level = logging.INFO
+ else:
+ level = logging.WARNING
+ # we set a more verbose log level for the console by default. we
+ # assume that if someone is running bcfg2-server in such a way
+ # that it _can_ log to console, they want more output. if
+ # level is set to DEBUG, that will get handled by
+ # setup_logging and the console will get DEBUG output.
+ Bcfg2.Logger.setup_logging('bcfg2-server',
+ to_console=logging.INFO,
+ to_syslog=setup['syslog'],
+ to_file=setup['logging'],
+ level=level)
+ self.logger = logging.getLogger('bcfg2-server')
+
try:
- self.fam = Bcfg2.Server.FileMonitor.available[filemonitor]()
+ fm = Bcfg2.Server.FileMonitor.available[setup['filemonitor']]
+ except KeyError:
+ self.logger.error("File monitor driver %s not available; "
+ "forcing to default" % setup['filemonitor'])
+ fm = Bcfg2.Server.FileMonitor.available['default']
+ famargs = dict(ignore=[], debug=False)
+ if 'ignore' in setup:
+ famargs['ignore'] = setup['ignore']
+ if 'debug' in setup:
+ famargs['debug'] = setup['debug']
+ try:
+ self.fam = fm(**famargs)
except IOError:
- logger.error("Failed to instantiate fam driver %s" % filemonitor,
- exc_info=1)
- raise CoreInitError("failed to instantiate fam driver (used %s)" % \
- filemonitor)
+ msg = "Failed to instantiate fam driver %s" % setup['filemonitor']
+ self.logger.error(msg, exc_info=1)
+ raise CoreInitError(msg)
self.pubspace = {}
- self.cfile = cfile
+ self.cfile = setup['configfile']
self.cron = {}
self.plugins = {}
self.plugin_blacklist = {}
self.revision = '-1'
- self.password = password
- self.encoding = encoding
+ self.password = setup['password']
+ self.encoding = setup['encoding']
self.setup = setup
atexit.register(self.shutdown)
# Create an event to signal worker threads to shutdown
self.terminate = threading.Event()
- if '' in plugins:
- plugins.remove('')
+ # generate Django ORM settings. this must be done _before_ we
+ # load plugins
+ Bcfg2.settings.read_config(cfile=self.setup['web_configfile'],
+ repo=self.datastore)
- for plugin in plugins:
+ self._database_available = False
+ # verify our database schema
+ try:
+ from Bcfg2.Server.SchemaUpdater import update_database, UpdaterError
+ try:
+ update_database()
+ self._database_available = True
+ except UpdaterError:
+ err = sys.exc_info()[1]
+ self.logger.error("Failed to update database schema: %s" % err)
+ except ImportError:
+ # assume django is not installed
+ pass
+ except Exception:
+ inst = sys.exc_info()[1]
+ self.logger.error("Failed to update database schema")
+ self.logger.error(str(inst))
+ self.logger.error(str(type(inst)))
+ raise CoreInitError
+
+ if '' in setup['plugins']:
+ setup['plugins'].remove('')
+
+ for plugin in setup['plugins']:
if not plugin in self.plugins:
self.init_plugins(plugin)
# Remove blacklisted plugins
for p, bl in list(self.plugin_blacklist.items()):
if len(bl) > 0:
- logger.error("The following plugins conflict with %s;"
- "Unloading %s" % (p, bl))
+ self.logger.error("The following plugins conflict with %s;"
+ "Unloading %s" % (p, bl))
for plug in bl:
del self.plugins[plug]
# This section logs the experimental plugins
expl = [plug for (name, plug) in list(self.plugins.items())
if plug.experimental]
if expl:
- logger.info("Loading experimental plugin(s): %s" % \
- (" ".join([x.name for x in expl])))
- logger.info("NOTE: Interfaces subject to change")
+ self.logger.info("Loading experimental plugin(s): %s" %
+ (" ".join([x.name for x in expl])))
+ self.logger.info("NOTE: Interfaces subject to change")
# This section logs the deprecated plugins
depr = [plug for (name, plug) in list(self.plugins.items())
if plug.deprecated]
if depr:
- logger.info("Loading deprecated plugin(s): %s" % \
- (" ".join([x.name for x in depr])))
+ self.logger.info("Loading deprecated plugin(s): %s" %
+ (" ".join([x.name for x in depr])))
mlist = self.plugins_by_type(Bcfg2.Server.Plugin.Metadata)
if len(mlist) == 1:
self.metadata = mlist[0]
else:
- logger.error("No Metadata Plugin loaded; failed to instantiate Core")
+ self.logger.error("No Metadata Plugin loaded; "
+ "failed to instantiate Core")
raise CoreInitError("No Metadata Plugin")
self.statistics = self.plugins_by_type(Bcfg2.Server.Plugin.Statistics)
self.pull_sources = self.plugins_by_type(Bcfg2.Server.Plugin.PullSource)
self.generators = self.plugins_by_type(Bcfg2.Server.Plugin.Generator)
self.structures = self.plugins_by_type(Bcfg2.Server.Plugin.Structure)
self.connectors = self.plugins_by_type(Bcfg2.Server.Plugin.Connector)
- self.ca = ca
- self.fam_thread = threading.Thread(target=self._file_monitor_thread)
+ self.ca = setup['ca']
+ self.fam_thread = \
+ threading.Thread(name="%sFAMThread" % setup['filemonitor'],
+ target=self._file_monitor_thread)
+ self.lock = threading.Lock()
+
if start_fam_thread:
self.fam_thread.start()
- self.monitor_cfile()
-
- def monitor_cfile(self):
- if self.setup:
self.fam.AddMonitor(self.cfile, self.setup)
def plugins_by_type(self, base_cls):
@@ -171,16 +208,21 @@ class Core(Component):
def init_plugins(self, plugin):
"""Handling for the plugins."""
+ self.logger.debug("Loading plugin %s" % plugin)
try:
mod = getattr(__import__("Bcfg2.Server.Plugins.%s" %
(plugin)).Server.Plugins, plugin)
except ImportError:
try:
- mod = __import__(plugin)
+ mod = __import__(plugin, globals(), locals(), [plugin.split('.')[-1]])
except:
- logger.error("Failed to load plugin %s" % (plugin))
+ self.logger.error("Failed to load plugin %s" % plugin)
return
- plug = getattr(mod, plugin)
+ try:
+ plug = getattr(mod, plugin.split('.')[-1])
+ except AttributeError:
+ self.logger.error("Failed to load plugin %s (AttributeError)" % plugin)
+ return
# Blacklist conflicting plugins
cplugs = [conflict for conflict in plug.conflicts
if conflict in self.plugins]
@@ -188,18 +230,35 @@ class Core(Component):
try:
self.plugins[plugin] = plug(self, self.datastore)
except PluginInitError:
- logger.error("Failed to instantiate plugin %s" % (plugin))
+ self.logger.error("Failed to instantiate plugin %s" % plugin,
+ exc_info=1)
except:
- logger.error("Unexpected instantiation failure for plugin %s" %
- (plugin), exc_info=1)
+ self.logger.error("Unexpected instantiation failure for plugin %s" %
+ plugin, exc_info=1)
def shutdown(self):
"""Shutting down the plugins."""
if not self.terminate.isSet():
self.terminate.set()
+ self.fam.shutdown()
for plugin in list(self.plugins.values()):
plugin.shutdown()
+ def client_run_hook(self, hook, metadata):
+ """Checks the data structure."""
+ for plugin in self.plugins_by_type(Bcfg2.Server.Plugin.ClientRunHooks):
+ try:
+ getattr(plugin, hook)(metadata)
+ except AttributeError:
+ err = sys.exc_info()[1]
+ self.logger.error("Unknown attribute: %s" % err)
+ raise
+ except:
+ err = sys.exc_info()[1]
+ self.logger.error("%s: Error invoking hook %s: %s" % (plugin,
+ hook,
+ err))
+
def validate_structures(self, metadata, data):
"""Checks the data structure."""
for plugin in self.plugins_by_type(Bcfg2.Server.Plugin.StructureValidator):
@@ -207,12 +266,12 @@ class Core(Component):
plugin.validate_structures(metadata, data)
except Bcfg2.Server.Plugin.ValidationError:
err = sys.exc_info()[1]
- logger.error("Plugin %s structure validation failed: %s" \
- % (plugin.name, err.message))
+ self.logger.error("Plugin %s structure validation failed: %s" %
+ (plugin.name, err))
raise
except:
- logger.error("Plugin %s: unexpected structure validation failure" \
- % (plugin.name), exc_info=1)
+ self.logger.error("Plugin %s: unexpected structure validation "
+ "failure" % plugin.name, exc_info=1)
def validate_goals(self, metadata, data):
"""Checks that the config matches the goals enforced by the plugins."""
@@ -221,23 +280,23 @@ class Core(Component):
plugin.validate_goals(metadata, data)
except Bcfg2.Server.Plugin.ValidationError:
err = sys.exc_info()[1]
- logger.error("Plugin %s goal validation failed: %s" \
- % (plugin.name, err.message))
+ self.logger.error("Plugin %s goal validation failed: %s" %
+ (plugin.name, err.message))
raise
except:
- logger.error("Plugin %s: unexpected goal validation failure" \
- % (plugin.name), exc_info=1)
+ self.logger.error("Plugin %s: unexpected goal validation "
+ "failure" % plugin.name, exc_info=1)
def GetStructures(self, metadata):
"""Get all structures for client specified by metadata."""
structures = reduce(lambda x, y: x + y,
- [struct.BuildStructures(metadata) for struct \
- in self.structures], [])
+ [struct.BuildStructures(metadata)
+ for struct in self.structures], [])
sbundles = [b.get('name') for b in structures if b.tag == 'Bundle']
missing = [b for b in metadata.bundles if b not in sbundles]
if missing:
- logger.error("Client %s configuration missing bundles: %s" \
- % (metadata.hostname, ':'.join(missing)))
+ self.logger.error("Client %s configuration missing bundles: %s" %
+ (metadata.hostname, ':'.join(missing)))
return structures
def BindStructure(self, structure, metadata):
@@ -252,14 +311,14 @@ class Core(Component):
exc = sys.exc_info()[1]
if 'failure' not in entry.attrib:
entry.set('failure', 'bind error: %s' % format_exc())
- logger.error("Failed to bind entry %s:%s: %s" %
- (entry.tag, entry.get('name'), exc))
+ self.logger.error("Failed to bind entry %s:%s: %s" %
+ (entry.tag, entry.get('name'), exc))
except Exception:
exc = sys.exc_info()[1]
if 'failure' not in entry.attrib:
entry.set('failure', 'bind error: %s' % format_exc())
- logger.error("Unexpected failure in BindStructure: %s %s" \
- % (entry.tag, entry.get('name')), exc_info=1)
+ self.logger.error("Unexpected failure in BindStructure: %s %s" %
+ (entry.tag, entry.get('name')), exc_info=1)
def Bind(self, entry, metadata):
"""Bind an entry using the appropriate generator."""
@@ -275,11 +334,11 @@ class Core(Component):
return ret
except:
entry.set('name', oldname)
- logger.error("Failed binding entry %s:%s with altsrc %s" \
- % (entry.tag, entry.get('name'),
- entry.get('altsrc')))
- logger.error("Falling back to %s:%s" % (entry.tag,
- entry.get('name')))
+ self.logger.error("Failed binding entry %s:%s with altsrc %s" %
+ (entry.tag, entry.get('name'),
+ entry.get('altsrc')))
+ self.logger.error("Falling back to %s:%s" % (entry.tag,
+ entry.get('name')))
glist = [gen for gen in self.generators if
entry.get('name') in gen.Entries.get(entry.tag, {})]
@@ -288,8 +347,8 @@ class Core(Component):
metadata)
elif len(glist) > 1:
generators = ", ".join([gen.name for gen in glist])
- logger.error("%s %s served by multiple generators: %s" % \
- (entry.tag, entry.get('name'), generators))
+ self.logger.error("%s %s served by multiple generators: %s" %
+ (entry.tag, entry.get('name'), generators))
g2list = [gen for gen in self.generators if
gen.HandlesEntry(entry, metadata)]
if len(g2list) == 1:
@@ -301,18 +360,21 @@ class Core(Component):
def BuildConfiguration(self, client):
"""Build configuration for clients."""
start = time.time()
- config = lxml.etree.Element("Configuration", version='2.0', \
+ config = lxml.etree.Element("Configuration", version='2.0',
revision=self.revision)
try:
meta = self.build_metadata(client)
- except Bcfg2.Server.Plugins.Metadata.MetadataConsistencyError:
- logger.error("Metadata consistency error for client %s" % client)
+ except Bcfg2.Server.Plugin.MetadataConsistencyError:
+ self.logger.error("Metadata consistency error for client %s" %
+ client)
return lxml.etree.Element("error", type='metadata error')
+ self.client_run_hook("start_client_run", meta)
+
try:
structures = self.GetStructures(meta)
except:
- logger.error("error in GetStructures", exc_info=1)
+ self.logger.error("error in GetStructures", exc_info=1)
return lxml.etree.Element("error", type='structure error')
self.validate_structures(meta, structures)
@@ -324,7 +386,8 @@ class Core(Component):
key = (entry.tag, entry.get('name'))
if key in esrcs:
if esrcs[key] != entry.get('altsrc'):
- logger.error("Found inconsistent altsrc mapping for entry %s:%s" % key)
+ self.logger.error("Found inconsistent altsrc mapping "
+ "for entry %s:%s" % key)
else:
esrcs[key] = entry.get('altsrc', None)
del esrcs
@@ -334,15 +397,49 @@ class Core(Component):
self.BindStructure(astruct, meta)
config.append(astruct)
except:
- logger.error("error in BindStructure", exc_info=1)
+ self.logger.error("error in BindStructure", exc_info=1)
self.validate_goals(meta, config)
+ self.client_run_hook("end_client_run", meta)
+
sort_xml(config, key=lambda e: e.get('name'))
- logger.info("Generated config for %s in %.03f seconds" % \
- (client, time.time() - start))
+ self.logger.info("Generated config for %s in %.03f seconds" %
+ (client, time.time() - start))
return config
+ def run(self, **kwargs):
+ """ run the server core """
+ raise NotImplementedError
+
+ def _daemonize(self):
+ child_pid = os.fork()
+ if child_pid != 0:
+ return
+
+ os.setsid()
+
+ child_pid = os.fork()
+ if child_pid != 0:
+ os._exit(0)
+
+ redirect_file = open("/dev/null", "w+")
+ os.dup2(redirect_file.fileno(), sys.__stdin__.fileno())
+ os.dup2(redirect_file.fileno(), sys.__stdout__.fileno())
+ os.dup2(redirect_file.fileno(), sys.__stderr__.fileno())
+
+ os.chdir(os.sep)
+
+ pidfile = open(self.setup['daemon'] or "/dev/null", "w")
+ pidfile.write("%s\n" % os.getpid())
+ pidfile.close()
+
+ return os.getpid()
+
+ def critical_error(self, operation):
+ """ this should be overridden by child classes """
+ self.logger.fatal(operation, exc_info=1)
+
def GetDecisions(self, metadata, mode):
"""Get data for the decision list."""
result = []
@@ -350,15 +447,15 @@ class Core(Component):
try:
result += plugin.GetDecisions(metadata, mode)
except:
- logger.error("Plugin: %s failed to generate decision list" \
- % plugin.name, exc_info=1)
+ self.logger.error("Plugin: %s failed to generate decision list"
+ % plugin.name, exc_info=1)
return result
def build_metadata(self, client_name):
"""Build the metadata structure."""
if not hasattr(self, 'metadata'):
# some threads start before metadata is even loaded
- raise Bcfg2.Server.Plugins.Metadata.MetadataRuntimeError
+ raise Bcfg2.Server.Plugin.MetadataRuntimeError
imd = self.metadata.get_initial_metadata(client_name)
for conn in self.connectors:
grps = conn.get_additional_groups(imd)
@@ -378,102 +475,147 @@ class Core(Component):
try:
plugin.process_statistics(meta, statistics)
except:
- logger.error("Plugin %s failed to process stats from %s" \
- % (plugin.name, meta.hostname),
- exc_info=1)
+ self.logger.error("Plugin %s failed to process stats from "
+ "%s" % (plugin.name, meta.hostname),
+ exc_info=1)
+
+ self.logger.info("Client %s reported state %s" % (client_name,
+ state.get('state')))
+ self.client_run_hook("end_statistics", meta)
+
+ def resolve_client(self, address, cleanup_cache=False, metadata=True):
+ try:
+ client = self.metadata.resolve_client(address,
+ cleanup_cache=cleanup_cache)
+ if metadata:
+ meta = self.build_metadata(client)
+ else:
+ meta = None
+ except Bcfg2.Server.Plugin.MetadataConsistencyError:
+ err = sys.exc_info()[1]
+ self.critical_error("Client metadata resolution error for %s: %s" %
+ (address[0], err))
+ except Bcfg2.Server.Plugin.MetadataRuntimeError:
+ err = sys.exc_info()[1]
+ self.critical_error('Metadata system runtime failure for %s: %s' %
+ (address[0], err))
+ return (client, meta)
+
+ def critical_error(self, operation):
+ """Log and err, traceback and return an xmlrpc fault to client."""
+ self.logger.error(operation, exc_info=1)
+ raise xmlrpclib.Fault(xmlrpclib.APPLICATION_ERROR,
+ "Critical failure: %s" % operation)
+
+ def _get_rmi(self):
+ rmi = dict()
+ if self.plugins:
+ for pname, pinst in list(self.plugins.items()):
+ for mname in pinst.__rmi__:
+ rmi["%s.%s" % (pname, mname)] = getattr(pinst, mname)
+ return rmi
- logger.info("Client %s reported state %s" % (client_name,
- state.get('state')))
# XMLRPC handlers start here
+ @exposed
+ def listMethods(self, address):
+ methods = [name
+ for name, func in inspect.getmembers(self, callable)
+ if getattr(func, "exposed", False)]
+ methods.extend(self._get_rmi().keys())
+ return methods
+
+ @exposed
+ def methodHelp(self, address, method_name):
+ raise NotImplementedError
+
+ @exposed
+ def DeclareVersion(self, address, version):
+ """ declare the client version """
+ client, metadata = self.resolve_client(address)
+ try:
+ self.metadata.set_version(client, version)
+ except (Bcfg2.Server.Plugin.MetadataConsistencyError,
+ Bcfg2.Server.Plugin.MetadataRuntimeError):
+ err = sys.exc_info()[1]
+ self.critical_error("Unable to set version for %s: %s" %
+ (client, err))
+ return True
@exposed
def GetProbes(self, address):
"""Fetch probes for a particular client."""
resp = lxml.etree.Element('probes')
+ client, metadata = self.resolve_client(address, cleanup_cache=True)
try:
- name = self.metadata.resolve_client(address, cleanup_cache=True)
- meta = self.build_metadata(name)
-
for plugin in self.plugins_by_type(Bcfg2.Server.Plugin.Probing):
- for probe in plugin.GetProbes(meta):
+ for probe in plugin.GetProbes(metadata):
resp.append(probe)
- return lxml.etree.tostring(resp, encoding='UTF-8',
- xml_declaration=True)
- except Bcfg2.Server.Plugins.Metadata.MetadataConsistencyError:
- warning = 'Client metadata resolution error for %s' % address[0]
- self.logger.warning(warning)
- raise xmlrpclib.Fault(6, warning + "; check server log")
- except Bcfg2.Server.Plugins.Metadata.MetadataRuntimeError:
- err_msg = 'Metadata system runtime failure'
- self.logger.error(err_msg)
- raise xmlrpclib.Fault(6, err_msg)
+ return lxml.etree.tostring(resp,
+ xml_declaration=False).decode('UTF-8')
except:
- critical_error("Error determining client probes")
+ err = sys.exc_info()[1]
+ self.critical_error("Error determining probes for %s: %s" %
+ (client, err))
@exposed
def RecvProbeData(self, address, probedata):
"""Receive probe data from clients."""
+ client, metadata = self.resolve_client(address)
try:
- name = self.metadata.resolve_client(address)
- meta = self.build_metadata(name)
- except Bcfg2.Server.Plugins.Metadata.MetadataConsistencyError:
- warning = 'Metadata consistency error'
- self.logger.warning(warning)
- raise xmlrpclib.Fault(6, warning)
- # clear dynamic groups
- self.metadata.cgroups[meta.hostname] = []
- try:
- xpdata = lxml.etree.XML(probedata.encode('utf-8'))
+ xpdata = lxml.etree.XML(probedata.encode('utf-8'),
+ parser=Bcfg2.Server.XMLParser)
except:
- self.logger.error("Failed to parse probe data from client %s" % \
- (address[0]))
- return False
+ err = sys.exc_info()[1]
+ self.critical_error("Failed to parse probe data from client %s: %s"
+ % (client, err))
sources = []
[sources.append(data.get('source')) for data in xpdata
if data.get('source') not in sources]
for source in sources:
if source not in self.plugins:
- self.logger.warning("Failed to locate plugin %s" % (source))
+ self.logger.warning("Failed to locate plugin %s" % source)
continue
dl = [data for data in xpdata if data.get('source') == source]
try:
- self.plugins[source].ReceiveData(meta, dl)
+ self.plugins[source].ReceiveData(metadata, dl)
except:
- logger.error("Failed to process probe data from client %s" % \
- (address[0]), exc_info=1)
+ err = sys.exc_info()[1]
+ self.critical_error("Failed to process probe data from client "
+ "%s: %s" %
+ (client, err))
return True
@exposed
def AssertProfile(self, address, profile):
"""Set profile for a client."""
+ client = self.resolve_client(address, metadata=False)[0]
try:
- client = self.metadata.resolve_client(address)
self.metadata.set_profile(client, profile, address)
- except (Bcfg2.Server.Plugins.Metadata.MetadataConsistencyError,
- Bcfg2.Server.Plugins.Metadata.MetadataRuntimeError):
- warning = 'Metadata consistency error'
- self.logger.warning(warning)
- raise xmlrpclib.Fault(6, warning)
+ except (Bcfg2.Server.Plugin.MetadataConsistencyError,
+ Bcfg2.Server.Plugin.MetadataRuntimeError):
+ err = sys.exc_info()[1]
+ self.critical_error("Unable to assert profile for %s: %s" %
+ (client, err))
return True
@exposed
def GetConfig(self, address, checksum=False):
"""Build config for a client."""
+ client = self.resolve_client(address)[0]
try:
- client = self.metadata.resolve_client(address)
config = self.BuildConfiguration(client)
- return lxml.etree.tostring(config, encoding='UTF-8',
- xml_declaration=True)
- except Bcfg2.Server.Plugins.Metadata.MetadataConsistencyError:
- self.logger.warning("Metadata consistency failure for %s" % (address))
- raise xmlrpclib.Fault(6, "Metadata consistency failure")
+ return lxml.etree.tostring(config,
+ xml_declaration=False).decode('UTF-8')
+ except Bcfg2.Server.Plugin.MetadataConsistencyError:
+ self.critical_error("Metadata consistency failure for %s" % client)
@exposed
def RecvStats(self, address, stats):
"""Act on statistics upload."""
- sdata = lxml.etree.XML(stats.encode('utf-8'))
- client = self.metadata.resolve_client(address)
+ client = self.resolve_client(address)[0]
+ sdata = lxml.etree.XML(stats.encode('utf-8'),
+ parser=Bcfg2.Server.XMLParser)
self.process_statistics(client, sdata)
return "<ok/>"
@@ -483,11 +625,17 @@ class Core(Component):
else:
# No ca, so no cert validation can be done
acert = None
- return self.metadata.AuthenticateConnection(acert, user, password, address)
+ return self.metadata.AuthenticateConnection(acert, user, password,
+ address)
@exposed
def GetDecisionList(self, address, mode):
"""Get the data of the decision list."""
- client = self.metadata.resolve_client(address)
- meta = self.build_metadata(client)
- return self.GetDecisions(meta, mode)
+ client, metadata = self.resolve_client(address)
+ return self.GetDecisions(metadata, mode)
+
+ @property
+ def database_available(self):
+ """Is the database configured and available"""
+ return self._database_available
+
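The client_run_hook machinery added above simply looks up the named method on each registered plugin and calls it with the client metadata, and BuildConfiguration/process_statistics now fire "start_client_run", "end_client_run" and "end_statistics". A minimal sketch of a plugin that consumes those hooks follows; the Plugin base class, its constructor signature and any hook mixin the core may filter plugins on are assumptions, only the hook names and the getattr(plugin, hook)(metadata) dispatch come from the hunks above.

    # Hedged sketch: a plugin reacting to the client-run hooks added above.
    # Base class and __init__ signature are assumptions; if the core filters
    # on a dedicated hook interface, the plugin would also inherit that.
    import time
    import Bcfg2.Server.Plugin

    class RunTimer(Bcfg2.Server.Plugin.Plugin):
        name = "RunTimer"

        def __init__(self, core, datastore):
            Bcfg2.Server.Plugin.Plugin.__init__(self, core, datastore)
            self.start_times = dict()

        def start_client_run(self, metadata):
            self.start_times[metadata.hostname] = time.time()

        def end_client_run(self, metadata):
            started = self.start_times.pop(metadata.hostname, time.time())
            self.logger.info("Built %s in %.02fs" %
                             (metadata.hostname, time.time() - started))

        def end_statistics(self, metadata):
            self.logger.info("Statistics processed for %s" % metadata.hostname)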
diff --git a/src/lib/Bcfg2/Server/FileMonitor.py b/src/lib/Bcfg2/Server/FileMonitor.py
deleted file mode 100644
index d6b313e6b..000000000
--- a/src/lib/Bcfg2/Server/FileMonitor.py
+++ /dev/null
@@ -1,315 +0,0 @@
-"""Bcfg2.Server.FileMonitor provides the support for monitorung files."""
-
-import logging
-import os
-import stat
-from time import sleep, time
-
-logger = logging.getLogger('Bcfg2.Server.FileMonitor')
-
-
-def ShouldIgnore(event):
- """Test if the event should be suppresed."""
- # FIXME should move event suppression out of the core
- if event.filename.split('/')[-1] == '.svn':
- return True
- if event.filename.endswith('~') or \
- event.filename.startswith('#') or event.filename.startswith('.#'):
- #logger.error("Suppressing event for file %s" % (event.filename))
- return True
- return False
-
-
-class Event(object):
- def __init__(self, request_id, filename, code):
- self.requestID = request_id
- self.filename = filename
- self.action = code
-
- def code2str(self):
- """return static code for event"""
- return self.action
-
-available = {}
-
-
-class FileMonitor(object):
- """File Monitor baseclass."""
- def __init__(self, debug=False):
- object.__init__(self)
- self.debug = debug
- self.handles = dict()
-
- def get_event(self):
- return None
-
- def pending(self):
- return False
-
- def fileno(self):
- return 0
-
- def handle_one_event(self, event):
- if ShouldIgnore(event):
- return
- if event.requestID not in self.handles:
- logger.info("Got event for unexpected id %s, file %s" %
- (event.requestID, event.filename))
- return
- if self.debug:
- logger.info("Dispatching event %s %s to obj %s" \
- % (event.code2str(), event.filename,
- self.handles[event.requestID]))
- try:
- self.handles[event.requestID].HandleEvent(event)
- except:
- logger.error("error in handling of gamin event for %s" % \
- (event.filename), exc_info=1)
-
- def handle_event_set(self, lock=None):
- count = 1
- event = self.get_event()
- start = time()
- if lock:
- lock.acquire()
- try:
- self.handle_one_event(event)
- while self.pending():
- self.handle_one_event(self.get_event())
- count += 1
- except:
- pass
- if lock:
- lock.release()
- end = time()
- logger.info("Handled %d events in %.03fs" % (count, (end - start)))
-
- def handle_events_in_interval(self, interval):
- end = time() + interval
- while time() < end:
- if self.pending():
- self.handle_event_set()
- end = time() + interval
- else:
- sleep(0.5)
-
-
-class FamFam(object):
- """The fam object is a set of callbacks for
- file alteration events (FAM support).
- """
-
- def __init__(self):
- object.__init__(self)
- self.fm = _fam.open()
- self.users = {}
- self.handles = {}
- self.debug = False
-
- def fileno(self):
- """Return fam file handle number."""
- return self.fm.fileno()
-
- def handle_event_set(self, _):
- self.Service()
-
- def handle_events_in_interval(self, interval):
- now = time()
- while (time() - now) < interval:
- if self.Service():
- now = time()
-
- def AddMonitor(self, path, obj):
- """Add a monitor to path, installing a callback to obj.HandleEvent."""
- mode = os.stat(path)[stat.ST_MODE]
- if stat.S_ISDIR(mode):
- handle = self.fm.monitorDirectory(path, None)
- else:
- handle = self.fm.monitorFile(path, None)
- self.handles[handle.requestID()] = handle
- if obj != None:
- self.users[handle.requestID()] = obj
- return handle.requestID()
-
- def Service(self, interval=0.50):
- """Handle all fam work."""
- count = 0
- collapsed = 0
- rawevents = []
- start = time()
- now = time()
- while (time() - now) < interval:
- if self.fm.pending():
- while self.fm.pending():
- count += 1
- rawevents.append(self.fm.nextEvent())
- now = time()
- unique = []
- bookkeeping = []
- for event in rawevents:
- if ShouldIgnore(event):
- continue
- if event.code2str() != 'changed':
- # process all non-change events
- unique.append(event)
- else:
- if (event.filename, event.requestID) not in bookkeeping:
- bookkeeping.append((event.filename, event.requestID))
- unique.append(event)
- else:
- collapsed += 1
- for event in unique:
- if event.requestID in self.users:
- try:
- self.users[event.requestID].HandleEvent(event)
- except:
- logger.error("handling event for file %s" % (event.filename), exc_info=1)
- end = time()
- logger.info("Processed %s fam events in %03.03f seconds. %s coalesced" %
- (count, (end - start), collapsed))
- return count
-
-
-class Fam(FileMonitor):
- """
- The fam object is a set of callbacks for
- file alteration events (FAM support).
- """
-
- def __init__(self, debug=False):
- FileMonitor.__init__(self, debug)
- self.fm = _fam.open()
-
- def fileno(self):
- return self.fm.fileno()
-
- def AddMonitor(self, path, obj):
- """Add a monitor to path, installing a callback to obj.HandleEvent."""
- mode = os.stat(path)[stat.ST_MODE]
- if stat.S_ISDIR(mode):
- handle = self.fm.monitorDirectory(path, None)
- else:
- handle = self.fm.monitorFile(path, None)
- if obj != None:
- self.handles[handle.requestID()] = obj
- return handle.requestID()
-
- def pending(self):
- return self.fm.pending()
-
- def get_event(self):
- return self.fm.nextEvent()
-
-
-class Pseudo(FileMonitor):
- """
- The fam object is a set of callbacks for
- file alteration events (static monitor support).
- """
-
- def __init__(self, debug=False):
- FileMonitor.__init__(self, debug=False)
- self.pending_events = []
-
- def pending(self):
- return len(self.pending_events) != 0
-
- def get_event(self):
- return self.pending_events.pop()
-
- def AddMonitor(self, path, obj):
- """add a monitor to path, installing a callback to obj.HandleEvent"""
- handleID = len(list(self.handles.keys()))
- mode = os.stat(path)[stat.ST_MODE]
- handle = Event(handleID, path, 'exists')
- if stat.S_ISDIR(mode):
- dirList = os.listdir(path)
- self.pending_events.append(handle)
- for includedFile in dirList:
- self.pending_events.append(Event(handleID,
- includedFile,
- 'exists'))
- self.pending_events.append(Event(handleID, path, 'endExist'))
- else:
- self.pending_events.append(Event(handleID, path, 'exists'))
- if obj != None:
- self.handles[handleID] = obj
- return handleID
-
-
-try:
- from gamin import WatchMonitor, GAMCreated, GAMExists, GAMEndExist, \
- GAMChanged, GAMDeleted, GAMMoved
-
- class GaminEvent(Event):
- """
- This class provides an event analogous to
- python-fam events based on gamin sources.
- """
- def __init__(self, request_id, filename, code):
- Event.__init__(self, request_id, filename, code)
- action_map = {GAMCreated: 'created', GAMExists: 'exists',
- GAMChanged: 'changed', GAMDeleted: 'deleted',
- GAMEndExist: 'endExist', GAMMoved: 'moved'}
- if code in action_map:
- self.action = action_map[code]
-
- class Gamin(FileMonitor):
- """
- The fam object is a set of callbacks for
- file alteration events (Gamin support)
- """
- def __init__(self, debug=False):
- FileMonitor.__init__(self, debug)
- self.mon = WatchMonitor()
- self.counter = 0
- self.events = []
-
- def fileno(self):
- return self.mon.get_fd()
-
- def queue(self, path, action, request_id):
- """queue up the event for later handling"""
- self.events.append(GaminEvent(request_id, path, action))
-
- def AddMonitor(self, path, obj):
- """Add a monitor to path, installing a callback to obj.HandleEvent."""
- handle = self.counter
- self.counter += 1
- mode = os.stat(path)[stat.ST_MODE]
-
- # Flush queued gamin events
- while self.mon.event_pending():
- self.mon.handle_one_event()
-
- if stat.S_ISDIR(mode):
- self.mon.watch_directory(path, self.queue, handle)
- else:
- self.mon.watch_file(path, self.queue, handle)
- self.handles[handle] = obj
- return handle
-
- def pending(self):
- return len(self.events) > 0 or self.mon.event_pending()
-
- def get_event(self):
- if self.mon.event_pending():
- self.mon.handle_one_event()
- return self.events.pop(0)
-
- available['gamin'] = Gamin
-except ImportError:
- # fall back to _fam
- pass
-
-try:
- import _fam
- available['fam'] = FamFam
-except ImportError:
- pass
-available['pseudo'] = Pseudo
-
-for fdrv in ['gamin', 'fam', 'pseudo']:
- if fdrv in available:
- available['default'] = available[fdrv]
- break
diff --git a/src/lib/Bcfg2/Server/FileMonitor/Fam.py b/src/lib/Bcfg2/Server/FileMonitor/Fam.py
new file mode 100644
index 000000000..1a00fffa0
--- /dev/null
+++ b/src/lib/Bcfg2/Server/FileMonitor/Fam.py
@@ -0,0 +1,82 @@
+""" Fam provides FAM support for file alteration events """
+
+import os
+import _fam
+import stat
+import logging
+from time import time
+from Bcfg2.Server.FileMonitor import FileMonitor
+
+logger = logging.getLogger(__name__)
+
+class Fam(FileMonitor):
+ __priority__ = 90
+
+ def __init__(self, ignore=None, debug=False):
+ FileMonitor.__init__(self, ignore=ignore, debug=debug)
+ self.fm = _fam.open()
+ self.users = {}
+
+ def fileno(self):
+ """Return fam file handle number."""
+ return self.fm.fileno()
+
+ def handle_event_set(self, _):
+ self.Service()
+
+ def handle_events_in_interval(self, interval):
+ now = time()
+ while (time() - now) < interval:
+ if self.Service():
+ now = time()
+
+ def AddMonitor(self, path, obj):
+ """Add a monitor to path, installing a callback to obj.HandleEvent."""
+ mode = os.stat(path)[stat.ST_MODE]
+ if stat.S_ISDIR(mode):
+ handle = self.fm.monitorDirectory(path, None)
+ else:
+ handle = self.fm.monitorFile(path, None)
+ self.handles[handle.requestID()] = handle
+ if obj != None:
+ self.users[handle.requestID()] = obj
+ return handle.requestID()
+
+ def Service(self, interval=0.50):
+ """Handle all fam work."""
+ count = 0
+ collapsed = 0
+ rawevents = []
+ start = time()
+ now = time()
+ while (time() - now) < interval:
+ if self.fm.pending():
+ while self.fm.pending():
+ count += 1
+ rawevents.append(self.fm.nextEvent())
+ now = time()
+ unique = []
+ bookkeeping = []
+ for event in rawevents:
+ if self.should_ignore(event):
+ continue
+ if event.code2str() != 'changed':
+ # process all non-change events
+ unique.append(event)
+ else:
+ if (event.filename, event.requestID) not in bookkeeping:
+ bookkeeping.append((event.filename, event.requestID))
+ unique.append(event)
+ else:
+ collapsed += 1
+ for event in unique:
+ if event.requestID in self.users:
+ try:
+ self.users[event.requestID].HandleEvent(event)
+ except:
+ logger.error("Handling event for file %s" % event.filename,
+ exc_info=1)
+ end = time()
+ logger.info("Processed %s fam events in %03.03f seconds. %s coalesced" %
+ (count, (end - start), collapsed))
+ return count
diff --git a/src/lib/Bcfg2/Server/FileMonitor/Gamin.py b/src/lib/Bcfg2/Server/FileMonitor/Gamin.py
new file mode 100644
index 000000000..60f80c9c3
--- /dev/null
+++ b/src/lib/Bcfg2/Server/FileMonitor/Gamin.py
@@ -0,0 +1,64 @@
+""" Gamin driver for file alteration events """
+
+import os
+import stat
+import logging
+from gamin import WatchMonitor, GAMCreated, GAMExists, GAMEndExist, \
+ GAMChanged, GAMDeleted
+from Bcfg2.Server.FileMonitor import Event, FileMonitor
+
+logger = logging.getLogger(__name__)
+
+class GaminEvent(Event):
+ """
+ This class provides an event analogous to
+ python-fam events based on gamin sources.
+ """
+ action_map = {GAMCreated: 'created', GAMExists: 'exists',
+ GAMChanged: 'changed', GAMDeleted: 'deleted',
+ GAMEndExist: 'endExist'}
+
+ def __init__(self, request_id, filename, code):
+ Event.__init__(self, request_id, filename, code)
+ if code in self.action_map:
+ self.action = self.action_map[code]
+
+class Gamin(FileMonitor):
+ __priority__ = 10
+
+ def __init__(self, ignore=None, debug=False):
+ FileMonitor.__init__(self, ignore=ignore, debug=debug)
+ self.mon = WatchMonitor()
+ self.counter = 0
+
+ def fileno(self):
+ return self.mon.get_fd()
+
+ def queue(self, path, action, request_id):
+ """queue up the event for later handling"""
+ self.events.append(GaminEvent(request_id, path, action))
+
+ def AddMonitor(self, path, obj):
+ """Add a monitor to path, installing a callback to obj."""
+ handle = self.counter
+ self.counter += 1
+ mode = os.stat(path)[stat.ST_MODE]
+
+ # Flush queued gamin events
+ while self.mon.event_pending():
+ self.mon.handle_one_event()
+
+ if stat.S_ISDIR(mode):
+ self.mon.watch_directory(path, self.queue, handle)
+ else:
+ self.mon.watch_file(path, self.queue, handle)
+ self.handles[handle] = obj
+ return handle
+
+ def pending(self):
+ return FileMonitor.pending(self) or self.mon.event_pending()
+
+ def get_event(self):
+ if self.mon.event_pending():
+ self.mon.handle_one_event()
+ return FileMonitor.get_event(self)
diff --git a/src/lib/Bcfg2/Server/FileMonitor/Inotify.py b/src/lib/Bcfg2/Server/FileMonitor/Inotify.py
new file mode 100644
index 000000000..880ac7e8d
--- /dev/null
+++ b/src/lib/Bcfg2/Server/FileMonitor/Inotify.py
@@ -0,0 +1,126 @@
+""" Inotify driver for file alteration events """
+
+import logging
+import operator
+import os
+import pyinotify
+import sys
+from Bcfg2.Bcfg2Py3k import reduce
+from Bcfg2.Server.FileMonitor import Event
+from Bcfg2.Server.FileMonitor.Pseudo import Pseudo
+
+logger = logging.getLogger(__name__)
+
+class Inotify(Pseudo, pyinotify.ProcessEvent):
+ __priority__ = 1
+ action_map = {pyinotify.IN_CREATE: 'created',
+ pyinotify.IN_DELETE: 'deleted',
+ pyinotify.IN_MODIFY: 'changed',
+ pyinotify.IN_MOVED_FROM: 'deleted',
+ pyinotify.IN_MOVED_TO: 'created'}
+ mask = reduce(lambda x, y: x | y, action_map.keys())
+
+ def __init__(self, ignore=None, debug=False):
+ Pseudo.__init__(self, ignore=ignore, debug=debug)
+ self.wm = pyinotify.WatchManager()
+ self.notifier = pyinotify.ThreadedNotifier(self.wm, self)
+ self.notifier.start()
+ self.event_filter = dict()
+ self.watches_by_path = dict()
+
+ def fileno(self):
+ return self.wm.get_fd()
+
+ def process_default(self, ievent):
+ action = ievent.maskname
+ for amask, aname in self.action_map.items():
+ if ievent.mask & amask:
+ action = aname
+ break
+ try:
+ watch = self.wm.watches[ievent.wd]
+ except KeyError:
+ err = sys.exc_info()[1]
+ logger.error("Error handling event for %s: Watch %s not found" %
+ (ievent.pathname, ievent.wd))
+ return
+ # FAM-style file monitors return the full path to the parent
+ # directory that is being watched, and relative paths to anything
+ # contained within the directory. Since we can't use inotify
+ # to watch files directly, we have to sort of guess at whether
+ # this watch was actually added on a file (and thus is in
+ # self.event_filter because we're filtering out other events
+ # on the directory) or was added directly on a directory.
+ if (watch.path == ievent.pathname or ievent.wd in self.event_filter):
+ path = ievent.pathname
+ else:
+ # relative path
+ path = os.path.basename(ievent.pathname)
+ # figure out the handleID. start with the path of the event;
+ # that should catch events on files that are watched directly.
+ # (we have to watch the directory that a file is in, so this
+ # lets us handle events on different files in the same
+ # directory -- and thus under the same watch -- with different
+ # objects.) If the path to the event doesn't have a handler,
+ # use the path of the watch itself.
+ handleID = ievent.pathname
+ if handleID not in self.handles:
+ handleID = watch.path
+ evt = Event(handleID, path, action)
+
+ if (ievent.wd not in self.event_filter or
+ ievent.pathname in self.event_filter[ievent.wd]):
+ self.events.append(evt)
+
+ def AddMonitor(self, path, obj):
+ # strip trailing slashes
+ path = path.rstrip("/")
+ if not os.path.isdir(path):
+ # inotify is a little wonky about watching files. for
+ # instance, if you watch /tmp/foo, and then do 'mv
+ # /tmp/bar /tmp/foo', it processes that as a deletion of
+ # /tmp/foo (which it technically _is_, but that's rather
+ # useless -- we care that /tmp/foo changed, not that it
+ # was first deleted and then created). In order to
+ # effectively watch a file, we have to watch the directory
+ # it's in, and filter out events for other files in the
+ # same directory that are not similarly watched.
+ # watch_transient_file requires a Processor _class_, not
+ # an object, so we can't have this object handle events,
+ # which is Wrong, so we can't use that function.
+ watch_path = os.path.dirname(path)
+ is_dir = False
+ else:
+ watch_path = path
+ is_dir = True
+
+ # see if this path is already being watched
+ try:
+ wd = self.watches_by_path[watch_path]
+ except KeyError:
+ wd = self.wm.add_watch(watch_path, self.mask,
+ quiet=False)[watch_path]
+ self.watches_by_path[watch_path] = wd
+
+ produce_exists = True
+ if not is_dir:
+ if wd not in self.event_filter:
+ self.event_filter[wd] = [path]
+ elif path not in self.event_filter[wd]:
+ self.event_filter[wd].append(path)
+ else:
+ # we've been asked to watch a file that we're already
+ # watching, so we don't need to produce 'exists'
+ # events
+ produce_exists = False
+
+ # inotify doesn't produce initial 'exists' events, so we
+ # inherit from Pseudo to produce those
+ if produce_exists:
+ return Pseudo.AddMonitor(self, path, obj, handleID=path)
+ else:
+ self.handles[path] = obj
+ return path
+
+ def shutdown(self):
+ self.notifier.stop()
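The long comment in AddMonitor above explains that inotify cannot usefully watch a file directly, so the parent directory is watched and events for unrelated files are filtered out via event_filter, with watches_by_path reused across files in the same directory. A rough sketch of that bookkeeping, under the assumption that pyinotify is installed and the paths exist (they are illustrative only):

    # Hedged sketch of the directory-watch/event_filter behaviour described above.
    from Bcfg2.Server.FileMonitor.Inotify import Inotify

    fam = Inotify()
    fam.AddMonitor("/etc/hosts", None)         # actually watches /etc
    fam.AddMonitor("/etc/resolv.conf", None)   # reuses the existing /etc watch
    wd = fam.watches_by_path["/etc"]
    print(fam.event_filter[wd])   # ['/etc/hosts', '/etc/resolv.conf']
    fam.shutdown()                # stops the ThreadedNotifier
    # None handlers are used only to show the watch bookkeeping; real callers
    # pass an object with a HandleEvent method.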
diff --git a/src/lib/Bcfg2/Server/FileMonitor/Pseudo.py b/src/lib/Bcfg2/Server/FileMonitor/Pseudo.py
new file mode 100644
index 000000000..089d4cf0f
--- /dev/null
+++ b/src/lib/Bcfg2/Server/FileMonitor/Pseudo.py
@@ -0,0 +1,25 @@
+""" Pseudo provides static monitor support for file alteration events """
+
+import os
+import logging
+from Bcfg2.Server.FileMonitor import FileMonitor, Event
+
+logger = logging.getLogger(__name__)
+
+class Pseudo(FileMonitor):
+ __priority__ = 99
+
+ def AddMonitor(self, path, obj, handleID=None):
+ """add a monitor to path, installing a callback to obj.HandleEvent"""
+ if handleID is None:
+ handleID = len(list(self.handles.keys()))
+ self.events.append(Event(handleID, path, 'exists'))
+ if os.path.isdir(path):
+ dirList = os.listdir(path)
+ for includedFile in dirList:
+ self.events.append(Event(handleID, includedFile, 'exists'))
+ self.events.append(Event(handleID, path, 'endExist'))
+
+ if obj != None:
+ self.handles[handleID] = obj
+ return handleID
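Pseudo only replays a static snapshot of the tree, which makes it a convenient way to see the event flow end to end. A minimal sketch, with a toy handler and an illustrative path:

    # Hedged sketch: drive the Pseudo monitor by hand.
    from Bcfg2.Server.FileMonitor.Pseudo import Pseudo

    class PrintHandler(object):
        def HandleEvent(self, event):
            print("%s %s" % (event.code2str(), event.filename))

    fam = Pseudo()
    fam.AddMonitor("/etc", PrintHandler())     # queues 'exists' events for /etc and its entries
    while fam.pending():
        fam.handle_one_event(fam.get_event())  # dispatches to PrintHandler.HandleEvent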
diff --git a/src/lib/Bcfg2/Server/FileMonitor/__init__.py b/src/lib/Bcfg2/Server/FileMonitor/__init__.py
new file mode 100644
index 000000000..c490acc81
--- /dev/null
+++ b/src/lib/Bcfg2/Server/FileMonitor/__init__.py
@@ -0,0 +1,143 @@
+"""Bcfg2.Server.FileMonitor provides the support for monitoring files."""
+
+import os
+import sys
+import fnmatch
+import logging
+import pkgutil
+from time import sleep, time
+
+logger = logging.getLogger(__name__)
+
+class Event(object):
+ def __init__(self, request_id, filename, code):
+ self.requestID = request_id
+ self.filename = filename
+ self.action = code
+
+ def code2str(self):
+ """return static code for event"""
+ return self.action
+
+ def __str__(self):
+ return "%s: %s %s" % (self.__class__.__name__,
+ self.filename, self.action)
+
+ def __repr__(self):
+ return "%s (request ID %s)" % (str(self), self.requestID)
+
+
+class FileMonitor(object):
+ """File Monitor baseclass."""
+ def __init__(self, ignore=None, debug=False):
+ object.__init__(self)
+ self.debug = debug
+ self.handles = dict()
+ self.events = []
+ if ignore is None:
+ ignore = []
+ self.ignore = ignore
+
+ def __str__(self):
+ return "%s: %s" % (__name__, self.__class__.__name__)
+
+ def __repr__(self):
+ return "%s (%s events, fd %s)" % (str(self), len(self.events), self.fileno)
+
+ def debug_log(self, msg):
+ if self.debug:
+ logger.info(msg)
+
+ def should_ignore(self, event):
+ for pattern in self.ignore:
+ if (fnmatch.fnmatch(event.filename, pattern) or
+ fnmatch.fnmatch(os.path.split(event.filename)[-1], pattern)):
+ self.debug_log("Ignoring %s" % event)
+ return True
+ return False
+
+ def pending(self):
+ return bool(self.events)
+
+ def get_event(self):
+ return self.events.pop(0)
+
+ def fileno(self):
+ return 0
+
+ def handle_one_event(self, event):
+ if self.should_ignore(event):
+ return
+ if event.requestID not in self.handles:
+ logger.info("Got event for unexpected id %s, file %s" %
+ (event.requestID, event.filename))
+ return
+ self.debug_log("Dispatching event %s %s to obj %s" %
+ (event.code2str(), event.filename,
+ self.handles[event.requestID]))
+ try:
+ self.handles[event.requestID].HandleEvent(event)
+ except:
+ err = sys.exc_info()[1]
+ logger.error("Error in handling of event %s for %s: %s" %
+ (event.code2str(), event.filename, err))
+
+ def handle_event_set(self, lock=None):
+ count = 1
+ event = self.get_event()
+ start = time()
+ if lock:
+ lock.acquire()
+ try:
+ self.handle_one_event(event)
+ while self.pending():
+ self.handle_one_event(self.get_event())
+ count += 1
+ except:
+ pass
+ if lock:
+ lock.release()
+ end = time()
+ logger.info("Handled %d events in %.03fs" % (count, (end - start)))
+
+ def handle_events_in_interval(self, interval):
+ end = time() + interval
+ while time() < end:
+ if self.pending():
+ self.handle_event_set()
+ end = time() + interval
+ else:
+ sleep(0.5)
+
+ def shutdown(self):
+ pass
+
+
+available = dict()
+
+# todo: loading the monitor drivers should be automatic
+from Bcfg2.Server.FileMonitor.Pseudo import Pseudo
+available['pseudo'] = Pseudo
+
+try:
+ from Bcfg2.Server.FileMonitor.Fam import Fam
+ available['fam'] = Fam
+except ImportError:
+ pass
+
+try:
+ from Bcfg2.Server.FileMonitor.Gamin import Gamin
+ available['gamin'] = Gamin
+except ImportError:
+ pass
+
+try:
+ from Bcfg2.Server.FileMonitor.Inotify import Inotify
+ available['inotify'] = Inotify
+except ImportError:
+ pass
+
+for fdrv in sorted(available.keys(), key=lambda k: available[k].__priority__):
+ if fdrv in available:
+ available['default'] = available[fdrv]
+ break
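With every driver registering itself in available and carrying a __priority__, the lowest-priority driver that imports successfully becomes available['default']. A short sketch of how a caller might pick and configure a driver; the ignore globs and repository path are illustrative, only the available dict and the FileMonitor.__init__(ignore=..., debug=...) signature come from the file above.

    # Hedged sketch: select a file monitor driver and pass ignore patterns.
    from Bcfg2.Server import FileMonitor

    drv_name = 'inotify' if 'inotify' in FileMonitor.available else 'default'
    fam = FileMonitor.available[drv_name](ignore=['*~', '*.swp', '.#*'], debug=False)
    fam.AddMonitor('/var/lib/bcfg2/Metadata', None)  # illustrative path; real callers pass a handler object
    fam.handle_events_in_interval(10)
    fam.shutdown()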
diff --git a/src/lib/Bcfg2/Server/Hostbase/backends.py b/src/lib/Bcfg2/Server/Hostbase/backends.py
index ecaf3c109..cfa9e1e16 100644
--- a/src/lib/Bcfg2/Server/Hostbase/backends.py
+++ b/src/lib/Bcfg2/Server/Hostbase/backends.py
@@ -18,21 +18,16 @@ from nisauth import *
## uid=l.badge_no
## )
## #fixme: need to add this user session obj to session
-## #print str(ldap_user)
## user,created = User.objects.get_or_create(username=username)
-## #print user
-## #print "created " + str(created)
## return user
## except LDAPAUTHError,e:
-## #print str(e)
## return None
## def get_user(self,user_id):
## try:
## return User.objects.get(pk=user_id)
## except User.DoesNotExist, e:
-## print str(e)
## return None
diff --git a/src/lib/Bcfg2/Server/Hostbase/ldapauth.py b/src/lib/Bcfg2/Server/Hostbase/ldapauth.py
index f3db26f67..fc2ca1bf1 100644
--- a/src/lib/Bcfg2/Server/Hostbase/ldapauth.py
+++ b/src/lib/Bcfg2/Server/Hostbase/ldapauth.py
@@ -144,7 +144,6 @@ class ldapauth(object):
def member_of(self):
"""See if this user is in our group that is allowed to login"""
m = [g for g in self.memberOf if g == self.check_member_of]
- #print m
if len(m) == 1:
return True
else:
diff --git a/src/lib/Bcfg2/Server/Lint/Bundles.py b/src/lib/Bcfg2/Server/Lint/Bundles.py
deleted file mode 100644
index e6b6307f2..000000000
--- a/src/lib/Bcfg2/Server/Lint/Bundles.py
+++ /dev/null
@@ -1,54 +0,0 @@
-import lxml.etree
-import Bcfg2.Server.Lint
-
-class Bundles(Bcfg2.Server.Lint.ServerPlugin):
- """ Perform various bundle checks """
- def Run(self):
- """ run plugin """
- if 'Bundler' in self.core.plugins:
- self.missing_bundles()
- for bundle in self.core.plugins['Bundler'].entries.values():
- if self.HandlesFile(bundle.name):
- if (not Bcfg2.Server.Plugins.Bundler.have_genshi or
- type(bundle) is not
- Bcfg2.Server.Plugins.SGenshi.SGenshiTemplateFile):
- self.bundle_names(bundle)
-
- @classmethod
- def Errors(cls):
- return {"bundle-not-found":"error",
- "inconsistent-bundle-name":"warning"}
-
- def missing_bundles(self):
- """ find bundles listed in Metadata but not implemented in Bundler """
- if self.files is None:
- # when given a list of files on stdin, this check is
- # useless, so skip it
- groupdata = self.metadata.groups_xml.xdata
- ref_bundles = set([b.get("name")
- for b in groupdata.findall("//Bundle")])
-
- allbundles = self.core.plugins['Bundler'].entries.keys()
- for bundle in ref_bundles:
- xmlbundle = "%s.xml" % bundle
- genshibundle = "%s.genshi" % bundle
- if (xmlbundle not in allbundles and
- genshibundle not in allbundles):
- self.LintError("bundle-not-found",
- "Bundle %s referenced, but does not exist" %
- bundle)
-
- def bundle_names(self, bundle):
- """ verify bundle name attribute matches filename """
- try:
- xdata = lxml.etree.XML(bundle.data)
- except AttributeError:
- # genshi template
- xdata = lxml.etree.parse(bundle.template.filepath).getroot()
-
- fname = bundle.name.split('Bundler/')[1].split('.')[0]
- bname = xdata.get('name')
- if fname != bname:
- self.LintError("inconsistent-bundle-name",
- "Inconsistent bundle name: filename is %s, bundle name is %s" %
- (fname, bname))
diff --git a/src/lib/Bcfg2/Server/Lint/Comments.py b/src/lib/Bcfg2/Server/Lint/Comments.py
index f5d0e265f..59d18fc57 100644
--- a/src/lib/Bcfg2/Server/Lint/Comments.py
+++ b/src/lib/Bcfg2/Server/Lint/Comments.py
@@ -1,6 +1,7 @@
-import os.path
+import os
import lxml.etree
import Bcfg2.Server.Lint
+from Bcfg2.Server import XI, XI_NAMESPACE
from Bcfg2.Server.Plugins.Cfg.CfgPlaintextGenerator import CfgPlaintextGenerator
from Bcfg2.Server.Plugins.Cfg.CfgGenshiGenerator import CfgGenshiGenerator
from Bcfg2.Server.Plugins.Cfg.CfgCheetahGenerator import CfgCheetahGenerator
@@ -186,7 +187,7 @@ class Comments(Bcfg2.Server.Lint.ServerPlugin):
path = os.path.join(self.metadata.data, mfile)
if path in self.files:
xdata = lxml.etree.parse(path)
- for el in xdata.findall('./{http://www.w3.org/2001/XInclude}include'):
+ for el in xdata.findall('./%sinclude' % XI_NAMESPACE):
if not self.has_all_xincludes(el.get('href')):
self.LintError("broken-xinclude-chain",
"Broken XInclude chain: could not include %s" % path)
diff --git a/src/lib/Bcfg2/Server/Lint/Deltas.py b/src/lib/Bcfg2/Server/Lint/Deltas.py
deleted file mode 100644
index 114f2e348..000000000
--- a/src/lib/Bcfg2/Server/Lint/Deltas.py
+++ /dev/null
@@ -1,25 +0,0 @@
-import Bcfg2.Server.Lint
-from Bcfg2.Server.Plugins.Cfg import CfgFilter
-
-class Deltas(Bcfg2.Server.Lint.ServerPlugin):
- """ Warn about usage of .cat and .diff files """
-
- def Run(self):
- """ run plugin """
- if 'Cfg' in self.core.plugins:
- cfg = self.core.plugins['Cfg']
- for basename, entry in list(cfg.entries.items()):
- self.check_entry(basename, entry)
-
- @classmethod
- def Errors(cls):
- return {"cat-file-used":"warning",
- "diff-file-used":"warning"}
-
- def check_entry(self, basename, entry):
- for fname, processor in entry.entries.items():
- if self.HandlesFile(fname) and isinstance(processor, CfgFilter):
- extension = fname.split(".")[-1]
- self.LintError("%s-file-used" % extension,
- "%s file used on %s: %s" %
- (extension, basename, fname))
diff --git a/src/lib/Bcfg2/Server/Lint/Duplicates.py b/src/lib/Bcfg2/Server/Lint/Duplicates.py
index ee6b7a2e6..60a02ffb9 100644
--- a/src/lib/Bcfg2/Server/Lint/Duplicates.py
+++ b/src/lib/Bcfg2/Server/Lint/Duplicates.py
@@ -1,6 +1,7 @@
-import os.path
+import os
import lxml.etree
import Bcfg2.Server.Lint
+from Bcfg2.Server import XI, XI_NAMESPACE
class Duplicates(Bcfg2.Server.Lint.ServerPlugin):
""" Find duplicate clients, groups, etc. """
@@ -80,7 +81,7 @@ class Duplicates(Bcfg2.Server.Lint.ServerPlugin):
path = os.path.join(self.metadata.data, mfile)
if path in self.files:
xdata = lxml.etree.parse(path)
- for el in xdata.findall('./{http://www.w3.org/2001/XInclude}include'):
+ for el in xdata.findall('./%sinclude' % XI_NAMESPACE):
if not self.has_all_xincludes(el.get('href')):
self.LintError("broken-xinclude-chain",
"Broken XInclude chain: could not include %s" % path)
diff --git a/src/lib/Bcfg2/Server/Lint/Genshi.py b/src/lib/Bcfg2/Server/Lint/Genshi.py
index b6007161e..74142b446 100755
--- a/src/lib/Bcfg2/Server/Lint/Genshi.py
+++ b/src/lib/Bcfg2/Server/Lint/Genshi.py
@@ -1,3 +1,4 @@
+import sys
import genshi.template
import Bcfg2.Server.Lint
diff --git a/src/lib/Bcfg2/Server/Lint/GroupNames.py b/src/lib/Bcfg2/Server/Lint/GroupNames.py
new file mode 100644
index 000000000..5df98a30e
--- /dev/null
+++ b/src/lib/Bcfg2/Server/Lint/GroupNames.py
@@ -0,0 +1,78 @@
+import os
+import re
+import Bcfg2.Server.Lint
+try:
+ from Bcfg2.Server.Plugins.Bundler import BundleTemplateFile
+ has_genshi = True
+except ImportError:
+ has_genshi = False
+
+class GroupNames(Bcfg2.Server.Lint.ServerPlugin):
+ """ ensure that all named groups are valid group names """
+ pattern = r'\S+$'
+ valid = re.compile(r'^' + pattern)
+
+ def Run(self):
+ self.check_metadata()
+ if 'Rules' in self.core.plugins:
+ self.check_rules()
+ if 'Bundler' in self.core.plugins:
+ self.check_bundles()
+ if 'GroupPatterns' in self.core.plugins:
+ self.check_grouppatterns()
+ if 'Cfg' in self.core.plugins:
+ self.check_cfg()
+
+ @classmethod
+ def Errors(cls):
+ return {"invalid-group-name": "error"}
+
+ def check_rules(self):
+ for rules in self.core.plugins['Rules'].entries.values():
+ if not self.HandlesFile(rules.name):
+ continue
+ xdata = rules.pnode.data
+ self.check_entries(xdata.xpath("//Group"),
+ os.path.join(self.config['repo'], rules.name))
+
+ def check_bundles(self):
+ """ check bundles for BoundPath entries with missing attrs """
+ for bundle in self.core.plugins['Bundler'].entries.values():
+ if (self.HandlesFile(bundle.name) and
+ (not has_genshi or
+ not isinstance(bundle, BundleTemplateFile))):
+ self.check_entries(bundle.xdata.xpath("//Group"),
+ bundle.name)
+
+ def check_metadata(self):
+ self.check_entries(self.metadata.groups_xml.xdata.xpath("//Group"),
+ os.path.join(self.config['repo'],
+ self.metadata.groups_xml.name))
+
+ def check_grouppatterns(self):
+ cfg = self.core.plugins['GroupPatterns'].config
+ if not self.HandlesFile(cfg.name):
+ return
+ for grp in cfg.xdata.xpath('//GroupPattern/Group'):
+ if not self.valid.search(grp.text):
+ self.LintError("invalid-group-name",
+ "Invalid group name in %s: %s" %
+ (cfg.name, self.RenderXML(grp, keep_text=True)))
+
+ def check_cfg(self):
+ for root, dirs, files in os.walk(self.core.plugins['Cfg'].data):
+ for fname in files:
+ basename = os.path.basename(root)
+ if (re.search(r'^%s\.G\d\d_' % basename, fname) and
+ not re.search(r'^%s\.G\d\d_' % basename + self.pattern,
+ fname)):
+ self.LintError("invalid-group-name",
+ "Invalid group name referenced in %s" %
+ os.path.join(root, fname))
+
+ def check_entries(self, entries, fname):
+ for grp in entries:
+ if not self.valid.search(grp.get("name")):
+ self.LintError("invalid-group-name",
+ "Invalid group name in %s: %s" %
+ (fname, self.RenderXML(grp)))
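The check boils down to the valid regex at the top of the class: a group name may not contain whitespace. A quick illustration of what the new invalid-group-name error catches:

    # Hedged sketch using the same pattern as GroupNames.valid.
    import re
    valid = re.compile(r'^\S+$')
    print(bool(valid.search("web-servers")))   # True  -- passes the lint check
    print(bool(valid.search("web servers")))   # False -- flagged as invalid-group-name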
diff --git a/src/lib/Bcfg2/Server/Lint/GroupPatterns.py b/src/lib/Bcfg2/Server/Lint/GroupPatterns.py
deleted file mode 100644
index 431ba4056..000000000
--- a/src/lib/Bcfg2/Server/Lint/GroupPatterns.py
+++ /dev/null
@@ -1,35 +0,0 @@
-import sys
-import Bcfg2.Server.Lint
-from Bcfg2.Server.Plugins.GroupPatterns import PatternMap
-
-class GroupPatterns(Bcfg2.Server.Lint.ServerPlugin):
- """ Check Genshi templates for syntax errors """
-
- def Run(self):
- """ run plugin """
- if 'GroupPatterns' in self.core.plugins:
- cfg = self.core.plugins['GroupPatterns'].config
- for entry in cfg.xdata.xpath('//GroupPattern'):
- groups = [g.text for g in entry.findall('Group')]
- self.check(entry, groups, ptype='NamePattern')
- self.check(entry, groups, ptype='NameRange')
-
- @classmethod
- def Errors(cls):
- return {"pattern-fails-to-initialize":"error"}
-
- def check(self, entry, groups, ptype="NamePattern"):
- if ptype == "NamePattern":
- pmap = lambda p: PatternMap(p, None, groups)
- else:
- pmap = lambda p: PatternMap(None, p, groups)
-
- for el in entry.findall(ptype):
- pat = el.text
- try:
- pmap(pat)
- except:
- err = sys.exc_info()[1]
- self.LintError("pattern-fails-to-initialize",
- "Failed to initialize %s %s for %s: %s" %
- (ptype, pat, entry.get('pattern'), err))
diff --git a/src/lib/Bcfg2/Server/Lint/InfoXML.py b/src/lib/Bcfg2/Server/Lint/InfoXML.py
index db6aeea73..5e4e21e18 100644
--- a/src/lib/Bcfg2/Server/Lint/InfoXML.py
+++ b/src/lib/Bcfg2/Server/Lint/InfoXML.py
@@ -1,28 +1,41 @@
-import os.path
+import os
import Bcfg2.Options
import Bcfg2.Server.Lint
from Bcfg2.Server.Plugins.Cfg.CfgInfoXML import CfgInfoXML
+from Bcfg2.Server.Plugins.Cfg.CfgLegacyInfo import CfgLegacyInfo
class InfoXML(Bcfg2.Server.Lint.ServerPlugin):
""" ensure that all config files have an info.xml file"""
def Run(self):
- if 'Cfg' in self.core.plugins:
- for filename, entryset in self.core.plugins['Cfg'].entries.items():
- infoxml_fname = os.path.join(entryset.path, "info.xml")
- if self.HandlesFile(infoxml_fname):
- found = False
- for entry in entryset.entries.values():
- if isinstance(entry, CfgInfoXML):
- self.check_infoxml(infoxml_fname,
- entry.infoxml.pnode.data)
- found = True
- if not found:
- self.LintError("no-infoxml",
- "No info.xml found for %s" % filename)
+ if 'Cfg' not in self.core.plugins:
+ return
+
+ for filename, entryset in self.core.plugins['Cfg'].entries.items():
+ infoxml_fname = os.path.join(entryset.path, "info.xml")
+ if self.HandlesFile(infoxml_fname):
+ found = False
+ for entry in entryset.entries.values():
+ if isinstance(entry, CfgInfoXML):
+ self.check_infoxml(infoxml_fname,
+ entry.infoxml.pnode.data)
+ found = True
+ if not found:
+ self.LintError("no-infoxml",
+ "No info.xml found for %s" % filename)
+
+ for entry in entryset.entries.values():
+ if isinstance(entry, CfgLegacyInfo):
+ if not self.HandlesFile(entry.path):
+ continue
+ self.LintError("deprecated-info-file",
+ "Deprecated %s file found at %s" %
+ (os.path.basename(entry.name),
+ entry.path))
@classmethod
def Errors(cls):
return {"no-infoxml":"warning",
+ "deprecated-info-file":"warning",
"paranoid-false":"warning",
"broken-xinclude-chain":"warning",
"required-infoxml-attrs-missing":"error"}
diff --git a/src/lib/Bcfg2/Server/Lint/Pkgmgr.py b/src/lib/Bcfg2/Server/Lint/Pkgmgr.py
deleted file mode 100644
index ceb46238a..000000000
--- a/src/lib/Bcfg2/Server/Lint/Pkgmgr.py
+++ /dev/null
@@ -1,38 +0,0 @@
-import glob
-import lxml.etree
-import Bcfg2.Server.Lint
-
-class Pkgmgr(Bcfg2.Server.Lint.ServerlessPlugin):
- """ find duplicate Pkgmgr entries with the same priority """
- def Run(self):
- pset = set()
- for pfile in glob.glob("%s/Pkgmgr/*.xml" % self.config['repo']):
- if self.HandlesFile(pfile):
- xdata = lxml.etree.parse(pfile).getroot()
- # get priority, type, group
- priority = xdata.get('priority')
- ptype = xdata.get('type')
- for pkg in xdata.xpath("//Package"):
- if pkg.getparent().tag == 'Group':
- grp = pkg.getparent().get('name')
- if (type(grp) is not str and
- grp.getparent().tag == 'Group'):
- pgrp = grp.getparent().get('name')
- else:
- pgrp = 'none'
- else:
- grp = 'none'
- pgrp = 'none'
- ptuple = (pkg.get('name'), priority, ptype, grp, pgrp)
- # check if package is already listed with same
- # priority, type, grp
- if ptuple in pset:
- self.LintError("duplicate-package",
- "Duplicate Package %s, priority:%s, type:%s" %
- (pkg.get('name'), priority, ptype))
- else:
- pset.add(ptuple)
-
- @classmethod
- def Errors(cls):
- return {"duplicate-packages":"error"}
diff --git a/src/lib/Bcfg2/Server/Lint/RequiredAttrs.py b/src/lib/Bcfg2/Server/Lint/RequiredAttrs.py
index 6f76cf2db..fcb7c6c28 100644
--- a/src/lib/Bcfg2/Server/Lint/RequiredAttrs.py
+++ b/src/lib/Bcfg2/Server/Lint/RequiredAttrs.py
@@ -1,32 +1,105 @@
-import os.path
+import os
+import re
import lxml.etree
import Bcfg2.Server.Lint
+import Bcfg2.Client.Tools.POSIX
+import Bcfg2.Client.Tools.VCS
from Bcfg2.Server.Plugins.Packages import Apt, Yum
+try:
+ from Bcfg2.Server.Plugins.Bundler import BundleTemplateFile
+ has_genshi = True
+except ImportError:
+ has_genshi = False
+
+# format verifying functions
+def is_filename(val):
+ return val.startswith("/") and len(val) > 1
+
+def is_selinux_type(val):
+ return re.match(r'^[a-z_]+_t', val)
+
+def is_selinux_user(val):
+ return re.match(r'^[a-z_]+_u', val)
+
+def is_octal_mode(val):
+ return re.match(r'[0-7]{3,4}', val)
+
+def is_username(val):
+ return re.match(r'^([a-z]\w{0,30}|\d+)$', val)
+
+def is_device_mode(val):
+ try:
+ # checking upper bound seems like a good way to discover some
+ # obscure OS with >8-bit device numbers
+ return int(val) > 0
+ except:
+ return False
class RequiredAttrs(Bcfg2.Server.Lint.ServerPlugin):
""" verify attributes for configuration entries (as defined in
doc/server/configurationentries) """
def __init__(self, *args, **kwargs):
Bcfg2.Server.Lint.ServerPlugin.__init__(self, *args, **kwargs)
- self.required_attrs = {
- 'Path': {
- 'device': ['name', 'owner', 'group', 'dev_type'],
- 'directory': ['name', 'owner', 'group', 'perms'],
- 'file': ['name', 'owner', 'group', 'perms', '__text__'],
- 'hardlink': ['name', 'to'],
- 'symlink': ['name', 'to'],
- 'ignore': ['name'],
- 'nonexistent': ['name'],
- 'permissions': ['name', 'owner', 'group', 'perms'],
- 'vcs': ['vcstype', 'revision', 'sourceurl']},
- 'Service': {
- 'chkconfig': ['name'],
- 'deb': ['name'],
- 'rc-update': ['name'],
- 'smf': ['name', 'FMRI'],
- 'upstart': ['name']},
- 'Action': ['name', 'timing', 'when', 'status', 'command'],
- 'Package': ['name']}
+ self.required_attrs = dict(
+ Path=dict(
+ device=dict(name=is_filename, owner=is_username,
+ group=is_username,
+ dev_type=lambda v: \
+ v in Bcfg2.Client.Tools.POSIX.device_map),
+ directory=dict(name=is_filename, owner=is_username,
+ group=is_username, perms=is_octal_mode),
+ file=dict(name=is_filename, owner=is_username,
+ group=is_username, perms=is_octal_mode,
+ __text__=None),
+ hardlink=dict(name=is_filename, to=is_filename),
+ symlink=dict(name=is_filename, to=is_filename),
+ ignore=dict(name=is_filename),
+ nonexistent=dict(name=is_filename),
+ permissions=dict(name=is_filename, owner=is_username,
+ group=is_username, perms=is_octal_mode),
+ vcs=dict(vcstype=lambda v: (v != 'Path' and
+ hasattr(Bcfg2.Client.Tools.VCS,
+ "Install%s" % v)),
+ revision=None, sourceurl=None)),
+ Service={
+ "chkconfig": dict(name=None),
+ "deb": dict(name=None),
+ "rc-update": dict(name=None),
+ "smf": dict(name=None, FMRI=None),
+ "upstart": dict(name=None)},
+ Action={None: dict(name=None,
+ timing=lambda v: v in ['pre', 'post', 'both'],
+ when=lambda v: v in ['modified', 'always'],
+ status=lambda v: v in ['ignore', 'check'],
+ command=None)},
+ ACL=dict(
+ default=dict(scope=lambda v: v in ['user', 'group'],
+ perms=lambda v: re.match('^([0-7]|[rwx\-]{0,3})$',
+ v)),
+ access=dict(scope=lambda v: v in ['user', 'group'],
+ perms=lambda v: re.match('^([0-7]|[rwx\-]{0,3})$',
+ v)),
+ mask=dict(perms=lambda v: re.match('^([0-7]|[rwx\-]{0,3})$', v))),
+ Package={None: dict(name=None)},
+ SELinux=dict(
+ boolean=dict(name=None,
+ value=lambda v: v in ['on', 'off']),
+ module=dict(name=None, __text__=None),
+ port=dict(name=lambda v: re.match(r'^\d+(-\d+)?/(tcp|udp)', v),
+ selinuxtype=is_selinux_type),
+ fcontext=dict(name=None, selinuxtype=is_selinux_type),
+ node=dict(name=lambda v: "/" in v,
+ selinuxtype=is_selinux_type,
+ proto=lambda v: v in ['ipv6', 'ipv4']),
+ login=dict(name=is_username,
+ selinuxuser=is_selinux_user),
+ user=dict(name=is_selinux_user,
+ roles=lambda v: all(is_selinux_user(u)
+ for u in " ".split(v)),
+ prefix=None),
+ interface=dict(name=None, selinuxtype=is_selinux_type),
+ permissive=dict(name=is_selinux_type))
+ )
def Run(self):
self.check_packages()
@@ -42,9 +115,9 @@ class RequiredAttrs(Bcfg2.Server.Lint.ServerPlugin):
return {"unknown-entry-type":"error",
"unknown-entry-tag":"error",
"required-attrs-missing":"error",
+ "required-attr-format":"error",
"extra-attrs":"warning"}
-
def check_packages(self):
""" check package sources for Source entries with missing attrs """
if 'Packages' in self.core.plugins:
@@ -85,13 +158,17 @@ class RequiredAttrs(Bcfg2.Server.Lint.ServerPlugin):
""" check bundles for BoundPath entries with missing attrs """
if 'Bundler' in self.core.plugins:
for bundle in self.core.plugins['Bundler'].entries.values():
- try:
- xdata = lxml.etree.XML(bundle.data)
- except (lxml.etree.XMLSyntaxError, AttributeError):
- xdata = lxml.etree.parse(bundle.template.filepath).getroot()
+ if (self.HandlesFile(bundle.name) and
+ (not has_genshi or
+ not isinstance(bundle, BundleTemplateFile))):
+ try:
+ xdata = lxml.etree.XML(bundle.data)
+ except (lxml.etree.XMLSyntaxError, AttributeError):
+ xdata = \
+ lxml.etree.parse(bundle.template.filepath).getroot()
- for path in xdata.xpath("//*[substring(name(), 1, 5) = 'Bound']"):
- self.check_entry(path, bundle.name)
+ for path in xdata.xpath("//*[substring(name(), 1, 5) = 'Bound']"):
+ self.check_entry(path, bundle.name)
def check_entry(self, entry, filename):
""" generic entry check """
@@ -103,43 +180,55 @@ class RequiredAttrs(Bcfg2.Server.Lint.ServerPlugin):
if tag not in self.required_attrs:
self.LintError("unknown-entry-tag",
"Unknown entry tag '%s': %s" %
- (entry.tag, self.RenderXML(entry)))
+ (tag, self.RenderXML(entry)))
if isinstance(self.required_attrs[tag], dict):
etype = entry.get('type')
if etype in self.required_attrs[tag]:
- required_attrs = set(self.required_attrs[tag][etype] +
- ['type'])
+ required_attrs = self.required_attrs[tag][etype]
else:
self.LintError("unknown-entry-type",
"Unknown %s type %s: %s" %
(tag, etype, self.RenderXML(entry)))
return
else:
- required_attrs = set(self.required_attrs[tag])
+ required_attrs = self.required_attrs[tag]
attrs = set(entry.attrib.keys())
if 'dev_type' in required_attrs:
dev_type = entry.get('dev_type')
if dev_type in ['block', 'char']:
# check if major/minor are specified
- required_attrs |= set(['major', 'minor'])
+ required_attrs['major'] = is_device_mode
+ required_attrs['minor'] = is_device_mode
+
+ if tag == 'ACL' and 'scope' in required_attrs:
+ required_attrs[entry.get('scope')] = is_username
if '__text__' in required_attrs:
- required_attrs.remove('__text__')
+ del required_attrs['__text__']
if (not entry.text and
not entry.get('empty', 'false').lower() == 'true'):
self.LintError("required-attrs-missing",
"Text missing for %s %s in %s: %s" %
- (entry.tag, name, filename,
+ (tag, name, filename,
self.RenderXML(entry)))
- if not attrs.issuperset(required_attrs):
+ if not attrs.issuperset(required_attrs.keys()):
self.LintError("required-attrs-missing",
"The following required attribute(s) are "
"missing for %s %s in %s: %s\n%s" %
- (entry.tag, name, filename,
+ (tag, name, filename,
", ".join([attr
for attr in
- required_attrs.difference(attrs)]),
+ set(required_attrs.keys()).difference(attrs)]),
self.RenderXML(entry)))
+
+ for attr, fmt in required_attrs.items():
+ if fmt and attr in attrs and not fmt(entry.attrib[attr]):
+ self.LintError("required-attr-format",
+ "The %s attribute of %s %s in %s is "
+ "malformed\n%s" %
+ (attr, tag, name, filename,
+ self.RenderXML(entry)))
+
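The new required-attr-format error relies on the small verifier functions added at the top of this file; each returns something truthy when the attribute value is well formed. For example (values are illustrative, and the functions are assumed to be imported from the RequiredAttrs module):

    # Hedged sketch of the format verifiers defined above.
    print(bool(is_octal_mode("0644")))     # True
    print(bool(is_username("root")))       # True
    print(bool(is_filename("passwd")))     # False -- names must be absolute paths
    print(bool(is_selinux_type("etc_t")))  # True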
diff --git a/src/lib/Bcfg2/Server/Lint/TemplateHelper.py b/src/lib/Bcfg2/Server/Lint/TemplateHelper.py
deleted file mode 100644
index be270a59c..000000000
--- a/src/lib/Bcfg2/Server/Lint/TemplateHelper.py
+++ /dev/null
@@ -1,64 +0,0 @@
-import sys
-import imp
-import glob
-import Bcfg2.Server.Lint
-from Bcfg2.Server.Plugins.TemplateHelper import HelperModule
-
-class TemplateHelper(Bcfg2.Server.Lint.ServerlessPlugin):
- """ find duplicate Pkgmgr entries with the same priority """
- def __init__(self, *args, **kwargs):
- Bcfg2.Server.Lint.ServerlessPlugin.__init__(self, *args, **kwargs)
- hm = HelperModule("foo.py", None, None)
- self.reserved_keywords = dir(hm)
-
- def Run(self):
- for helper in glob.glob("%s/TemplateHelper/*.py" % self.config['repo']):
- if not self.HandlesFile(helper):
- continue
-
- match = HelperModule._module_name_re.search(helper)
- if match:
- module_name = match.group(1)
- else:
- module_name = helper
-
- try:
- module = imp.load_source(module_name, helper)
- except:
- err = sys.exc_info()[1]
- self.LintError("templatehelper-import-error",
- "Failed to import %s: %s" %
- (helper, err))
- continue
-
- if not hasattr(module, "__export__"):
- self.LintError("templatehelper-no-export",
- "%s has no __export__ list" % helper)
- continue
- elif not isinstance(module.__export__, list):
- self.LintError("templatehelper-nonlist-export",
- "__export__ is not a list in %s" % helper)
- continue
-
- for sym in module.__export__:
- if not hasattr(module, sym):
- self.LintError("templatehelper-nonexistent-export",
- "%s: exported symbol %s does not exist" %
- (helper, sym))
- elif sym in self.reserved_keywords:
- self.LintError("templatehelper-reserved-export",
- "%s: exported symbol %s is reserved" %
- (helper, sym))
- elif sym.startswith("_"):
- self.LintError("templatehelper-underscore-export",
- "%s: exported symbol %s starts with underscore" %
- (helper, sym))
-
- @classmethod
- def Errors(cls):
- return {"templatehelper-import-error":"error",
- "templatehelper-no-export":"error",
- "templatehelper-nonlist-export":"error",
- "templatehelper-nonexistent-export":"error",
- "templatehelper-reserved-export":"error",
- "templatehelper-underscore-export":"warning"}
diff --git a/src/lib/Bcfg2/Server/Lint/Validate.py b/src/lib/Bcfg2/Server/Lint/Validate.py
index 05fedc313..b8bdb4755 100644
--- a/src/lib/Bcfg2/Server/Lint/Validate.py
+++ b/src/lib/Bcfg2/Server/Lint/Validate.py
@@ -1,10 +1,10 @@
-import fnmatch
+import os
+import sys
import glob
+import fnmatch
import lxml.etree
-import os
from subprocess import Popen, PIPE, STDOUT
-import sys
-
+from Bcfg2.Server import XI, XI_NAMESPACE
import Bcfg2.Server.Lint
class Validate(Bcfg2.Server.Lint.ServerlessPlugin):
@@ -22,7 +22,6 @@ class Validate(Bcfg2.Server.Lint.ServerlessPlugin):
"%s/Rules/*.xml":"%s/rules.xsd",
"%s/Defaults/*.xml":"%s/defaults.xsd",
"%s/etc/report-configuration.xml":"%s/report-configuration.xsd",
- "%s/Svcmgr/*.xml":"%s/services.xsd",
"%s/Deps/*.xml":"%s/deps.xsd",
"%s/Decisions/*.xml":"%s/decisions.xsd",
"%s/Packages/sources.xml":"%s/packages.xsd",
@@ -46,20 +45,10 @@ class Validate(Bcfg2.Server.Lint.ServerlessPlugin):
if filelist:
# avoid loading schemas for empty file lists
schemafile = schemaname % schemadir
- try:
- schema = lxml.etree.XMLSchema(lxml.etree.parse(schemafile))
- except IOError:
- e = sys.exc_info()[1]
- self.LintError("input-output-error", str(e))
- continue
- except lxml.etree.XMLSchemaParseError:
- e = sys.exc_info()[1]
- self.LintError("schema-failed-to-parse",
- "Failed to process schema %s: %s" %
- (schemafile, e))
- continue
- for filename in filelist:
- self.validate(filename, schemafile, schema=schema)
+ schema = self._load_schema(schemafile)
+ if schema:
+ for filename in filelist:
+ self.validate(filename, schemafile, schema=schema)
self.check_properties()
@@ -88,11 +77,8 @@ class Validate(Bcfg2.Server.Lint.ServerlessPlugin):
return True on success, False on failure """
if schema is None:
# if no schema object was provided, instantiate one
- try:
- schema = lxml.etree.XMLSchema(lxml.etree.parse(schemafile))
- except:
- self.LintError("schema-failed-to-parse",
- "Failed to process schema %s" % schemafile)
+ schema = self._load_schema(schemafile)
+ if not schema:
return False
try:
@@ -187,24 +173,42 @@ class Validate(Bcfg2.Server.Lint.ServerlessPlugin):
def follow_xinclude(self, xfile):
""" follow xincludes in the given file """
xdata = lxml.etree.parse(xfile)
- included = set([ent.get('href') for ent in
- xdata.findall('./{http://www.w3.org/2001/XInclude}include')])
+ included = set([el
+ for el in xdata.findall('./%sinclude' % XI_NAMESPACE)])
rv = []
while included:
try:
- filename = included.pop()
+ el = included.pop()
except KeyError:
continue
+ filename = el.get("href")
path = os.path.join(os.path.dirname(xfile), filename)
- if self.HandlesFile(path):
+ if not os.path.exists(path):
+ if not el.findall('./%sfallback' % XI_NAMESPACE):
+ self.LintError("broken-xinclude-chain",
+ "XInclude %s does not exist in %s: %s" %
+ (filename, xfile, self.RenderXML(el)))
+ elif self.HandlesFile(path):
rv.append(path)
groupdata = lxml.etree.parse(path)
[included.add(el.get('href'))
for el in
- groupdata.findall('./{http://www.w3.org/2001/XInclude}include')]
+ groupdata.findall('./%sinclude' % XI_NAMESPACE)]
included.discard(filename)
return rv
+ def _load_schema(self, filename):
+ try:
+ return lxml.etree.XMLSchema(lxml.etree.parse(filename))
+ except IOError:
+ e = sys.exc_info()[1]
+ self.LintError("input-output-error", str(e))
+ except lxml.etree.XMLSchemaParseError:
+ e = sys.exc_info()[1]
+ self.LintError("schema-failed-to-parse",
+ "Failed to process schema %s: %s" %
+ (filename, e))
+ return None
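The refactor above factors schema loading into a single _load_schema() helper, so the bulk validation loop and per-file validation share one set of error handling. A standalone sketch of the same lxml pattern (the file paths are hypothetical):

    import sys
    import lxml.etree

    def load_schema(filename):
        try:
            return lxml.etree.XMLSchema(lxml.etree.parse(filename))
        except IOError:
            print("input-output-error: %s" % sys.exc_info()[1])
        except lxml.etree.XMLSchemaParseError:
            print("schema-failed-to-parse: %s: %s" % (filename, sys.exc_info()[1]))
        return None

    schema = load_schema("/usr/share/bcfg2/schemas/bundle.xsd")
    if schema is not None:
        print(schema.validate(lxml.etree.parse("/var/lib/bcfg2/Bundler/base.xml")))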
diff --git a/src/lib/Bcfg2/Server/Lint/__init__.py b/src/lib/Bcfg2/Server/Lint/__init__.py
index 5d7dd707b..e3b4c8ea7 100644
--- a/src/lib/Bcfg2/Server/Lint/__init__.py
+++ b/src/lib/Bcfg2/Server/Lint/__init__.py
@@ -81,18 +81,20 @@ class Plugin (object):
def LintError(self, err, msg):
self.errorhandler.dispatch(err, msg)
- def RenderXML(self, element):
+ def RenderXML(self, element, keep_text=False):
"""render an XML element for error output -- line number
prefixed, no children"""
xml = None
if len(element) or element.text:
el = copy(element)
- if el.text:
+ if el.text and not keep_text:
el.text = '...'
[el.remove(c) for c in el.iterchildren()]
- xml = lxml.etree.tostring(el).strip()
+ xml = lxml.etree.tostring(el,
+ xml_declaration=False).decode("UTF-8").strip()
else:
- xml = lxml.etree.tostring(element).strip()
+ xml = lxml.etree.tostring(element,
+ xml_declaration=False).decode("UTF-8").strip()
return " line %s: %s" % (element.sourceline, xml)
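RenderXML() now drops the XML declaration, decodes the bytes lxml returns under Python 3, and grows a keep_text flag for callers that want the element text preserved. A rough sketch of the output it produces, on invented input:

    from copy import copy
    import lxml.etree

    element = lxml.etree.fromstring('<Bundle name="ntp"><Package name="ntp"/></Bundle>')
    el = copy(element)                 # lxml element copies are deep copies
    if el.text:
        el.text = '...'
    for child in list(el.iterchildren()):
        el.remove(child)
    xml = lxml.etree.tostring(el, xml_declaration=False).decode("UTF-8").strip()
    print(" line %s: %s" % (element.sourceline, xml))
    # -> ' line 1: <Bundle name="ntp"/>'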
diff --git a/src/lib/Bcfg2/Server/Plugin.py b/src/lib/Bcfg2/Server/Plugin.py
index ca37431a2..910bc0108 100644
--- a/src/lib/Bcfg2/Server/Plugin.py
+++ b/src/lib/Bcfg2/Server/Plugin.py
@@ -1,54 +1,60 @@
"""This module provides the baseclass for Bcfg2 Server Plugins."""
-import copy
-import logging
-import lxml.etree
import os
-import pickle
-import posixpath
import re
import sys
+import copy
+import logging
+import operator
import threading
-from Bcfg2.Bcfg2Py3k import ConfigParser
-
-from lxml.etree import XML, XMLSyntaxError
-
+import lxml.etree
+import Bcfg2.Server
import Bcfg2.Options
+from Bcfg2.Bcfg2Py3k import ConfigParser, CmpMixin, reduce, Queue, Empty, \
+ Full, cPickle
-# py3k compatibility
-if sys.hexversion >= 0x03000000:
- from functools import reduce
- from io import FileIO as BUILTIN_FILE_TYPE
-else:
- BUILTIN_FILE_TYPE = file
-from Bcfg2.Bcfg2Py3k import Queue
-from Bcfg2.Bcfg2Py3k import Empty
-from Bcfg2.Bcfg2Py3k import Full
+try:
+ import django
+ has_django = True
+except ImportError:
+ has_django = False
# grab default metadata info from bcfg2.conf
opts = {'owner': Bcfg2.Options.MDATA_OWNER,
'group': Bcfg2.Options.MDATA_GROUP,
- 'important': Bcfg2.Options.MDATA_IMPORTANT,
'perms': Bcfg2.Options.MDATA_PERMS,
+ 'secontext': Bcfg2.Options.MDATA_SECONTEXT,
+ 'important': Bcfg2.Options.MDATA_IMPORTANT,
'paranoid': Bcfg2.Options.MDATA_PARANOID,
'sensitive': Bcfg2.Options.MDATA_SENSITIVE}
-mdata_setup = Bcfg2.Options.OptionParser(opts)
-mdata_setup.parse([])
-del mdata_setup['args']
+default_file_metadata = Bcfg2.Options.OptionParser(opts)
+default_file_metadata.parse([])
+del default_file_metadata['args']
logger = logging.getLogger('Bcfg2.Server.Plugin')
-default_file_metadata = mdata_setup
-
-info_regex = re.compile( \
- 'encoding:(\s)*(?P<encoding>\w+)|' +
- 'group:(\s)*(?P<group>\S+)|' +
- 'important:(\s)*(?P<important>\S+)|' +
- 'mtime:(\s)*(?P<mtime>\w+)|' +
- 'owner:(\s)*(?P<owner>\S+)|' +
- 'paranoid:(\s)*(?P<paranoid>\S+)|' +
- 'perms:(\s)*(?P<perms>\w+)|' +
- 'sensitive:(\s)*(?P<sensitive>\S+)|')
+info_regex = re.compile('owner:(\s)*(?P<owner>\S+)|' +
+ 'group:(\s)*(?P<group>\S+)|' +
+ 'perms:(\s)*(?P<perms>\w+)|' +
+ 'secontext:(\s)*(?P<secontext>\S+)|' +
+ 'paranoid:(\s)*(?P<paranoid>\S+)|' +
+ 'sensitive:(\s)*(?P<sensitive>\S+)|' +
+ 'encoding:(\s)*(?P<encoding>\S+)|' +
+ 'important:(\s)*(?P<important>\S+)|' +
+ 'mtime:(\s)*(?P<mtime>\w+)|')
+
+def bind_info(entry, metadata, infoxml=None, default=default_file_metadata):
+ for attr, val in list(default.items()):
+ entry.set(attr, val)
+ if infoxml:
+ mdata = dict()
+ infoxml.pnode.Match(metadata, mdata, entry=entry)
+ if 'Info' not in mdata:
+ msg = "Failed to set metadata for file %s" % entry.get('name')
+ logger.error(msg)
+ raise PluginExecutionError(msg)
+ for attr, val in list(mdata['Info'][None].items()):
+ entry.set(attr, val)
class PluginInitError(Exception):
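The reworked info_regex and the new bind_info() helper centralize how default file metadata, legacy :info files, and info.xml data are bound onto generated entries. A hedged sketch of the regex in action on an invented :info body (only three of the recognized keys are shown):

    import re

    info_regex = re.compile(r'owner:(\s)*(?P<owner>\S+)|'
                            r'group:(\s)*(?P<group>\S+)|'
                            r'perms:(\s)*(?P<perms>\w+)|')

    info_file = "owner: root\ngroup: root\nperms: 0644\n"
    metadata = {}
    for line in info_file.splitlines():
        match = info_regex.match(line)
        for key, value in match.groupdict().items():
            if value:
                metadata[key] = value
    print(metadata)   # -> {'owner': 'root', 'group': 'root', 'perms': '0644'}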
@@ -61,6 +67,18 @@ class PluginExecutionError(Exception):
pass
+class MetadataConsistencyError(Exception):
+ """This error gets raised when metadata is internally inconsistent."""
+ pass
+
+
+class MetadataRuntimeError(Exception):
+ """This error is raised when the metadata engine
+ is called prior to reading enough data.
+ """
+ pass
+
+
class Debuggable(object):
__rmi__ = ['toggle_debug']
@@ -73,6 +91,10 @@ class Debuggable(object):
def toggle_debug(self):
self.debug_flag = not self.debug_flag
+ self.debug_log("%s: debug_flag = %s" % (self.__class__.__name__,
+ self.debug_flag),
+ flag=True)
+ return self.debug_flag
def debug_log(self, message, flag=None):
if (flag is None and self.debug_flag) or flag:
@@ -116,8 +138,7 @@ class Plugin(Debuggable):
@classmethod
def init_repo(cls, repo):
- path = "%s/%s" % (repo, cls.name)
- os.makedirs(path)
+ os.makedirs(os.path.join(repo, cls.name))
def shutdown(self):
self.running = False
@@ -126,6 +147,26 @@ class Plugin(Debuggable):
return "%s Plugin" % self.__class__.__name__
+class DatabaseBacked(Plugin):
+ @property
+ def _use_db(self):
+ use_db = self.core.setup.cfp.getboolean(self.name.lower(),
+ "use_database",
+ default=False)
+ if use_db and has_django and self.core.database_available:
+ return True
+ elif not use_db:
+ return False
+ else:
+ self.logger.error("use_database is true but django not found")
+ return False
+
+
+class PluginDatabaseModel(object):
+ class Meta:
+ app_label = "Server"
+
+
class Generator(object):
"""Generator plugins contribute to literal client configurations."""
def HandlesEntry(self, entry, metadata):
@@ -134,19 +175,19 @@ class Generator(object):
def HandleEntry(self, entry, metadata):
"""This is the slow-path handler for configuration entry binding."""
- raise PluginExecutionError
+ return entry
class Structure(object):
"""Structure Plugins contribute to abstract client configurations."""
def BuildStructures(self, metadata):
"""Return a list of abstract goal structures for client."""
- raise PluginExecutionError
+ raise NotImplementedError
class Metadata(object):
"""Signal metadata capabilities for this plugin"""
- def add_client(self, client_name, attribs):
+ def add_client(self, client_name):
"""Add client."""
pass
@@ -158,11 +199,17 @@ class Metadata(object):
"""Create viz str for viz admin mode."""
pass
+ def _handle_default_event(self, event):
+ pass
+
def get_initial_metadata(self, client_name):
- raise PluginExecutionError
+ raise NotImplementedError
- def merge_additional_data(self, imd, source, groups, data):
- raise PluginExecutionError
+ def merge_additional_data(self, imd, source, data):
+ raise NotImplementedError
+
+ def merge_additional_groups(self, imd, groups):
+ raise NotImplementedError
class Connector(object):
@@ -187,23 +234,23 @@ class Probing(object):
pass
-class Statistics(object):
+class Statistics(Plugin):
"""Signal statistics handling capability."""
def process_statistics(self, client, xdata):
pass
-class ThreadedStatistics(Statistics,
- threading.Thread):
+class ThreadedStatistics(Statistics, threading.Thread):
"""Threaded statistics handling capability."""
def __init__(self, core, datastore):
- Statistics.__init__(self)
+ Statistics.__init__(self, core, datastore)
threading.Thread.__init__(self)
# Event from the core signaling an exit
self.terminate = core.terminate
self.work_queue = Queue(100000)
- self.pending_file = "%s/etc/%s.pending" % (datastore, self.__class__.__name__)
- self.daemon = True
+ self.pending_file = os.path.join(datastore, "etc",
+ "%s.pending" % self.name)
+ self.daemon = False
self.start()
def save(self):
@@ -213,32 +260,38 @@ class ThreadedStatistics(Statistics,
while not self.work_queue.empty():
(metadata, data) = self.work_queue.get_nowait()
try:
- pending_data.append((metadata.hostname, lxml.etree.tostring(data)))
+ pending_data.append((metadata.hostname,
+ lxml.etree.tostring(data,
+ xml_declaration=False).decode("UTF-8")))
except:
- self.logger.warning("Dropping interaction for %s" % metadata.hostname)
+ err = sys.exc_info()[1]
+ self.logger.warning("Dropping interaction for %s: %s" %
+ (metadata.hostname, err))
except Empty:
pass
try:
savefile = open(self.pending_file, 'w')
- pickle.dump(pending_data, savefile)
+ cPickle.dump(pending_data, savefile)
savefile.close()
- self.logger.info("Saved pending %s data" % self.__class__.__name__)
+ self.logger.info("Saved pending %s data" % self.name)
except:
- self.logger.warning("Failed to save pending data")
+ err = sys.exc_info()[1]
+ self.logger.warning("Failed to save pending data: %s" % err)
def load(self):
- """Load any pending data to a file."""
+ """Load any pending data from a file."""
if not os.path.exists(self.pending_file):
return True
pending_data = []
try:
savefile = open(self.pending_file, 'r')
- pending_data = pickle.load(savefile)
+ pending_data = cPickle.load(savefile)
savefile.close()
except Exception:
e = sys.exc_info()[1]
self.logger.warning("Failed to load pending data: %s" % e)
+ return False
for (pmetadata, pdata) in pending_data:
# check that shutdown wasnt called early
if self.terminate.isSet():
@@ -249,56 +302,58 @@ class ThreadedStatistics(Statistics,
try:
metadata = self.core.build_metadata(pmetadata)
break
- except Bcfg2.Server.Plugins.Metadata.MetadataRuntimeError:
+ except MetadataRuntimeError:
pass
self.terminate.wait(5)
if self.terminate.isSet():
return False
- self.work_queue.put_nowait((metadata, lxml.etree.fromstring(pdata)))
+ self.work_queue.put_nowait((metadata,
+ lxml.etree.XML(pdata,
+ parser=Bcfg2.Server.XMLParser)))
except Full:
self.logger.warning("Queue.Full: Failed to load queue data")
break
except lxml.etree.LxmlError:
lxml_error = sys.exc_info()[1]
- self.logger.error("Unable to load save interaction: %s" % lxml_error)
- except Bcfg2.Server.Plugins.Metadata.MetadataConsistencyError:
- self.logger.error("Unable to load metadata for save interaction: %s" % pmetadata)
+ self.logger.error("Unable to load saved interaction: %s" %
+ lxml_error)
+ except MetadataConsistencyError:
+ self.logger.error("Unable to load metadata for save "
+ "interaction: %s" % pmetadata)
try:
os.unlink(self.pending_file)
except:
- self.logger.error("Failed to unlink save file: %s" % self.pending_file)
- self.logger.info("Loaded pending %s data" % self.__class__.__name__)
+ self.logger.error("Failed to unlink save file: %s" %
+ self.pending_file)
+ self.logger.info("Loaded pending %s data" % self.name)
return True
def run(self):
if not self.load():
return
- while not self.terminate.isSet():
+        while not self.terminate.isSet() and self.work_queue is not None:
try:
- (xdata, client) = self.work_queue.get(block=True, timeout=2)
+ (client, xdata) = self.work_queue.get(block=True, timeout=2)
except Empty:
continue
except Exception:
e = sys.exc_info()[1]
self.logger.error("ThreadedStatistics: %s" % e)
continue
- self.handle_statistic(xdata, client)
- if not self.work_queue.empty():
+ self.handle_statistic(client, xdata)
+            if self.work_queue is not None and not self.work_queue.empty():
self.save()
def process_statistics(self, metadata, data):
- warned = False
try:
self.work_queue.put_nowait((metadata, copy.copy(data)))
- warned = False
except Full:
- if not warned:
- self.logger.warning("%s: Queue is full. Dropping interactions." % self.__class__.__name__)
- warned = True
+ self.logger.warning("%s: Queue is full. Dropping interactions." %
+ self.name)
- def handle_statistics(self, metadata, data):
+ def handle_statistic(self, metadata, data):
"""Handle stats here."""
pass
@@ -308,17 +363,17 @@ class PullSource(object):
return []
def GetCurrentEntry(self, client, e_type, e_name):
- raise PluginExecutionError
+ raise NotImplementedError
class PullTarget(object):
def AcceptChoices(self, entry, metadata):
- raise PluginExecutionError
+ raise NotImplementedError
def AcceptPullData(self, specific, new_entry, verbose):
"""This is the null per-plugin implementation
of bcfg2-admin pull."""
- raise PluginExecutionError
+ raise NotImplementedError
class Decision(object):
@@ -334,13 +389,13 @@ class ValidationError(Exception):
class StructureValidator(object):
"""Validate/modify goal structures."""
def validate_structures(self, metadata, structures):
- raise ValidationError("not implemented")
+ raise NotImplementedError
class GoalValidator(object):
"""Validate/modify configuration goals."""
def validate_goals(self, metadata, goals):
- raise ValidationError("not implemented")
+ raise NotImplementedError
class Version(object):
@@ -352,6 +407,17 @@ class Version(object):
pass
+class ClientRunHooks(object):
+ """ Provides hooks to interact with client runs """
+ def start_client_run(self, metadata):
+ pass
+
+ def end_client_run(self, metadata):
+ pass
+
+ def end_statistics(self, metadata):
+ pass
+
# the rest of the file contains classes for coherent file caching
class FileBacked(object):
@@ -361,17 +427,18 @@ class FileBacked(object):
This object is meant to be used as a part of DirectoryBacked.
"""
- def __init__(self, name):
+ def __init__(self, name, fam=None):
object.__init__(self)
self.data = ''
self.name = name
+ self.fam = fam
def HandleEvent(self, event=None):
"""Read file upon update."""
if event and event.code2str() not in ['exists', 'changed', 'created']:
return
try:
- self.data = BUILTIN_FILE_TYPE(self.name).read()
+ self.data = open(self.name).read()
self.Index()
except IOError:
err = sys.exc_info()[1]
@@ -382,16 +449,14 @@ class FileBacked(object):
pass
def __repr__(self):
- return "%s: %s" % (self.__class__.__name__, str(self))
-
- def __str__(self):
- return "%s: %s" % (self.name, self.data)
+ return "%s: %s" % (self.__class__.__name__, self.name)
class DirectoryBacked(object):
"""This object is a coherent cache for a filesystem hierarchy of files."""
__child__ = FileBacked
patterns = re.compile('.*')
+ ignore = None
def __init__(self, data, fam):
"""Initialize the DirectoryBacked object.
@@ -438,8 +503,8 @@ class DirectoryBacked(object):
"""
dirpathname = os.path.join(self.data, relative)
if relative not in self.handles.values():
- if not posixpath.isdir(dirpathname):
- logger.error("Failed to open directory %s" % (dirpathname))
+ if not os.path.isdir(dirpathname):
+ logger.error("%s is not a directory" % dirpathname)
return
reqid = self.fam.AddMonitor(dirpathname, self)
self.handles[reqid] = relative
@@ -453,7 +518,8 @@ class DirectoryBacked(object):
added.
"""
self.entries[relative] = self.__child__(os.path.join(self.data,
- relative))
+ relative),
+ self.fam)
self.entries[relative].HandleEvent(event)
def HandleEvent(self, event):
@@ -470,27 +536,33 @@ class DirectoryBacked(object):
"""
action = event.code2str()
- # Clean up the absolute path names passed in
- event.filename = os.path.normpath(event.filename)
- if event.filename.startswith(self.data):
- event.filename = event.filename[len(self.data)+1:]
-
# Exclude events for actions we don't care about
if action == 'endExist':
return
if event.requestID not in self.handles:
- logger.warn("Got %s event with unknown handle (%s) for %s"
- % (action, event.requestID, abspath))
+ logger.warn("Got %s event with unknown handle (%s) for %s" %
+ (action, event.requestID, event.filename))
+ return
+
+ # Clean up path names
+ event.filename = os.path.normpath(event.filename)
+ if event.filename.startswith(self.data):
+ # the first event we get is on the data directory itself
+ event.filename = event.filename[len(self.data) + 1:]
+
+ if self.ignore and self.ignore.search(event.filename):
+ logger.debug("Ignoring event %s" % event.filename)
return
# Calculate the absolute and relative paths this event refers to
abspath = os.path.join(self.data, self.handles[event.requestID],
event.filename)
- relpath = os.path.join(self.handles[event.requestID], event.filename)
+ relpath = os.path.join(self.handles[event.requestID],
+ event.filename).lstrip('/')
if action == 'deleted':
- for key in self.entries.keys():
+ for key in list(self.entries.keys()):
if key.startswith(relpath):
del self.entries[key]
# We remove values from self.entries, but not
@@ -498,7 +570,7 @@ class DirectoryBacked(object):
# watching a directory just because it gets deleted. If it
# is recreated, we will start getting notifications for it
# again without having to add a new monitor.
- elif posixpath.isdir(abspath):
+ elif os.path.isdir(abspath):
# Deal with events for directories
if action in ['exists', 'created']:
self.add_directory_monitor(relpath)
@@ -522,21 +594,13 @@ class DirectoryBacked(object):
# didn't know about. Go ahead and treat it like a
# "created" event, but log a warning, because this
# is unexpected.
- logger.warn("Got %s event for unexpected dir %s" % (action,
- abspath))
+ logger.warn("Got %s event for unexpected dir %s" %
+ (action, abspath))
self.add_directory_monitor(relpath)
else:
- logger.warn("Got unknown dir event %s %s %s" % (event.requestID,
- event.code2str(),
- abspath))
- else:
- # Deal with events for non-directories
- if ((event.filename[-1] == '~') or
- (event.filename[:2] == '.#') or
- (event.filename[-4:] == '.swp') or
- (event.filename in ['SCCS', '.svn', '4913']) or
- (not self.patterns.match(event.filename))):
- return
+ logger.warn("Got unknown dir event %s %s %s" %
+ (event.requestID, event.code2str(), abspath))
+ elif self.patterns.search(event.filename):
if action in ['exists', 'created']:
self.add_entry(relpath, event)
elif action == 'changed':
@@ -547,13 +611,16 @@ class DirectoryBacked(object):
# know about. Go ahead and treat it like a
# "created" event, but log a warning, because this
# is unexpected.
- logger.warn("Got %s event for unexpected file %s" % (action,
- abspath))
+ logger.warn("Got %s event for unexpected file %s" %
+ (action,
+ abspath))
self.add_entry(relpath, event)
else:
- logger.warn("Got unknown file event %s %s %s" % (event.requestID,
- event.code2str(),
- abspath))
+ logger.warn("Got unknown file event %s %s %s" %
+ (event.requestID, event.code2str(), abspath))
+ else:
+ logger.warn("Could not process filename %s; ignoring" %
+ event.filename)
class XMLFileBacked(FileBacked):
@@ -563,68 +630,55 @@ class XMLFileBacked(FileBacked):
"""
__identifier__ = 'name'
- def __init__(self, filename):
- self.label = "dummy"
- self.entries = []
+ def __init__(self, filename, fam=None, should_monitor=False):
FileBacked.__init__(self, filename)
-
- def Index(self):
- """Build local data structures."""
- try:
- self.xdata = XML(self.data)
- except XMLSyntaxError:
- logger.error("Failed to parse %s" % (self.name))
- return
- self.entries = self.xdata.getchildren()
- if self.__identifier__ is not None:
- self.label = self.xdata.attrib[self.__identifier__]
-
- def __iter__(self):
- return iter(self.entries)
-
- def __str__(self):
- return "%s: %s" % (self.name, lxml.etree.tostring(self.xdata))
-
-
-class SingleXMLFileBacked(XMLFileBacked):
- """This object is a coherent cache for an independent XML file."""
- def __init__(self, filename, fam):
- XMLFileBacked.__init__(self, filename)
+ self.label = ""
+ self.entries = []
self.extras = []
self.fam = fam
- self.fam.AddMonitor(filename, self)
+ self.should_monitor = should_monitor
+ if fam and should_monitor:
+ self.fam.AddMonitor(filename, self)
def _follow_xincludes(self, fname=None, xdata=None):
- ''' follow xincludes, adding included files to fam and to
- self.extras '''
+ ''' follow xincludes, adding included files to self.extras '''
if xdata is None:
if fname is None:
xdata = self.xdata.getroottree()
else:
xdata = lxml.etree.parse(fname)
- included = [ent.get('href')
- for ent in xdata.findall('//{http://www.w3.org/2001/XInclude}include')]
- for name in included:
- if name not in self.extras:
- if name.startswith("/"):
- fpath = name
+ included = [el for el in xdata.findall('//%sinclude' %
+ Bcfg2.Server.XI_NAMESPACE)]
+ for el in included:
+ name = el.get("href")
+ if name.startswith("/"):
+ fpath = name
+ else:
+ if fname:
+ rel = fname
else:
- fpath = os.path.join(os.path.dirname(self.name), name)
- self.add_monitor(fpath, name)
- self._follow_xincludes(fname=fpath)
-
- def add_monitor(self, fpath, fname):
- self.fam.AddMonitor(fpath, self)
- self.extras.append(fname)
+ rel = self.name
+ fpath = os.path.join(os.path.dirname(rel), name)
+ if fpath not in self.extras:
+ if os.path.exists(fpath):
+ self._follow_xincludes(fname=fpath)
+ self.add_monitor(fpath)
+ else:
+ msg = "%s: %s does not exist, skipping" % (self.name, name)
+ if el.findall('./%sfallback' % Bcfg2.Server.XI_NAMESPACE):
+ self.logger.debug(msg)
+ else:
+ self.logger.warning(msg)
def Index(self):
"""Build local data structures."""
try:
- self.xdata = lxml.etree.XML(self.data, base_url=self.name)
+ self.xdata = lxml.etree.XML(self.data, base_url=self.name,
+ parser=Bcfg2.Server.XMLParser)
except lxml.etree.XMLSyntaxError:
- err = sys.exc_info()[1]
- logger.error("Failed to parse %s: %s" % (self.name, err))
- raise Bcfg2.Server.Plugin.PluginInitError
+ msg = "Failed to parse %s: %s" % (self.name, sys.exc_info()[1])
+ logger.error(msg)
+ raise PluginInitError(msg)
self._follow_xincludes()
if self.extras:
@@ -638,43 +692,52 @@ class SingleXMLFileBacked(XMLFileBacked):
if self.__identifier__ is not None:
self.label = self.xdata.attrib[self.__identifier__]
+ def add_monitor(self, fpath):
+ self.extras.append(fpath)
+ if self.fam and self.should_monitor:
+ self.fam.AddMonitor(fpath, self)
+
+ def __iter__(self):
+ return iter(self.entries)
+
+ def __str__(self):
+ return "%s at %s" % (self.__class__.__name__, self.name)
+
class StructFile(XMLFileBacked):
"""This file contains a set of structure file formatting logic."""
__identifier__ = None
- def __init__(self, name):
- XMLFileBacked.__init__(self, name)
+ def _include_element(self, item, metadata):
+ """ determine if an XML element matches the metadata """
+ if isinstance(item, lxml.etree._Comment):
+ return False
+ negate = item.get('negate', 'false').lower() == 'true'
+ if item.tag == 'Group':
+ return negate == (item.get('name') not in metadata.groups)
+ elif item.tag == 'Client':
+ return negate == (item.get('name') != metadata.hostname)
+ else:
+ return True
def _match(self, item, metadata):
""" recursive helper for Match() """
- if isinstance(item, lxml.etree._Comment):
- return []
- elif item.tag == 'Group':
- rv = []
- if ((item.get('negate', 'false').lower() == 'true' and
- item.get('name') not in metadata.groups) or
- (item.get('negate', 'false').lower() == 'false' and
- item.get('name') in metadata.groups)):
- for child in item.iterchildren():
- rv.extend(self._match(child, metadata))
- return rv
- elif item.tag == 'Client':
- rv = []
- if ((item.get('negate', 'false').lower() == 'true' and
- item.get('name') != metadata.hostname) or
- (item.get('negate', 'false').lower() == 'false' and
- item.get('name') == metadata.hostname)):
+ if self._include_element(item, metadata):
+ if item.tag == 'Group' or item.tag == 'Client':
+ rv = []
+ if self._include_element(item, metadata):
+ for child in item.iterchildren():
+ rv.extend(self._match(child, metadata))
+ return rv
+ else:
+ rv = copy.deepcopy(item)
+ for child in rv.iterchildren():
+ rv.remove(child)
for child in item.iterchildren():
rv.extend(self._match(child, metadata))
- return rv
+ return [rv]
else:
- rv = copy.copy(item)
- for child in rv.iterchildren():
- rv.remove(child)
- for child in item.iterchildren():
- rv.extend(self._match(child, metadata))
- return [rv]
+ return []
def Match(self, metadata):
"""Return matching fragments of independent."""
@@ -683,27 +746,52 @@ class StructFile(XMLFileBacked):
rv.extend(self._match(child, metadata))
return rv
+ def _xml_match(self, item, metadata):
+ """ recursive helper for XMLMatch """
+ if self._include_element(item, metadata):
+ if item.tag == 'Group' or item.tag == 'Client':
+ for child in item.iterchildren():
+ item.remove(child)
+ item.getparent().append(child)
+ self._xml_match(child, metadata)
+ item.getparent().remove(item)
+ else:
+ for child in item.iterchildren():
+ self._xml_match(child, metadata)
+ else:
+ item.getparent().remove(item)
+
+ def XMLMatch(self, metadata):
+ """ Return a rebuilt XML document that only contains the
+ matching portions """
+ rv = copy.deepcopy(self.xdata)
+ for child in rv.iterchildren():
+ self._xml_match(child, metadata)
+ return rv
+
-class INode:
+class INode(object):
"""
LNodes provide lists of things available at a particular
group intersection.
"""
- raw = {'Client': "lambda m, e:'%(name)s' == m.hostname and predicate(m, e)",
- 'Group': "lambda m, e:'%(name)s' in m.groups and predicate(m, e)"}
- nraw = {'Client': "lambda m, e:'%(name)s' != m.hostname and predicate(m, e)",
- 'Group': "lambda m, e:'%(name)s' not in m.groups and predicate(m, e)"}
+ raw = dict(
+ Client="lambda m, e:'%(name)s' == m.hostname and predicate(m, e)",
+ Group="lambda m, e:'%(name)s' in m.groups and predicate(m, e)")
+ nraw = dict(
+ Client="lambda m, e:'%(name)s' != m.hostname and predicate(m, e)",
+ Group="lambda m, e:'%(name)s' not in m.groups and predicate(m, e)")
containers = ['Group', 'Client']
ignore = []
def __init__(self, data, idict, parent=None):
self.data = data
self.contents = {}
- if parent == None:
- self.predicate = lambda m, d: True
+ if parent is None:
+ self.predicate = lambda m, e: True
else:
predicate = parent.predicate
- if data.get('negate', 'false') in ['true', 'True']:
+ if data.get('negate', 'false').lower() == 'true':
psrc = self.nraw
else:
psrc = self.raw
@@ -712,21 +800,29 @@ class INode:
{'name': data.get('name')},
{'predicate': predicate})
else:
- raise Exception
- mytype = self.__class__
+ raise PluginExecutionError("Unknown tag: %s" % data.tag)
self.children = []
+ self._load_children(data, idict)
+
+ def _load_children(self, data, idict):
for item in data.getchildren():
if item.tag in self.ignore:
continue
elif item.tag in self.containers:
- self.children.append(mytype(item, idict, self))
+ self.children.append(self.__class__(item, idict, self))
else:
try:
- self.contents[item.tag][item.get('name')] = item.attrib
+ self.contents[item.tag][item.get('name')] = \
+ dict(item.attrib)
except KeyError:
- self.contents[item.tag] = {item.get('name'): item.attrib}
+ self.contents[item.tag] = \
+ {item.get('name'): dict(item.attrib)}
if item.text:
- self.contents[item.tag]['__text__'] = item.text
+ self.contents[item.tag][item.get('name')]['__text__'] = \
+ item.text
+ if item.getchildren():
+ self.contents[item.tag][item.get('name')]['__children__'] =\
+ item.getchildren()
try:
idict[item.tag].append(item.get('name'))
except KeyError:
@@ -760,43 +856,48 @@ class XMLSrc(XMLFileBacked):
"""XMLSrc files contain a LNode hierarchy that returns matching entries."""
__node__ = INode
__cacheobj__ = dict
+ __priority_required__ = True
- def __init__(self, filename, noprio=False):
- XMLFileBacked.__init__(self, filename)
+ def __init__(self, filename, fam=None, should_monitor=False):
+ XMLFileBacked.__init__(self, filename, fam, should_monitor)
self.items = {}
self.cache = None
self.pnode = None
self.priority = -1
- self.noprio = noprio
def HandleEvent(self, _=None):
"""Read file upon update."""
try:
- data = BUILTIN_FILE_TYPE(self.name).read()
+ data = open(self.name).read()
except IOError:
- logger.error("Failed to read file %s" % (self.name))
- return
+ msg = "Failed to read file %s: %s" % (self.name, sys.exc_info()[1])
+ logger.error(msg)
+ raise PluginExecutionError(msg)
self.items = {}
try:
- xdata = lxml.etree.XML(data)
+ xdata = lxml.etree.XML(data, parser=Bcfg2.Server.XMLParser)
except lxml.etree.XMLSyntaxError:
- logger.error("Failed to parse file %s" % (self.name))
- return
+            msg = "Failed to parse file %s: %s" % (self.name, sys.exc_info()[1])
+ logger.error(msg)
+ raise PluginExecutionError(msg)
self.pnode = self.__node__(xdata, self.items)
self.cache = None
try:
self.priority = int(xdata.get('priority'))
except (ValueError, TypeError):
- if not self.noprio:
- logger.error("Got bogus priority %s for file %s" %
- (xdata.get('priority'), self.name))
+ if self.__priority_required__:
+ msg = "Got bogus priority %s for file %s" % \
+ (xdata.get('priority'), self.name)
+ logger.error(msg)
+ raise PluginExecutionError(msg)
+
del xdata, data
def Cache(self, metadata):
"""Build a package dict for a given host."""
- if self.cache == None or self.cache[0] != metadata:
+ if self.cache is None or self.cache[0] != metadata:
cache = (metadata, self.__cacheobj__())
- if self.pnode == None:
+ if self.pnode is None:
logger.error("Cache method called early for %s; forcing data load" % (self.name))
self.HandleEvent()
return
@@ -809,11 +910,13 @@ class XMLSrc(XMLFileBacked):
class InfoXML(XMLSrc):
__node__ = InfoNode
+ __priority_required__ = False
class XMLDirectoryBacked(DirectoryBacked):
"""Directorybacked for *.xml."""
- patterns = re.compile('.*\.xml')
+ patterns = re.compile('^.*\.xml$')
+ __child__ = XMLFileBacked
class PrioDir(Plugin, Generator, XMLDirectoryBacked):
@@ -824,11 +927,7 @@ class PrioDir(Plugin, Generator, XMLDirectoryBacked):
def __init__(self, core, datastore):
Plugin.__init__(self, core, datastore)
Generator.__init__(self)
- try:
- XMLDirectoryBacked.__init__(self, self.data, self.core.fam)
- except OSError:
- self.logger.error("Failed to load %s indices" % (self.name))
- raise PluginInitError
+ XMLDirectoryBacked.__init__(self, self.data, self.core.fam)
def HandleEvent(self, event):
"""Handle events and update dispatch table."""
@@ -867,19 +966,22 @@ class PrioDir(Plugin, Generator, XMLDirectoryBacked):
else:
prio = [int(src.priority) for src in matching]
if prio.count(max(prio)) > 1:
- self.logger.error("Found conflicting sources with "
- "same priority for %s, %s %s" %
- (metadata.hostname,
- entry.tag.lower(), entry.get('name')))
+ msg = "Found conflicting sources with same priority for " + \
+ "%s:%s for %s" % (entry.tag, entry.get("name"),
+ metadata.hostname)
+ self.logger.error(msg)
self.logger.error([item.name for item in matching])
self.logger.error("Priority was %s" % max(prio))
- raise PluginExecutionError
+ raise PluginExecutionError(msg)
index = prio.index(max(prio))
for rname in list(matching[index].cache[1][entry.tag].keys()):
if self._matches(entry, metadata, [rname]):
data = matching[index].cache[1][entry.tag][rname]
break
+ else:
+ # Fall back on __getitem__. Required if override used
+ data = matching[index].cache[1][entry.tag][entry.get('name')]
if '__text__' in data:
entry.text = data['__text__']
if '__children__' in data:
@@ -896,18 +998,16 @@ class SpecificityError(Exception):
pass
-class Specificity:
-
- def __init__(self, all=False, group=False, hostname=False, prio=0, delta=False):
+class Specificity(CmpMixin):
+ def __init__(self, all=False, group=False, hostname=False, prio=0,
+ delta=False):
+ CmpMixin.__init__(self)
self.hostname = hostname
self.all = all
self.group = group
self.prio = prio
self.delta = delta
- def __lt__(self, other):
- return self.__cmp__(other) < 0
-
def matches(self, metadata):
return self.all or \
self.hostname == metadata.hostname or \
@@ -916,26 +1016,36 @@ class Specificity:
def __cmp__(self, other):
"""Sort most to least specific."""
if self.all:
- return 1
- if self.group:
+ if other.all:
+ return 0
+ else:
+ return 1
+ elif other.all:
+ return -1
+ elif self.group:
if other.hostname:
return 1
if other.group and other.prio > self.prio:
return 1
if other.group and other.prio == self.prio:
return 0
+ elif other.group:
+ return -1
+ elif self.hostname and other.hostname:
+ return 0
return -1
- def more_specific(self, other):
- """Test if self is more specific than other."""
+ def __str__(self):
+ rv = [self.__class__.__name__, ': ']
if self.all:
- True
+ rv.append("all")
elif self.group:
- if other.hostname:
- return True
- elif other.group and other.prio > self.prio:
- return True
- return False
+ rv.append("Group %s, priority %s" % (self.group, self.prio))
+ elif self.hostname:
+ rv.append("Host %s" % self.hostname)
+ if self.delta:
+ rv.append(", delta=%s" % self.delta)
+ return "".join(rv)
class SpecificData(object):
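The rewritten __cmp__ (exposed as rich comparisons via CmpMixin) sorts Specificity objects most specific first: host-specific entries, then group-specific entries by descending priority, then all-host entries. A rough standalone equivalent of that ordering as a sort key, on hypothetical (all, group, hostname, prio) tuples:

    def sort_key(spec):
        is_all, group, hostname, prio = spec
        if hostname:
            return (0, 0)         # host-specific sorts first
        if group:
            return (1, -prio)     # then groups, highest priority first
        return (2, 0)             # all-host entries sort last

    specs = [(True, None, None, 0),                    # motd
             (False, "webserver", None, 10),           # motd.G10_webserver
             (False, "debian", None, 50),              # motd.G50_debian
             (False, None, "www1.example.com", 0)]     # motd.H_www1.example.com
    print(sorted(specs, key=sort_key)[0])
    # -> (False, None, 'www1.example.com', 0): the host-specific entry wins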
@@ -957,6 +1067,7 @@ class SpecificData(object):
class EntrySet(Debuggable):
"""Entry sets deal with the host- and group-specific entries."""
ignore = re.compile("^(\.#.*|.*~|\\..*\\.(sw[px])|.*\\.genshi_include)$")
+    basename_is_regex = False
def __init__(self, basename, path, entry_type, encoding):
Debuggable.__init__(self, name=basename)
@@ -966,14 +1077,15 @@ class EntrySet(Debuggable):
self.metadata = default_file_metadata.copy()
self.infoxml = None
self.encoding = encoding
- pattern = '(.*/)?%s(\.((H_(?P<hostname>\S+))|' % basename
+
+ if self.basename_is_regex:
+ base_pat = basename
+ else:
+ base_pat = re.escape(basename)
+ pattern = '(.*/)?%s(\.((H_(?P<hostname>\S+))|' % base_pat
pattern += '(G(?P<prio>\d+)_(?P<group>\S+))))?$'
self.specific = re.compile(pattern)
- def debug_log(self, message, flag=None):
- if (flag is None and self.debug_flag) or flag:
- logger.error(message)
-
def sort_by_specific(self, one, other):
return cmp(one.specific, other.specific)
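EntrySet now escapes the literal base name (unless basename_is_regex is set) before building the specificity pattern. A sketch of how the resulting regex classifies file names; the base name, host, and groups below are invented:

    import re

    basename = "motd"
    pattern = (r'(.*/)?%s(\.((H_(?P<hostname>\S+))|'
               r'(G(?P<prio>\d+)_(?P<group>\S+))))?$' % re.escape(basename))
    specific = re.compile(pattern)

    for fname in ["motd", "motd.H_www1.example.com", "motd.G50_debian", "motd.other"]:
        match = specific.match(fname)
        print(fname, "->", match.groupdict() if match else "no specificity match")
    # motd                    -> all groups None (the all-hosts file)
    # motd.H_www1.example.com -> hostname='www1.example.com'
    # motd.G50_debian         -> prio='50', group='debian'
    # motd.other              -> no specificity match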
@@ -987,20 +1099,13 @@ class EntrySet(Debuggable):
if matching is None:
matching = self.get_matching(metadata)
- hspec = [ent for ent in matching if ent.specific.hostname]
- if hspec:
- return hspec[0]
-
- gspec = [ent for ent in matching if ent.specific.group]
- if gspec:
- gspec.sort(self.group_sortfunc)
- return gspec[-1]
-
- aspec = [ent for ent in matching if ent.specific.all]
- if aspec:
- return aspec[0]
-
- raise PluginExecutionError
+ if matching:
+ matching.sort(key=operator.attrgetter("specific"))
+ return matching[0]
+ else:
+ raise PluginExecutionError("No matching entries available for %s "
+ "for %s" % (self.path,
+ metadata.hostname))
def handle_event(self, event):
"""Handle FAM events for the TemplateSet."""
@@ -1074,7 +1179,7 @@ class EntrySet(Debuggable):
fpath = os.path.join(self.path, event.filename)
if event.filename == 'info.xml':
if not self.infoxml:
- self.infoxml = InfoXML(fpath, True)
+ self.infoxml = InfoXML(fpath)
self.infoxml.HandleEvent(event)
elif event.filename in [':info', 'info']:
for line in open(fpath).readlines():
@@ -1089,8 +1194,7 @@ class EntrySet(Debuggable):
if value:
self.metadata[key] = value
if len(self.metadata['perms']) == 3:
- self.metadata['perms'] = "0%s" % \
- (self.metadata['perms'])
+ self.metadata['perms'] = "0%s" % self.metadata['perms']
def reset_metadata(self, event):
"""Reset metadata to defaults if info or info.xml removed."""
@@ -1099,26 +1203,12 @@ class EntrySet(Debuggable):
elif event.filename in [':info', 'info']:
self.metadata = default_file_metadata.copy()
- def group_sortfunc(self, x, y):
- """sort groups by their priority"""
- return cmp(x.specific.prio, y.specific.prio)
-
def bind_info_to_entry(self, entry, metadata):
- # first set defaults from global metadata/:info
- for key in self.metadata:
- entry.set(key, self.metadata[key])
- if self.infoxml:
- mdata = {}
- self.infoxml.pnode.Match(metadata, mdata, entry=entry)
- if 'Info' not in mdata:
- logger.error("Failed to set metadata for file %s" % \
- (entry.get('name')))
- raise PluginExecutionError
- [entry.attrib.__setitem__(key, value) \
- for (key, value) in list(mdata['Info'][None].items())]
+ bind_info(entry, metadata, infoxml=self.infoxml, default=self.metadata)
def bind_entry(self, entry, metadata):
- """Return the appropriate interpreted template from the set of available templates."""
+ """Return the appropriate interpreted template from the set of
+ available templates."""
self.bind_info_to_entry(entry, metadata)
return self.best_matching(metadata).bind_entry(entry, metadata)
@@ -1130,13 +1220,14 @@ class GroupSpool(Plugin, Generator):
filename_pattern = ""
es_child_cls = object
es_cls = EntrySet
+ entry_type = 'Path'
def __init__(self, core, datastore):
Plugin.__init__(self, core, datastore)
Generator.__init__(self)
if self.data[-1] == '/':
self.data = self.data[:-1]
- self.Entries['Path'] = {}
+ self.Entries[self.entry_type] = {}
self.entries = {}
self.handles = {}
self.AddDirectoryMonitor('')
@@ -1145,29 +1236,38 @@ class GroupSpool(Plugin, Generator):
def add_entry(self, event):
epath = self.event_path(event)
ident = self.event_id(event)
- if posixpath.isdir(epath):
+ if os.path.isdir(epath):
self.AddDirectoryMonitor(epath[len(self.data):])
- if ident not in self.entries and posixpath.isfile(epath):
- dirpath = "".join([self.data, ident])
+ if ident not in self.entries and os.path.isfile(epath):
+ dirpath = self.data + ident
self.entries[ident] = self.es_cls(self.filename_pattern,
dirpath,
self.es_child_cls,
self.encoding)
- self.Entries['Path'][ident] = self.entries[ident].bind_entry
- if not posixpath.isdir(epath):
+ self.Entries[self.entry_type][ident] = \
+ self.entries[ident].bind_entry
+ if not os.path.isdir(epath):
# do not pass through directory events
self.entries[ident].handle_event(event)
def event_path(self, event):
- return "".join([self.data, self.handles[event.requestID],
- event.filename])
+ return os.path.join(self.data,
+ self.handles[event.requestID].lstrip("/"),
+ event.filename)
def event_id(self, event):
epath = self.event_path(event)
- if posixpath.isdir(epath):
- return self.handles[event.requestID] + event.filename
+ if os.path.isdir(epath):
+ return os.path.join(self.handles[event.requestID].lstrip("/"),
+ event.filename)
else:
- return self.handles[event.requestID][:-1]
+ return self.handles[event.requestID].rstrip("/")
+
+ def toggle_debug(self):
+ for entry in self.entries.values():
+ if hasattr(entry, "toggle_debug"):
+ entry.toggle_debug()
+ return Plugin.toggle_debug(self)
def HandleEvent(self, event):
"""Unified FAM event handler for GroupSpool."""
@@ -1178,7 +1278,7 @@ class GroupSpool(Plugin, Generator):
if action in ['exists', 'created']:
self.add_entry(event)
- if action == 'changed':
+ elif action == 'changed':
if ident in self.entries:
self.entries[ident].handle_event(event)
else:
@@ -1193,7 +1293,7 @@ class GroupSpool(Plugin, Generator):
if fbase in self.entries:
# a directory was deleted
del self.entries[fbase]
- del self.Entries['Path'][fbase]
+ del self.Entries[self.entry_type][fbase]
elif ident in self.entries:
self.entries[ident].handle_event(event)
elif ident not in self.entries:
@@ -1206,8 +1306,8 @@ class GroupSpool(Plugin, Generator):
relative += '/'
name = self.data + relative
if relative not in list(self.handles.values()):
- if not posixpath.isdir(name):
- print("Failed to open directory %s" % (name))
+ if not os.path.isdir(name):
+ self.logger.error("Failed to open directory %s" % name)
return
reqid = self.core.fam.AddMonitor(name, self)
self.handles[reqid] = relative
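Among the larger changes above, StructFile gains _include_element() and XMLMatch(): Group and Client wrappers are kept only when they match the client metadata (or when negate="true" and they do not), and their children are hoisted up one level. An illustrative, self-contained sketch of that matching rule on invented data (this is not the Bcfg2 API itself):

    import lxml.etree
    from collections import namedtuple

    Metadata = namedtuple("Metadata", ["hostname", "groups"])
    metadata = Metadata("www1.example.com", set(["webserver", "debian"]))

    xdata = lxml.etree.XML("""
    <Bundle name="web">
      <Package name="openssh-server"/>
      <Group name="webserver">
        <Package name="apache2"/>
        <Group name="rhel" negate="true">
          <Service name="apache2"/>
        </Group>
      </Group>
      <Client name="db1.example.com">
        <Package name="postgresql"/>
      </Client>
    </Bundle>""")

    def include(el):
        negate = el.get("negate", "false").lower() == "true"
        if el.tag == "Group":
            return negate == (el.get("name") not in metadata.groups)
        elif el.tag == "Client":
            return negate == (el.get("name") != metadata.hostname)
        return True

    def xml_match(el):
        if include(el):
            if el.tag in ("Group", "Client"):
                for child in list(el):
                    el.getparent().append(child)   # hoist child up a level
                    xml_match(child)
                el.getparent().remove(el)
            else:
                for child in list(el):
                    xml_match(child)
        else:
            el.getparent().remove(el)

    for child in list(xdata):
        xml_match(child)
    print(["%s:%s" % (el.tag, el.get("name")) for el in xdata])
    # -> ['Package:openssh-server', 'Package:apache2', 'Service:apache2']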
diff --git a/src/lib/Bcfg2/Server/Plugins/BB.py b/src/lib/Bcfg2/Server/Plugins/BB.py
deleted file mode 100644
index c015ec47c..000000000
--- a/src/lib/Bcfg2/Server/Plugins/BB.py
+++ /dev/null
@@ -1,83 +0,0 @@
-import lxml.etree
-import Bcfg2.Server.Plugin
-import glob
-import os
-import socket
-
-#manage boot symlinks
- #add statistics check to do build->boot mods
-
-#map profiles: first array is not empty we replace the -p with a determined profile.
-logger = Bcfg2.Server.Plugin.logger
-
-class BBfile(Bcfg2.Server.Plugin.XMLFileBacked):
- """Class for bb files."""
- def Index(self):
- """Build data into an xml object."""
-
- try:
- self.data = lxml.etree.XML(self.data)
- except lxml.etree.XMLSyntaxError:
- Bcfg2.Server.Plugin.logger.error("Failed to parse %s" % self.name)
- return
- self.tftppath = self.data.get('tftp', '/tftpboot')
- self.macs = {}
- self.users = {}
- self.actions = {}
- self.bootlinks = []
-
- for node in self.data.findall('Node'):
- iface = node.find('Interface')
- if iface != None:
- mac = "01-%s" % (iface.get('mac'.replace(':','-').lower()))
- self.actions[node.get('name')] = node.get('action')
- self.bootlinks.append((mac, node.get('action')))
- try:
- ip = socket.gethostbyname(node.get('name'))
- except:
- logger.error("failed host resolution for %s" % node.get('name'))
-
- self.macs[node.get('name')] = (iface.get('mac'), ip)
- else:
- logger.error("%s" % lxml.etree.tostring(node))
- self.users[node.get('name')] = node.get('user',"").split(':')
-
- def enforce_bootlinks(self):
- for mac, target in self.bootlinks:
- path = self.tftppath + '/' + mac
- if not os.path.islink(path):
- logger.error("Boot file %s not a link" % path)
- if target != os.readlink(path):
- try:
- os.unlink(path)
- os.symlink(target, path)
- except:
- logger.error("Failed to modify link %s" % path)
-
-class BBDirectoryBacked(Bcfg2.Server.Plugin.DirectoryBacked):
- __child__ = BBfile
-
-
-class BB(Bcfg2.Server.Plugin.Plugin,
- Bcfg2.Server.Plugin.Connector):
- """The BB plugin maps users to machines and metadata to machines."""
- name = 'BB'
- deprecated = True
-
- def __init__(self, core, datastore):
- Bcfg2.Server.Plugin.Plugin.__init__(self, core, datastore)
- Bcfg2.Server.Plugin.Connector.__init__(self)
- self.store = BBDirectoryBacked(self.data, core.fam)
-
- def get_additional_data(self, metadata):
-
- users = {}
- for user in self.store.entries['bb.xml'].users.get(metadata.hostname.split(".")[0], []):
- pubkeys = []
- for fname in glob.glob('/home/%s/.ssh/*.pub'%user):
- pubkeys.append(open(fname).read())
-
- users[user] = pubkeys
-
- return dict([('users', users),
- ('macs', self.store.entries['bb.xml'].macs)])
diff --git a/src/lib/Bcfg2/Server/Plugins/Base.py b/src/lib/Bcfg2/Server/Plugins/Base.py
index 389ca7a95..2020f7795 100644
--- a/src/lib/Bcfg2/Server/Plugins/Base.py
+++ b/src/lib/Bcfg2/Server/Plugins/Base.py
@@ -3,10 +3,7 @@
import copy
import lxml.etree
import sys
-# py3k compatibility
-if sys.hexversion >= 0x03000000:
- from functools import reduce
-
+from Bcfg2.Bcfg2Py3k import reduce
import Bcfg2.Server.Plugin
diff --git a/src/lib/Bcfg2/Server/Plugins/Bundler.py b/src/lib/Bcfg2/Server/Plugins/Bundler.py
index ccb99481e..65914c371 100644
--- a/src/lib/Bcfg2/Server/Plugins/Bundler.py
+++ b/src/lib/Bcfg2/Server/Plugins/Bundler.py
@@ -1,25 +1,25 @@
"""This provides bundle clauses with translation functionality."""
import copy
+import logging
import lxml.etree
import os
import os.path
import re
import sys
-
+import Bcfg2.Server
import Bcfg2.Server.Plugin
+import Bcfg2.Server.Lint
try:
- import genshi.template
import genshi.template.base
- import Bcfg2.Server.Plugins.SGenshi
+ import Bcfg2.Server.Plugins.TGenshi
have_genshi = True
except:
have_genshi = False
class BundleFile(Bcfg2.Server.Plugin.StructFile):
-
def get_xml_value(self, metadata):
bundlename = os.path.splitext(os.path.basename(self.name))[0]
bundle = lxml.etree.Element('Bundle', name=bundlename)
@@ -27,6 +27,58 @@ class BundleFile(Bcfg2.Server.Plugin.StructFile):
return bundle
+if have_genshi:
+ class BundleTemplateFile(Bcfg2.Server.Plugins.TGenshi.TemplateFile,
+ Bcfg2.Server.Plugin.StructFile):
+ def __init__(self, name, specific, encoding):
+ Bcfg2.Server.Plugins.TGenshi.TemplateFile.__init__(self, name,
+ specific,
+ encoding)
+ Bcfg2.Server.Plugin.StructFile.__init__(self, name)
+ self.logger = logging.getLogger(name)
+
+ def get_xml_value(self, metadata):
+ if not hasattr(self, 'template'):
+ self.logger.error("No parsed template information for %s" %
+ self.name)
+ raise Bcfg2.Server.Plugin.PluginExecutionError
+ try:
+ stream = self.template.generate(metadata=metadata).filter(
+ Bcfg2.Server.Plugins.TGenshi.removecomment)
+ data = lxml.etree.XML(stream.render('xml',
+ strip_whitespace=False),
+ parser=Bcfg2.Server.XMLParser)
+ bundlename = os.path.splitext(os.path.basename(self.name))[0]
+ bundle = lxml.etree.Element('Bundle', name=bundlename)
+ for item in self.Match(metadata, data):
+ bundle.append(copy.deepcopy(item))
+ return bundle
+ except LookupError:
+ lerror = sys.exc_info()[1]
+ self.logger.error('Genshi lookup error: %s' % lerror)
+ except genshi.template.TemplateError:
+ terror = sys.exc_info()[1]
+ self.logger.error('Genshi template error: %s' % terror)
+ raise
+ except genshi.input.ParseError:
+ perror = sys.exc_info()[1]
+ self.logger.error('Genshi parse error: %s' % perror)
+ raise
+
+ def Match(self, metadata, xdata):
+ """Return matching fragments of parsed template."""
+ rv = []
+ for child in xdata.getchildren():
+ rv.extend(self._match(child, metadata))
+ self.logger.debug("File %s got %d match(es)" % (self.name, len(rv)))
+ return rv
+
+
+ class SGenshiTemplateFile(BundleTemplateFile):
+ # provided for backwards compat
+ pass
+
+
class Bundler(Bcfg2.Server.Plugin.Plugin,
Bcfg2.Server.Plugin.Structure,
Bcfg2.Server.Plugin.XMLDirectoryBacked):
@@ -50,25 +102,20 @@ class Bundler(Bcfg2.Server.Plugin.Plugin,
self.logger.error("Failed to load Bundle repository")
raise Bcfg2.Server.Plugin.PluginInitError
- def template_dispatch(self, name):
- bundle = lxml.etree.parse(name)
+ def template_dispatch(self, name, _):
+ bundle = lxml.etree.parse(name,
+ parser=Bcfg2.Server.XMLParser)
nsmap = bundle.getroot().nsmap
- if name.endswith('.xml'):
- if have_genshi and \
- (nsmap == {'py': 'http://genshi.edgewall.org/'}):
- # allow for genshi bundles with .xml extensions
- spec = Bcfg2.Server.Plugin.Specificity()
- return Bcfg2.Server.Plugins.SGenshi.SGenshiTemplateFile(name,
- spec,
- self.encoding)
- else:
- return BundleFile(name)
- elif name.endswith('.genshi'):
+ if (name.endswith('.genshi') or
+ ('py' in nsmap and
+ nsmap['py'] == 'http://genshi.edgewall.org/')):
if have_genshi:
spec = Bcfg2.Server.Plugin.Specificity()
- return Bcfg2.Server.Plugins.SGenshi.SGenshiTemplateFile(name,
- spec,
- self.encoding)
+ return BundleTemplateFile(name, spec, self.encoding)
+ else:
+ raise Bcfg2.Server.Plugin.PluginExecutionError("Genshi not available: %s" % name)
+ else:
+ return BundleFile(name, self.fam)
def BuildStructures(self, metadata):
"""Build all structures for client (metadata)."""
@@ -97,3 +144,54 @@ class Bundler(Bcfg2.Server.Plugin.Plugin,
self.logger.error("Bundler: Unexpected bundler error for %s" %
bundlename, exc_info=1)
return bundleset
+
+
+class BundlerLint(Bcfg2.Server.Lint.ServerPlugin):
+ """ Perform various bundle checks """
+ def Run(self):
+ """ run plugin """
+ self.missing_bundles()
+ for bundle in self.core.plugins['Bundler'].entries.values():
+ if (self.HandlesFile(bundle.name) and
+ (not have_genshi or
+ not isinstance(bundle, BundleTemplateFile))):
+ self.bundle_names(bundle)
+
+ @classmethod
+ def Errors(cls):
+ return {"bundle-not-found":"error",
+ "inconsistent-bundle-name":"warning"}
+
+ def missing_bundles(self):
+ """ find bundles listed in Metadata but not implemented in Bundler """
+ if self.files is None:
+ # when given a list of files on stdin, this check is
+ # useless, so skip it
+ groupdata = self.metadata.groups_xml.xdata
+ ref_bundles = set([b.get("name")
+ for b in groupdata.findall("//Bundle")])
+
+ allbundles = self.core.plugins['Bundler'].entries.keys()
+ for bundle in ref_bundles:
+ xmlbundle = "%s.xml" % bundle
+ genshibundle = "%s.genshi" % bundle
+ if (xmlbundle not in allbundles and
+ genshibundle not in allbundles):
+ self.LintError("bundle-not-found",
+ "Bundle %s referenced, but does not exist" %
+ bundle)
+
+ def bundle_names(self, bundle):
+ """ verify bundle name attribute matches filename """
+ try:
+ xdata = lxml.etree.XML(bundle.data)
+ except AttributeError:
+ # genshi template
+ xdata = lxml.etree.parse(bundle.template.filepath).getroot()
+
+ fname = bundle.name.split('Bundler/')[1].split('.')[0]
+ bname = xdata.get('name')
+ if fname != bname:
+ self.LintError("inconsistent-bundle-name",
+ "Inconsistent bundle name: filename is %s, "
+ "bundle name is %s" % (fname, bname))
diff --git a/src/lib/Bcfg2/Server/Plugins/Cfg/CfgCheetahGenerator.py b/src/lib/Bcfg2/Server/Plugins/Cfg/CfgCheetahGenerator.py
index 3edd1d8cb..f02461673 100644
--- a/src/lib/Bcfg2/Server/Plugins/Cfg/CfgCheetahGenerator.py
+++ b/src/lib/Bcfg2/Server/Plugins/Cfg/CfgCheetahGenerator.py
@@ -6,8 +6,7 @@ from Bcfg2.Server.Plugins.Cfg import CfgGenerator
logger = logging.getLogger(__name__)
try:
- import Cheetah.Template
- import Cheetah.Parser
+ from Cheetah.Template import Template
have_cheetah = True
except ImportError:
have_cheetah = False
@@ -25,9 +24,9 @@ class CfgCheetahGenerator(CfgGenerator):
raise Bcfg2.Server.Plugin.PluginExecutionError(msg)
def get_data(self, entry, metadata):
- template = Cheetah.Template.Template(self.data,
- compilerSettings=self.settings)
+ template = Template(self.data.decode(self.encoding),
+ compilerSettings=self.settings)
template.metadata = metadata
template.path = entry.get('realname', entry.get('name'))
- template.source_path = self.path
+ template.source_path = self.name
return template.respond()
diff --git a/src/lib/Bcfg2/Server/Plugins/Cfg/CfgEncryptedCheetahGenerator.py b/src/lib/Bcfg2/Server/Plugins/Cfg/CfgEncryptedCheetahGenerator.py
new file mode 100644
index 000000000..a75329d2a
--- /dev/null
+++ b/src/lib/Bcfg2/Server/Plugins/Cfg/CfgEncryptedCheetahGenerator.py
@@ -0,0 +1,14 @@
+import logging
+from Bcfg2.Server.Plugins.Cfg.CfgCheetahGenerator import CfgCheetahGenerator
+from Bcfg2.Server.Plugins.Cfg.CfgEncryptedGenerator import CfgEncryptedGenerator
+
+logger = logging.getLogger(__name__)
+
+class CfgEncryptedCheetahGenerator(CfgCheetahGenerator, CfgEncryptedGenerator):
+ __extensions__ = ['cheetah.crypt', 'crypt.cheetah']
+
+ def handle_event(self, event):
+ CfgEncryptedGenerator.handle_event(self, event)
+
+ def get_data(self, entry, metadata):
+ return CfgCheetahGenerator.get_data(self, entry, metadata)
diff --git a/src/lib/Bcfg2/Server/Plugins/Cfg/CfgEncryptedGenerator.py b/src/lib/Bcfg2/Server/Plugins/Cfg/CfgEncryptedGenerator.py
new file mode 100644
index 000000000..2c926fae7
--- /dev/null
+++ b/src/lib/Bcfg2/Server/Plugins/Cfg/CfgEncryptedGenerator.py
@@ -0,0 +1,63 @@
+import logging
+import Bcfg2.Server.Plugin
+from Bcfg2.Server.Plugins.Cfg import CfgGenerator, SETUP
+try:
+ from Bcfg2.Encryption import ssl_decrypt, EVPError
+ have_crypto = True
+except ImportError:
+ have_crypto = False
+
+logger = logging.getLogger(__name__)
+
+def passphrases():
+ section = "encryption"
+ if SETUP.cfp.has_section(section):
+ return dict([(o, SETUP.cfp.get(section, o))
+ for o in SETUP.cfp.options(section)])
+ else:
+ return dict()
+
+def decrypt(crypted):
+ if not have_crypto:
+        msg = "Cfg: M2Crypto is not available"
+ logger.error(msg)
+ raise Bcfg2.Server.Plugin.PluginExecutionError(msg)
+ for passwd in passphrases().values():
+ try:
+ return ssl_decrypt(crypted, passwd)
+ except EVPError:
+ pass
+ raise EVPError("Failed to decrypt")
+
+class CfgEncryptedGenerator(CfgGenerator):
+ __extensions__ = ["crypt"]
+
+ def __init__(self, fname, spec, encoding):
+ CfgGenerator.__init__(self, fname, spec, encoding)
+ if not have_crypto:
+            msg = "Cfg: M2Crypto is not available: %s" % fname
+ logger.error(msg)
+ raise Bcfg2.Server.Plugin.PluginExecutionError(msg)
+
+ def handle_event(self, event):
+ if event.code2str() == 'deleted':
+ return
+ try:
+ crypted = open(self.name).read()
+ except UnicodeDecodeError:
+ crypted = open(self.name, mode='rb').read()
+ except:
+ logger.error("Failed to read %s" % self.name)
+ return
+ # todo: let the user specify a passphrase by name
+ try:
+ self.data = decrypt(crypted)
+ except EVPError:
+ msg = "Failed to decrypt %s" % self.name
+ logger.error(msg)
+ raise Bcfg2.Server.Plugin.PluginExecutionError(msg)
+
+ def get_data(self, entry, metadata):
+ if self.data is None:
+ raise Bcfg2.Server.Plugin.PluginExecutionError("Failed to decrypt %s" % self.name)
+ return CfgGenerator.get_data(self, entry, metadata)
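decrypt() above simply tries every passphrase configured in the [encryption] section until one works. A toy, self-contained sketch of that fallback pattern; fake_decrypt() and the passphrases are invented stand-ins for Bcfg2.Encryption.ssl_decrypt() and bcfg2.conf data:

    def fake_decrypt(data, passwd):
        # stand-in for ssl_decrypt(): succeeds only with the right key
        if not data.startswith(passwd + ":"):
            raise ValueError("bad passphrase")
        return data[len(passwd) + 1:]

    def decrypt_with_any(crypted, passphrases):
        for passwd in passphrases.values():
            try:
                return fake_decrypt(crypted, passwd)
            except ValueError:
                continue
        raise ValueError("Failed to decrypt with any configured passphrase")

    print(decrypt_with_any("s3cret:hello world",
                           {"prod": "wrong", "dev": "s3cret"}))   # -> hello world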
diff --git a/src/lib/Bcfg2/Server/Plugins/Cfg/CfgEncryptedGenshiGenerator.py b/src/lib/Bcfg2/Server/Plugins/Cfg/CfgEncryptedGenshiGenerator.py
new file mode 100644
index 000000000..6605cca7c
--- /dev/null
+++ b/src/lib/Bcfg2/Server/Plugins/Cfg/CfgEncryptedGenshiGenerator.py
@@ -0,0 +1,26 @@
+import logging
+from Bcfg2.Bcfg2Py3k import StringIO
+from Bcfg2.Server.Plugins.Cfg.CfgGenshiGenerator import CfgGenshiGenerator
+from Bcfg2.Server.Plugins.Cfg.CfgEncryptedGenerator import decrypt, \
+ CfgEncryptedGenerator
+
+logger = logging.getLogger(__name__)
+
+try:
+ from genshi.template import TemplateLoader
+except ImportError:
+ # CfgGenshiGenerator will raise errors if genshi doesn't exist
+ TemplateLoader = object
+
+
+class EncryptedTemplateLoader(TemplateLoader):
+ def _instantiate(self, cls, fileobj, filepath, filename, encoding=None):
+ plaintext = StringIO(decrypt(fileobj.read()))
+ return TemplateLoader._instantiate(self, cls, plaintext, filepath,
+ filename, encoding=encoding)
+
+
+class CfgEncryptedGenshiGenerator(CfgGenshiGenerator):
+ __extensions__ = ['genshi.crypt', 'crypt.genshi']
+ __loader_cls__ = EncryptedTemplateLoader
+
diff --git a/src/lib/Bcfg2/Server/Plugins/Cfg/CfgGenshiGenerator.py b/src/lib/Bcfg2/Server/Plugins/Cfg/CfgGenshiGenerator.py
index 2c0a076d7..277a26f97 100644
--- a/src/lib/Bcfg2/Server/Plugins/Cfg/CfgGenshiGenerator.py
+++ b/src/lib/Bcfg2/Server/Plugins/Cfg/CfgGenshiGenerator.py
@@ -1,5 +1,7 @@
+import re
import sys
import logging
+import traceback
import Bcfg2.Server.Plugin
from Bcfg2.Server.Plugins.Cfg import CfgGenerator
@@ -8,8 +10,10 @@ logger = logging.getLogger(__name__)
try:
import genshi.core
from genshi.template import TemplateLoader, NewTextTemplate
+ from genshi.template.eval import UndefinedError
have_genshi = True
except ImportError:
+ TemplateLoader = None
have_genshi = False
# snipped from TGenshi
@@ -23,14 +27,17 @@ def removecomment(stream):
class CfgGenshiGenerator(CfgGenerator):
__extensions__ = ['genshi']
+ __loader_cls__ = TemplateLoader
+ pyerror_re = re.compile('<\w+ u?[\'"](.*?)\s*\.\.\.[\'"]>')
def __init__(self, fname, spec, encoding):
CfgGenerator.__init__(self, fname, spec, encoding)
- self.loader = TemplateLoader()
if not have_genshi:
- msg = "Cfg: Genshi is not available: %s" % entry.get("name")
+ msg = "Cfg: Genshi is not available: %s" % fname
logger.error(msg)
raise Bcfg2.Server.Plugin.PluginExecutionError(msg)
+ self.loader = self.__loader_cls__()
+ self.template = None
@classmethod
def ignore(cls, event, basename=None):
@@ -44,10 +51,63 @@ class CfgGenshiGenerator(CfgGenerator):
metadata=metadata,
path=self.name).filter(removecomment)
try:
- return stream.render('text', encoding=self.encoding,
- strip_whitespace=False)
- except TypeError:
- return stream.render('text', encoding=self.encoding)
+ try:
+ return stream.render('text', encoding=self.encoding,
+ strip_whitespace=False)
+ except TypeError:
+ return stream.render('text', encoding=self.encoding)
+ except UndefinedError:
+            # a failure in a genshi expression _other_ than a {% python %} block
+ err = sys.exc_info()[1]
+ stack = traceback.extract_tb(sys.exc_info()[2])
+ for quad in stack:
+ if quad[0] == self.name:
+ logger.error("Cfg: Error rendering %s at %s: %s" %
+ (fname, quad[2], err))
+ break
+ raise
+ except:
+            # a failure in a {% python %} block -- the snippet in
+ # the traceback is just the beginning of the block.
+ err = sys.exc_info()[1]
+ stack = traceback.extract_tb(sys.exc_info()[2])
+ (filename, lineno, func, text) = stack[-1]
+ # this is horrible, and I deeply apologize to whoever gets
+ # to maintain this after I go to the Great Beer Garden in
+ # the Sky. genshi is incredibly opaque about what's being
+ # executed, so the only way I can find to determine which
+ # {% python %} block is being executed -- if there are
+ # multiples -- is to iterate through them and match the
+ # snippet of the first line that's in the traceback with
+ # the first non-empty line of the block.
+ execs = [contents
+ for etype, contents, loc in self.template.stream
+ if etype == self.template.EXEC]
+ contents = None
+ if len(execs) == 1:
+ contents = execs[0]
+ elif len(execs) > 1:
+                match = self.pyerror_re.match(func)
+                if match:
+                    firstline = match.group(1)
+ for pyblock in execs:
+ if pyblock.startswith(firstline):
+ contents = pyblock
+ break
+ # else, no EXEC blocks -- WTF?
+ if contents:
+ # we now have the bogus block, but we need to get the
+ # offending line. To get there, we do (line number
+ # given in the exception) - (firstlineno from the
+ # internal genshi code object of the snippet) + 1 =
+ # (line number of the line with an error within the
+ # block, with all multiple line breaks elided to a
+ # single line break)
+ real_lineno = lineno - contents.code.co_firstlineno
+ src = re.sub(r'\n\n+', '\n', contents.source).splitlines()
+ logger.error("Cfg: Error rendering %s at %s: %s" %
+ (fname, src[real_lineno], err))
+ raise
def handle_event(self, event):
if event.code2str() == 'deleted':
diff --git a/src/lib/Bcfg2/Server/Plugins/Cfg/CfgInfoXML.py b/src/lib/Bcfg2/Server/Plugins/Cfg/CfgInfoXML.py
index 8e962efb4..956ebfe17 100644
--- a/src/lib/Bcfg2/Server/Plugins/Cfg/CfgInfoXML.py
+++ b/src/lib/Bcfg2/Server/Plugins/Cfg/CfgInfoXML.py
@@ -9,7 +9,7 @@ class CfgInfoXML(CfgInfo):
def __init__(self, path):
CfgInfo.__init__(self, path)
- self.infoxml = Bcfg2.Server.Plugin.InfoXML(path, noprio=True)
+ self.infoxml = Bcfg2.Server.Plugin.InfoXML(path)
def bind_info_to_entry(self, entry, metadata):
mdata = dict()
@@ -22,3 +22,9 @@ class CfgInfoXML(CfgInfo):
def handle_event(self, event):
self.infoxml.HandleEvent()
+
+ def _set_info(self, entry, info):
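+        """ set attributes from info on entry, appending any child
+        elements stored under the special __children__ key """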
+ CfgInfo._set_info(self, entry, info)
+ if '__children__' in info:
+ for child in info['__children__']:
+ entry.append(child)
diff --git a/src/lib/Bcfg2/Server/Plugins/Cfg/CfgLegacyInfo.py b/src/lib/Bcfg2/Server/Plugins/Cfg/CfgLegacyInfo.py
index 54c17c6c5..85c13c1ac 100644
--- a/src/lib/Bcfg2/Server/Plugins/Cfg/CfgLegacyInfo.py
+++ b/src/lib/Bcfg2/Server/Plugins/Cfg/CfgLegacyInfo.py
@@ -7,6 +7,10 @@ logger = logging.getLogger(__name__)
class CfgLegacyInfo(CfgInfo):
__basenames__ = ['info', ':info']
+ def __init__(self, path):
+ CfgInfo.__init__(self, path)
+ self.path = path
+
def bind_info_to_entry(self, entry, metadata):
self._set_info(entry, self.metadata)
diff --git a/src/lib/Bcfg2/Server/Plugins/Cfg/__init__.py b/src/lib/Bcfg2/Server/Plugins/Cfg/__init__.py
index 6c7585993..fe993ab54 100644
--- a/src/lib/Bcfg2/Server/Plugins/Cfg/__init__.py
+++ b/src/lib/Bcfg2/Server/Plugins/Cfg/__init__.py
@@ -6,11 +6,11 @@ import sys
import stat
import pkgutil
import logging
-import binascii
import lxml.etree
import Bcfg2.Options
import Bcfg2.Server.Plugin
-from Bcfg2.Bcfg2Py3k import u_str
+from Bcfg2.Bcfg2Py3k import u_str, unicode, b64encode
+import Bcfg2.Server.Lint
logger = logging.getLogger(__name__)
@@ -113,7 +113,8 @@ class CfgInfo(CfgBaseFileMatcher):
def _set_info(self, entry, info):
for key, value in list(info.items()):
- entry.attrib.__setitem__(key, value)
+ if not key.startswith("__"):
+ entry.attrib.__setitem__(key, value)
class CfgVerifier(CfgBaseFileMatcher):
@@ -152,7 +153,19 @@ class CfgEntrySet(Bcfg2.Server.Plugin.EntrySet):
global PROCESSORS
if PROCESSORS is None:
PROCESSORS = []
- for submodule in pkgutil.walk_packages(path=__path__):
+ if hasattr(pkgutil, 'walk_packages'):
+ submodules = pkgutil.walk_packages(path=__path__)
+ else:
+ #python 2.4
+ import glob
+ submodules = []
+ for path in __path__:
+ for submodule in glob.glob(os.path.join(path, "*.py")):
+ mod = '.'.join(submodule.split("/")[-1].split('.')[:-1])
+ if mod != '__init__':
+ submodules.append((None, mod, True))
+
+ for submodule in submodules:
module = getattr(__import__("%s.%s" %
(__name__,
submodule[1])).Server.Plugins.Cfg,
@@ -185,6 +198,7 @@ class CfgEntrySet(Bcfg2.Server.Plugin.EntrySet):
return
elif action == 'changed':
self.entries[event.filename].handle_event(event)
+ return
elif action == 'deleted':
del self.entries[event.filename]
return
@@ -192,6 +206,11 @@ class CfgEntrySet(Bcfg2.Server.Plugin.EntrySet):
logger.error("Could not process event %s for %s; ignoring" %
(action, event.filename))
+ def get_matching(self, metadata):
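+        """ return a list of all CfgGenerators that are pertinent
+        to the given client metadata """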
+ return [item for item in list(self.entries.values())
+ if (isinstance(item, CfgGenerator) and
+ item.specific.matches(metadata))]
+
def entry_init(self, event, proc):
if proc.__specific__:
Bcfg2.Server.Plugin.EntrySet.entry_init(
@@ -270,10 +289,11 @@ class CfgEntrySet(Bcfg2.Server.Plugin.EntrySet):
raise Bcfg2.Server.Plugin.PluginExecutionError(msg)
if entry.get('encoding') == 'base64':
- data = binascii.b2a_base64(data)
+ data = b64encode(data)
else:
try:
- data = u_str(data, self.encoding)
+ if not isinstance(data, unicode):
+ data = u_str(data, self.encoding)
except UnicodeDecodeError:
msg = "Failed to decode %s: %s" % (entry.get('name'),
sys.exc_info()[1])
@@ -287,6 +307,10 @@ class CfgEntrySet(Bcfg2.Server.Plugin.EntrySet):
logger.error("You need to specify base64 encoding for %s." %
entry.get('name'))
raise Bcfg2.Server.Plugin.PluginExecutionError(msg)
+ except TypeError:
+ # data is already unicode; newer versions of Cheetah
+ # seem to return unicode
+ pass
if data:
entry.text = data
@@ -298,7 +322,7 @@ class CfgEntrySet(Bcfg2.Server.Plugin.EntrySet):
generators = [ent for ent in list(self.entries.values())
if (isinstance(ent, CfgGenerator) and
ent.specific.matches(metadata))]
- if not matching:
+ if not generators:
msg = "No base file found for %s" % entry.get('name')
logger.error(msg)
raise Bcfg2.Server.Plugin.PluginExecutionError(msg)
@@ -347,26 +371,26 @@ class CfgEntrySet(Bcfg2.Server.Plugin.EntrySet):
if attr in new_entry]
if badattr:
# check for info files and inform user of their removal
- if os.path.exists(self.path + "/:info"):
- logger.info("Removing :info file and replacing with "
- "info.xml")
- os.remove(self.path + "/:info")
- if os.path.exists(self.path + "/info"):
- logger.info("Removing info file and replacing with "
- "info.xml")
- os.remove(self.path + "/info")
+ for ifile in ['info', ':info']:
+ info = os.path.join(self.path, ifile)
+ if os.path.exists(info):
+ logger.info("Removing %s and replacing with info.xml" %
+ info)
+ os.remove(info)
metadata_updates = {}
metadata_updates.update(self.metadata)
for attr in badattr:
metadata_updates[attr] = new_entry.get(attr)
infoxml = lxml.etree.Element('FileInfo')
infotag = lxml.etree.SubElement(infoxml, 'Info')
- [infotag.attrib.__setitem__(attr, metadata_updates[attr]) \
- for attr in metadata_updates]
+ [infotag.attrib.__setitem__(attr, metadata_updates[attr])
+ for attr in metadata_updates]
ofile = open(self.path + "/info.xml", "w")
- ofile.write(lxml.etree.tostring(infoxml, pretty_print=True))
+ ofile.write(lxml.etree.tostring(infoxml, xml_declaration=False,
+ pretty_print=True).decode('UTF-8'))
ofile.close()
- self.debug_log("Wrote file %s" % (self.path + "/info.xml"),
+ self.debug_log("Wrote file %s" % os.path.join(self.path,
+ "info.xml"),
flag=log)
@@ -385,9 +409,22 @@ class Cfg(Bcfg2.Server.Plugin.GroupSpool,
SETUP = core.setup
if 'validate' not in SETUP:
- SETUP['validate'] = Bcfg2.Options.CFG_VALIDATION
+ SETUP.add_option('validate', Bcfg2.Options.CFG_VALIDATION)
SETUP.reparse()
+ def has_generator(self, entry, metadata):
+ """ return True if the given entry can be generated for the
+ given metadata; False otherwise """
+ if entry.get('name') not in self.entries:
+ return False
+
+ for ent in self.entries[entry.get('name')].entries.values():
+ if ent.__specific__ and not ent.specific.matches(metadata):
+ continue
+ if isinstance(ent, CfgGenerator):
+ return True
+ return False
+
def AcceptChoices(self, entry, metadata):
return self.entries[entry.get('name')].list_accept_choices(entry,
metadata)
@@ -396,3 +433,26 @@ class Cfg(Bcfg2.Server.Plugin.GroupSpool,
return self.entries[new_entry.get('name')].write_update(specific,
new_entry,
log)
+
+class CfgLint(Bcfg2.Server.Lint.ServerPlugin):
+ """ warn about usage of .cat and .diff files """
+
+ def Run(self):
+ for basename, entry in list(self.core.plugins['Cfg'].entries.items()):
+ self.check_entry(basename, entry)
+
+
+ @classmethod
+ def Errors(cls):
+        return {"cat-file-used": "warning",
+                "diff-file-used": "warning"}
+
+    def check_entry(self, basename, entry):
+        for fname, processor in entry.entries.items():
+            if self.HandlesFile(fname) and isinstance(processor, CfgFilter):
+                extension = fname.split(".")[-1]
+                self.LintError("%s-file-used" % extension,
+                               "%s file used on %s: %s" %
+                               (extension, basename, fname))
diff --git a/src/lib/Bcfg2/Server/Plugins/DBStats.py b/src/lib/Bcfg2/Server/Plugins/DBStats.py
index 999e078b9..63c590f0f 100644
--- a/src/lib/Bcfg2/Server/Plugins/DBStats.py
+++ b/src/lib/Bcfg2/Server/Plugins/DBStats.py
@@ -1,8 +1,8 @@
-import binascii
import difflib
import logging
import lxml.etree
import platform
+import sys
import time
try:
@@ -11,61 +11,47 @@ except ImportError:
pass
import Bcfg2.Server.Plugin
-import Bcfg2.Server.Reports.importscript
+from Bcfg2.Server.Reports.importscript import load_stat
from Bcfg2.Server.Reports.reports.models import Client
-import Bcfg2.Server.Reports.settings
-from Bcfg2.Server.Reports.updatefix import update_database
+from Bcfg2.Bcfg2Py3k import b64decode
+
# for debugging output only
logger = logging.getLogger('Bcfg2.Plugins.DBStats')
-class DBStats(Bcfg2.Server.Plugin.Plugin,
- Bcfg2.Server.Plugin.ThreadedStatistics,
+
+class DBStats(Bcfg2.Server.Plugin.ThreadedStatistics,
Bcfg2.Server.Plugin.PullSource):
name = 'DBStats'
def __init__(self, core, datastore):
- Bcfg2.Server.Plugin.Plugin.__init__(self, core, datastore)
Bcfg2.Server.Plugin.ThreadedStatistics.__init__(self, core, datastore)
Bcfg2.Server.Plugin.PullSource.__init__(self)
self.cpath = "%s/Metadata/clients.xml" % datastore
self.core = core
- logger.debug("Searching for new models to add to the statistics database")
- try:
- update_database()
- except Exception:
- inst = sys.exc_info()[1]
- logger.debug(str(inst))
- logger.debug(str(type(inst)))
+ if not self.core.database_available:
+ raise Bcfg2.Server.Plugin.PluginInitError
def handle_statistic(self, metadata, data):
newstats = data.find("Statistics")
newstats.set('time', time.asctime(time.localtime()))
- # ick
- data = lxml.etree.tostring(newstats)
- ndx = lxml.etree.XML(data)
- e = lxml.etree.Element('Node', name=metadata.hostname)
- e.append(ndx)
- container = lxml.etree.Element("ConfigStatistics")
- container.append(e)
- # FIXME need to build a metadata interface to expose a list of clients
start = time.time()
for i in [1, 2, 3]:
try:
- Bcfg2.Server.Reports.importscript.load_stats(self.core.metadata.clients_xml.xdata,
- container,
- self.core.encoding,
- 0,
- logger,
- True,
- platform.node())
+ load_stat(metadata,
+ newstats,
+ self.core.encoding,
+ 0,
+ logger,
+ True,
+ platform.node())
logger.info("Imported data for %s in %s seconds" \
% (metadata.hostname, time.time() - start))
return
except MultipleObjectsReturned:
e = sys.exc_info()[1]
- logger.error("DBStats: MultipleObjectsReturned while handling %s: %s" % \
- (metadata.hostname, e))
+ logger.error("DBStats: MultipleObjectsReturned while "
+ "handling %s: %s" % (metadata.hostname, e))
logger.error("DBStats: Data is inconsistent")
break
except:
@@ -100,10 +86,10 @@ class DBStats(Bcfg2.Server.Plugin.Plugin,
if entry.reason.is_sensitive:
raise Bcfg2.Server.Plugin.PluginExecutionError
elif len(entry.reason.unpruned) != 0:
- ret.append('\n'.join(entry.reason.unpruned))
+ ret.append('\n'.join(entry.reason.unpruned))
elif entry.reason.current_diff != '':
if entry.reason.is_binary:
- ret.append(binascii.a2b_base64(entry.reason.current_diff))
+ ret.append(b64decode(entry.reason.current_diff))
else:
ret.append('\n'.join(difflib.restore(\
entry.reason.current_diff.split('\n'), 1)))
diff --git a/src/lib/Bcfg2/Server/Plugins/Decisions.py b/src/lib/Bcfg2/Server/Plugins/Decisions.py
index b432474f2..90d9ecbe3 100644
--- a/src/lib/Bcfg2/Server/Plugins/Decisions.py
+++ b/src/lib/Bcfg2/Server/Plugins/Decisions.py
@@ -14,6 +14,8 @@ class DecisionFile(Bcfg2.Server.Plugin.SpecificData):
return [(x.get('type'), x.get('name')) for x in self.contents.xpath('.//Decision')]
class DecisionSet(Bcfg2.Server.Plugin.EntrySet):
+ basename_is_regex = True
+
def __init__(self, path, fam, encoding):
"""Container for decision specification files.
@@ -23,8 +25,7 @@ class DecisionSet(Bcfg2.Server.Plugin.EntrySet):
- `encoding`: XML character encoding
"""
- pattern = '(white|black)list'
- Bcfg2.Server.Plugin.EntrySet.__init__(self, pattern, path, \
+ Bcfg2.Server.Plugin.EntrySet.__init__(self, '(white|black)list', path,
DecisionFile, encoding)
try:
fam.AddMonitor(path, self)
diff --git a/src/lib/Bcfg2/Server/Plugins/Deps.py b/src/lib/Bcfg2/Server/Plugins/Deps.py
index 9b848baae..d3a1ee871 100644
--- a/src/lib/Bcfg2/Server/Plugins/Deps.py
+++ b/src/lib/Bcfg2/Server/Plugins/Deps.py
@@ -7,27 +7,10 @@ import Bcfg2.Server.Plugin
class DNode(Bcfg2.Server.Plugin.INode):
"""DNode provides supports for single predicate types for dependencies."""
- raw = {'Group': "lambda m, e:'%(name)s' in m.groups and predicate(m, e)"}
- containers = ['Group']
-
- def __init__(self, data, idict, parent=None):
- self.data = data
- self.contents = {}
- if parent == None:
- self.predicate = lambda x, d: True
- else:
- predicate = parent.predicate
- if data.tag in list(self.raw.keys()):
- self.predicate = eval(self.raw[data.tag] %
- {'name': data.get('name')},
- {'predicate': predicate})
- else:
- raise Exception
- mytype = self.__class__
- self.children = []
+ def _load_children(self, data, idict):
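+        """ load the child predicate nodes for this node """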
for item in data.getchildren():
if item.tag in self.containers:
- self.children.append(mytype(item, idict, self))
+ self.children.append(self.__class__(item, idict, self))
else:
data = [(child.tag, child.get('name'))
for child in item.getchildren()]
diff --git a/src/lib/Bcfg2/Server/Plugins/FileProbes.py b/src/lib/Bcfg2/Server/Plugins/FileProbes.py
index 5beec7be0..632d586e8 100644
--- a/src/lib/Bcfg2/Server/Plugins/FileProbes.py
+++ b/src/lib/Bcfg2/Server/Plugins/FileProbes.py
@@ -7,23 +7,24 @@ the client """
import os
import sys
import errno
-import binascii
import lxml.etree
import Bcfg2.Options
+import Bcfg2.Server
import Bcfg2.Server.Plugin
+from Bcfg2.Bcfg2Py3k import b64decode
probecode = """#!/usr/bin/env python
import os
import pwd
import grp
-import binascii
import lxml.etree
+from Bcfg2.Bcfg2Py3k import b64encode
path = "%s"
if not os.path.exists(path):
- print "%%s does not exist" %% path
+ print("%%s does not exist" %% path)
raise SystemExit(1)
stat = os.stat(path)
@@ -32,18 +33,10 @@ data = lxml.etree.Element("ProbedFileData",
owner=pwd.getpwuid(stat[4])[0],
group=grp.getgrgid(stat[5])[0],
perms=oct(stat[0] & 07777))
-data.text = binascii.b2a_base64(open(path).read())
-print lxml.etree.tostring(data)
+data.text = b64encode(open(path).read())
+print(lxml.etree.tostring(data, xml_declaration=False).decode('UTF-8'))
"""
-class FileProbesConfig(Bcfg2.Server.Plugin.SingleXMLFileBacked,
- Bcfg2.Server.Plugin.StructFile):
- """ Config file handler for FileProbes """
- def __init__(self, filename, fam):
- Bcfg2.Server.Plugin.SingleXMLFileBacked.__init__(self, filename, fam)
- Bcfg2.Server.Plugin.StructFile.__init__(self, filename)
-
-
class FileProbes(Bcfg2.Server.Plugin.Plugin,
Bcfg2.Server.Plugin.Probing):
""" This module allows you to probe a client for a file, which is then
@@ -53,14 +46,15 @@ class FileProbes(Bcfg2.Server.Plugin.Plugin,
the client """
name = 'FileProbes'
- experimental = True
__author__ = 'chris.a.st.pierre@gmail.com'
def __init__(self, core, datastore):
Bcfg2.Server.Plugin.Plugin.__init__(self, core, datastore)
Bcfg2.Server.Plugin.Probing.__init__(self)
- self.config = FileProbesConfig(os.path.join(self.data, 'config.xml'),
- core.fam)
+ self.config = Bcfg2.Server.Plugin.StructFile(os.path.join(self.data,
+ 'config.xml'),
+ fam=core.fam,
+ should_monitor=True)
self.entries = dict()
self.probes = dict()
@@ -75,13 +69,9 @@ class FileProbes(Bcfg2.Server.Plugin.Plugin,
# do not probe for files that are already in Cfg and
# for which update is false; we can't possibly do
# anything with the data we get from such a probe
- try:
- if (entry.get('update', 'false').lower() == "false" and
- cfg.entries[path].get_pertinent_entries(entry,
- metadata)):
- continue
- except (KeyError, Bcfg2.Server.Plugin.PluginExecutionError):
- pass
+ if (entry.get('update', 'false').lower() == "false" and
+ not cfg.has_generator(entry, metadata)):
+ continue
self.entries[metadata.hostname][path] = entry
probe = lxml.etree.Element('probe', name=path,
source=self.name,
@@ -102,7 +92,9 @@ class FileProbes(Bcfg2.Server.Plugin.Plugin,
(data.get('name'), metadata.hostname))
else:
try:
- self.write_data(lxml.etree.XML(data.text), metadata)
+ self.write_data(lxml.etree.XML(data.text,
+ parser=Bcfg2.Server.XMLParser),
+ metadata)
except lxml.etree.XMLSyntaxError:
# if we didn't get XML back from the probe, assume
# it's an error message
@@ -111,23 +103,24 @@ class FileProbes(Bcfg2.Server.Plugin.Plugin,
def write_data(self, data, metadata):
"""Write the probed file data to the bcfg2 specification."""
filename = data.get("name")
- contents = binascii.a2b_base64(data.text)
+ contents = b64decode(data.text)
entry = self.entries[metadata.hostname][filename]
cfg = self.core.plugins['Cfg']
specific = "%s.H_%s" % (os.path.basename(filename), metadata.hostname)
# we can't use os.path.join() for this because specific
# already has a leading /, which confuses os.path.join()
- fileloc = "%s%s" % (cfg.data, os.path.join(filename, specific))
+ fileloc = os.path.join(cfg.data,
+ os.path.join(filename, specific).lstrip("/"))
create = False
try:
cfg.entries[filename].bind_entry(entry, metadata)
- except Bcfg2.Server.Plugin.PluginExecutionError:
+ except (KeyError, Bcfg2.Server.Plugin.PluginExecutionError):
create = True
# get current entry data
if entry.text and entry.get("encoding") == "base64":
- entrydata = binascii.a2b_base64(entry.text)
+ entrydata = b64decode(entry.text)
else:
entrydata = entry.text
@@ -135,7 +128,7 @@ class FileProbes(Bcfg2.Server.Plugin.Plugin,
self.logger.info("Writing new probed file %s" % fileloc)
self.write_file(fileloc, contents)
self.verify_file(filename, contents, metadata)
- infoxml = os.path.join("%s%s" % (cfg.data, filename), "info.xml")
+ infoxml = os.path.join(cfg.data, filename.lstrip("/"), "info.xml")
self.write_infoxml(infoxml, entry, data)
elif entrydata == contents:
self.debug_log("Existing %s contents match probed contents" %
@@ -194,7 +187,7 @@ class FileProbes(Bcfg2.Server.Plugin.Plugin,
# get current entry data
if entry.get("encoding") == "base64":
- entrydata = binascii.a2b_base64(entry.text)
+ entrydata = b64decode(entry.text)
else:
entrydata = entry.text
if entrydata == contents:
@@ -206,8 +199,7 @@ class FileProbes(Bcfg2.Server.Plugin.Plugin,
if os.path.exists(infoxml):
return
- self.logger.info("Writing info.xml at %s for %s" %
- (infoxml, data.get("name")))
+ self.logger.info("Writing %s for %s" % (infoxml, data.get("name")))
info = \
lxml.etree.Element("Info",
owner=data.get("owner",
@@ -222,8 +214,10 @@ class FileProbes(Bcfg2.Server.Plugin.Plugin,
root = lxml.etree.Element("FileInfo")
root.append(info)
try:
- open(infoxml, "w").write(lxml.etree.tostring(root,
- pretty_print=True))
+ open(infoxml,
+ "w").write(lxml.etree.tostring(root,
+ xml_declaration=False,
+ pretty_print=True).decode('UTF-8'))
except IOError:
err = sys.exc_info()[1]
self.logger.error("Could not write %s: %s" % (fileloc, err))
diff --git a/src/lib/Bcfg2/Server/Plugins/GroupPatterns.py b/src/lib/Bcfg2/Server/Plugins/GroupPatterns.py
index 58b4d4afb..837f47279 100644
--- a/src/lib/Bcfg2/Server/Plugins/GroupPatterns.py
+++ b/src/lib/Bcfg2/Server/Plugins/GroupPatterns.py
@@ -1,6 +1,9 @@
+import os
import re
+import sys
import logging
import lxml.etree
+import Bcfg2.Server.Lint
import Bcfg2.Server.Plugin
class PackedDigitRange(object):
@@ -58,7 +61,7 @@ class PatternMap(object):
return self.groups
def process_re(self, name):
- match = self.re.match(name)
+ match = self.re.search(name)
if not match:
return None
ret = list()
@@ -70,17 +73,22 @@ class PatternMap(object):
ret.append(newg)
return ret
+ def __str__(self):
+ return "%s: %s %s" % (self.__class__.__name__, self.pattern,
+ self.groups)
+
-class PatternFile(Bcfg2.Server.Plugin.SingleXMLFileBacked):
+class PatternFile(Bcfg2.Server.Plugin.XMLFileBacked):
__identifier__ = None
- def __init__(self, filename, fam):
- Bcfg2.Server.Plugin.SingleXMLFileBacked.__init__(self, filename, fam)
+ def __init__(self, filename, fam=None):
+ Bcfg2.Server.Plugin.XMLFileBacked.__init__(self, filename, fam=fam,
+ should_monitor=True)
self.patterns = []
self.logger = logging.getLogger(self.__class__.__name__)
def Index(self):
- Bcfg2.Server.Plugin.SingleXMLFileBacked.Index(self)
+ Bcfg2.Server.Plugin.XMLFileBacked.Index(self)
self.patterns = []
for entry in self.xdata.xpath('//GroupPattern'):
try:
@@ -112,13 +120,42 @@ class PatternFile(Bcfg2.Server.Plugin.SingleXMLFileBacked):
class GroupPatterns(Bcfg2.Server.Plugin.Plugin,
Bcfg2.Server.Plugin.Connector):
name = "GroupPatterns"
- experimental = True
def __init__(self, core, datastore):
Bcfg2.Server.Plugin.Plugin.__init__(self, core, datastore)
Bcfg2.Server.Plugin.Connector.__init__(self)
- self.config = PatternFile(self.data + '/config.xml',
- core.fam)
+ self.config = PatternFile(os.path.join(self.data, 'config.xml'),
+ fam=core.fam)
def get_additional_groups(self, metadata):
return self.config.process_patterns(metadata.hostname)
+
+
+class GroupPatternsLint(Bcfg2.Server.Lint.ServerPlugin):
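+    """ check that the NamePattern and NameRange entries in the
+    GroupPatterns config can be initialized without errors """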
+ def Run(self):
+ """ run plugin """
+ cfg = self.core.plugins['GroupPatterns'].config
+ for entry in cfg.xdata.xpath('//GroupPattern'):
+ groups = [g.text for g in entry.findall('Group')]
+ self.check(entry, groups, ptype='NamePattern')
+ self.check(entry, groups, ptype='NameRange')
+
+ @classmethod
+ def Errors(cls):
+ return {"pattern-fails-to-initialize":"error"}
+
+ def check(self, entry, groups, ptype="NamePattern"):
+ if ptype == "NamePattern":
+ pmap = lambda p: PatternMap(p, None, groups)
+ else:
+ pmap = lambda p: PatternMap(None, p, groups)
+
+ for el in entry.findall(ptype):
+ pat = el.text
+ try:
+ pmap(pat)
+ except:
+ err = sys.exc_info()[1]
+ self.LintError("pattern-fails-to-initialize",
+ "Failed to initialize %s %s for %s: %s" %
+ (ptype, pat, entry.get('pattern'), err))
diff --git a/src/lib/Bcfg2/Server/Plugins/Hostbase.py b/src/lib/Bcfg2/Server/Plugins/Hostbase.py
index e9c1c1cff..69b019160 100644
--- a/src/lib/Bcfg2/Server/Plugins/Hostbase.py
+++ b/src/lib/Bcfg2/Server/Plugins/Hostbase.py
@@ -3,19 +3,24 @@ This file provides the Hostbase plugin.
It manages dns/dhcp/nis host information
"""
+from lxml.etree import Element, SubElement
import os
+import re
+from time import strftime
os.environ['DJANGO_SETTINGS_MODULE'] = 'Bcfg2.Server.Hostbase.settings'
-from lxml.etree import Element, SubElement
import Bcfg2.Server.Plugin
from Bcfg2.Server.Plugin import PluginExecutionError, PluginInitError
-from time import strftime
-from sets import Set
from django.template import Context, loader
from django.db import connection
-import re
# Compatibility imports
from Bcfg2.Bcfg2Py3k import StringIO
+try:
+ set
+except NameError:
+ # deprecated since python 2.6
+ from sets import Set as set
+
class Hostbase(Bcfg2.Server.Plugin.Plugin,
Bcfg2.Server.Plugin.Structure,
@@ -383,7 +388,7 @@ class Hostbase(Bcfg2.Server.Plugin.Plugin,
""")
hostbase = cursor.fetchall()
domains = [host[0].split(".", 1)[1] for host in hostbase]
- domains_set = Set(domains)
+ domains_set = set(domains)
domain_data = [(domain, domains.count(domain)) for domain in domains_set]
domain_data.sort()
@@ -393,7 +398,7 @@ class Hostbase(Bcfg2.Server.Plugin.Plugin,
ips = cursor.fetchall()
three_octets = [ip[0].rstrip('0123456789').rstrip('.') \
for ip in ips]
- three_octets_set = Set(three_octets)
+ three_octets_set = set(three_octets)
three_octets_data = [(octet, three_octets.count(octet)) \
for octet in three_octets_set]
three_octets_data.sort()
@@ -412,7 +417,7 @@ class Hostbase(Bcfg2.Server.Plugin.Plugin,
append_data.append((three_octet, tuple(tosort)))
two_octets = [ip.rstrip('0123456789').rstrip('.') for ip in three_octets]
- two_octets_set = Set(two_octets)
+ two_octets_set = set(two_octets)
two_octets_data = [(octet, two_octets.count(octet))
for octet in two_octets_set]
two_octets_data.sort()
@@ -446,7 +451,7 @@ class Hostbase(Bcfg2.Server.Plugin.Plugin,
else:
if appenddata[0] == ip[0]:
simple = False
- ips.append((appenddata[2], appenddata[0], Set(namelist),
+ ips.append((appenddata[2], appenddata[0], set(namelist),
cnamelist, simple, appenddata[1]))
appenddata = ip
simple = True
@@ -455,7 +460,7 @@ class Hostbase(Bcfg2.Server.Plugin.Plugin,
if ip[4]:
cnamelist.append(ip[4].split('.', 1)[0])
simple = False
- ips.append((appenddata[2], appenddata[0], Set(namelist),
+ ips.append((appenddata[2], appenddata[0], set(namelist),
cnamelist, simple, appenddata[1]))
context = Context({
'subnet': subnet[0],
diff --git a/src/lib/Bcfg2/Server/Plugins/Ldap.py b/src/lib/Bcfg2/Server/Plugins/Ldap.py
index 29abf5b13..9883085db 100644
--- a/src/lib/Bcfg2/Server/Plugins/Ldap.py
+++ b/src/lib/Bcfg2/Server/Plugins/Ldap.py
@@ -139,7 +139,7 @@ class LdapConnection(object):
result = self.conn.search_s(
query.base,
SCOPE_MAP[query.scope],
- query.filter,
+ query.filter.replace("\\", "\\\\"),
query.attrs,
)
break
diff --git a/src/lib/Bcfg2/Server/Plugins/Metadata.py b/src/lib/Bcfg2/Server/Plugins/Metadata.py
index 970126b80..a5fa78088 100644
--- a/src/lib/Bcfg2/Server/Plugins/Metadata.py
+++ b/src/lib/Bcfg2/Server/Plugins/Metadata.py
@@ -2,17 +2,39 @@
This file stores persistent metadata for the Bcfg2 Configuration Repository.
"""
-import copy
-import fcntl
-import lxml.etree
+import re
import os
-import os.path
-import socket
import sys
import time
-
-import Bcfg2.Server.FileMonitor
+import copy
+import fcntl
+import socket
+import lxml.etree
+import Bcfg2.Server
+import Bcfg2.Server.Lint
import Bcfg2.Server.Plugin
+import Bcfg2.Server.FileMonitor
+from Bcfg2.Bcfg2Py3k import MutableMapping
+from Bcfg2.version import Bcfg2VersionInfo
+
+try:
+ from django.db import models
+ has_django = True
+except ImportError:
+ has_django = False
+
+
+try:
+ all
+except NameError:
+ # some of the crazy lexical closure stuff below works with all()
+ # but not with this loop inline. i really don't understand
+ # lexical closures some^Wmost days
+ def all(iterable):
+ for element in iterable:
+ if not element:
+ return False
+ return True
def locked(fd):
@@ -24,28 +46,68 @@ def locked(fd):
return False
-class MetadataConsistencyError(Exception):
- """This error gets raised when metadata is internally inconsistent."""
- pass
+if has_django:
+ class MetadataClientModel(models.Model,
+ Bcfg2.Server.Plugin.PluginDatabaseModel):
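+        """ django model for storing client records (hostname and
+        version) in the database """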
+ hostname = models.CharField(max_length=255, primary_key=True)
+ version = models.CharField(max_length=31, null=True)
+ class ClientVersions(MutableMapping):
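+        """ dict-like view of client version strings stored in the
+        MetadataClientModel database table """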
+ def __getitem__(self, key):
+ try:
+ return MetadataClientModel.objects.get(hostname=key).version
+ except MetadataClientModel.DoesNotExist:
+ raise KeyError(key)
+
+ def __setitem__(self, key, value):
+ client = MetadataClientModel.objects.get_or_create(hostname=key)[0]
+ client.version = value
+ client.save()
+
+ def __delitem__(self, key):
+ # UserDict didn't require __delitem__, but MutableMapping
+ # does. we don't want deleting a client version record to
+ # delete the client, so we just set the version to None,
+ # which is kinda like deleting it, but not really.
+ try:
+ client = MetadataClientModel.objects.get(hostname=key)
+ except MetadataClientModel.DoesNotExist:
+ raise KeyError(key)
+ client.version = None
+ client.save()
-class MetadataRuntimeError(Exception):
- """This error is raised when the metadata engine
- is called prior to reading enough data.
- """
- pass
+ def __len__(self):
+ return MetadataClientModel.objects.count()
+ def __iter__(self):
+ for client in MetadataClientModel.objects.all():
+ yield client.hostname
-class XMLMetadataConfig(Bcfg2.Server.Plugin.SingleXMLFileBacked):
+ def keys(self):
+ return [c.hostname for c in MetadataClientModel.objects.all()]
+
+ def __contains__(self, key):
+ try:
+ client = MetadataClientModel.objects.get(hostname=key)
+ return True
+ except MetadataClientModel.DoesNotExist:
+ return False
+
+
+class XMLMetadataConfig(Bcfg2.Server.Plugin.XMLFileBacked):
"""Handles xml config files and all XInclude statements"""
def __init__(self, metadata, watch_clients, basefile):
- Bcfg2.Server.Plugin.SingleXMLFileBacked.__init__(self,
- os.path.join(metadata.data,
- basefile),
- metadata.core.fam)
+ # we tell XMLFileBacked _not_ to add a monitor for this file,
+ # because the main Metadata plugin has already added one.
+ # then we immediately set should_monitor to the proper value,
+ # so that XInclude'd files get properly watched
+ fpath = os.path.join(metadata.data, basefile)
+ Bcfg2.Server.Plugin.XMLFileBacked.__init__(self, fpath,
+ fam=metadata.core.fam,
+ should_monitor=False)
+ self.should_monitor = watch_clients
self.metadata = metadata
self.basefile = basefile
- self.should_monitor = watch_clients
self.data = None
self.basedata = None
self.basedir = metadata.data
@@ -56,25 +118,22 @@ class XMLMetadataConfig(Bcfg2.Server.Plugin.SingleXMLFileBacked):
@property
def xdata(self):
if not self.data:
- raise MetadataRuntimeError
+ raise Bcfg2.Server.Plugin.MetadataRuntimeError("%s has no data" %
+ self.basefile)
return self.data
@property
def base_xdata(self):
if not self.basedata:
- raise MetadataRuntimeError
+ raise Bcfg2.Server.Plugin.MetadataRuntimeError("%s has no data" %
+ self.basefile)
return self.basedata
- def add_monitor(self, fpath, fname):
- """Add a fam monitor for an included file"""
- if self.should_monitor:
- self.metadata.core.fam.AddMonitor(fpath, self.metadata)
- self.extras.append(fname)
-
def load_xml(self):
"""Load changes from XML"""
try:
- xdata = lxml.etree.parse(os.path.join(self.basedir, self.basefile))
+ xdata = lxml.etree.parse(os.path.join(self.basedir, self.basefile),
+ parser=Bcfg2.Server.XMLParser)
except lxml.etree.XMLSyntaxError:
self.logger.error('Failed to parse %s' % self.basefile)
return
@@ -100,12 +159,14 @@ class XMLMetadataConfig(Bcfg2.Server.Plugin.SingleXMLFileBacked):
try:
datafile = open(tmpfile, 'w')
except IOError:
- e = sys.exc_info()[1]
- self.logger.error("Failed to write %s: %s" % (tmpfile, e))
- raise MetadataRuntimeError
+ msg = "Failed to write %s: %s" % (tmpfile, sys.exc_info()[1])
+ self.logger.error(msg)
+ raise Bcfg2.Server.Plugin.MetadataRuntimeError(msg)
# prep data
dataroot = xmltree.getroot()
- newcontents = lxml.etree.tostring(dataroot, pretty_print=True)
+ newcontents = lxml.etree.tostring(dataroot, xml_declaration=False,
+ pretty_print=True).decode('UTF-8')
+
fd = datafile.fileno()
while locked(fd) == True:
@@ -114,21 +175,24 @@ class XMLMetadataConfig(Bcfg2.Server.Plugin.SingleXMLFileBacked):
datafile.write(newcontents)
except:
fcntl.lockf(fd, fcntl.LOCK_UN)
- self.logger.error("Metadata: Failed to write new xml data to %s" %
- tmpfile, exc_info=1)
+ msg = "Metadata: Failed to write new xml data to %s: %s" % \
+ (tmpfile, sys.exc_info()[1])
+ self.logger.error(msg, exc_info=1)
os.unlink(tmpfile)
- raise MetadataRuntimeError
+ raise Bcfg2.Server.Plugin.MetadataRuntimeError(msg)
datafile.close()
-
# check if clients.xml is a symlink
if os.path.islink(fname):
fname = os.readlink(fname)
try:
os.rename(tmpfile, fname)
+
except:
- self.logger.error("Metadata: Failed to rename %s" % tmpfile)
- raise MetadataRuntimeError
+ msg = "Metadata: Failed to rename %s: %s" % (tmpfile,
+ sys.exc_info()[1])
+ self.logger.error(msg)
+ raise Bcfg2.Server.Plugin.MetadataRuntimeError(msg)
def find_xml_for_xpath(self, xpath):
"""Find and load xml file containing the xpath query"""
@@ -144,22 +208,26 @@ class XMLMetadataConfig(Bcfg2.Server.Plugin.SingleXMLFileBacked):
"""Try to find the data in included files"""
for included in self.extras:
try:
- xdata = lxml.etree.parse(os.path.join(self.basedir,
- included))
+ xdata = lxml.etree.parse(included,
+ parser=Bcfg2.Server.XMLParser)
cli = xdata.xpath(xpath)
if len(cli) > 0:
- return {'filename': os.path.join(self.basedir,
- included),
+ return {'filename': included,
'xmltree': xdata,
'xquery': cli}
except lxml.etree.XMLSyntaxError:
- self.logger.error('Failed to parse %s' % (included))
+ self.logger.error('Failed to parse %s' % included)
return {}
+ def add_monitor(self, fpath):
+ self.extras.append(fpath)
+ if self.fam and self.should_monitor:
+ self.fam.AddMonitor(fpath, self.metadata)
+
def HandleEvent(self, event):
"""Handle fam events"""
- filename = event.filename.split('/')[-1]
- if filename in self.extras:
+ filename = os.path.basename(event.filename)
+ if event.filename in self.extras:
if event.code2str() == 'exists':
return False
elif filename != self.basefile:
@@ -172,8 +240,8 @@ class XMLMetadataConfig(Bcfg2.Server.Plugin.SingleXMLFileBacked):
class ClientMetadata(object):
"""This object contains client metadata."""
- def __init__(self, client, profile, groups, bundles,
- aliases, addresses, categories, uuid, password, query):
+ def __init__(self, client, profile, groups, bundles, aliases, addresses,
+ categories, uuid, password, version, query):
self.hostname = client
self.profile = profile
self.bundles = bundles
@@ -184,6 +252,11 @@ class ClientMetadata(object):
self.uuid = uuid
self.password = password
self.connectors = []
+ self.version = version
+ try:
+ self.version_info = Bcfg2VersionInfo(version)
+ except:
+ self.version_info = None
self.query = query
def inGroup(self, group):
@@ -198,7 +271,8 @@ class ClientMetadata(object):
class MetadataQuery(object):
- def __init__(self, by_name, get_clients, by_groups, by_profiles, all_groups, all_groups_in_category):
+ def __init__(self, by_name, get_clients, by_groups, by_profiles,
+ all_groups, all_groups_in_category):
# resolver is set later
self.by_name = by_name
self.names_by_groups = by_groups
@@ -217,74 +291,125 @@ class MetadataQuery(object):
return [self.by_name(name) for name in self.all_clients()]
-class Metadata(Bcfg2.Server.Plugin.Plugin,
- Bcfg2.Server.Plugin.Metadata,
- Bcfg2.Server.Plugin.Statistics):
+class MetadataGroup(tuple):
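+    """ representation of a single group declared in groups.xml """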
+ def __new__(cls, name, bundles=None, category=None,
+ is_profile=False, is_public=False, is_private=False):
+ if bundles is None:
+ bundles = set()
+ return tuple.__new__(cls, (bundles, category))
+
+ def __init__(self, name, bundles=None, category=None,
+ is_profile=False, is_public=False, is_private=False):
+ if bundles is None:
+ bundles = set()
+ tuple.__init__(self)
+ self.name = name
+ self.bundles = bundles
+ self.category = category
+ self.is_profile = is_profile
+ self.is_public = is_public
+ self.is_private = is_private
+
+ def __str__(self):
+ return repr(self)
+
+ def __repr__(self):
+ return "%s %s (bundles=%s, category=%s)" % \
+ (self.__class__.__name__, self.name, self.bundles,
+ self.category)
+
+ def __hash__(self):
+ return hash(self.name)
+
+class Metadata(Bcfg2.Server.Plugin.Metadata,
+ Bcfg2.Server.Plugin.Statistics,
+ Bcfg2.Server.Plugin.DatabaseBacked):
"""This class contains data for bcfg2 server metadata."""
__author__ = 'bcfg-dev@mcs.anl.gov'
name = "Metadata"
sort_order = 500
def __init__(self, core, datastore, watch_clients=True):
- Bcfg2.Server.Plugin.Plugin.__init__(self, core, datastore)
Bcfg2.Server.Plugin.Metadata.__init__(self)
- Bcfg2.Server.Plugin.Statistics.__init__(self)
- if watch_clients:
- try:
- core.fam.AddMonitor(os.path.join(self.data, "groups.xml"), self)
- core.fam.AddMonitor(os.path.join(self.data, "clients.xml"), self)
- except:
- print("Unable to add file monitor for groups.xml or clients.xml")
- raise Bcfg2.Server.Plugin.PluginInitError
-
- self.clients_xml = XMLMetadataConfig(self, watch_clients, 'clients.xml')
- self.groups_xml = XMLMetadataConfig(self, watch_clients, 'groups.xml')
- self.states = {}
- if watch_clients:
- self.states = {"groups.xml": False,
- "clients.xml": False}
- self.addresses = {}
+ Bcfg2.Server.Plugin.Statistics.__init__(self, core, datastore)
+ Bcfg2.Server.Plugin.DatabaseBacked.__init__(self, core, datastore)
+ self.watch_clients = watch_clients
+ self.states = dict()
+ self.extra = dict()
+ self.handlers = []
+ self._handle_file("groups.xml")
+ if (self._use_db and
+ os.path.exists(os.path.join(self.data, "clients.xml"))):
+            self.logger.warning("Metadata: database enabled but clients.xml "
+                                "found, parsing in compatibility mode")
+ self._handle_file("clients.xml")
+ elif not self._use_db:
+ self._handle_file("clients.xml")
+
+ # mapping of clientname -> authtype
self.auth = dict()
- self.clients = {}
- self.aliases = {}
- self.groups = {}
- self.cgroups = {}
- self.public = []
- self.private = []
- self.profiles = []
- self.categories = {}
- self.bad_clients = {}
- self.uuid = {}
+ # list of clients required to have non-global password
self.secure = []
+ # list of floating clients
self.floating = []
+ # mapping of clientname -> password
self.passwords = {}
+ self.addresses = {}
+ self.raddresses = {}
+ # mapping of clientname -> [groups]
+ self.clientgroups = {}
+ # list of clients
+ self.clients = []
+ self.aliases = {}
+ self.raliases = {}
+ # mapping of groupname -> MetadataGroup object
+ self.groups = {}
+ # mappings of predicate -> MetadataGroup object
+ self.group_membership = dict()
+ self.negated_groups = dict()
+ # mapping of hostname -> version string
+ if self._use_db:
+ self.versions = ClientVersions()
+ else:
+ self.versions = dict()
+ self.uuid = {}
self.session_cache = {}
self.default = None
self.pdirty = False
- self.extra = {'groups.xml': [],
- 'clients.xml': []}
self.password = core.password
self.query = MetadataQuery(core.build_metadata,
- lambda: list(self.clients.keys()),
+ lambda: list(self.clients),
self.get_client_names_by_groups,
self.get_client_names_by_profiles,
self.get_all_group_names,
self.get_all_groups_in_category)
@classmethod
- def init_repo(cls, repo, groups, os_selection, clients):
- path = os.path.join(repo, cls.name)
- os.makedirs(path)
- open(os.path.join(repo, "Metadata", "groups.xml"),
- "w").write(groups % os_selection)
- open(os.path.join(repo, "Metadata", "clients.xml"),
- "w").write(clients % socket.getfqdn())
-
- def get_groups(self):
- '''return groups xml tree'''
- groups_tree = lxml.etree.parse(os.path.join(self.data, "groups.xml"))
- root = groups_tree.getroot()
- return root
+ def init_repo(cls, repo, **kwargs):
+ # must use super here; inheritance works funny with class methods
+ super(Metadata, cls).init_repo(repo)
+
+ for fname in ["clients.xml", "groups.xml"]:
+ aname = re.sub(r'[^A-z0-9_]', '_', fname)
+ if aname in kwargs:
+ open(os.path.join(repo, cls.name, fname),
+ "w").write(kwargs[aname])
+
+ def _handle_file(self, fname):
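+        """ set up monitoring (if enabled), state tracking, and an
+        XMLMetadataConfig handler for a single metadata file
+        (clients.xml or groups.xml) """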
+ if self.watch_clients:
+ try:
+ self.core.fam.AddMonitor(os.path.join(self.data, fname), self)
+ except:
+ err = sys.exc_info()[1]
+ msg = "Unable to add file monitor for %s: %s" % (fname, err)
+ self.logger.error(msg)
+ raise Bcfg2.Server.Plugin.PluginInitError(msg)
+ self.states[fname] = False
+ aname = re.sub(r'[^A-z0-9_]', '_', fname)
+ xmlcfg = XMLMetadataConfig(self, self.watch_clients, fname)
+ setattr(self, aname, xmlcfg)
+ self.handlers.append(xmlcfg.HandleEvent)
+ self.extra[fname] = []
def _search_xdata(self, tag, name, tree, alias=False):
for node in tree.findall("//%s" % tag):
@@ -312,7 +437,7 @@ class Metadata(Bcfg2.Server.Plugin.Plugin,
node = self._search_xdata(tag, name, config.xdata, alias=alias)
if node != None:
self.logger.error("%s \"%s\" already exists" % (tag, name))
- raise MetadataConsistencyError
+ raise Bcfg2.Server.Plugin.MetadataConsistencyError
element = lxml.etree.SubElement(config.base_xdata.getroot(),
tag, name=name)
if attribs:
@@ -322,70 +447,130 @@ class Metadata(Bcfg2.Server.Plugin.Plugin,
def add_group(self, group_name, attribs):
"""Add group to groups.xml."""
- return self._add_xdata(self.groups_xml, "Group", group_name,
- attribs=attribs)
+ if self._use_db:
+ msg = "Metadata does not support adding groups with use_database enabled"
+ self.logger.error(msg)
+ raise Bcfg2.Server.Plugin.PluginExecutionError(msg)
+ else:
+ return self._add_xdata(self.groups_xml, "Group", group_name,
+ attribs=attribs)
def add_bundle(self, bundle_name):
"""Add bundle to groups.xml."""
- return self._add_xdata(self.groups_xml, "Bundle", bundle_name)
+ if self._use_db:
+ msg = "Metadata does not support adding bundles with use_database enabled"
+ self.logger.error(msg)
+ raise Bcfg2.Server.Plugin.PluginExecutionError(msg)
+ else:
+ return self._add_xdata(self.groups_xml, "Bundle", bundle_name)
- def add_client(self, client_name, attribs):
+ def add_client(self, client_name, attribs=None):
"""Add client to clients.xml."""
- return self._add_xdata(self.clients_xml, "Client", client_name,
- attribs=attribs, alias=True)
+ if attribs is None:
+ attribs = dict()
+ if self._use_db:
+ client = MetadataClientModel(hostname=client_name)
+ client.save()
+ self.clients = self.list_clients()
+ return client
+ else:
+ return self._add_xdata(self.clients_xml, "Client", client_name,
+ attribs=attribs, alias=True)
def _update_xdata(self, config, tag, name, attribs, alias=False):
node = self._search_xdata(tag, name, config.xdata, alias=alias)
if node == None:
self.logger.error("%s \"%s\" does not exist" % (tag, name))
- raise MetadataConsistencyError
+ raise Bcfg2.Server.Plugin.MetadataConsistencyError
xdict = config.find_xml_for_xpath('.//%s[@name="%s"]' %
(tag, node.get('name')))
if not xdict:
self.logger.error("Unexpected error finding %s \"%s\"" %
(tag, name))
- raise MetadataConsistencyError
+ raise Bcfg2.Server.Plugin.MetadataConsistencyError
for key, val in list(attribs.items()):
xdict['xquery'][0].set(key, val)
config.write_xml(xdict['filename'], xdict['xmltree'])
def update_group(self, group_name, attribs):
"""Update a groups attributes."""
- return self._update_xdata(self.groups_xml, "Group", group_name, attribs)
+ if self._use_db:
+ msg = "Metadata does not support updating groups with use_database enabled"
+ self.logger.error(msg)
+ raise Bcfg2.Server.Plugin.PluginExecutionError(msg)
+ else:
+ return self._update_xdata(self.groups_xml, "Group", group_name,
+ attribs)
def update_client(self, client_name, attribs):
"""Update a clients attributes."""
- return self._update_xdata(self.clients_xml, "Client", client_name,
- attribs, alias=True)
+ if self._use_db:
+ msg = "Metadata does not support updating clients with use_database enabled"
+ self.logger.error(msg)
+ raise Bcfg2.Server.Plugin.PluginExecutionError(msg)
+ else:
+ return self._update_xdata(self.clients_xml, "Client", client_name,
+ attribs, alias=True)
+
+ def list_clients(self):
+ """ List all clients in client database """
+ if self._use_db:
+ return set([c.hostname for c in MetadataClientModel.objects.all()])
+ else:
+ return self.clients
def _remove_xdata(self, config, tag, name, alias=False):
node = self._search_xdata(tag, name, config.xdata)
if node == None:
self.logger.error("%s \"%s\" does not exist" % (tag, name))
- raise MetadataConsistencyError
+ raise Bcfg2.Server.Plugin.MetadataConsistencyError
xdict = config.find_xml_for_xpath('.//%s[@name="%s"]' %
(tag, node.get('name')))
if not xdict:
self.logger.error("Unexpected error finding %s \"%s\"" %
(tag, name))
- raise MetadataConsistencyError
+ raise Bcfg2.Server.Plugin.MetadataConsistencyError
xdict['xquery'][0].getparent().remove(xdict['xquery'][0])
- self.groups_xml.write_xml(xdict['filename'], xdict['xmltree'])
+ config.write_xml(xdict['filename'], xdict['xmltree'])
def remove_group(self, group_name):
"""Remove a group."""
- return self._remove_xdata(self.groups_xml, "Group", group_name)
+ if self._use_db:
+ msg = "Metadata does not support removing groups with use_database enabled"
+ self.logger.error(msg)
+ raise Bcfg2.Server.Plugin.PluginExecutionError(msg)
+ else:
+ return self._remove_xdata(self.groups_xml, "Group", group_name)
def remove_bundle(self, bundle_name):
"""Remove a bundle."""
- return self._remove_xdata(self.groups_xml, "Bundle", bundle_name)
+ if self._use_db:
+ msg = "Metadata does not support removing bundles with use_database enabled"
+ self.logger.error(msg)
+ raise Bcfg2.Server.Plugin.PluginExecutionError(msg)
+ else:
+ return self._remove_xdata(self.groups_xml, "Bundle", bundle_name)
+
+ def remove_client(self, client_name):
+        """Remove a client."""
+ if self._use_db:
+ try:
+ client = MetadataClientModel.objects.get(hostname=client_name)
+ except MetadataClientModel.DoesNotExist:
+ msg = "Client %s does not exist" % client_name
+ self.logger.warning(msg)
+ raise Bcfg2.Server.Plugin.MetadataConsistencyError(msg)
+ client.delete()
+ self.clients = self.list_clients()
+ else:
+ return self._remove_xdata(self.clients_xml, "Client", client_name)
def _handle_clients_xml_event(self, event):
xdata = self.clients_xml.xdata
- self.clients = {}
+ self.clients = []
+ self.clientgroups = {}
self.aliases = {}
self.raliases = {}
- self.bad_clients = {}
self.secure = []
self.floating = []
self.addresses = {}
@@ -406,12 +591,15 @@ class Metadata(Bcfg2.Server.Plugin.Plugin,
'cert+password')
if 'uuid' in client.attrib:
self.uuid[client.get('uuid')] = clname
- if client.get('secure', 'false') == 'true':
+ if client.get('secure', 'false').lower() == 'true':
self.secure.append(clname)
- if client.get('location', 'fixed') == 'floating':
+ if (client.get('location', 'fixed') == 'floating' or
+ client.get('floating', 'false').lower() == 'true'):
self.floating.append(clname)
if 'password' in client.attrib:
self.passwords[clname] = client.get('password')
+ if 'version' in client.attrib:
+ self.versions[clname] = client.get('version')
self.raliases[clname] = set()
for alias in client.findall('Alias'):
@@ -426,115 +614,199 @@ class Metadata(Bcfg2.Server.Plugin.Plugin,
if clname not in self.raddresses:
self.raddresses[clname] = set()
self.raddresses[clname].add(alias.get('address'))
- self.clients.update({clname: client.get('profile')})
+ self.clients.append(clname)
+ try:
+ self.clientgroups[clname].append(client.get('profile'))
+ except KeyError:
+ self.clientgroups[clname] = [client.get('profile')]
self.states['clients.xml'] = True
+ if self._use_db:
+ self.clients = self.list_clients()
def _handle_groups_xml_event(self, event):
- xdata = self.groups_xml.xdata
- self.public = []
- self.private = []
- self.profiles = []
self.groups = {}
- grouptmp = {}
- self.categories = {}
- groupseen = list()
- for group in xdata.xpath('//Groups/Group'):
- if group.get('name') not in groupseen:
- groupseen.append(group.get('name'))
+
+ # get_condition and aggregate_conditions must be separate
+ # functions in order to ensure that the scope is right for the
+ # closures they return
+ def get_condition(element):
+ negate = element.get('negate', 'false').lower() == 'true'
+ pname = element.get("name")
+ if element.tag == 'Group':
+ return lambda c, g, _: negate != (pname in g)
+ elif element.tag == 'Client':
+ return lambda c, g, _: negate != (pname == c)
+
+ def aggregate_conditions(conditions):
+ return lambda client, groups, cats: \
+ all(cond(client, groups, cats) for cond in conditions)
+
+ # first, we get a list of all of the groups declared in the
+ # file. we do this in two stages because the old way of
+ # parsing groups.xml didn't support nested groups; in the old
+ # way, only Group tags under a Groups tag counted as
+ # declarative. so we parse those first, and then parse the
+ # other Group tags if they haven't already been declared.
+ # this lets you set options on a group (e.g., public="false")
+ # at the top level and then just use the name elsewhere, which
+ # is the original behavior
+ for grp in self.groups_xml.xdata.xpath("//Groups/Group") + \
+ self.groups_xml.xdata.xpath("//Groups/Group//Group"):
+ if grp.get("name") in self.groups:
+ continue
+ self.groups[grp.get("name")] = \
+ MetadataGroup(grp.get("name"),
+ bundles=[b.get("name")
+ for b in grp.findall("Bundle")],
+ category=grp.get("category"),
+ is_profile=grp.get("profile", "false") == "true",
+ is_public=grp.get("public", "false") == "true",
+ is_private=grp.get("public", "true") == "false")
+ if grp.get('default', 'false') == 'true':
+ self.default = grp.get('name')
+
+ self.group_membership = dict()
+ self.negated_groups = dict()
+ self.options = dict()
+ # confusing loop condition; the XPath query asks for all
+ # elements under a Group tag under a Groups tag; that is
+ # infinitely recursive, so "all" elements really means _all_
+ # elements. We then manually filter out non-Group elements
+ # since there doesn't seem to be a way to get Group elements
+ # of arbitrary depth with particular ultimate ancestors in
+ # XPath. We do the same thing for Client tags.
+ for el in self.groups_xml.xdata.xpath("//Groups/Group//*") + \
+ self.groups_xml.xdata.xpath("//Groups/Client//*"):
+ if ((el.tag != 'Group' and el.tag != 'Client') or
+ el.getchildren()):
+ continue
+
+ conditions = []
+ for parent in el.iterancestors():
+ cond = get_condition(parent)
+ if cond:
+ conditions.append(cond)
+
+ gname = el.get("name")
+ if el.get("negate", "false").lower() == "true":
+ self.negated_groups[aggregate_conditions(conditions)] = \
+ self.groups[gname]
else:
- self.logger.error("Metadata: Group %s defined multiply" %
- group.get('name'))
- grouptmp[group.get('name')] = \
- ([item.get('name') for item in group.findall('./Bundle')],
- [item.get('name') for item in group.findall('./Group')])
- grouptmp[group.get('name')][1].append(group.get('name'))
- if group.get('default', 'false') == 'true':
- self.default = group.get('name')
- if group.get('profile', 'false') == 'true':
- self.profiles.append(group.get('name'))
- if group.get('public', 'false') == 'true':
- self.public.append(group.get('name'))
- elif group.get('public', 'true') == 'false':
- self.private.append(group.get('name'))
- if 'category' in group.attrib:
- self.categories[group.get('name')] = group.get('category')
-
- for group in grouptmp:
- # self.groups[group] => (bundles, groups, categories)
- self.groups[group] = (set(), set(), {})
- tocheck = [group]
- group_cat = self.groups[group][2]
- while tocheck:
- now = tocheck.pop()
- self.groups[group][1].add(now)
- if now in grouptmp:
- (bundles, groups) = grouptmp[now]
- for ggg in groups:
- if ggg in self.groups[group][1]:
- continue
- if (ggg not in self.categories or \
- self.categories[ggg] not in self.groups[group][2]):
- self.groups[group][1].add(ggg)
- tocheck.append(ggg)
- if ggg in self.categories:
- group_cat[self.categories[ggg]] = ggg
- elif ggg in self.categories:
- self.logger.info("Group %s: %s cat-suppressed %s" % \
- (group,
- group_cat[self.categories[ggg]],
- ggg))
- [self.groups[group][0].add(bund) for bund in bundles]
+                if gname in self.groups and self.groups[gname].category:
+ category = self.groups[gname].category
+
+ def in_cat(client, groups, categories):
+ if category in categories:
+ # this is debug, not warning, because it
+ # gets called a _lot_ -- every time a
+ # group in a category is processed for
+ # every creation of client metadata. this
+ # message is produced in two other places,
+ # so the user should get warned by one of
+ # those.
+ self.logger.debug("%s: Group %s suppressed by "
+ "category %s; %s already a "
+ "member of %s" %
+ (self.name, gname, category,
+ client, categories[category]))
+ return False
+ return True
+ conditions.append(in_cat)
+
+ self.group_membership[aggregate_conditions(conditions)] = \
+ self.groups[gname]
self.states['groups.xml'] = True
def HandleEvent(self, event):
"""Handle update events for data files."""
- if self.clients_xml.HandleEvent(event):
- self._handle_clients_xml_event(event)
- elif self.groups_xml.HandleEvent(event):
- self._handle_groups_xml_event(event)
-
- if False not in list(self.states.values()):
- # check that all client groups are real and complete
- real = list(self.groups.keys())
- for client in list(self.clients.keys()):
- if self.clients[client] not in self.profiles:
- self.logger.error("Client %s set as nonexistent or "
- "incomplete group %s" %
- (client, self.clients[client]))
- self.logger.error("Removing client mapping for %s" % client)
- self.bad_clients[client] = self.clients[client]
- del self.clients[client]
- for bclient in list(self.bad_clients.keys()):
- if self.bad_clients[bclient] in self.profiles:
- self.logger.info("Restored profile mapping for client %s" %
- bclient)
- self.clients[bclient] = self.bad_clients[bclient]
- del self.bad_clients[bclient]
-
- def set_profile(self, client, profile, addresspair):
+ for hdlr in self.handlers:
+ aname = re.sub(r'[^A-z0-9_]', '_', os.path.basename(event.filename))
+ if hdlr(event):
+ try:
+ proc = getattr(self, "_handle_%s_event" % aname)
+ except AttributeError:
+ proc = self._handle_default_event
+ proc(event)
+
+ if False not in list(self.states.values()) and self.debug_flag:
+ # check that all groups are real and complete. this is
+ # just logged at a debug level because many groups might
+ # be probed, and we don't want to warn about them.
+ for client, groups in list(self.clientgroups.items()):
+ for group in groups:
+ if group not in self.groups:
+ self.debug_log("Client %s set as nonexistent group %s" %
+ (client, group))
+ for gname, ginfo in list(self.groups.items()):
+ for group in ginfo.groups:
+ if group not in self.groups:
+ self.debug_log("Group %s set as nonexistent group %s" %
+ (gname, group))
+
+
+ def set_profile(self, client, profile, addresspair, force=False):
"""Set group parameter for provided client."""
- self.logger.info("Asserting client %s profile to %s" % (client, profile))
+ self.logger.info("Asserting client %s profile to %s" %
+ (client, profile))
if False in list(self.states.values()):
- raise MetadataRuntimeError
- if profile not in self.public:
- self.logger.error("Failed to set client %s to private group %s" %
- (client, profile))
- raise MetadataConsistencyError
+ raise Bcfg2.Server.Plugin.MetadataRuntimeError("Metadata has not been read yet")
+ if not force and profile not in self.groups:
+ msg = "Profile group %s does not exist" % profile
+ self.logger.error(msg)
+ raise Bcfg2.Server.Plugin.MetadataConsistencyError(msg)
+ group = self.groups[profile]
+ if not force and not group.is_public:
+ msg = "Cannot set client %s to private group %s" % (client, profile)
+ self.logger.error(msg)
+ raise Bcfg2.Server.Plugin.MetadataConsistencyError(msg)
+
if client in self.clients:
- self.logger.info("Changing %s group from %s to %s" %
- (client, self.clients[client], profile))
+ if self._use_db:
+ msg = "DBMetadata does not support asserting client profiles"
+ self.logger.error(msg)
+ raise Bcfg2.Server.Plugin.PluginExecutionError(msg)
+
+ profiles = [g for g in self.clientgroups[client]
+ if g in self.groups and self.groups[g].is_profile]
+ self.logger.info("Changing %s profile from %s to %s" %
+ (client, profiles, profile))
self.update_client(client, dict(profile=profile))
+ if client in self.clientgroups:
+ for p in profiles:
+ self.clientgroups[client].remove(p)
+ self.clientgroups[client].append(profile)
+ else:
+ self.clientgroups[client] = [profile]
else:
self.logger.info("Creating new client: %s, profile %s" %
(client, profile))
- if addresspair in self.session_cache:
- # we are working with a uuid'd client
- self.add_client(self.session_cache[addresspair][1],
- dict(uuid=client, profile=profile,
- address=addresspair[0]))
+ if self._use_db:
+ self.add_client(client)
else:
- self.add_client(client, dict(profile=profile))
- self.clients[client] = profile
+ if addresspair in self.session_cache:
+ # we are working with a uuid'd client
+ self.add_client(self.session_cache[addresspair][1],
+ dict(uuid=client, profile=profile,
+ address=addresspair[0]))
+ else:
+ self.add_client(client, dict(profile=profile))
+ self.clients.append(client)
+ self.clientgroups[client] = [profile]
+ if not self._use_db:
+ self.clients_xml.write()
+
+ def set_version(self, client, version):
+ """Set group parameter for provided client."""
+ self.logger.info("Setting client %s version to %s" % (client, version))
+ if client in self.clients:
+ self.logger.info("Setting version on client %s to %s" %
+ (client, version))
+ self.update_client(client, dict(version=version))
+ else:
+ msg = "Cannot set version on non-existent client %s" % client
+ self.logger.error(msg)
+ raise Bcfg2.Server.Plugin.MetadataConsistencyError(msg)
+ self.versions[client] = version
self.clients_xml.write()
def resolve_client(self, addresspair, cleanup_cache=False):
@@ -549,7 +821,7 @@ class Metadata(Bcfg2.Server.Plugin.Plugin,
# _any_ port numbers - perhaps a priority queue could
# be faster?
curtime = time.time()
- for addrpair in self.session_cache.keys():
+ for addrpair in list(self.session_cache.keys()):
if addresspair[0] == addrpair[0]:
(stamp, _) = self.session_cache[addrpair]
if curtime - stamp > cache_ttl:
@@ -565,9 +837,9 @@ class Metadata(Bcfg2.Server.Plugin.Plugin,
address = addresspair[0]
if address in self.addresses:
if len(self.addresses[address]) != 1:
- self.logger.error("Address %s has multiple reverse assignments; "
- "a uuid must be used" % (address))
- raise MetadataConsistencyError
+ err = "Address %s has multiple reverse assignments; a uuid must be used" % address
+ self.logger.error(err)
+ raise Bcfg2.Server.Plugin.MetadataConsistencyError(err)
return self.addresses[address][0]
try:
cname = socket.gethostbyaddr(address)[0].lower()
@@ -575,34 +847,102 @@ class Metadata(Bcfg2.Server.Plugin.Plugin,
return self.aliases[cname]
return cname
except socket.herror:
- warning = "address resolution error for %s" % (address)
+ warning = "address resolution error for %s" % address
self.logger.warning(warning)
- raise MetadataConsistencyError
+ raise Bcfg2.Server.Plugin.MetadataConsistencyError(warning)
+
+ def _merge_groups(self, client, groups, categories=None):
+ """ set group membership based on the contents of groups.xml
+ and initial group membership of this client. Returns a tuple
+ of (allgroups, categories)"""
+ numgroups = -1 # force one initial pass
+ if categories is None:
+ categories = dict()
+ while numgroups != len(groups):
+ numgroups = len(groups)
+ for predicate, group in self.group_membership.items():
+ if group.name in groups:
+ continue
+ if predicate(client, groups, categories):
+ groups.add(group.name)
+ if group.category:
+ categories[group.category] = group.name
+ for predicate, group in self.negated_groups.items():
+ if group.name not in groups:
+ continue
+ if predicate(client, groups, categories):
+ groups.remove(group.name)
+ if group.category:
+ del categories[group.category]
+ return (groups, categories)
def get_initial_metadata(self, client):
"""Return the metadata for a given client."""
if False in list(self.states.values()):
- raise MetadataRuntimeError
+ raise Bcfg2.Server.Plugin.MetadataRuntimeError("Metadata has not been read yet")
client = client.lower()
if client in self.aliases:
client = self.aliases[client]
- if client in self.clients:
- profile = self.clients[client]
- (bundles, groups, categories) = self.groups[profile]
- else:
- if self.default == None:
- self.logger.error("Cannot set group for client %s; "
- "no default group set" % client)
- raise MetadataConsistencyError
- self.set_profile(client, self.default, (None, None))
- profile = self.default
- [bundles, groups, categories] = self.groups[self.default]
+
+ groups = set()
+ categories = dict()
+ profile = None
+
+ if client not in self.clients:
+ pgroup = None
+ if client in self.clientgroups:
+ pgroup = self.clientgroups[client][0]
+ elif self.default:
+ pgroup = self.default
+
+ if pgroup:
+ self.set_profile(client, pgroup, (None, None), force=True)
+ groups.add(pgroup)
+ category = self.groups[pgroup].category
+ if category:
+ categories[category] = pgroup
+ if (pgroup in self.groups and self.groups[pgroup].is_profile):
+ profile = pgroup
+ else:
+ msg = "Cannot add new client %s; no default group set" % client
+ self.logger.error(msg)
+ raise Bcfg2.Server.Plugin.MetadataConsistencyError(msg)
+
+ if client in self.clientgroups:
+ for cgroup in self.clientgroups[client]:
+ if cgroup in groups:
+ continue
+ if cgroup not in self.groups:
+ self.groups[cgroup] = MetadataGroup(cgroup)
+ category = self.groups[cgroup].category
+ if category and category in categories:
+ self.logger.warning("%s: Group %s suppressed by "
+ "category %s; %s already a member "
+ "of %s" %
+ (self.name, cgroup, category,
+ client, categories[category]))
+ continue
+ if category:
+ categories[category] = cgroup
+ groups.add(cgroup)
+ # favor client groups for setting profile
+ if not profile and self.groups[cgroup].is_profile:
+ profile = cgroup
+
+ groups, categories = self._merge_groups(client, groups,
+ categories=categories)
+
+ bundles = set()
+ for group in groups:
+ try:
+ bundles.update(self.groups[group].bundles)
+ except KeyError:
+ self.logger.warning("%s: %s is a member of undefined group %s" %
+ (self.name, client, group))
+
aliases = self.raliases.get(client, set())
addresses = self.raddresses.get(client, set())
- newgroups = set(groups)
- newbundles = set(bundles)
- newcategories = {}
- newcategories.update(categories)
+ version = self.versions.get(client, None)
if client in self.passwords:
password = self.passwords[client]
else:
@@ -613,61 +953,70 @@ class Metadata(Bcfg2.Server.Plugin.Plugin,
uuid = uuids[0]
else:
uuid = None
- for group in self.cgroups.get(client, []):
- if group in self.groups:
- nbundles, ngroups, ncategories = self.groups[group]
- else:
- nbundles, ngroups, ncategories = ([], [group], {})
- [newbundles.add(b) for b in nbundles if b not in newbundles]
- [newgroups.add(g) for g in ngroups if g not in newgroups]
- newcategories.update(ncategories)
- return ClientMetadata(client, profile, newgroups, newbundles, aliases,
- addresses, newcategories, uuid, password,
+ if not profile:
+ # one last ditch attempt at setting the profile
+ profiles = [g for g in groups
+ if g in self.groups and self.groups[g].is_profile]
+ if len(profiles) >= 1:
+ profile = profiles[0]
+
+ return ClientMetadata(client, profile, groups, bundles, aliases,
+ addresses, categories, uuid, password, version,
self.query)
def get_all_group_names(self):
all_groups = set()
- [all_groups.update(g[1]) for g in list(self.groups.values())]
+ all_groups.update(self.groups.keys())
+ all_groups.update([g.name for g in self.group_membership.values()])
+ all_groups.update([g.name for g in self.negated_groups.values()])
+ for grp in self.clientgroups.values():
+ all_groups.update(grp)
return all_groups
def get_all_groups_in_category(self, category):
- all_groups = set()
- [all_groups.add(g) for g in self.categories \
- if self.categories[g] == category]
- return all_groups
+ return set([g.name for g in self.groups.values()
+ if g.category == category])
def get_client_names_by_profiles(self, profiles):
- return [client for client, profile in list(self.clients.items()) \
- if profile in profiles]
+ rv = []
+ for client in list(self.clients):
+ mdata = self.get_initial_metadata(client)
+ if mdata.profile in profiles:
+ rv.append(client)
+ return rv
def get_client_names_by_groups(self, groups):
mdata = [self.core.build_metadata(client)
- for client in list(self.clients.keys())]
+ for client in list(self.clients)]
return [md.hostname for md in mdata if md.groups.issuperset(groups)]
+ def get_client_names_by_bundles(self, bundles):
+ mdata = [self.core.build_metadata(client)
+ for client in list(self.clients.keys())]
+ return [md.hostname for md in mdata if md.bundles.issuperset(bundles)]
+
def merge_additional_groups(self, imd, groups):
for group in groups:
- if (group in self.categories and
- self.categories[group] in imd.categories):
+ if group in imd.groups:
continue
- newbundles, newgroups, _ = self.groups.get(group,
- (list(),
- [group],
- dict()))
- for newbundle in newbundles:
- if newbundle not in imd.bundles:
- imd.bundles.add(newbundle)
- for newgroup in newgroups:
- if newgroup not in imd.groups:
- if (newgroup in self.categories and
- self.categories[newgroup] in imd.categories):
- continue
- if newgroup in self.private:
- self.logger.error("Refusing to add dynamic membership "
- "in private group %s for client %s" %
- (newgroup, imd.hostname))
- continue
- imd.groups.add(newgroup)
+ if group in self.groups and self.groups[group].category:
+ category = self.groups[group].category
+ if self.groups[group].category in imd.categories:
+ self.logger.warning("%s: Group %s suppressed by category "
+ "%s; %s already a member of %s" %
+ (self.name, group, category,
+ imd.hostname,
+ imd.categories[category]))
+ continue
+                imd.categories[category] = group
+ imd.groups.add(group)
+
+ self._merge_groups(imd.hostname, imd.groups,
+ categories=imd.categories)
+
+ for group in imd.groups:
+ if group in self.groups:
+ imd.bundles.update(self.groups[group].bundles)
def merge_additional_data(self, imd, source, data):
if not hasattr(imd, source):
@@ -686,8 +1035,8 @@ class Metadata(Bcfg2.Server.Plugin.Plugin,
(client, address))
return True
else:
- self.logger.error("Got request for non-float client %s from %s" %
- (client, address))
+ self.logger.error("Got request for non-float client %s from %s"
+ % (client, address))
return False
resolved = self.resolve_client(addresspair)
if resolved.lower() == client.lower():
@@ -711,9 +1060,10 @@ class Metadata(Bcfg2.Server.Plugin.Plugin,
id_method = 'address'
try:
client = self.resolve_client(address)
- except MetadataConsistencyError:
- self.logger.error("Client %s failed to resolve; metadata problem"
- % address[0])
+ except Bcfg2.Server.Plugin.MetadataConsistencyError:
+ err = sys.exc_info()[1]
+ self.logger.error("Client %s failed to resolve: %s" %
+ (address[0], err))
return False
else:
id_method = 'uuid'
@@ -768,7 +1118,7 @@ class Metadata(Bcfg2.Server.Plugin.Plugin,
"secure mode" % address[0])
return False
# populate the session cache
- if user.decode('utf-8') != 'root':
+ if user != 'root':
self.session_cache[address] = (time.time(), client)
return True
@@ -792,7 +1142,8 @@ class Metadata(Bcfg2.Server.Plugin.Plugin,
def include_group(group):
return not only_client or group in clientmeta.groups
- groups_tree = lxml.etree.parse(os.path.join(self.data, "groups.xml"))
+ groups_tree = lxml.etree.parse(os.path.join(self.data, "groups.xml"),
+ parser=Bcfg2.Server.XMLParser)
try:
groups_tree.xinclude()
except lxml.etree.XIncludeError:
@@ -810,20 +1161,26 @@ class Metadata(Bcfg2.Server.Plugin.Plugin,
del categories[None]
if hosts:
instances = {}
- clients = self.clients
- for client, profile in list(clients.items()):
+ for client in list(self.clients):
if include_client(client):
continue
- if profile in instances:
- instances[profile].append(client)
+ if client in self.clientgroups:
+ groups = self.clientgroups[client]
+ elif self.default:
+ groups = [self.default]
else:
- instances[profile] = [client]
- for profile, clist in list(instances.items()):
+ continue
+ for group in groups:
+ try:
+ instances[group].append(client)
+ except KeyError:
+ instances[group] = [client]
+ for group, clist in list(instances.items()):
clist.sort()
viz_str.append('"%s-instances" [ label="%s", shape="record" ];' %
- (profile, '|'.join(clist)))
+ (group, '|'.join(clist)))
viz_str.append('"%s-instances" -> "group-%s";' %
- (profile, profile))
+ (group, group))
if bundles:
bundles = []
[bundles.append(bund.get('name')) \
@@ -864,3 +1221,35 @@ class Metadata(Bcfg2.Server.Plugin.Plugin,
viz_str.append('"%s" [label="%s", shape="record", style="filled", fillcolor="%s"];' %
(category, category, categories[category]))
return "\n".join("\t" + s for s in viz_str)
+
+
+class MetadataLint(Bcfg2.Server.Lint.ServerPlugin):
+ def Run(self):
+ self.nested_clients()
+ self.deprecated_options()
+
+ @classmethod
+ def Errors(cls):
+ return {"nested-client-tags": "warning",
+ "deprecated-clients-options": "warning"}
+
+ def deprecated_options(self):
+ clientdata = self.metadata.clients_xml.xdata
+        for el in clientdata.xpath("//Client"):
+ loc = el.get("location")
+ if loc:
+ if loc == "floating":
+ floating = True
+ else:
+ floating = False
+ self.LintError("deprecated-clients-options",
+ "The location='%s' option is deprecated. "
+ "Please use floating='%s' instead: %s" %
+ (loc, floating, self.RenderXML(el)))
+
+ def nested_clients(self):
+ groupdata = self.metadata.groups_xml.xdata
+ for el in groupdata.xpath("//Client//Client"):
+ self.LintError("nested-client-tags",
+ "Client %s nested within Client tag: %s" %
+ (el.get("name"), self.RenderXML(el)))
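
The category handling introduced above means each category can contribute at most one group to a client; later groups whose category is already claimed are skipped with a warning. A minimal standalone sketch of that suppression rule (hypothetical function and data, not part of the patch):

def assign_groups(candidate_groups, group_categories):
    """candidate_groups: group names in priority order.
    group_categories: dict mapping group name -> category (or None)."""
    groups = set()
    categories = dict()
    for grp in candidate_groups:
        category = group_categories.get(grp)
        if category and category in categories:
            # a group in this category has already been selected
            print("Group %s suppressed by category %s; already using %s"
                  % (grp, category, categories[category]))
            continue
        if category:
            categories[category] = grp
        groups.add(grp)
    return groups, categories

# assign_groups(["rhel6", "rhel5", "web"],
#               {"rhel6": "os", "rhel5": "os", "web": None})
# -> ({"rhel6", "web"}, {"os": "rhel6"})
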
diff --git a/src/lib/Bcfg2/Server/Plugins/NagiosGen.py b/src/lib/Bcfg2/Server/Plugins/NagiosGen.py
index 4dbd57d16..f2b8336e0 100644
--- a/src/lib/Bcfg2/Server/Plugins/NagiosGen.py
+++ b/src/lib/Bcfg2/Server/Plugins/NagiosGen.py
@@ -7,18 +7,23 @@ import glob
import socket
import logging
import lxml.etree
-
+import Bcfg2.Server
import Bcfg2.Server.Plugin
LOGGER = logging.getLogger('Bcfg2.Plugins.NagiosGen')
line_fmt = '\t%-32s %s'
-class NagiosGenConfig(Bcfg2.Server.Plugin.SingleXMLFileBacked,
- Bcfg2.Server.Plugin.StructFile):
+class NagiosGenConfig(Bcfg2.Server.Plugin.StructFile):
def __init__(self, filename, fam):
- Bcfg2.Server.Plugin.SingleXMLFileBacked.__init__(self, filename, fam)
- Bcfg2.Server.Plugin.StructFile.__init__(self, filename)
+ # create config.xml if missing
+ if not os.path.exists(filename):
+ LOGGER.warning("NagiosGen: %s missing. "
+ "Creating empty one for you." % filename)
+ open(filename, "w").write("<NagiosGen></NagiosGen>")
+
+ Bcfg2.Server.Plugin.StructFile.__init__(self, filename, fam=fam,
+ should_monitor=True)
class NagiosGen(Bcfg2.Server.Plugin.Plugin,
@@ -51,7 +56,12 @@ class NagiosGen(Bcfg2.Server.Plugin.Plugin,
def createhostconfig(self, entry, metadata):
"""Build host specific configuration file."""
- host_address = socket.gethostbyname(metadata.hostname)
+ try:
+ host_address = socket.gethostbyname(metadata.hostname)
+ except socket.gaierror:
+ LOGGER.error("Failed to find IP address for %s" %
+ metadata.hostname)
+ raise Bcfg2.Server.Plugin.PluginExecutionError
host_groups = [grp for grp in metadata.groups
if os.path.isfile('%s/%s-group.cfg' % (self.data, grp))]
host_config = ['define host {',
@@ -84,7 +94,8 @@ class NagiosGen(Bcfg2.Server.Plugin.Plugin,
LOGGER.warn("Parsing deprecated NagiosGen/parents.xml. "
"Update to the new-style config with "
"nagiosgen-convert.py.")
- parents = lxml.etree.parse(pfile)
+ parents = lxml.etree.parse(pfile,
+ parser=Bcfg2.Server.XMLParser)
for el in parents.xpath("//Depend[@name='%s']" % metadata.hostname):
if 'parent' in xtra:
xtra['parent'] += "," + el.get("on")
diff --git a/src/lib/Bcfg2/Server/Plugins/Ohai.py b/src/lib/Bcfg2/Server/Plugins/Ohai.py
index 5fff20d98..20f9ba877 100644
--- a/src/lib/Bcfg2/Server/Plugins/Ohai.py
+++ b/src/lib/Bcfg2/Server/Plugins/Ohai.py
@@ -41,7 +41,7 @@ class OhaiCache(object):
# simply return if the client returned nothing
return
self.cache[item] = json.loads(value)
- file("%s/%s.json" % (self.dirname, item), 'w').write(value)
+ open("%s/%s.json" % (self.dirname, item), 'w').write(value)
def __getitem__(self, item):
if item not in self.cache:
diff --git a/src/lib/Bcfg2/Server/Plugins/Packages/Apt.py b/src/lib/Bcfg2/Server/Plugins/Packages/Apt.py
index 49e9d417b..685cd5c1d 100644
--- a/src/lib/Bcfg2/Server/Plugins/Packages/Apt.py
+++ b/src/lib/Bcfg2/Server/Plugins/Packages/Apt.py
@@ -2,13 +2,15 @@ import re
import gzip
from Bcfg2.Server.Plugins.Packages.Collection import Collection
from Bcfg2.Server.Plugins.Packages.Source import Source
-from Bcfg2.Bcfg2Py3k import cPickle, file
+from Bcfg2.Bcfg2Py3k import cPickle
class AptCollection(Collection):
def get_group(self, group):
- self.logger.warning("Packages: Package groups are not supported by APT")
+ self.logger.warning("Packages: Package groups are not "
+ "supported by APT")
return []
+
class AptSource(Source):
basegroups = ['apt', 'debian', 'ubuntu', 'nexenta']
ptype = 'deb'
@@ -22,14 +24,15 @@ class AptSource(Source):
'components': self.components, 'arches': self.arches}]
def save_state(self):
- cache = file(self.cachefile, 'wb')
- cPickle.dump((self.pkgnames, self.deps, self.provides),
- cache, 2)
+ cache = open(self.cachefile, 'wb')
+ cPickle.dump((self.pkgnames, self.deps, self.provides,
+ self.essentialpkgs), cache, 2)
cache.close()
def load_state(self):
- data = file(self.cachefile)
- self.pkgnames, self.deps, self.provides = cPickle.load(data)
+ data = open(self.cachefile)
+ (self.pkgnames, self.deps, self.provides,
+ self.essentialpkgs) = cPickle.load(data)
def filter_unknown(self, unknown):
filtered = set([u for u in unknown if u.startswith('choice')])
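
The Apt cache above now pickles a four-item tuple that includes essentialpkgs, so caches written by the older three-item format can no longer be unpacked and must be regenerated. A rough sketch of the round-trip (invented path and data, not the plugin's actual cache file):

import pickle  # Bcfg2Py3k's cPickle resolves to cPickle/pickle

pkgnames = set(["bash", "coreutils"])
deps = {"bash": ["libc6"]}
provides = {"awk": ["gawk", "mawk"]}
essentialpkgs = set(["bash"])

# save_state(): dump everything as one tuple with pickle protocol 2
with open("/tmp/apt-source.cache", "wb") as cache:
    pickle.dump((pkgnames, deps, provides, essentialpkgs), cache, 2)

# load_state(): unpacking into four names fails on old three-item caches
with open("/tmp/apt-source.cache", "rb") as cache:
    (pkgnames, deps, provides, essentialpkgs) = pickle.load(cache)
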
diff --git a/src/lib/Bcfg2/Server/Plugins/Packages/Collection.py b/src/lib/Bcfg2/Server/Plugins/Packages/Collection.py
index 3ea14ce75..b05a69d4a 100644
--- a/src/lib/Bcfg2/Server/Plugins/Packages/Collection.py
+++ b/src/lib/Bcfg2/Server/Plugins/Packages/Collection.py
@@ -1,6 +1,7 @@
import sys
import copy
import logging
+import lxml
import Bcfg2.Server.Plugin
logger = logging.getLogger(__name__)
@@ -52,13 +53,40 @@ class Collection(Bcfg2.Server.Plugin.Debuggable):
@property
def cachekey(self):
- return md5(self.get_config()).hexdigest()
+ return md5(self.sourcelist()).hexdigest()
def get_config(self):
- self.logger.error("Packages: Cannot generate config for host with "
- "multiple source types (%s)" % self.metadata.hostname)
+ self.logger.error("Packages: Cannot generate config for host %s with "
+ "no sources or multiple source types" %
+ self.metadata.hostname)
return ""
+ def sourcelist(self):
+ srcs = []
+ for source in self.sources:
+ # get_urls() loads url_map as a side-effect
+ source.get_urls()
+ for url_map in source.url_map:
+                if url_map['arch'] not in self.metadata.groups:
+ continue
+ reponame = source.get_repo_name(url_map)
+ srcs.append("Name: %s" % reponame)
+ srcs.append(" Type: %s" % source.ptype)
+ if url_map['url']:
+ srcs.append(" URL: %s" % url_map['url'])
+ elif url_map['rawurl']:
+ srcs.append(" RAWURL: %s" % url_map['rawurl'])
+ if source.gpgkeys:
+ srcs.append(" GPG Key(s): %s" % ", ".join(source.gpgkeys))
+ else:
+ srcs.append(" GPG Key(s): None")
+ if len(source.blacklist):
+ srcs.append(" Blacklist: %s" % ", ".join(source.blacklist))
+ if len(source.whitelist):
+ srcs.append(" Whitelist: %s" % ", ".join(source.whitelist))
+ srcs.append("")
+ return "\n".join(srcs)
+
def get_relevant_groups(self):
groups = []
for source in self.sources:
@@ -79,6 +107,14 @@ class Collection(Bcfg2.Server.Plugin.Debuggable):
cachefiles.add(source.cachefile)
return list(cachefiles)
+ def get_groups(self, grouplist):
+ """ provided since some backends may be able to query multiple
+ groups at once faster than serially """
+ rv = dict()
+ for group, ptype in grouplist:
+ rv[group] = self.get_group(group, ptype)
+ return rv
+
def get_group(self, group, ptype=None):
for source in self.sources:
pkgs = source.get_group(self.metadata, group, ptype=ptype)
@@ -152,6 +188,28 @@ class Collection(Bcfg2.Server.Plugin.Debuggable):
""" do any collection-level data setup tasks """
pass
+ def packages_from_entry(self, entry):
+ """ given a Package or BoundPackage entry, get a list of the
+ package(s) described by it in a format appropriate for passing
+ to complete(). by default, that's just the name; only the Yum
+ backend supports getting versions"""
+ return [entry.get("name")]
+
+ def packages_to_entry(self, pkglist, entry):
+ for pkg in pkglist:
+ lxml.etree.SubElement(entry, 'BoundPackage', name=pkg,
+ version=self.setup.cfp.get("packages",
+ "version",
+ default="auto"),
+ type=self.ptype, origin='Packages')
+
+ def get_new_packages(self, initial, complete):
+ """ compute the difference between the complete package list
+ and the initial package list. this is necessary because the
+ format may be different between the two lists due to
+ packages_{to,from}_entry() """
+ return list(complete.difference(initial))
+
def complete(self, packagelist):
'''Build the transitive closure of all package dependencies
@@ -350,15 +408,7 @@ def factory(metadata, sources, basepath, debug=False):
",".join([s.__name__ for s in sclasses]))
cclass = Collection
elif len(sclasses) == 0:
- # you'd think this should be a warning, but it happens all the
- # freaking time if you have a) machines in your clients.xml
- # that do not have the proper groups set up yet (e.g., if you
- # have multiple Bcfg2 servers and Packages-relevant groups set
- # by probes); and b) templates that query all or multiple
- # machines (e.g., with metadata.query.all_clients())
- if debug:
- logger.error("Packages: No sources found for %s" %
- metadata.hostname)
+ logger.error("Packages: No sources found for %s" % metadata.hostname)
cclass = Collection
else:
cclass = get_collection_class(sclasses.pop().__name__.replace("Source",
@@ -373,4 +423,3 @@ def factory(metadata, sources, basepath, debug=False):
clients[metadata.hostname] = ckey
collections[ckey] = collection
return collection
-
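
With the change above, a collection's cachekey is derived from the text returned by sourcelist() rather than from the generated client config. A short sketch of that derivation (the source listing below is invented):

from hashlib import md5

sourcelist = "\n".join([
    "Name: centos-os",
    " Type: yum",
    " URL: http://mirror.centos.org/centos/6/os/x86_64/",
    " GPG Key(s): None",
    "",
])
# md5 of the human-readable source list identifies the cached data
cachekey = md5(sourcelist.encode("utf-8")).hexdigest()
print(cachekey)
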
diff --git a/src/lib/Bcfg2/Server/Plugins/Packages/Pac.py b/src/lib/Bcfg2/Server/Plugins/Packages/Pac.py
index 99a090739..34c7b42c1 100644
--- a/src/lib/Bcfg2/Server/Plugins/Packages/Pac.py
+++ b/src/lib/Bcfg2/Server/Plugins/Packages/Pac.py
@@ -1,6 +1,6 @@
import gzip
import tarfile
-from Bcfg2.Bcfg2Py3k import cPickle, file
+from Bcfg2.Bcfg2Py3k import cPickle
from Bcfg2.Server.Plugins.Packages.Collection import Collection
from Bcfg2.Server.Plugins.Packages.Source import Source
@@ -9,6 +9,7 @@ class PacCollection(Collection):
self.logger.warning("Packages: Package groups are not supported by Pacman")
return []
+
class PacSource(Source):
basegroups = ['arch', 'parabola']
ptype = 'pacman'
@@ -22,13 +23,13 @@ class PacSource(Source):
'components': self.components, 'arches': self.arches}]
def save_state(self):
- cache = file(self.cachefile, 'wb')
+ cache = open(self.cachefile, 'wb')
cPickle.dump((self.pkgnames, self.deps, self.provides),
cache, 2)
cache.close()
def load_state(self):
- data = file(self.cachefile)
+ data = open(self.cachefile)
self.pkgnames, self.deps, self.provides = cPickle.load(data)
def filter_unknown(self, unknown):
diff --git a/src/lib/Bcfg2/Server/Plugins/Packages/PackagesSources.py b/src/lib/Bcfg2/Server/Plugins/Packages/PackagesSources.py
index 7796b9e34..0d565be31 100644
--- a/src/lib/Bcfg2/Server/Plugins/Packages/PackagesSources.py
+++ b/src/lib/Bcfg2/Server/Plugins/Packages/PackagesSources.py
@@ -4,17 +4,15 @@ import lxml.etree
import Bcfg2.Server.Plugin
from Bcfg2.Server.Plugins.Packages.Source import SourceInitError
-class PackagesSources(Bcfg2.Server.Plugin.SingleXMLFileBacked,
- Bcfg2.Server.Plugin.StructFile,
+class PackagesSources(Bcfg2.Server.Plugin.StructFile,
Bcfg2.Server.Plugin.Debuggable):
__identifier__ = None
def __init__(self, filename, cachepath, fam, packages, setup):
Bcfg2.Server.Plugin.Debuggable.__init__(self)
try:
- Bcfg2.Server.Plugin.SingleXMLFileBacked.__init__(self,
- filename,
- fam)
+ Bcfg2.Server.Plugin.StructFile.__init__(self, filename, fam=fam,
+ should_monitor=True)
except OSError:
err = sys.exc_info()[1]
msg = "Packages: Failed to read configuration file: %s" % err
@@ -22,7 +20,6 @@ class PackagesSources(Bcfg2.Server.Plugin.SingleXMLFileBacked,
msg += " Have you created it?"
self.logger.error(msg)
raise Bcfg2.Server.Plugin.PluginInitError(msg)
- Bcfg2.Server.Plugin.StructFile.__init__(self, filename)
self.cachepath = cachepath
self.setup = setup
if not os.path.exists(self.cachepath):
@@ -42,18 +39,11 @@ class PackagesSources(Bcfg2.Server.Plugin.SingleXMLFileBacked,
source.toggle_debug()
def HandleEvent(self, event=None):
- Bcfg2.Server.Plugin.SingleXMLFileBacked.HandleEvent(self, event=event)
+ Bcfg2.Server.Plugin.XMLFileBacked.HandleEvent(self, event=event)
if event and event.filename != self.name:
- for fname in self.extras:
- fpath = None
- if fname.startswith("/"):
- fpath = os.path.abspath(fname)
- else:
- fpath = \
- os.path.abspath(os.path.join(os.path.dirname(self.name),
- fname))
+ for fpath in self.extras:
if fpath == os.path.abspath(event.filename):
- self.parsed.add(fname)
+ self.parsed.add(fpath)
break
if self.loaded:
@@ -65,7 +55,7 @@ class PackagesSources(Bcfg2.Server.Plugin.SingleXMLFileBacked,
return sorted(list(self.parsed)) == sorted(self.extras)
def Index(self):
- Bcfg2.Server.Plugin.SingleXMLFileBacked.Index(self)
+ Bcfg2.Server.Plugin.XMLFileBacked.Index(self)
self.entries = []
for xsource in self.xdata.findall('.//Source'):
source = self.source_from_xml(xsource)
@@ -87,7 +77,8 @@ class PackagesSources(Bcfg2.Server.Plugin.SingleXMLFileBacked,
stype.title())
cls = getattr(module, "%sSource" % stype.title())
except (ImportError, AttributeError):
- self.logger.error("Packages: Unknown source type %s" % stype)
+ ex = sys.exc_info()[1]
+ self.logger.error("Packages: Unknown source type %s (%s)" % (stype, ex))
return None
try:
@@ -106,4 +97,7 @@ class PackagesSources(Bcfg2.Server.Plugin.SingleXMLFileBacked,
return "PackagesSources: %s" % repr(self.entries)
def __str__(self):
- return "PackagesSources: %s" % str(self.entries)
+ return "PackagesSources: %s sources" % len(self.entries)
+
+ def __len__(self):
+ return len(self.entries)
diff --git a/src/lib/Bcfg2/Server/Plugins/Packages/Source.py b/src/lib/Bcfg2/Server/Plugins/Packages/Source.py
index edcdcd9f2..df3706fb1 100644
--- a/src/lib/Bcfg2/Server/Plugins/Packages/Source.py
+++ b/src/lib/Bcfg2/Server/Plugins/Packages/Source.py
@@ -1,11 +1,10 @@
import os
import re
import sys
-import base64
import Bcfg2.Server.Plugin
from Bcfg2.Bcfg2Py3k import HTTPError, HTTPBasicAuthHandler, \
HTTPPasswordMgrWithDefaultRealm, install_opener, build_opener, \
- urlopen, file, cPickle
+ urlopen, cPickle
try:
from hashlib import md5
@@ -51,7 +50,18 @@ class Source(Bcfg2.Server.Plugin.Debuggable):
for key, tag in [('components', 'Component'), ('arches', 'Arch'),
('blacklist', 'Blacklist'),
('whitelist', 'Whitelist')]:
- self.__dict__[key] = [item.text for item in xsource.findall(tag)]
+ setattr(self, key, [item.text for item in xsource.findall(tag)])
+ self.server_options = dict()
+ self.client_options = dict()
+ opts = xsource.findall("Options")
+ for el in opts:
+ repoopts = dict([(k, v)
+ for k, v in el.attrib.items()
+ if k != "clientonly" and k != "serveronly"])
+ if el.get("clientonly", "false").lower() == "false":
+ self.server_options.update(repoopts)
+ if el.get("serveronly", "false").lower() == "false":
+ self.client_options.update(repoopts)
self.gpgkeys = [el.text for el in xsource.findall("GPGKey")]
@@ -137,9 +147,8 @@ class Source(Bcfg2.Server.Plugin.Debuggable):
def get_repo_name(self, url_map):
# try to find a sensible name for a repo
- if 'components' in url_map and url_map['components']:
- # use the first component as the name
- rname = url_map['components'][0]
+ if 'component' in url_map and url_map['component']:
+ rname = url_map['component']
else:
name = None
for repo_re in (self.mrepo_re,
@@ -149,12 +158,15 @@ class Source(Bcfg2.Server.Plugin.Debuggable):
if match:
name = match.group(1)
break
- if name is None:
- # couldn't figure out the name from the URL or URL map
- # (which probably means its a screwy URL), so we just
- # generate a random one
- name = base64.b64encode(os.urandom(16))[:-2]
- rname = "%s-%s" % (self.groups[0], name)
+ if name and self.groups:
+ rname = "%s-%s" % (self.groups[0], name)
+ elif self.groups:
+ rname = self.groups[0]
+ else:
+ # a global source with no reasonable name. just use
+ # the full url and let the regex below make it even
+ # uglier.
+ rname = url_map['url']
# see yum/__init__.py in the yum source, lines 441-449, for
# the source of this regex. yum doesn't like anything but
# string.ascii_letters, string.digits, and [-_.:]. There
@@ -169,6 +181,9 @@ class Source(Bcfg2.Server.Plugin.Debuggable):
else:
return self.__class__.__name__
+ def __repr__(self):
+ return str(self)
+
def get_urls(self):
return []
urls = property(get_urls)
@@ -182,6 +197,10 @@ class Source(Bcfg2.Server.Plugin.Debuggable):
if a in metadata.groups]
vdict = dict()
for agrp in agroups:
+ if agrp not in self.provides:
+ self.logger.warning("%s provides no packages for %s" %
+ (self, agrp))
+ continue
for key, value in list(self.provides[agrp].items()):
if key not in vdict:
vdict[key] = set(value)
@@ -213,7 +232,7 @@ class Source(Bcfg2.Server.Plugin.Debuggable):
fname = self.escape_url(url)
try:
data = fetch_url(url)
- file(fname, 'w').write(data)
+ open(fname, 'w').write(data)
except ValueError:
self.logger.error("Packages: Bad url string %s" % url)
raise
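
get_repo_name() above prefers a component-based name, falls back to the source's first group or the raw URL, and then strips anything yum will not accept in a repository id. A loose approximation of that normalization (helper name and replacement character are illustrative only):

import re
import string

def safe_repo_name(group, component, url):
    # prefer the component, then the group, then the raw URL
    if component:
        rname = component
    elif group:
        rname = group
    else:
        rname = url
    # yum only tolerates ascii letters, digits, and -_.: in repo ids
    allowed = string.ascii_letters + string.digits + "-_.:"
    return re.sub(r"[^%s]" % re.escape(allowed), "-", rname)

# safe_repo_name("centos", "updates", None)  -> "updates"
# safe_repo_name("centos", None, None)       -> "centos"
# safe_repo_name(None, None, "http://example.com/repo")
#                                            -> "http:--example.com-repo"
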
diff --git a/src/lib/Bcfg2/Server/Plugins/Packages/Yum.py b/src/lib/Bcfg2/Server/Plugins/Packages/Yum.py
index 53344e200..cba3373c1 100644
--- a/src/lib/Bcfg2/Server/Plugins/Packages/Yum.py
+++ b/src/lib/Bcfg2/Server/Plugins/Packages/Yum.py
@@ -1,17 +1,14 @@
import os
+import re
import sys
-import time
import copy
-import glob
import socket
-import random
import logging
-import threading
import lxml.etree
-from UserDict import DictMixin
-from subprocess import Popen, PIPE, STDOUT
+from subprocess import Popen, PIPE
import Bcfg2.Server.Plugin
-from Bcfg2.Bcfg2Py3k import StringIO, cPickle, HTTPError, ConfigParser, file
+from Bcfg2.Bcfg2Py3k import StringIO, cPickle, HTTPError, URLError, \
+ ConfigParser
from Bcfg2.Server.Plugins.Packages.Collection import Collection
from Bcfg2.Server.Plugins.Packages.Source import SourceInitError, Source, \
fetch_url
@@ -96,19 +93,29 @@ class YumCollection(Collection):
if not os.path.exists(self.cachefile):
os.mkdir(self.cachefile)
- self.configdir = os.path.join(self.basepath, "yum")
- if not os.path.exists(self.configdir):
- os.mkdir(self.configdir)
- self.cfgfile = os.path.join(self.configdir,
- "%s-yum.conf" % self.cachekey)
+ self.cfgfile = os.path.join(self.cachefile, "yum.conf")
self.write_config()
if has_pulp and self.has_pulp_sources:
_setup_pulp(self.setup)
+ self._helper = None
+
@property
def helper(self):
- return self.setup.cfp.get("packages:yum", "helper",
- default="/usr/sbin/bcfg2-yum-helper")
+ try:
+ return self.setup.cfp.get("packages:yum", "helper")
+ except:
+ pass
+
+ if not self._helper:
+ # first see if bcfg2-yum-helper is in PATH
+ try:
+ Popen(['bcfg2-yum-helper'],
+ stdin=PIPE, stdout=PIPE, stderr=PIPE).wait()
+ self._helper = 'bcfg2-yum-helper'
+ except OSError:
+ self._helper = "/usr/sbin/bcfg2-yum-helper"
+ return self._helper
@property
def use_yum(self):
@@ -129,11 +136,21 @@ class YumCollection(Collection):
yumconf = self.get_config(raw=True)
yumconf.add_section("main")
- mainopts = dict(cachedir=self.cachefile,
+ # we set installroot to the cache directory so
+ # bcfg2-yum-helper works with an empty rpmdb. otherwise
+ # the rpmdb is so hopelessly intertwined with yum that we
+ # have to totally reinvent the dependency resolver.
+ mainopts = dict(cachedir='/',
+ installroot=self.cachefile,
keepcache="0",
- sslverify="0",
debuglevel="0",
+ sslverify="0",
reposdir="/dev/null")
+ if self.setup['debug']:
+ mainopts['debuglevel'] = "5"
+ elif self.setup['verbose']:
+ mainopts['debuglevel'] = "2"
+
try:
for opt in self.setup.cfp.options("packages:yum"):
if opt not in self.option_blacklist:
@@ -162,7 +179,7 @@ class YumCollection(Collection):
config.add_section(reponame)
added = True
except ConfigParser.DuplicateSectionError:
- match = re.match("-(\d)", reponame)
+ match = re.search("-(\d+)", reponame)
if match:
rid = int(match.group(1)) + 1
else:
@@ -186,6 +203,13 @@ class YumCollection(Collection):
config.set(reponame, "includepkgs",
" ".join(source.whitelist))
+ if raw:
+ opts = source.server_options
+ else:
+ opts = source.client_options
+ for opt, val in opts.items():
+ config.set(reponame, opt, val)
+
if raw:
return config
else:
@@ -346,6 +370,25 @@ class YumCollection(Collection):
# for API completeness
return self.call_helper("get_provides", package)
+ def get_groups(self, grouplist):
+ if not self.use_yum:
+ self.logger.warning("Packages: Package groups are not supported by "
+ "Bcfg2's internal Yum dependency generator")
+ return []
+
+ if not grouplist:
+ return dict()
+
+ gdicts = []
+ for group, ptype in grouplist:
+ if group.startswith("@"):
+ group = group[1:]
+ if not ptype:
+ ptype = "default"
+ gdicts.append(dict(group=group, type=ptype))
+
+ return self.call_helper("get_groups", gdicts)
+
def get_group(self, group, ptype="default"):
if not self.use_yum:
self.logger.warning("Packages: Package groups are not supported by "
@@ -355,32 +398,106 @@ class YumCollection(Collection):
if group.startswith("@"):
group = group[1:]
- pkgs = self.call_helper("get_group", dict(group=group, type=ptype))
- return pkgs
+ return self.call_helper("get_group", dict(group=group, type=ptype))
+
+ def packages_from_entry(self, entry):
+ rv = set()
+ name = entry.get("name")
+
+ def _tag_to_pkg(tag):
+ rv = (name, tag.get("arch"), tag.get("epoch"),
+ tag.get("version"), tag.get("release"))
+ # if a package requires no specific version, we just use
+ # the name, not the tuple. this limits the amount of JSON
+ # encoding/decoding that has to be done to pass the
+ # package list to bcfg2-yum-helper.
+ if rv[1:] == (None, None, None, None):
+ return name
+ else:
+ return rv
+
+ for inst in entry.getchildren():
+ if inst.tag != "Instance":
+ continue
+ rv.add(_tag_to_pkg(inst))
+ if not rv:
+ rv.add(_tag_to_pkg(entry))
+ return list(rv)
+
+ def packages_to_entry(self, pkglist, entry):
+ def _get_entry_attrs(pkgtup):
+ attrs = dict(version=self.setup.cfp.get("packages",
+ "version",
+ default="auto"))
+ if attrs['version'] == 'any':
+ return attrs
+
+ if pkgtup[1]:
+ attrs['arch'] = pkgtup[1]
+ if pkgtup[2]:
+ attrs['epoch'] = pkgtup[2]
+ if pkgtup[3]:
+ attrs['version'] = pkgtup[3]
+ if pkgtup[4]:
+ attrs['release'] = pkgtup[4]
+ return attrs
+
+ packages = dict()
+ for pkg in pkglist:
+ try:
+ packages[pkg[0]].append(pkg)
+ except KeyError:
+ packages[pkg[0]] = [pkg]
+ for name, instances in packages.items():
+ pkgattrs = dict(type=self.ptype,
+ origin='Packages',
+ name=name)
+ if len(instances) > 1:
+ pkg_el = lxml.etree.SubElement(entry, 'BoundPackage',
+ **pkgattrs)
+ for inst in instances:
+ lxml.etree.SubElement(pkg_el, "Instance",
+ _get_entry_attrs(inst))
+ else:
+ attrs = _get_entry_attrs(instances[0])
+ attrs.update(pkgattrs)
+ lxml.etree.SubElement(entry, 'BoundPackage', **attrs)
+
+ def get_new_packages(self, initial, complete):
+ initial_names = []
+ for pkg in initial:
+ if isinstance(pkg, tuple):
+ initial_names.append(pkg[0])
+ else:
+ initial_names.append(pkg)
+ new = []
+ for pkg in complete:
+ if pkg[0] not in initial_names:
+ new.append(pkg)
+ return new
def complete(self, packagelist):
if not self.use_yum:
return Collection.complete(self, packagelist)
- packages = set()
- unknown = set(packagelist)
-
- if unknown:
+ if packagelist:
result = \
self.call_helper("complete",
- dict(packages=list(unknown),
+ dict(packages=list(packagelist),
groups=list(self.get_relevant_groups())))
- if result and "packages" in result and "unknown" in result:
- # we stringify every package because it gets returned
- # in unicode; set.update() doesn't work if some
- # elements are unicode and other are strings. (I.e.,
- # u'foo' and 'foo' get treated as unique elements.)
- packages.update([str(p) for p in result['packages']])
- unknown = set([str(p) for p in result['unknown']])
-
+ if not result:
+ # some sort of error, reported by call_helper()
+ return set(), packagelist
+ # json doesn't understand sets or tuples, so we get back a
+ # lists of lists (packages) and a list of unicode strings
+ # (unknown). turn those into a set of tuples and a set of
+ # strings, respectively.
+ unknown = set([str(u) for u in result['unknown']])
+ packages = set([tuple(p) for p in result['packages']])
self.filter_unknown(unknown)
-
- return packages, unknown
+ return packages, unknown
+ else:
+ return set(), set()
def call_helper(self, command, input=None):
""" Make a call to bcfg2-yum-helper. The yum libs have
@@ -388,16 +505,12 @@ class YumCollection(Collection):
around that in long-running processes it to have a short-lived
helper. No, seriously -- check out the yum-updatesd code.
It's pure madness. """
- # it'd be nice if we could change this to be more verbose if
- # -v was given to bcfg2-server, but Collection objects don't
- # get the 'setup' variable, so we don't know how verbose
- # bcfg2-server is. It'd also be nice if we could tell yum to
- # log to syslog. So would a unicorn.
cmd = [self.helper, "-c", self.cfgfile]
- if self.debug_flag:
+ verbose = self.debug_flag or self.setup['verbose']
+ if verbose:
cmd.append("-v")
cmd.append(command)
- self.debug_log("Packages: running %s" % " ".join(cmd))
+ self.debug_log("Packages: running %s" % " ".join(cmd), flag=verbose)
try:
helper = Popen(cmd, stdin=PIPE, stdout=PIPE, stderr=PIPE)
except OSError:
@@ -415,9 +528,9 @@ class YumCollection(Collection):
if rv:
self.logger.error("Packages: error running bcfg2-yum-helper "
"(returned %d): %s" % (rv, stderr))
- elif self.debug_flag:
+ else:
self.debug_log("Packages: debug info from bcfg2-yum-helper: %s" %
- stderr)
+ stderr, flag=verbose)
try:
return json.loads(stdout)
except ValueError:
@@ -500,15 +613,14 @@ class YumSource(Source):
def save_state(self):
if not self.use_yum:
- cache = file(self.cachefile, 'wb')
+ cache = open(self.cachefile, 'wb')
cPickle.dump((self.packages, self.deps, self.provides,
self.filemap, self.url_map), cache, 2)
cache.close()
-
def load_state(self):
if not self.use_yum:
- data = file(self.cachefile)
+ data = open(self.cachefile)
(self.packages, self.deps, self.provides,
self.filemap, self.url_map) = cPickle.load(data)
@@ -520,7 +632,7 @@ class YumSource(Source):
usettings = [{'version':self.version, 'component':comp,
'arch':arch}
for comp in self.components]
- else: # rawurl given
+ else: # rawurl given
usettings = [{'version':self.version, 'component':None,
'arch':arch}]
@@ -546,6 +658,11 @@ class YumSource(Source):
except ValueError:
self.logger.error("Packages: Bad url string %s" % rmdurl)
return []
+ except URLError:
+ err = sys.exc_info()[1]
+ self.logger.error("Packages: Failed to fetch url %s. %s" %
+ (rmdurl, err))
+ return []
except HTTPError:
err = sys.exc_info()[1]
self.logger.error("Packages: Failed to fetch url %s. code=%s" %
diff --git a/src/lib/Bcfg2/Server/Plugins/Packages/__init__.py b/src/lib/Bcfg2/Server/Plugins/Packages/__init__.py
index d789a6d39..d3095300a 100644
--- a/src/lib/Bcfg2/Server/Plugins/Packages/__init__.py
+++ b/src/lib/Bcfg2/Server/Plugins/Packages/__init__.py
@@ -11,14 +11,16 @@ from Bcfg2.Bcfg2Py3k import ConfigParser, urlopen
from Bcfg2.Server.Plugins.Packages import Collection
from Bcfg2.Server.Plugins.Packages.PackagesSources import PackagesSources
+yum_config_default = "/etc/yum.repos.d/bcfg2.repo"
+apt_config_default = "/etc/apt/sources.d/bcfg2"
+
class Packages(Bcfg2.Server.Plugin.Plugin,
Bcfg2.Server.Plugin.StructureValidator,
Bcfg2.Server.Plugin.Generator,
Bcfg2.Server.Plugin.Connector,
- Bcfg2.Server.Plugin.GoalValidator):
+ Bcfg2.Server.Plugin.ClientRunHooks):
name = 'Packages'
conflicts = ['Pkgmgr']
- experimental = True
__rmi__ = Bcfg2.Server.Plugin.Plugin.__rmi__ + ['Refresh', 'Reload']
def __init__(self, core, datastore):
@@ -26,11 +28,15 @@ class Packages(Bcfg2.Server.Plugin.Plugin,
Bcfg2.Server.Plugin.StructureValidator.__init__(self)
Bcfg2.Server.Plugin.Generator.__init__(self)
Bcfg2.Server.Plugin.Connector.__init__(self)
- Bcfg2.Server.Plugin.Probing.__init__(self)
+ Bcfg2.Server.Plugin.ClientRunHooks.__init__(self)
self.sentinels = set()
- self.cachepath = os.path.join(self.data, 'cache')
- self.keypath = os.path.join(self.data, 'keys')
+ self.cachepath = \
+ self.core.setup.cfp.get("packages", "cache",
+ default=os.path.join(self.data, 'cache'))
+ self.keypath = \
+ self.core.setup.cfp.get("packages", "keycache",
+ default=os.path.join(self.data, 'keys'))
if not os.path.exists(self.keypath):
# create key directory if needed
os.makedirs(self.keypath)
@@ -40,11 +46,16 @@ class Packages(Bcfg2.Server.Plugin.Plugin,
self.core.setup)
def toggle_debug(self):
- Bcfg2.Server.Plugin.Plugin.toggle_debug(self)
+ rv = Bcfg2.Server.Plugin.Plugin.toggle_debug(self)
self.sources.toggle_debug()
+ return rv
@property
def disableResolver(self):
+ if self.disableMetaData:
+ # disabling metadata without disabling the resolver Breaks
+ # Things
+ return True
try:
return not self.core.setup.cfp.getboolean("packages", "resolver")
except (ConfigParser.NoSectionError, ConfigParser.NoOptionError):
@@ -87,16 +98,18 @@ class Packages(Bcfg2.Server.Plugin.Plugin,
if entry.tag == 'Package':
collection = self._get_collection(metadata)
entry.set('version', self.core.setup.cfp.get("packages",
- "version",
- default="auto"))
+ "version",
+ default="auto"))
entry.set('type', collection.ptype)
elif entry.tag == 'Path':
- if (entry.get("name") == self.core.setup.cfp.get("packages",
- "yum_config",
- default="") or
- entry.get("name") == self.core.setup.cfp.get("packages",
- "apt_config",
- default="")):
+ if (entry.get("name") == \
+ self.core.setup.cfp.get("packages",
+ "yum_config",
+ default=yum_config_default) or
+ entry.get("name") == \
+ self.core.setup.cfp.get("packages",
+ "apt_config",
+ default=apt_config_default)):
self.create_config(entry, metadata)
def HandlesEntry(self, entry, metadata):
@@ -110,12 +123,14 @@ class Packages(Bcfg2.Server.Plugin.Plugin,
return True
elif entry.tag == 'Path':
# managed entries for yum/apt configs
- if (entry.get("name") == self.core.setup.cfp.get("packages",
- "yum_config",
- default="") or
- entry.get("name") == self.core.setup.cfp.get("packages",
- "apt_config",
- default="")):
+ if (entry.get("name") == \
+ self.core.setup.cfp.get("packages",
+ "yum_config",
+ default=yum_config_default) or
+ entry.get("name") == \
+ self.core.setup.cfp.get("packages",
+ "apt_config",
+ default=apt_config_default)):
return True
return False
@@ -151,26 +166,24 @@ class Packages(Bcfg2.Server.Plugin.Plugin,
# essential pkgs are those marked as such by the distribution
essential = collection.get_essential()
to_remove = []
+ groups = []
for struct in structures:
for pkg in struct.xpath('//Package | //BoundPackage'):
if pkg.get("name"):
- initial.add(pkg.get("name"))
+ initial.update(collection.packages_from_entry(pkg))
elif pkg.get("group"):
- try:
- if pkg.get("type"):
- gpkgs = collection.get_group(pkg.get("group"),
- ptype=pkg.get("type"))
- else:
- gpkgs = collection.get_group(pkg.get("group"))
- base.update(gpkgs)
- except TypeError:
- raise
- self.logger.error("Could not resolve group %s" %
- pkg.get("group"))
+ groups.append((pkg.get("group"),
+ pkg.get("type")))
to_remove.append(pkg)
else:
self.logger.error("Packages: Malformed Package: %s" %
- lxml.etree.tostring(pkg))
+ lxml.etree.tostring(pkg,
+ xml_declaration=False).decode('UTF-8'))
+
+ gpkgs = collection.get_groups(groups)
+ for group, pkgs in gpkgs.items():
+ base.update(pkgs)
+
base.update(initial | essential)
for el in to_remove:
el.getparent().remove(el)
@@ -179,16 +192,11 @@ class Packages(Bcfg2.Server.Plugin.Plugin,
if unknown:
self.logger.info("Packages: Got %d unknown entries" % len(unknown))
self.logger.info("Packages: %s" % list(unknown))
- newpkgs = list(packages.difference(initial))
+ newpkgs = collection.get_new_packages(initial, packages)
self.debug_log("Packages: %d initial, %d complete, %d new" %
(len(initial), len(packages), len(newpkgs)))
newpkgs.sort()
- for pkg in newpkgs:
- lxml.etree.SubElement(independent, 'BoundPackage', name=pkg,
- version=self.core.setup.cfp.get("packages",
- "version",
- default="auto"),
- type=collection.ptype, origin='Packages')
+ collection.packages_to_entry(newpkgs, independent)
def Refresh(self):
'''Packages.Refresh() => True|False\nReload configuration
@@ -271,10 +279,11 @@ class Packages(Bcfg2.Server.Plugin.Plugin,
collection = self._get_collection(metadata)
return dict(sources=collection.get_additional_data())
- def validate_goals(self, metadata, _):
- """ we abuse the GoalValidator plugin since validate_goals()
- is the very last thing called during a client config run. so
- we use this to clear the collection cache for this client,
- which must persist only the duration of a client run """
+ def end_client_run(self, metadata):
+ """ clear the collection cache for this client, which must
+ persist only the duration of a client run"""
if metadata.hostname in Collection.clients:
del Collection.clients[metadata.hostname]
+
+ def end_statistics(self, metadata):
+ self.end_client_run(metadata)
diff --git a/src/lib/Bcfg2/Server/Plugins/Pkgmgr.py b/src/lib/Bcfg2/Server/Plugins/Pkgmgr.py
index e9254cdcc..7dac907e1 100644
--- a/src/lib/Bcfg2/Server/Plugins/Pkgmgr.py
+++ b/src/lib/Bcfg2/Server/Plugins/Pkgmgr.py
@@ -1,12 +1,17 @@
'''This module implements a package management scheme for all images'''
-import logging
+import os
import re
+import glob
+import logging
+import lxml.etree
import Bcfg2.Server.Plugin
-import lxml
+import Bcfg2.Server.Lint
+
try:
set
except NameError:
+ # deprecated since python 2.6
from sets import Set as set
logger = logging.getLogger('Bcfg2.Plugins.Pkgmgr')
@@ -24,12 +29,14 @@ class FuzzyDict(dict):
print("got non-string key %s" % str(key))
return dict.__getitem__(self, key)
- def has_key(self, key):
+ def __contains__(self, key):
if isinstance(key, str):
mdata = self.fuzzy.match(key)
- if self.fuzzy.match(key):
- return dict.has_key(self, mdata.groupdict()['name'])
- return dict.has_key(self, key)
+ if mdata:
+ return dict.__contains__(self, mdata.groupdict()['name'])
+ else:
+ print("got non-string key %s" % str(key))
+ return dict.__contains__(self, key)
def get(self, key, default=None):
try:
@@ -167,3 +174,40 @@ class Pkgmgr(Bcfg2.Server.Plugin.PrioDir):
def HandleEntry(self, entry, metadata):
self.BindEntry(entry, metadata)
+
+
+class PkgmgrLint(Bcfg2.Server.Lint.ServerlessPlugin):
+ """ find duplicate Pkgmgr entries with the same priority """
+ def Run(self):
+ pset = set()
+ for pfile in glob.glob(os.path.join(self.config['repo'], 'Pkgmgr',
+ '*.xml')):
+ if self.HandlesFile(pfile):
+ xdata = lxml.etree.parse(pfile).getroot()
+ # get priority, type, group
+ priority = xdata.get('priority')
+ ptype = xdata.get('type')
+ for pkg in xdata.xpath("//Package"):
+                    if pkg.getparent().tag == 'Group':
+                        grp = pkg.getparent().get('name')
+                        gparent = pkg.getparent().getparent()
+                        if gparent is not None and gparent.tag == 'Group':
+                            pgrp = gparent.get('name')
+                        else:
+                            pgrp = 'none'
+ else:
+ grp = 'none'
+ pgrp = 'none'
+ ptuple = (pkg.get('name'), priority, ptype, grp, pgrp)
+ # check if package is already listed with same
+ # priority, type, grp
+ if ptuple in pset:
+ self.LintError("duplicate-package",
+ "Duplicate Package %s, priority:%s, type:%s" %
+ (pkg.get('name'), priority, ptype))
+ else:
+ pset.add(ptuple)
+
+ @classmethod
+ def Errors(cls):
+ return {"duplicate-packages":"error"}
diff --git a/src/lib/Bcfg2/Server/Plugins/Probes.py b/src/lib/Bcfg2/Server/Plugins/Probes.py
index af908eee8..7f300ebe0 100644
--- a/src/lib/Bcfg2/Server/Plugins/Probes.py
+++ b/src/lib/Bcfg2/Server/Plugins/Probes.py
@@ -1,7 +1,17 @@
+import re
+import os
+import sys
import time
-import lxml.etree
import operator
-import re
+import lxml.etree
+import Bcfg2.Server
+import Bcfg2.Server.Plugin
+
+try:
+ from django.db import models
+ has_django = True
+except ImportError:
+ has_django = False
try:
import json
@@ -14,23 +24,36 @@ except ImportError:
has_json = False
try:
- import syck
- has_syck = True
+ import syck as yaml
+ has_yaml = True
+    yaml_error = yaml.error
except ImportError:
- has_syck = False
try:
import yaml
+ yaml_error = yaml.YAMLError
has_yaml = True
except ImportError:
has_yaml = False
import Bcfg2.Server.Plugin
-specific_probe_matcher = re.compile("(.*/)?(?P<basename>\S+)(.(?P<mode>[GH](\d\d)?)_\S+)")
-probe_matcher = re.compile("(.*/)?(?P<basename>\S+)")
+if has_django:
+ class ProbesDataModel(models.Model,
+ Bcfg2.Server.Plugin.PluginDatabaseModel):
+ hostname = models.CharField(max_length=255)
+ probe = models.CharField(max_length=255)
+ timestamp = models.DateTimeField(auto_now=True)
+ data = models.TextField(null=True)
+
+ class ProbesGroupsModel(models.Model,
+ Bcfg2.Server.Plugin.PluginDatabaseModel):
+ hostname = models.CharField(max_length=255)
+ group = models.CharField(max_length=255)
+
class ClientProbeDataSet(dict):
- """ dict of probe => [probe data] that records a for each host """
+ """ dict of probe => [probe data] that records a timestamp for
+ each host """
def __init__(self, *args, **kwargs):
if "timestamp" in kwargs and kwargs['timestamp'] is not None:
self.timestamp = kwargs.pop("timestamp")
@@ -39,61 +62,31 @@ class ClientProbeDataSet(dict):
dict.__init__(self, *args, **kwargs)
-class ProbeData(object):
- """ a ProbeData object emulates a str object, but also has .xdata
- and .json properties to provide convenient ways to use ProbeData
- objects as XML or JSON data """
+class ProbeData(str):
+ """ a ProbeData object emulates a str object, but also has .xdata,
+ .json, and .yaml properties to provide convenient ways to use
+ ProbeData objects as XML, JSON, or YAML data """
+ def __new__(cls, data):
+ return str.__new__(cls, data)
+
def __init__(self, data):
- self.data = data
+ str.__init__(self)
self._xdata = None
self._json = None
self._yaml = None
- def __str__(self):
- return str(self.data)
-
- def __repr__(self):
- return repr(self.data)
-
- def __getattr__(self, name):
- """ make ProbeData act like a str object """
- return getattr(self.data, name)
-
- def __complex__(self):
- return complex(self.data)
-
- def __int__(self):
- return int(self.data)
-
- def __long__(self):
- return long(self.data)
-
- def __float__(self):
- return float(self.data)
-
- def __eq__(self, other):
- return str(self) == str(other)
-
- def __ne__(self, other):
- return str(self) != str(other)
-
- def __gt__(self, other):
- return str(self) > str(other)
-
- def __lt__(self, other):
- return str(self) < str(other)
-
- def __ge__(self, other):
- return self > other or self == other
-
- def __le__(self, other):
- return self < other or self == other
-
+ @property
+ def data(self):
+ """ provide backwards compatibility with broken ProbeData
+ object in bcfg2 1.2.0 thru 1.2.2 """
+ return str(self)
+
@property
def xdata(self):
if self._xdata is None:
try:
- self._xdata = lxml.etree.XML(self.data)
+ self._xdata = lxml.etree.XML(self.data,
+ parser=Bcfg2.Server.XMLParser)
except lxml.etree.XMLSyntaxError:
pass
return self._xdata
@@ -109,44 +102,30 @@ class ProbeData(object):
@property
def yaml(self):
- if self._yaml is None:
- if has_yaml:
- try:
- self._yaml = yaml.load(self.data)
- except yaml.YAMLError:
- pass
- elif has_syck:
- try:
- self._yaml = syck.load(self.data)
- except syck.error:
- pass
+ if self._yaml is None and has_yaml:
+ try:
+ self._yaml = yaml.load(self.data)
+ except yaml_error:
+ pass
return self._yaml
class ProbeSet(Bcfg2.Server.Plugin.EntrySet):
ignore = re.compile("^(\.#.*|.*~|\\..*\\.(tmp|sw[px])|probed\\.xml)$")
+ probename = re.compile("(.*/)?(?P<basename>\S+?)(\.(?P<mode>(?:G\d\d)|H)_\S+)?$")
+ bangline = re.compile('^#!\s*(?P<interpreter>.*)$')
+ basename_is_regex = True
def __init__(self, path, fam, encoding, plugin_name):
- fpattern = '[0-9A-Za-z_\-]+'
self.plugin_name = plugin_name
- Bcfg2.Server.Plugin.EntrySet.__init__(self, fpattern, path,
+ Bcfg2.Server.Plugin.EntrySet.__init__(self, '[0-9A-Za-z_\-]+', path,
Bcfg2.Server.Plugin.SpecificData,
encoding)
fam.AddMonitor(path, self)
- self.bangline = re.compile('^#!(?P<interpreter>.*)$')
def HandleEvent(self, event):
- if event.filename != self.path:
- if (event.code2str == 'changed' and
- event.filename.endswith("probed.xml") and
- event.filename not in self.entries):
- # for some reason, probed.xml is particularly prone to
- # getting changed events before created events,
- # because gamin is the worst ever. anyhow, we
- # specifically handle it here to avoid a warning on
- # every single server startup.
- self.entry_init(event)
- return
+ if (event.filename != self.path and
+ not event.filename.endswith("probed.xml")):
return self.handle_event(event)
def get_probe_data(self, metadata):
@@ -155,9 +134,7 @@ class ProbeSet(Bcfg2.Server.Plugin.EntrySet):
candidates = self.get_matching(metadata)
candidates.sort(key=operator.attrgetter('specific'))
for entry in candidates:
- rem = specific_probe_matcher.match(entry.name)
- if not rem:
- rem = probe_matcher.match(entry.name)
+ rem = self.probename.match(entry.name)
pname = rem.group('basename')
if pname not in build:
build[pname] = entry
@@ -176,30 +153,37 @@ class ProbeSet(Bcfg2.Server.Plugin.EntrySet):
return ret
-class Probes(Bcfg2.Server.Plugin.Plugin,
- Bcfg2.Server.Plugin.Probing,
- Bcfg2.Server.Plugin.Connector):
+class Probes(Bcfg2.Server.Plugin.Probing,
+ Bcfg2.Server.Plugin.Connector,
+ Bcfg2.Server.Plugin.DatabaseBacked):
"""A plugin to gather information from a client machine."""
name = 'Probes'
__author__ = 'bcfg-dev@mcs.anl.gov'
def __init__(self, core, datastore):
- Bcfg2.Server.Plugin.Plugin.__init__(self, core, datastore)
Bcfg2.Server.Plugin.Connector.__init__(self)
Bcfg2.Server.Plugin.Probing.__init__(self)
+ Bcfg2.Server.Plugin.DatabaseBacked.__init__(self, core, datastore)
try:
self.probes = ProbeSet(self.data, core.fam, core.encoding,
self.name)
except:
- raise Bcfg2.Server.Plugin.PluginInitError
+ err = sys.exc_info()[1]
+ raise Bcfg2.Server.Plugin.PluginInitError(err)
self.probedata = dict()
self.cgroups = dict()
self.load_data()
- def write_data(self):
+ def write_data(self, client):
"""Write probe data out for use with bcfg2-info."""
+ if self._use_db:
+ return self._write_data_db(client)
+ else:
+ return self._write_data_xml(client)
+
+ def _write_data_xml(self, _):
top = lxml.etree.Element("Probed")
for client, probed in sorted(self.probedata.items()):
cx = lxml.etree.SubElement(top, 'Client', name=client,
@@ -209,20 +193,47 @@ class Probes(Bcfg2.Server.Plugin.Plugin,
value=str(self.probedata[client][probe]))
for group in sorted(self.cgroups[client]):
lxml.etree.SubElement(cx, "Group", name=group)
- data = lxml.etree.tostring(top, encoding='UTF-8',
- xml_declaration=True,
- pretty_print='true')
try:
- datafile = open("%s/%s" % (self.data, 'probed.xml'), 'w')
+ datafile = open(os.path.join(self.data, 'probed.xml'), 'w')
+ datafile.write(lxml.etree.tostring(top, xml_declaration=False,
+ pretty_print='true').decode('UTF-8'))
except IOError:
- self.logger.error("Failed to write probed.xml")
- datafile.write(data.decode('utf-8'))
+ err = sys.exc_info()[1]
+ self.logger.error("Failed to write probed.xml: %s" % err)
+
+ def _write_data_db(self, client):
+ for probe, data in self.probedata[client.hostname].items():
+ pdata = \
+ ProbesDataModel.objects.get_or_create(hostname=client.hostname,
+ probe=probe)[0]
+ if pdata.data != data:
+ pdata.data = data
+ pdata.save()
+ ProbesDataModel.objects.filter(hostname=client.hostname).exclude(probe__in=self.probedata[client.hostname]).delete()
+
+ for group in self.cgroups[client.hostname]:
+ try:
+ ProbesGroupsModel.objects.get(hostname=client.hostname,
+ group=group)
+ except ProbesGroupsModel.DoesNotExist:
+ grp = ProbesGroupsModel(hostname=client.hostname,
+ group=group)
+ grp.save()
+ ProbesGroupsModel.objects.filter(hostname=client.hostname).exclude(group__in=self.cgroups[client.hostname]).delete()
def load_data(self):
+ if self._use_db:
+ return self._load_data_db()
+ else:
+ return self._load_data_xml()
+
+ def _load_data_xml(self):
try:
- data = lxml.etree.parse(self.data + '/probed.xml').getroot()
+ data = lxml.etree.parse(os.path.join(self.data, 'probed.xml'),
+ parser=Bcfg2.Server.XMLParser).getroot()
except:
- self.logger.error("Failed to read file probed.xml")
+ err = sys.exc_info()[1]
+ self.logger.error("Failed to read file probed.xml: %s" % err)
return
self.probedata = {}
self.cgroups = {}
@@ -231,12 +242,25 @@ class Probes(Bcfg2.Server.Plugin.Plugin,
ClientProbeDataSet(timestamp=client.get("timestamp"))
self.cgroups[client.get('name')] = []
for pdata in client:
- if (pdata.tag == 'Probe'):
+ if pdata.tag == 'Probe':
self.probedata[client.get('name')][pdata.get('name')] = \
- ProbeData(pdata.get('value'))
- elif (pdata.tag == 'Group'):
+ ProbeData(pdata.get("value"))
+ elif pdata.tag == 'Group':
self.cgroups[client.get('name')].append(pdata.get('name'))
+ def _load_data_db(self):
+ self.probedata = {}
+ self.cgroups = {}
+ for pdata in ProbesDataModel.objects.all():
+ if pdata.hostname not in self.probedata:
+ self.probedata[pdata.hostname] = \
+ ClientProbeDataSet(timestamp=time.mktime(pdata.timestamp.timetuple()))
+ self.probedata[pdata.hostname][pdata.probe] = ProbeData(pdata.data)
+ for pgroup in ProbesGroupsModel.objects.all():
+ if pgroup.hostname not in self.cgroups:
+ self.cgroups[pgroup.hostname] = []
+ self.cgroups[pgroup.hostname].append(pgroup.group)
+
def GetProbes(self, meta, force=False):
"""Return a set of probes for execution on client."""
return self.probes.get_probe_data(meta)
@@ -246,25 +270,24 @@ class Probes(Bcfg2.Server.Plugin.Plugin,
self.probedata[client.hostname] = ClientProbeDataSet()
for data in datalist:
self.ReceiveDataItem(client, data)
- self.write_data()
+ self.write_data(client)
def ReceiveDataItem(self, client, data):
"""Receive probe results pertaining to client."""
if client.hostname not in self.cgroups:
self.cgroups[client.hostname] = []
+ if client.hostname not in self.probedata:
+ self.probedata[client.hostname] = ClientProbeDataSet()
if data.text == None:
- self.logger.error("Got null response to probe %s from %s" % \
- (data.get('name'), client.hostname))
- try:
- self.probedata[client.hostname].update({data.get('name'):
+ self.logger.info("Got null response to probe %s from %s" %
+ (data.get('name'), client.hostname))
+ self.probedata[client.hostname].update({data.get('name'):
ProbeData('')})
- except KeyError:
- self.probedata[client.hostname] = \
- ClientProbeDataSet([(data.get('name'), ProbeData(''))])
return
dlines = data.text.split('\n')
- self.logger.debug("%s:probe:%s:%s" % (client.hostname,
- data.get('name'), [line.strip() for line in dlines]))
+ self.logger.debug("%s:probe:%s:%s" %
+ (client.hostname, data.get('name'),
+ [line.strip() for line in dlines]))
for line in dlines[:]:
if line.split(':')[0] == 'group':
newgroup = line.split(':')[1].strip()
@@ -272,11 +295,7 @@ class Probes(Bcfg2.Server.Plugin.Plugin,
self.cgroups[client.hostname].append(newgroup)
dlines.remove(line)
dobj = ProbeData("\n".join(dlines))
- try:
- self.probedata[client.hostname].update({data.get('name'): dobj})
- except KeyError:
- self.probedata[client.hostname] = \
- ClientProbeDataSet([(data.get('name'), dobj)])
+ self.probedata[client.hostname].update({data.get('name'): dobj})
def get_additional_groups(self, meta):
return self.cgroups.get(meta.hostname, list())
diff --git a/src/lib/Bcfg2/Server/Plugins/Properties.py b/src/lib/Bcfg2/Server/Plugins/Properties.py
index 680881858..78019933a 100644
--- a/src/lib/Bcfg2/Server/Plugins/Properties.py
+++ b/src/lib/Bcfg2/Server/Plugins/Properties.py
@@ -5,26 +5,53 @@ import copy
import logging
import lxml.etree
import Bcfg2.Server.Plugin
+try:
+ from Bcfg2.Encryption import ssl_decrypt, EVPError
+ have_crypto = True
+except ImportError:
+ have_crypto = False
+
+logger = logging.getLogger(__name__)
+
+SETUP = None
+
+def passphrases():
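+    # Collect passphrases from the [encryption] section of bcfg2.conf;
+    # these are used to decrypt encrypted Properties elements.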
+ section = "encryption"
+ if SETUP.cfp.has_section(section):
+ return dict([(o, SETUP.cfp.get(section, o))
+ for o in SETUP.cfp.options(section)])
+ else:
+ return dict()
-logger = logging.getLogger('Bcfg2.Plugins.Properties')
class PropertyFile(Bcfg2.Server.Plugin.StructFile):
"""Class for properties files."""
def write(self):
""" Write the data in this data structure back to the property
file """
- if self.validate_data():
- try:
- open(self.name,
- "wb").write(lxml.etree.tostring(self.xdata,
- pretty_print=True))
- return True
- except IOError:
- err = sys.exc_info()[1]
- logger.error("Failed to write %s: %s" % (self.name, err))
- return False
- else:
- return False
+ if not SETUP.cfp.getboolean("properties", "writes_enabled",
+ default=True):
+ msg = "Properties files write-back is disabled in the configuration"
+ logger.error(msg)
+ raise Bcfg2.Server.Plugin.PluginExecutionError(msg)
+ try:
+ self.validate_data()
+ except Bcfg2.Server.Plugin.PluginExecutionError:
+ msg = "Cannot write %s: %s" % (self.name, sys.exc_info()[1])
+ logger.error(msg)
+ raise Bcfg2.Server.Plugin.PluginExecutionError(msg)
+
+ try:
+ open(self.name,
+ "wb").write(lxml.etree.tostring(self.xdata,
+ xml_declaration=False,
+ pretty_print=True).decode('UTF-8'))
+ return True
+ except IOError:
+ err = sys.exc_info()[1]
+ msg = "Failed to write %s: %s" % (self.name, err)
+ logger.error(msg)
+ raise Bcfg2.Server.Plugin.PluginExecutionError(msg)
def validate_data(self):
""" ensure that the data in this object validates against the
@@ -34,19 +61,51 @@ class PropertyFile(Bcfg2.Server.Plugin.StructFile):
try:
schema = lxml.etree.XMLSchema(file=schemafile)
except:
- logger.error("Failed to process schema for %s" % self.name)
- return False
+ err = sys.exc_info()[1]
+            raise Bcfg2.Server.Plugin.PluginExecutionError(
+                "Failed to process schema for %s: %s" % (self.name, err))
else:
# no schema exists
return True
if not schema.validate(self.xdata):
- logger.error("Data for %s fails to validate; run bcfg2-lint for "
- "more details" % self.name)
- return False
+            raise Bcfg2.Server.Plugin.PluginExecutionError(
+                "Data for %s fails to validate; run bcfg2-lint for more "
+                "details" % self.name)
else:
return True
+ def Index(self):
+ Bcfg2.Server.Plugin.StructFile.Index(self)
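+        # If the file sets a non-"false" encryption attribute, decrypt every
+        # element that carries an "encrypted" attribute.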
+ if self.xdata.get("encryption", "false").lower() != "false":
+ if not have_crypto:
+ msg = "Properties: M2Crypto is not available: %s" % self.name
+ logger.error(msg)
+ raise Bcfg2.Server.Plugin.PluginExecutionError(msg)
+ for el in self.xdata.xpath("*[@encrypted]"):
+ try:
+ el.text = self._decrypt(el)
+ except EVPError:
+ msg = "Failed to decrypt %s element in %s" % (el.tag,
+ self.name)
+ logger.error(msg)
+                    raise Bcfg2.Server.Plugin.PluginExecutionError(msg)
+
+ def _decrypt(self, element):
+ if not element.text.strip():
+ return
+ passes = passphrases()
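+        # Try the passphrase named by the "encrypted" attribute first; if it
+        # is unknown or fails, fall back to trying every configured
+        # passphrase before giving up.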
+ try:
+ passphrase = passes[element.get("encrypted")]
+ try:
+ return ssl_decrypt(element.text, passphrase)
+ except EVPError:
+ # error is raised below
+ pass
+ except KeyError:
+ for passwd in passes.values():
+ try:
+ return ssl_decrypt(element.text, passwd)
+ except EVPError:
+ pass
+ raise EVPError("Failed to decrypt")
class PropDirectoryBacked(Bcfg2.Server.Plugin.DirectoryBacked):
__child__ = PropertyFile
@@ -62,6 +121,7 @@ class Properties(Bcfg2.Server.Plugin.Plugin,
name = 'Properties'
def __init__(self, core, datastore):
+ global SETUP
Bcfg2.Server.Plugin.Plugin.__init__(self, core, datastore)
Bcfg2.Server.Plugin.Connector.__init__(self)
try:
@@ -72,5 +132,16 @@ class Properties(Bcfg2.Server.Plugin.Plugin,
(e.strerror, e.filename))
raise Bcfg2.Server.Plugin.PluginInitError
- def get_additional_data(self, _):
- return copy.copy(self.store.entries)
+ SETUP = core.setup
+
+ def get_additional_data(self, metadata):
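+        # With automatch enabled (globally in [properties] or per-file via an
+        # automatch="true" attribute), hand back only the XML matching this
+        # client's metadata; otherwise hand back a copy of the whole file.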
+ autowatch = self.core.setup.cfp.getboolean("properties", "automatch",
+ default=False)
+ rv = dict()
+ for fname, pfile in self.store.entries.items():
+ if (autowatch or
+ pfile.xdata.get("automatch", "false").lower() == "true"):
+ rv[fname] = pfile.XMLMatch(metadata)
+ else:
+ rv[fname] = copy.copy(pfile)
+ return rv
diff --git a/src/lib/Bcfg2/Server/Plugins/PuppetENC.py b/src/lib/Bcfg2/Server/Plugins/PuppetENC.py
new file mode 100644
index 000000000..46182e9a2
--- /dev/null
+++ b/src/lib/Bcfg2/Server/Plugins/PuppetENC.py
@@ -0,0 +1,117 @@
+import os
+import sys
+import Bcfg2.Server
+import Bcfg2.Server.Plugin
+from subprocess import Popen, PIPE
+
+try:
+ from syck import load as yaml_load, error as yaml_error
+except ImportError:
+ try:
+ from yaml import load as yaml_load, YAMLError as yaml_error
+ except ImportError:
+ raise ImportError("No yaml library could be found")
+
+class PuppetENCFile(Bcfg2.Server.Plugin.FileBacked):
+ def HandleEvent(self, event=None):
+ return
+
+
+class PuppetENC(Bcfg2.Server.Plugin.Plugin,
+ Bcfg2.Server.Plugin.Connector,
+ Bcfg2.Server.Plugin.ClientRunHooks,
+ Bcfg2.Server.Plugin.DirectoryBacked):
+ """ A plugin to run Puppet external node classifiers
+ (http://docs.puppetlabs.com/guides/external_nodes.html) """
+ name = 'PuppetENC'
+ experimental = True
+ __child__ = PuppetENCFile
+
+ def __init__(self, core, datastore):
+ Bcfg2.Server.Plugin.Plugin.__init__(self, core, datastore)
+ Bcfg2.Server.Plugin.Connector.__init__(self)
+ Bcfg2.Server.Plugin.ClientRunHooks.__init__(self)
+ Bcfg2.Server.Plugin.DirectoryBacked.__init__(self, self.data,
+ self.core.fam)
+ self.cache = dict()
+
+ def _run_encs(self, metadata):
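+        # Run every ENC in this plugin's directory with the client hostname
+        # as its argument and merge the YAML each one emits into a per-client
+        # cache of groups and connector data.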
+ cache = dict(groups=[], params=dict())
+ for enc in self.entries.keys():
+ epath = os.path.join(self.data, enc)
+ self.debug_log("PuppetENC: Running ENC %s for %s" %
+ (enc, metadata.hostname))
+ proc = Popen([epath, metadata.hostname], stdin=PIPE, stdout=PIPE,
+ stderr=PIPE)
+ (out, err) = proc.communicate()
+ rv = proc.wait()
+ if rv != 0:
+                msg = "PuppetENC: Error running ENC %s for %s (exit status %s)" % \
+                    (enc, metadata.hostname, rv)
+ self.logger.error("%s: %s" % (msg, err))
+ raise Bcfg2.Server.Plugin.PluginExecutionError(msg)
+ if err:
+ self.debug_log("ENC Error: %s" % err)
+
+ try:
+ yaml = yaml_load(out)
+ self.debug_log("Loaded data from %s for %s: %s" %
+ (enc, metadata.hostname, yaml))
+ except yaml_error:
+ err = sys.exc_info()[1]
+ msg = "Error decoding YAML from %s for %s: %s" % \
+ (enc, metadata.hostname, err)
+ self.logger.error(msg)
+ raise Bcfg2.Server.Plugin.PluginExecutionError(msg)
+
+ groups = []
+ if "classes" in yaml:
+ # stock Puppet ENC output format
+ groups = yaml['classes']
+ elif "groups" in yaml:
+ # more Bcfg2-ish output format
+ groups = yaml['groups']
+ if groups:
+ if isinstance(groups, list):
+ self.debug_log("ENC %s adding groups to %s: %s" %
+ (enc, metadata.hostname, groups))
+ cache['groups'].extend(groups)
+ else:
+ self.debug_log("ENC %s adding groups to %s: %s" %
+ (enc, metadata.hostname, groups.keys()))
+ for group, params in groups.items():
+ cache['groups'].append(group)
+ if params:
+ cache['params'].update(params)
+ if "parameters" in yaml and yaml['parameters']:
+ cache['params'].update(yaml['parameters'])
+ if "environment" in yaml:
+ self.logger.info("Ignoring unsupported environment section of "
+ "ENC %s for %s" % (enc, metadata.hostname))
+
+ self.cache[metadata.hostname] = cache
+
+ def get_additional_groups(self, metadata):
+ if metadata.hostname not in self.cache:
+ self._run_encs(metadata)
+ return self.cache[metadata.hostname]['groups']
+
+ def get_additional_data(self, metadata):
+ if metadata.hostname not in self.cache:
+ self._run_encs(metadata)
+ return self.cache[metadata.hostname]['params']
+
+ def end_client_run(self, metadata):
+        """ Clear the entire cache at the end of each client run. This
+        guarantees that each client will run all ENCs at or near the
+        start of each run. We have to clear the whole cache, not just
+        this client's entry, because a client that builds templates
+        using other clients' metadata will populate the cache for
+        those clients, which we don't want. This makes the caching
+        less than stellar, but it does prevent multiple ENC runs for
+        a single host a) for groups and data separately; and b) when
+        a single client's metadata is generated multiple times by
+        separate templates. """
+ self.cache = dict()
+
+ def end_statistics(self, metadata):
+        self.end_client_run(metadata)
diff --git a/src/lib/Bcfg2/Server/Plugins/SEModules.py b/src/lib/Bcfg2/Server/Plugins/SEModules.py
new file mode 100644
index 000000000..62b3fb10a
--- /dev/null
+++ b/src/lib/Bcfg2/Server/Plugins/SEModules.py
@@ -0,0 +1,45 @@
+import os
+import logging
+import Bcfg2.Server.Plugin
+from Bcfg2.Bcfg2Py3k import b64encode
+
+logger = logging.getLogger(__name__)
+
+class SEModuleData(Bcfg2.Server.Plugin.SpecificData):
+ def bind_entry(self, entry, _):
+ entry.set('encoding', 'base64')
+ entry.text = b64encode(self.data)
+
+
+class SEModules(Bcfg2.Server.Plugin.GroupSpool):
+ """ Handle SELinux 'module' entries """
+ name = 'SEModules'
+ __author__ = 'chris.a.st.pierre@gmail.com'
+ es_child_cls = SEModuleData
+ entry_type = 'SELinux'
+ experimental = True
+
+ def _get_module_name(self, entry):
+ """ GroupSpool stores entries as /foo.pp, but we want people
+ to be able to specify module entries as name='foo' or
+ name='foo.pp', so we put this abstraction in between """
+ if entry.get("name").endswith(".pp"):
+ name = entry.get("name")
+ else:
+ name = entry.get("name") + ".pp"
+ return "/" + name
+
+ def HandlesEntry(self, entry, metadata):
+ if entry.tag in self.Entries and entry.get('type') == 'module':
+ return self._get_module_name(entry) in self.Entries[entry.tag]
+ return Bcfg2.Server.Plugin.GroupSpool.HandlesEntry(self, entry,
+ metadata)
+
+ def HandleEntry(self, entry, metadata):
+ entry.set("name", self._get_module_name(entry))
+ return self.Entries[entry.tag][entry.get("name")](entry, metadata)
+
+ def add_entry(self, event):
+ self.filename_pattern = \
+ os.path.basename(os.path.dirname(self.event_path(event)))
+ Bcfg2.Server.Plugin.GroupSpool.add_entry(self, event)
diff --git a/src/lib/Bcfg2/Server/Plugins/SGenshi.py b/src/lib/Bcfg2/Server/Plugins/SGenshi.py
deleted file mode 100644
index 0ba08125e..000000000
--- a/src/lib/Bcfg2/Server/Plugins/SGenshi.py
+++ /dev/null
@@ -1,97 +0,0 @@
-'''This module implements a templating generator based on Genshi'''
-
-import genshi.input
-import genshi.template
-import lxml.etree
-import logging
-import copy
-import sys
-import os.path
-
-import Bcfg2.Server.Plugin
-import Bcfg2.Server.Plugins.TGenshi
-
-logger = logging.getLogger('Bcfg2.Plugins.SGenshi')
-
-
-class SGenshiTemplateFile(Bcfg2.Server.Plugins.TGenshi.TemplateFile,
- Bcfg2.Server.Plugin.StructFile):
- def __init__(self, name, specific, encoding):
- Bcfg2.Server.Plugins.TGenshi.TemplateFile.__init__(self, name,
- specific, encoding)
- Bcfg2.Server.Plugin.StructFile.__init__(self, name)
-
- def get_xml_value(self, metadata):
- if not hasattr(self, 'template'):
- logger.error("No parsed template information for %s" % (self.name))
- raise Bcfg2.Server.Plugin.PluginExecutionError
- try:
- stream = self.template.generate(metadata=metadata).filter( \
- Bcfg2.Server.Plugins.TGenshi.removecomment)
- data = lxml.etree.XML(stream.render('xml', strip_whitespace=False))
- bundlename = os.path.splitext(os.path.basename(self.name))[0]
- bundle = lxml.etree.Element('Bundle', name=bundlename)
- for item in self.Match(metadata, data):
- bundle.append(copy.deepcopy(item))
- return bundle
- except LookupError:
- lerror = sys.exc_info()[1]
- logger.error('Genshi lookup error: %s' % lerror)
- except genshi.template.TemplateError:
- terror = sys.exc_info()[1]
- logger.error('Genshi template error: %s' % terror)
- raise
- except genshi.input.ParseError:
- perror = sys.exc_info()[1]
- logger.error('Genshi parse error: %s' % perror)
- raise
-
- def Match(self, metadata, xdata):
- """Return matching fragments of parsed template."""
- rv = []
- for child in xdata.getchildren():
- rv.extend(self._match(child, metadata))
- logger.debug("File %s got %d match(es)" % (self.name, len(rv)))
- return rv
-
-class SGenshiEntrySet(Bcfg2.Server.Plugin.EntrySet):
-
- def __init__(self, path, fam, encoding):
- fpattern = '\S+\.xml'
- Bcfg2.Server.Plugin.EntrySet.__init__(self, fpattern, path,
- SGenshiTemplateFile, encoding)
- fam.AddMonitor(path, self)
-
- def HandleEvent(self, event):
- '''passthrough event handler for old calling convention'''
- if event.filename != self.path:
- return self.handle_event(event)
-
- def BuildStructures(self, metadata):
- """Build SGenshi structures."""
- ret = []
- for entry in self.get_matching(metadata):
- try:
- ret.append(entry.get_xml_value(metadata))
- except:
- logger.error("SGenshi: Failed to template file %s" % entry.name)
- return ret
-
-
-class SGenshi(SGenshiEntrySet,
- Bcfg2.Server.Plugin.Plugin,
- Bcfg2.Server.Plugin.Structure):
- """The SGenshi plugin provides templated structures."""
- name = 'SGenshi'
- __author__ = 'bcfg-dev@mcs.anl.gov'
- deprecated = True
-
- def __init__(self, core, datastore):
- Bcfg2.Server.Plugin.Plugin.__init__(self, core, datastore)
- Bcfg2.Server.Plugin.Structure.__init__(self)
- try:
- SGenshiEntrySet.__init__(self, self.data, self.core.fam, core.encoding)
- except:
- logger.error("Failed to load %s repository; disabling %s" \
- % (self.name, self.name))
- raise Bcfg2.Server.Plugin.PluginInitError
diff --git a/src/lib/Bcfg2/Server/Plugins/SSHbase.py b/src/lib/Bcfg2/Server/Plugins/SSHbase.py
index a1a29727f..cbe8d0d9b 100644
--- a/src/lib/Bcfg2/Server/Plugins/SSHbase.py
+++ b/src/lib/Bcfg2/Server/Plugins/SSHbase.py
@@ -1,20 +1,16 @@
"""This module manages ssh key files for bcfg2"""
-import binascii
import re
import os
+import sys
import socket
import shutil
-import sys
+import logging
import tempfile
from subprocess import Popen, PIPE
import Bcfg2.Server.Plugin
-from Bcfg2.Bcfg2Py3k import u_str
+from Bcfg2.Bcfg2Py3k import u_str, reduce, b64encode
-if sys.hexversion >= 0x03000000:
- from functools import reduce
-
-import logging
logger = logging.getLogger(__name__)
class KeyData(Bcfg2.Server.Plugin.SpecificData):
@@ -31,7 +27,7 @@ class KeyData(Bcfg2.Server.Plugin.SpecificData):
def bind_entry(self, entry, metadata):
entry.set('type', 'file')
if entry.get('encoding') == 'base64':
- entry.text = binascii.b2a_base64(self.data)
+ entry.text = b64encode(self.data)
else:
try:
entry.text = u_str(self.data, self.encoding)
diff --git a/src/lib/Bcfg2/Server/Plugins/SSLCA.py b/src/lib/Bcfg2/Server/Plugins/SSLCA.py
index 0072dc62d..9d1c51a08 100644
--- a/src/lib/Bcfg2/Server/Plugins/SSLCA.py
+++ b/src/lib/Bcfg2/Server/Plugins/SSLCA.py
@@ -3,12 +3,15 @@ import Bcfg2.Options
import lxml.etree
import posixpath
import tempfile
-import pipes
import os
from subprocess import Popen, PIPE, STDOUT
# Compatibility import
from Bcfg2.Bcfg2Py3k import ConfigParser
+try:
+ from hashlib import md5
+except ImportError:
+ from md5 import md5
class SSLCA(Bcfg2.Server.Plugin.GroupSpool):
"""
@@ -22,6 +25,10 @@ class SSLCA(Bcfg2.Server.Plugin.GroupSpool):
cert_specs = {}
CAs = {}
+ def __init__(self, core, datastore):
+ Bcfg2.Server.Plugin.GroupSpool.__init__(self, core, datastore)
+ self.infoxml = dict()
+
def HandleEvent(self, event=None):
"""
Updates which files this plugin handles based upon filesystem events.
@@ -37,19 +44,21 @@ class SSLCA(Bcfg2.Server.Plugin.GroupSpool):
else:
ident = self.handles[event.requestID][:-1]
- fname = "".join([ident, '/', event.filename])
+ fname = os.path.join(ident, event.filename)
if event.filename.endswith('.xml'):
if action in ['exists', 'created', 'changed']:
if event.filename.endswith('key.xml'):
- key_spec = dict(list(lxml.etree.parse(epath).find('Key').items()))
+ key_spec = dict(list(lxml.etree.parse(epath,
+ parser=Bcfg2.Server.XMLParser).find('Key').items()))
self.key_specs[ident] = {
'bits': key_spec.get('bits', 2048),
'type': key_spec.get('type', 'rsa')
}
self.Entries['Path'][ident] = self.get_key
elif event.filename.endswith('cert.xml'):
- cert_spec = dict(list(lxml.etree.parse(epath).find('Cert').items()))
+ cert_spec = dict(list(lxml.etree.parse(epath,
+ parser=Bcfg2.Server.XMLParser).find('Cert').items()))
ca = cert_spec.get('ca', 'default')
self.cert_specs[ident] = {
'ca': ca,
@@ -67,6 +76,9 @@ class SSLCA(Bcfg2.Server.Plugin.GroupSpool):
cp.read(self.core.cfile)
self.CAs[ca] = dict(cp.items('sslca_' + ca))
self.Entries['Path'][ident] = self.get_cert
+ elif event.filename.endswith("info.xml"):
+ self.infoxml[ident] = Bcfg2.Server.Plugin.InfoXML(epath)
+ self.infoxml[ident].HandleEvent(event)
if action == 'deleted':
if ident in self.Entries['Path']:
del self.Entries['Path'][ident]
@@ -90,28 +102,27 @@ class SSLCA(Bcfg2.Server.Plugin.GroupSpool):
        either grabs a preexisting key hostfile, or triggers the generation
of a new key if one doesn't exist.
"""
- # set path type and permissions, otherwise bcfg2 won't bind the file
- permdata = {'owner': 'root',
- 'group': 'root',
- 'type': 'file',
- 'perms': '644'}
- [entry.attrib.__setitem__(key, permdata[key]) for key in permdata]
-
# check if we already have a hostfile, or need to generate a new key
# TODO: verify key fits the specs
path = entry.get('name')
- filename = "".join([path, '/', path.rsplit('/', 1)[1],
- '.H_', metadata.hostname])
+ filename = os.path.join(path, "%s.H_%s" % (os.path.basename(path),
+ metadata.hostname))
if filename not in list(self.entries.keys()):
key = self.build_key(filename, entry, metadata)
open(self.data + filename, 'w').write(key)
entry.text = key
- self.entries[filename] = self.__child__("%s%s" % (self.data,
- filename))
+ self.entries[filename] = self.__child__(self.data + filename)
self.entries[filename].HandleEvent()
else:
entry.text = self.entries[filename].data
+ entry.set("type", "file")
+ if path in self.infoxml:
+ Bcfg2.Server.Plugin.bind_info(entry, metadata,
+ infoxml=self.infoxml[path])
+ else:
+ Bcfg2.Server.Plugin.bind_info(entry, metadata)
+
def build_key(self, filename, entry, metadata):
"""
        generates a new key according to the specification
@@ -130,56 +141,61 @@ class SSLCA(Bcfg2.Server.Plugin.GroupSpool):
        either grabs a preexisting cert hostfile, or triggers the generation
of a new cert if one doesn't exist.
"""
- # set path type and permissions, otherwise bcfg2 won't bind the file
- permdata = {'owner': 'root',
- 'group': 'root',
- 'type': 'file',
- 'perms': '644'}
- [entry.attrib.__setitem__(key, permdata[key]) for key in permdata]
-
path = entry.get('name')
- filename = "".join([path, '/', path.rsplit('/', 1)[1],
- '.H_', metadata.hostname])
+ filename = os.path.join(path, "%s.H_%s" % (os.path.basename(path),
+ metadata.hostname))
# first - ensure we have a key to work with
key = self.cert_specs[entry.get('name')].get('key')
- key_filename = "".join([key, '/', key.rsplit('/', 1)[1],
- '.H_', metadata.hostname])
+ key_filename = os.path.join(key, "%s.H_%s" % (os.path.basename(key),
+ metadata.hostname))
if key_filename not in self.entries:
e = lxml.etree.Element('Path')
- e.attrib['name'] = key
+ e.set('name', key)
self.core.Bind(e, metadata)
# check if we have a valid hostfile
- if filename in list(self.entries.keys()) and self.verify_cert(filename,
- key_filename,
- entry):
+ if (filename in list(self.entries.keys()) and
+ self.verify_cert(filename, key_filename, entry)):
entry.text = self.entries[filename].data
else:
cert = self.build_cert(key_filename, entry, metadata)
open(self.data + filename, 'w').write(cert)
- self.entries[filename] = self.__child__("%s%s" % (self.data,
- filename))
+ self.entries[filename] = self.__child__(self.data + filename)
self.entries[filename].HandleEvent()
entry.text = cert
+ entry.set("type", "file")
+ if path in self.infoxml:
+ Bcfg2.Server.Plugin.bind_info(entry, metadata,
+ infoxml=self.infoxml[path])
+ else:
+ Bcfg2.Server.Plugin.bind_info(entry, metadata)
+
def verify_cert(self, filename, key_filename, entry):
- if self.verify_cert_against_ca(filename, entry):
- if self.verify_cert_against_key(filename, key_filename):
- return True
- return False
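+        # A per-CA "verify_certs" option can disable verification entirely;
+        # otherwise the cert must validate against both the CA and the key.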
+        do_verify = \
+            self.CAs[self.cert_specs[entry.get('name')]['ca']].get('verify_certs',
+                                                                   True)
+ if do_verify:
+ return (self.verify_cert_against_ca(filename, entry) and
+ self.verify_cert_against_key(filename, key_filename))
+ return True
def verify_cert_against_ca(self, filename, entry):
"""
check that a certificate validates against the ca cert,
and that it has not expired.
"""
- chaincert = self.CAs[self.cert_specs[entry.get('name')]['ca']].get('chaincert')
+ chaincert = \
+ self.CAs[self.cert_specs[entry.get('name')]['ca']].get('chaincert')
cert = self.data + filename
- res = Popen(["openssl", "verify", "-CAfile", chaincert, cert],
+ res = Popen(["openssl", "verify", "-untrusted", chaincert, "-purpose",
+ "sslserver", cert],
stdout=PIPE, stderr=STDOUT).stdout.read()
if res == cert + ": OK\n":
+ self.debug_log("SSLCA: %s verified successfully against CA" %
+ entry.get("name"))
return True
+ self.logger.warning("SSLCA: %s failed verification against CA: %s" %
+ (entry.get("name"), res))
return False
def verify_cert_against_key(self, filename, key_filename):
@@ -188,14 +204,20 @@ class SSLCA(Bcfg2.Server.Plugin.GroupSpool):
"""
cert = self.data + filename
key = self.data + key_filename
- cmd = ("openssl x509 -noout -modulus -in %s | openssl md5" %
- pipes.quote(cert))
- cert_md5 = Popen(cmd, shell=True, stdout=PIPE, stderr=STDOUT).stdout.read()
- cmd = ("openssl rsa -noout -modulus -in %s | openssl md5" %
- pipes.quote(key))
- key_md5 = Popen(cmd, shell=True, stdout=PIPE, stderr=STDOUT).stdout.read()
+ cert_md5 = \
+ md5(Popen(["openssl", "x509", "-noout", "-modulus", "-in", cert],
+ stdout=PIPE,
+ stderr=STDOUT).stdout.read().strip()).hexdigest()
+ key_md5 = \
+ md5(Popen(["openssl", "rsa", "-noout", "-modulus", "-in", key],
+ stdout=PIPE,
+ stderr=STDOUT).stdout.read().strip()).hexdigest()
if cert_md5 == key_md5:
+ self.debug_log("SSLCA: %s verified successfully against key %s" %
+ (filename, key_filename))
return True
+ self.logger.warning("SSLCA: %s failed verification against key %s" %
+ (filename, key_filename))
return False
def build_cert(self, key_filename, entry, metadata):
diff --git a/src/lib/Bcfg2/Server/Plugins/ServiceCompat.py b/src/lib/Bcfg2/Server/Plugins/ServiceCompat.py
new file mode 100644
index 000000000..aad92b7c7
--- /dev/null
+++ b/src/lib/Bcfg2/Server/Plugins/ServiceCompat.py
@@ -0,0 +1,32 @@
+import Bcfg2.Server.Plugin
+
+class ServiceCompat(Bcfg2.Server.Plugin.Plugin,
+ Bcfg2.Server.Plugin.StructureValidator):
+ """ Use old-style service modes for older clients """
+ name = 'ServiceCompat'
+ __author__ = 'bcfg-dev@mcs.anl.gov'
+ mode_map = {('true', 'true'): 'default',
+ ('interactive', 'true'): 'interactive_only',
+ ('false', 'false'): 'manual'}
+
+ def validate_structures(self, metadata, structures):
+ """ Apply defaults """
+ if metadata.version_info and metadata.version_info > (1, 3, 0, '', 0):
+ # do not care about a client that is _any_ 1.3.0 release
+ # (including prereleases and RCs)
+ return
+
+ for struct in structures:
+ for entry in struct.xpath("//BoundService|//Service"):
+ mode_key = (entry.get("restart", "true").lower(),
+ entry.get("install", "true").lower())
+ try:
+ mode = self.mode_map[mode_key]
+ except KeyError:
+ self.logger.info("Could not map restart and install "
+ "settings of %s:%s to an old-style "
+ "Service mode for %s; using 'manual'" %
+ (entry.tag, entry.get("name"),
+ metadata.hostname))
+ mode = "manual"
+ entry.set("mode", mode)
diff --git a/src/lib/Bcfg2/Server/Plugins/Snapshots.py b/src/lib/Bcfg2/Server/Plugins/Snapshots.py
index aeb3b9f74..e62638b4f 100644
--- a/src/lib/Bcfg2/Server/Plugins/Snapshots.py
+++ b/src/lib/Bcfg2/Server/Plugins/Snapshots.py
@@ -1,9 +1,5 @@
-#import lxml.etree
import logging
-import binascii
import difflib
-#import sqlalchemy
-#import sqlalchemy.orm
import Bcfg2.Server.Plugin
import Bcfg2.Server.Snapshots
import Bcfg2.Logger
@@ -13,7 +9,7 @@ import time
import threading
# Compatibility import
-from Bcfg2.Bcfg2Py3k import Queue
+from Bcfg2.Bcfg2Py3k import Queue, u_str, b64decode
logger = logging.getLogger('Snapshots')
@@ -28,14 +24,6 @@ datafields = {
}
-# py3k compatibility
-def u_str(string):
- if sys.hexversion >= 0x03000000:
- return string
- else:
- return unicode(string)
-
-
def build_snap_ent(entry):
basefields = []
if entry.tag in ['Package', 'Service']:
@@ -52,13 +40,12 @@ def build_snap_ent(entry):
if entry.get('encoding', 'ascii') == 'ascii':
desired['contents'] = u_str(entry.text)
else:
- desired['contents'] = u_str(binascii.a2b_base64(entry.text))
+ desired['contents'] = u_str(b64decode(entry.text))
if 'current_bfile' in entry.attrib:
- state['contents'] = u_str(binascii.a2b_base64( \
- entry.get('current_bfile')))
+ state['contents'] = u_str(b64decode(entry.get('current_bfile')))
elif 'current_bdiff' in entry.attrib:
- diff = binascii.a2b_base64(entry.get('current_bdiff'))
+ diff = b64decode(entry.get('current_bdiff'))
state['contents'] = u_str( \
'\n'.join(difflib.restore(diff.split('\n'), 1)))
@@ -69,14 +56,12 @@ def build_snap_ent(entry):
return [desired, state]
-class Snapshots(Bcfg2.Server.Plugin.Statistics,
- Bcfg2.Server.Plugin.Plugin):
+class Snapshots(Bcfg2.Server.Plugin.Statistics):
name = 'Snapshots'
experimental = True
def __init__(self, core, datastore):
- Bcfg2.Server.Plugin.Plugin.__init__(self, core, datastore)
- Bcfg2.Server.Plugin.Statistics.__init__(self)
+ Bcfg2.Server.Plugin.Statistics.__init__(self, core, datastore)
self.session = Bcfg2.Server.Snapshots.setup_session(core.cfile)
self.work_queue = Queue()
self.loader = threading.Thread(target=self.load_snapshot)
diff --git a/src/lib/Bcfg2/Server/Plugins/Statistics.py b/src/lib/Bcfg2/Server/Plugins/Statistics.py
index 265ef95a8..984efb76c 100644
--- a/src/lib/Bcfg2/Server/Plugins/Statistics.py
+++ b/src/lib/Bcfg2/Server/Plugins/Statistics.py
@@ -1,15 +1,14 @@
'''This file manages the statistics collected by the BCFG2 Server'''
-import binascii
import copy
import difflib
import logging
-from lxml.etree import XML, SubElement, Element, XMLSyntaxError
import lxml.etree
import os
+import sys
from time import asctime, localtime, time, strptime, mktime
import threading
-
+from Bcfg2.Bcfg2Py3k import b64decode
import Bcfg2.Server.Plugin
@@ -19,7 +18,7 @@ class StatisticsStore(object):
def __init__(self, filename):
self.filename = filename
- self.element = Element('Dummy')
+ self.element = lxml.etree.Element('Dummy')
self.dirty = 0
self.lastwrite = 0
self.logger = logging.getLogger('Bcfg2.Server.Statistics')
@@ -35,7 +34,8 @@ class StatisticsStore(object):
ioerr = sys.exc_info()[1]
self.logger.error("Failed to open %s for writing: %s" % (self.filename + '.new', ioerr))
else:
- fout.write(lxml.etree.tostring(self.element, encoding='UTF-8', xml_declaration=True))
+ fout.write(lxml.etree.tostring(self.element,
+ xml_declaration=False).decode('UTF-8'))
fout.close()
os.rename(self.filename + '.new', self.filename)
self.dirty = 0
@@ -47,11 +47,11 @@ class StatisticsStore(object):
fin = open(self.filename, 'r')
data = fin.read()
fin.close()
- self.element = XML(data)
+ self.element = lxml.etree.XML(data)
self.dirty = 0
- except (IOError, XMLSyntaxError):
+ except (IOError, lxml.etree.XMLSyntaxError):
self.logger.error("Creating new statistics file %s"%(self.filename))
- self.element = Element('ConfigStatistics')
+ self.element = lxml.etree.Element('ConfigStatistics')
self.WriteBack()
self.dirty = 0
@@ -77,7 +77,7 @@ class StatisticsStore(object):
nummatch = len(nodes)
if nummatch == 0:
# Create an entry for this node
- node = SubElement(self.element, 'Node', name=client)
+ node = lxml.etree.SubElement(self.element, 'Node', name=client)
elif nummatch == 1 and not node_dirty:
# Delete old instance
node = nodes[0]
@@ -112,13 +112,11 @@ class StatisticsStore(object):
return (now-utime) > secondsPerDay
-class Statistics(Bcfg2.Server.Plugin.Plugin,
- Bcfg2.Server.Plugin.ThreadedStatistics,
+class Statistics(Bcfg2.Server.Plugin.ThreadedStatistics,
Bcfg2.Server.Plugin.PullSource):
name = 'Statistics'
def __init__(self, core, datastore):
- Bcfg2.Server.Plugin.Plugin.__init__(self, core, datastore)
Bcfg2.Server.Plugin.ThreadedStatistics.__init__(self, core, datastore)
Bcfg2.Server.Plugin.PullSource.__init__(self)
fpath = "%s/etc/statistics.xml" % datastore
@@ -151,9 +149,9 @@ class Statistics(Bcfg2.Server.Plugin.Plugin,
if cfentry.get('sensitive') in ['true', 'True']:
raise Bcfg2.Server.Plugin.PluginExecutionError
elif 'current_bfile' in cfentry.attrib:
- contents = binascii.a2b_base64(cfentry.get('current_bfile'))
+ contents = b64decode(cfentry.get('current_bfile'))
elif 'current_bdiff' in cfentry.attrib:
- diff = binascii.a2b_base64(cfentry.get('current_bdiff'))
+ diff = b64decode(cfentry.get('current_bdiff'))
contents = '\n'.join(difflib.restore(diff.split('\n'), 1))
else:
contents = None
diff --git a/src/lib/Bcfg2/Server/Plugins/Svcmgr.py b/src/lib/Bcfg2/Server/Plugins/Svcmgr.py
deleted file mode 100644
index f4232ad5c..000000000
--- a/src/lib/Bcfg2/Server/Plugins/Svcmgr.py
+++ /dev/null
@@ -1,10 +0,0 @@
-"""This generator provides service mappings."""
-
-import Bcfg2.Server.Plugin
-
-
-class Svcmgr(Bcfg2.Server.Plugin.PrioDir):
- """This is a generator that handles service assignments."""
- name = 'Svcmgr'
- __author__ = 'bcfg-dev@mcs.anl.gov'
- deprecated = True
diff --git a/src/lib/Bcfg2/Server/Plugins/TCheetah.py b/src/lib/Bcfg2/Server/Plugins/TCheetah.py
index 8879fdef1..2bf475363 100644
--- a/src/lib/Bcfg2/Server/Plugins/TCheetah.py
+++ b/src/lib/Bcfg2/Server/Plugins/TCheetah.py
@@ -1,13 +1,11 @@
'''This module implements a templating generator based on Cheetah'''
-import binascii
import logging
import sys
import traceback
import Bcfg2.Server.Plugin
-# py3k compatibility
-if sys.hexversion >= 0x03000000:
- unicode = str
+
+from Bcfg2.Bcfg2Py3k import unicode, b64encode
logger = logging.getLogger('Bcfg2.Plugins.TCheetah')
@@ -60,7 +58,7 @@ class TemplateFile:
else:
if entry.get('encoding') == 'base64':
# take care of case where file needs base64 encoding
- entry.text = binascii.b2a_base64(self.template)
+ entry.text = b64encode(self.template)
else:
entry.text = unicode(str(self.template), self.encoding)
except:
@@ -78,3 +76,4 @@ class TCheetah(Bcfg2.Server.Plugin.GroupSpool):
__author__ = 'bcfg-dev@mcs.anl.gov'
filename_pattern = 'template'
es_child_cls = TemplateFile
+ deprecated = True
diff --git a/src/lib/Bcfg2/Server/Plugins/TGenshi.py b/src/lib/Bcfg2/Server/Plugins/TGenshi.py
index c4dd40614..c7335a0c0 100644
--- a/src/lib/Bcfg2/Server/Plugins/TGenshi.py
+++ b/src/lib/Bcfg2/Server/Plugins/TGenshi.py
@@ -1,12 +1,10 @@
"""This module implements a templating generator based on Genshi."""
-import binascii
import logging
import sys
import Bcfg2.Server.Plugin
-# py3k compatibility
-if sys.hexversion >= 0x03000000:
- unicode = str
+
+from Bcfg2.Bcfg2Py3k import unicode, b64encode
logger = logging.getLogger('Bcfg2.Plugins.TGenshi')
@@ -18,7 +16,7 @@ try:
TextTemplate, MarkupTemplate, TemplateError
except ImportError:
logger.error("TGenshi: Failed to import Genshi. Is it installed?")
- raise Bcfg2.Server.Plugin.PluginInitError
+ raise
try:
from genshi.template import NewTextTemplate
have_ntt = True
@@ -33,7 +31,7 @@ def removecomment(stream):
yield kind, data, pos
-class TemplateFile:
+class TemplateFile(object):
"""Template file creates Genshi template structures for the loaded file."""
def __init__(self, name, specific, encoding):
@@ -99,7 +97,7 @@ class TemplateFile:
else:
if entry.get('encoding') == 'base64':
# take care of case where file needs base64 encoding
- entry.text = binascii.b2a_base64(textdata)
+ entry.text = b64encode(textdata)
else:
entry.text = unicode(textdata, self.encoding)
else:
@@ -123,6 +121,10 @@ class TemplateFile:
raise Bcfg2.Server.Plugin.PluginExecutionError('Genshi template loading error: %s' % err)
+class TemplateEntrySet(Bcfg2.Server.Plugin.EntrySet):
+ basename_is_regex = True
+
+
class TGenshi(Bcfg2.Server.Plugin.GroupSpool):
"""
The TGenshi generator implements a templating
@@ -132,4 +134,6 @@ class TGenshi(Bcfg2.Server.Plugin.GroupSpool):
name = 'TGenshi'
__author__ = 'jeff@ocjtech.us'
filename_pattern = 'template\.(txt|newtxt|xml)'
+ es_cls = TemplateEntrySet
es_child_cls = TemplateFile
+ deprecated = True
diff --git a/src/lib/Bcfg2/Server/Plugins/TemplateHelper.py b/src/lib/Bcfg2/Server/Plugins/TemplateHelper.py
index 2c0ee03e0..6d92bb530 100644
--- a/src/lib/Bcfg2/Server/Plugins/TemplateHelper.py
+++ b/src/lib/Bcfg2/Server/Plugins/TemplateHelper.py
@@ -1,26 +1,23 @@
import re
import imp
import sys
+import os
+import glob
import logging
+import Bcfg2.Server.Lint
import Bcfg2.Server.Plugin
logger = logging.getLogger(__name__)
-class HelperModule(Bcfg2.Server.Plugin.SpecificData):
- _module_name_re = re.compile(r'([^/]+?)\.py')
-
- def __init__(self, name, specific, encoding):
- Bcfg2.Server.Plugin.SpecificData.__init__(self, name, specific,
- encoding)
- match = self._module_name_re.search(self.name)
- if match:
- self._module_name = match.group(1)
- else:
- self._module_name = name
+module_pattern = r'(?P<filename>(?P<module>[^\/]+)\.py)$'
+module_re = re.compile(module_pattern)
+
+class HelperModule(Bcfg2.Server.Plugin.FileBacked):
+ def __init__(self, name, fam=None):
+ Bcfg2.Server.Plugin.FileBacked.__init__(self, name, fam=fam)
+ self._module_name = module_re.search(self.name).group('module')
self._attrs = []
- def handle_event(self, event):
- Bcfg2.Server.Plugin.SpecificData.handle_event(self, event)
+ def Index(self):
try:
module = imp.load_source(self._module_name, self.name)
except:
@@ -34,32 +31,29 @@ class HelperModule(Bcfg2.Server.Plugin.SpecificData):
self.name)
return
+ newattrs = []
for sym in module.__export__:
if sym not in self._attrs and hasattr(self, sym):
logger.warning("TemplateHelper: %s: %s is a reserved keyword, "
"skipping export" % (self.name, sym))
- setattr(self, sym, getattr(module, sym))
+ continue
+ try:
+ setattr(self, sym, getattr(module, sym))
+ newattrs.append(sym)
+ except AttributeError:
+ logger.warning("TemplateHelper: %s: %s exports %s, but has no "
+ "such attribute" % (self.name, sym))
# remove old exports
- for sym in set(self._attrs) - set(module.__export__):
+ for sym in set(self._attrs) - set(newattrs):
delattr(self, sym)
- self._attrs = module.__export__
+ self._attrs = newattrs
-class HelperSet(Bcfg2.Server.Plugin.EntrySet):
+class HelperSet(Bcfg2.Server.Plugin.DirectoryBacked):
ignore = re.compile("^(\.#.*|.*~|\\..*\\.(sw[px])|.*\.py[co])$")
-
- def __init__(self, path, fam, encoding, plugin_name):
- fpattern = '[0-9A-Za-z_\-]+\.py'
- self.plugin_name = plugin_name
- Bcfg2.Server.Plugin.EntrySet.__init__(self, fpattern, path,
- HelperModule, encoding)
- fam.AddMonitor(path, self)
-
- def HandleEvent(self, event):
- if (event.filename != self.path and
- not self.ignore.match(event.filename)):
- return self.handle_event(event)
+ patterns = module_re
+ __child__ = HelperModule
class TemplateHelper(Bcfg2.Server.Plugin.Plugin,
@@ -71,13 +65,69 @@ class TemplateHelper(Bcfg2.Server.Plugin.Plugin,
def __init__(self, core, datastore):
Bcfg2.Server.Plugin.Plugin.__init__(self, core, datastore)
Bcfg2.Server.Plugin.Connector.__init__(self)
+ self.helpers = HelperSet(self.data, core.fam)
+
+ def get_additional_data(self, _):
+ return dict([(h._module_name, h)
+ for h in self.helpers.entries.values()])
+
+
+class TemplateHelperLint(Bcfg2.Server.Lint.ServerlessPlugin):
+    """ Check TemplateHelper modules for importability and valid exports """
+ def __init__(self, *args, **kwargs):
+ Bcfg2.Server.Lint.ServerlessPlugin.__init__(self, *args, **kwargs)
+ hm = HelperModule("foo.py")
+ self.reserved_keywords = dir(hm)
+
+ def Run(self):
+ for fname in os.listdir(os.path.join(self.config['repo'],
+ "TemplateHelper")):
+ helper = os.path.join(self.config['repo'], "TemplateHelper",
+ fname)
+ if not module_re.search(helper) or not self.HandlesFile(helper):
+ continue
+ self.check_helper(helper)
+
+ def check_helper(self, helper):
+        module_name = module_re.search(helper).group('module')
try:
- self.helpers = HelperSet(self.data, core.fam, core.encoding,
- self.name)
+ module = imp.load_source(module_name, helper)
except:
- raise Bcfg2.Server.Plugin.PluginInitError
+ err = sys.exc_info()[1]
+ self.LintError("templatehelper-import-error",
+ "Failed to import %s: %s" %
+ (helper, err))
+ return
- def get_additional_data(self, metadata):
- return dict([(h._module_name, h)
- for h in list(self.helpers.entries.values())])
+ if not hasattr(module, "__export__"):
+ self.LintError("templatehelper-no-export",
+ "%s has no __export__ list" % helper)
+ return
+ elif not isinstance(module.__export__, list):
+ self.LintError("templatehelper-nonlist-export",
+ "__export__ is not a list in %s" % helper)
+ return
+
+ for sym in module.__export__:
+ if not hasattr(module, sym):
+ self.LintError("templatehelper-nonexistent-export",
+ "%s: exported symbol %s does not exist" %
+ (helper, sym))
+ elif sym in self.reserved_keywords:
+ self.LintError("templatehelper-reserved-export",
+ "%s: exported symbol %s is reserved" %
+ (helper, sym))
+ elif sym.startswith("_"):
+ self.LintError("templatehelper-underscore-export",
+ "%s: exported symbol %s starts with underscore" %
+ (helper, sym))
+
+ @classmethod
+ def Errors(cls):
+ return {"templatehelper-import-error":"error",
+ "templatehelper-no-export":"error",
+ "templatehelper-nonlist-export":"error",
+ "templatehelper-nonexistent-export":"error",
+ "templatehelper-reserved-export":"error",
+ "templatehelper-underscore-export":"warning"}
diff --git a/src/lib/Bcfg2/Server/Plugins/Trigger.py b/src/lib/Bcfg2/Server/Plugins/Trigger.py
index b0d21545c..313a1bf03 100644
--- a/src/lib/Bcfg2/Server/Plugins/Trigger.py
+++ b/src/lib/Bcfg2/Server/Plugins/Trigger.py
@@ -1,43 +1,52 @@
import os
+import pipes
import Bcfg2.Server.Plugin
+from subprocess import Popen, PIPE
+class TriggerFile(Bcfg2.Server.Plugin.FileBacked):
+ def HandleEvent(self, event=None):
+ return
-def async_run(prog, args):
- pid = os.fork()
- if pid:
- os.waitpid(pid, 0)
- else:
- dpid = os.fork()
- if not dpid:
- os.system(" ".join([prog] + args))
- os._exit(0)
+ def __str__(self):
+ return "%s: %s" % (self.__class__.__name__, self.name)
class Trigger(Bcfg2.Server.Plugin.Plugin,
- Bcfg2.Server.Plugin.Statistics):
+ Bcfg2.Server.Plugin.ClientRunHooks,
+ Bcfg2.Server.Plugin.DirectoryBacked):
"""Trigger is a plugin that calls external scripts (on the server)."""
name = 'Trigger'
__author__ = 'bcfg-dev@mcs.anl.gov'
def __init__(self, core, datastore):
Bcfg2.Server.Plugin.Plugin.__init__(self, core, datastore)
- Bcfg2.Server.Plugin.Statistics.__init__(self)
- try:
- os.stat(self.data)
- except:
- self.logger.error("Trigger: spool directory %s does not exist; "
- "unloading" % self.data)
- raise Bcfg2.Server.Plugin.PluginInitError
-
- def process_statistics(self, metadata, _):
+ Bcfg2.Server.Plugin.ClientRunHooks.__init__(self)
+ Bcfg2.Server.Plugin.DirectoryBacked.__init__(self, self.data,
+ self.core.fam)
+
+ def async_run(self, args):
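+        # Double-fork so the notifier runs detached from the server process;
+        # the first child is reaped immediately and the grandchild runs the
+        # script via Popen before exiting.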
+ pid = os.fork()
+ if pid:
+ os.waitpid(pid, 0)
+ else:
+ dpid = os.fork()
+ if not dpid:
+ self.debug_log("Running %s" % " ".join(pipes.quote(a)
+ for a in args))
+ proc = Popen(args, stdin=PIPE, stdout=PIPE, stderr=PIPE)
+ (out, err) = proc.communicate()
+ rv = proc.wait()
+ if rv != 0:
+ self.logger.error("Trigger: Error running %s (%s): %s" %
+ (args[0], rv, err))
+ elif err:
+ self.debug_log("Trigger: Error: %s" % err)
+ os._exit(0)
+
+
+ def end_client_run(self, metadata):
args = [metadata.hostname, '-p', metadata.profile, '-g',
':'.join([g for g in metadata.groups])]
- for notifier in os.listdir(self.data):
- if ((notifier[-1] == '~') or
- (notifier[:2] == '.#') or
- (notifier[-4:] == '.swp') or
- (notifier in ['SCCS', '.svn', '4913'])):
- continue
- npath = self.data + '/' + notifier
- self.logger.debug("Running %s %s" % (npath, " ".join(args)))
- async_run(npath, args)
+ for notifier in self.entries.keys():
+ npath = os.path.join(self.data, notifier)
+ self.async_run([npath] + args)
diff --git a/src/lib/Bcfg2/Server/Plugins/__init__.py b/src/lib/Bcfg2/Server/Plugins/__init__.py
index f9f1b4e52..b33eeba28 100644
--- a/src/lib/Bcfg2/Server/Plugins/__init__.py
+++ b/src/lib/Bcfg2/Server/Plugins/__init__.py
@@ -25,10 +25,8 @@ __all__ = [
'SSHbase',
'Snapshots',
'Statistics',
- 'Svcmgr',
'Svn',
'TCheetah',
'Trigger',
- 'SGenshi',
'TGenshi',
]
diff --git a/src/lib/Bcfg2/Server/Reports/importscript.py b/src/lib/Bcfg2/Server/Reports/importscript.py
index 16df86a9b..4eced8340 100755
--- a/src/lib/Bcfg2/Server/Reports/importscript.py
+++ b/src/lib/Bcfg2/Server/Reports/importscript.py
@@ -4,17 +4,17 @@ Imports statistics.xml and clients.xml files in to database backend for
new statistics engine
"""
-import binascii
import os
import sys
+import traceback
try:
- import Bcfg2.Server.Reports.settings
+ import Bcfg2.settings
except Exception:
e = sys.exc_info()[1]
sys.stderr.write("Failed to load configuration settings. %s\n" % e)
sys.exit(1)
-project_directory = os.path.dirname(Bcfg2.Server.Reports.settings.__file__)
+project_directory = os.path.dirname(Bcfg2.settings.__file__)
project_name = os.path.basename(project_directory)
sys.path.append(os.path.join(project_directory, '..'))
project_module = __import__(project_name, '', '', [''])
@@ -27,14 +27,14 @@ from lxml.etree import XML, XMLSyntaxError
from getopt import getopt, GetoptError
from datetime import datetime
from time import strptime
-from django.db import connection
-from Bcfg2.Server.Reports.updatefix import update_database
+from django.db import connection, transaction
+from Bcfg2.Server.Plugins.Metadata import ClientMetadata
import logging
import Bcfg2.Logger
import platform
# Compatibility import
-from Bcfg2.Bcfg2Py3k import ConfigParser
+from Bcfg2.Bcfg2Py3k import ConfigParser, b64decode
def build_reason_kwargs(r_ent, encoding, logger):
@@ -53,7 +53,7 @@ def build_reason_kwargs(r_ent, encoding, logger):
# No point in flagging binary if we have no data
binary_file = False
elif r_ent.get('current_bdiff', False):
- rc_diff = binascii.a2b_base64(r_ent.get('current_bdiff'))
+ rc_diff = b64decode(r_ent.get('current_bdiff'))
elif r_ent.get('current_diff', False):
rc_diff = r_ent.get('current_diff')
else:
@@ -86,130 +86,160 @@ def build_reason_kwargs(r_ent, encoding, logger):
is_sensitive=sensitive_file,
unpruned=unpruned_entries)
+def _fetch_reason(elem, kargs, logger):
+ try:
+ rr = None
+ try:
+ rr = Reason.objects.filter(**kargs)[0]
+ except IndexError:
+ rr = Reason(**kargs)
+ rr.save()
+ logger.debug("Created reason: %s" % rr.id)
+ except Exception:
+ ex = sys.exc_info()[1]
+ logger.error("Failed to create reason for %s: %s" % (elem.get('name'), ex))
+ rr = Reason(current_exists=elem.get('current_exists',
+ default="True").capitalize() == "True")
+ rr.save()
+ return rr
-def load_stats(cdata, sdata, encoding, vlevel, logger, quick=False, location=''):
- clients = {}
- [clients.__setitem__(c.name, c) \
- for c in Client.objects.all()]
-
- pingability = {}
- [pingability.__setitem__(n.get('name'), n.get('pingable', default='N')) \
- for n in cdata.findall('Client')]
+def load_stats(sdata, encoding, vlevel, logger, quick=False, location=''):
for node in sdata.findall('Node'):
name = node.get('name')
- c_inst, created = Client.objects.get_or_create(name=name)
- if vlevel > 0:
- logger.info("Client %s added to db" % name)
- clients[name] = c_inst
- try:
- pingability[name]
- except KeyError:
- pingability[name] = 'N'
for statistics in node.findall('Statistics'):
- timestamp = datetime(*strptime(statistics.get('time'))[0:6])
- ilist = Interaction.objects.filter(client=c_inst,
- timestamp=timestamp)
- if ilist:
- current_interaction = ilist[0]
- if vlevel > 0:
- logger.info("Interaction for %s at %s with id %s already exists" % \
- (c_inst.id, timestamp, current_interaction.id))
- continue
- else:
- newint = Interaction(client=c_inst,
- timestamp=timestamp,
- state=statistics.get('state',
+ try:
+ load_stat(name, statistics, encoding, vlevel, logger, quick, location)
+ except:
+ logger.error("Failed to create interaction for %s: %s" %
+ (name, traceback.format_exc().splitlines()[-1]))
+
+@transaction.commit_on_success
+def load_stat(cobj, statistics, encoding, vlevel, logger, quick, location):
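+    # cobj is either a ClientMetadata object (live import from the server) or
+    # a plain hostname string (import from statistics.xml); each interaction
+    # is committed in its own transaction via commit_on_success.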
+ if isinstance(cobj, ClientMetadata):
+ client_name = cobj.hostname
+ else:
+ client_name = cobj
+ client, created = Client.objects.get_or_create(name=client_name)
+ if created and vlevel > 0:
+ logger.info("Client %s added to db" % client_name)
+
+ timestamp = datetime(*strptime(statistics.get('time'))[0:6])
+ ilist = Interaction.objects.filter(client=client,
+ timestamp=timestamp)
+ if ilist:
+ current_interaction = ilist[0]
+ if vlevel > 0:
+ logger.info("Interaction for %s at %s with id %s already exists" % \
+ (client.id, timestamp, current_interaction.id))
+ return
+ else:
+ newint = Interaction(client=client,
+ timestamp=timestamp,
+ state=statistics.get('state',
+ default="unknown"),
+ repo_rev_code=statistics.get('revision',
default="unknown"),
- repo_rev_code=statistics.get('revision',
- default="unknown"),
- goodcount=statistics.get('good',
- default="0"),
- totalcount=statistics.get('total',
- default="0"),
- server=location)
- newint.save()
- current_interaction = newint
- if vlevel > 0:
- logger.info("Interaction for %s at %s with id %s INSERTED in to db" % (c_inst.id,
- timestamp, current_interaction.id))
-
- counter_fields = {TYPE_CHOICES[0]: 0,
- TYPE_CHOICES[1]: 0,
- TYPE_CHOICES[2]: 0}
- pattern = [('Bad/*', TYPE_CHOICES[0]),
- ('Extra/*', TYPE_CHOICES[2]),
- ('Modified/*', TYPE_CHOICES[1])]
- for (xpath, type) in pattern:
- for x in statistics.findall(xpath):
- counter_fields[type] = counter_fields[type] + 1
- kargs = build_reason_kwargs(x, encoding, logger)
-
- try:
- rr = None
- try:
- rr = Reason.objects.filter(**kargs)[0]
- except IndexError:
- rr = Reason(**kargs)
- rr.save()
- if vlevel > 0:
- logger.info("Created reason: %s" % rr.id)
- except Exception:
- ex = sys.exc_info()[1]
- logger.error("Failed to create reason for %s: %s" % (x.get('name'), ex))
- rr = Reason(current_exists=x.get('current_exists',
- default="True").capitalize() == "True")
- rr.save()
-
- entry, created = Entries.objects.get_or_create(\
- name=x.get('name'), kind=x.tag)
-
- Entries_interactions(entry=entry, reason=rr,
- interaction=current_interaction,
- type=type[0]).save()
- if vlevel > 0:
- logger.info("%s interaction created with reason id %s and entry %s" % (xpath, rr.id, entry.id))
-
- # Update interaction counters
- current_interaction.bad_entries = counter_fields[TYPE_CHOICES[0]]
- current_interaction.modified_entries = counter_fields[TYPE_CHOICES[1]]
- current_interaction.extra_entries = counter_fields[TYPE_CHOICES[2]]
- current_interaction.save()
-
- mperfs = []
- for times in statistics.findall('OpStamps'):
- for metric, value in list(times.items()):
- mmatch = []
- if not quick:
- mmatch = Performance.objects.filter(metric=metric, value=value)
-
- if mmatch:
- mperf = mmatch[0]
- else:
- mperf = Performance(metric=metric, value=value)
- mperf.save()
- mperfs.append(mperf)
- current_interaction.performance_items.add(*mperfs)
-
- for key in list(pingability.keys()):
- if key not in clients:
- continue
+ goodcount=statistics.get('good',
+ default="0"),
+ totalcount=statistics.get('total',
+ default="0"),
+ server=location)
+ newint.save()
+ current_interaction = newint
+ if vlevel > 0:
+ logger.info("Interaction for %s at %s with id %s INSERTED in to db" % (client.id,
+ timestamp, current_interaction.id))
+
+ if isinstance(cobj, ClientMetadata):
try:
- pmatch = Ping.objects.filter(client=clients[key]).order_by('-endtime')[0]
- if pmatch.status == pingability[key]:
- pmatch.endtime = datetime.now()
- pmatch.save()
- continue
- except IndexError:
- pass
- Ping(client=clients[key], status=pingability[key],
- starttime=datetime.now(),
- endtime=datetime.now()).save()
+ imeta = InteractionMetadata(interaction=current_interaction)
+ profile, created = Group.objects.get_or_create(name=cobj.profile)
+ imeta.profile = profile
+ imeta.save() # save here for m2m
+
+ #FIXME - this should be more efficient
+ group_set = []
+ for group_name in cobj.groups:
+ group, created = Group.objects.get_or_create(name=group_name)
+ if created:
+ logger.debug("Added group %s" % group)
+ imeta.groups.add(group)
+ for bundle_name in cobj.bundles:
+ bundle, created = Bundle.objects.get_or_create(name=bundle_name)
+ if created:
+ logger.debug("Added bundle %s" % bundle)
+ imeta.bundles.add(bundle)
+ imeta.save()
+ except:
+ logger.error("Failed to save interaction metadata for %s: %s" %
+ (client_name, traceback.format_exc().splitlines()[-1]))
+
+
+    entries_cache = dict([((e.kind, e.name), e)
+                          for e in Entries.objects.all()])
+ counter_fields = {TYPE_BAD: 0,
+ TYPE_MODIFIED: 0,
+ TYPE_EXTRA: 0}
+ pattern = [('Bad/*', TYPE_BAD),
+ ('Extra/*', TYPE_EXTRA),
+ ('Modified/*', TYPE_MODIFIED)]
+ for (xpath, type) in pattern:
+ for x in statistics.findall(xpath):
+ counter_fields[type] = counter_fields[type] + 1
+ rr = _fetch_reason(x, build_reason_kwargs(x, encoding, logger), logger)
- if vlevel > 1:
- logger.info("---------------PINGDATA SYNCED---------------------")
+ try:
+ entry = entries_cache[(x.tag, x.get('name'))]
+ except KeyError:
+ entry, created = Entries.objects.get_or_create(\
+ name=x.get('name'), kind=x.tag)
+
+ Entries_interactions(entry=entry, reason=rr,
+ interaction=current_interaction,
+ type=type).save()
+ if vlevel > 0:
+ logger.info("%s interaction created with reason id %s and entry %s" % (xpath, rr.id, entry.id))
+
+ # add good entries
+ good_reason = None
+ for x in statistics.findall('Good/*'):
+ if good_reason == None:
+ # Do this once. Really need to fix Reasons...
+ good_reason = _fetch_reason(x, build_reason_kwargs(x, encoding, logger), logger)
+ try:
+ entry = entries_cache[(x.tag, x.get('name'))]
+ except KeyError:
+ entry, created = Entries.objects.get_or_create(\
+ name=x.get('name'), kind=x.tag)
+ Entries_interactions(entry=entry, reason=good_reason,
+ interaction=current_interaction,
+ type=TYPE_GOOD).save()
+ if vlevel > 0:
+            logger.info("Good/* interaction created with reason id %s and entry %s" % (good_reason.id, entry.id))
+
+ # Update interaction counters
+ current_interaction.bad_entries = counter_fields[TYPE_BAD]
+ current_interaction.modified_entries = counter_fields[TYPE_MODIFIED]
+ current_interaction.extra_entries = counter_fields[TYPE_EXTRA]
+ current_interaction.save()
+
+ mperfs = []
+ for times in statistics.findall('OpStamps'):
+ for metric, value in list(times.items()):
+ mmatch = []
+ if not quick:
+ mmatch = Performance.objects.filter(metric=metric, value=value)
+
+ if mmatch:
+ mperf = mmatch[0]
+ else:
+ mperf = Performance(metric=metric, value=value)
+ mperf.save()
+ mperfs.append(mperf)
+ current_interaction.performance_items.add(*mperfs)
- #Clients are consistent
if __name__ == '__main__':
from sys import argv
@@ -231,18 +261,17 @@ if __name__ == '__main__':
except GetoptError:
mesg = sys.exc_info()[1]
# print help information and exit:
- print("%s\nUsage:\nimportscript.py [-h] [-v] [-u] [-d] [-S] [-C bcfg2 config file] [-c clients-file] [-s statistics-file]" % (mesg))
+ print("%s\nUsage:\nimportscript.py [-h] [-v] [-u] [-d] [-S] [-C bcfg2 config file] [-s statistics-file]" % (mesg))
raise SystemExit(2)
for o, a in opts:
if o in ("-h", "--help"):
- print("Usage:\nimportscript.py [-h] [-v] -c <clients-file> -s <statistics-file> \n")
+ print("Usage:\nimportscript.py [-h] [-v] -s <statistics-file> \n")
print("h : help; this message")
print("v : verbose; print messages on record insertion/skip")
print("u : updates; print status messages as items inserted semi-verbose")
print("d : debug; print most SQL used to manipulate database")
print("C : path to bcfg2.conf config file.")
- print("c : clients.xml file")
print("s : statistics.xml file")
print("S : syslog; output to syslog")
raise SystemExit
@@ -256,7 +285,7 @@ if __name__ == '__main__':
if o in ("-d", "--debug"):
verb = 3
if o in ("-c", "--clients"):
- clientspath = a
+ print("DeprecationWarning: %s is no longer used" % o)
if o in ("-s", "--stats"):
statpath = a
@@ -267,7 +296,7 @@ if __name__ == '__main__':
logging.getLogger().setLevel(logging.INFO)
Bcfg2.Logger.setup_logging('importscript.py',
True,
- syslog)
+ syslog, level=logging.INFO)
cf = ConfigParser.ConfigParser()
cf.read([cpath])
@@ -289,24 +318,16 @@ if __name__ == '__main__':
except:
encoding = 'UTF-8'
- if not clientpath:
- try:
- clientspath = "%s/Metadata/clients.xml" % \
- cf.get('server', 'repository')
- except (ConfigParser.NoSectionError, ConfigParser.NoOptionError):
- print("Could not read bcfg2.conf; exiting")
- raise SystemExit(1)
- try:
- clientsdata = XML(open(clientspath).read())
- except (IOError, XMLSyntaxError):
- print("StatReports: Failed to parse %s" % (clientspath))
- raise SystemExit(1)
-
q = '-O3' in sys.argv
+
+ # don't load this at the top. causes a circular import error
+ from Bcfg2.Server.SchemaUpdater import update_database, UpdaterError
# Be sure the database is ready for new schema
- update_database()
- load_stats(clientsdata,
- statsdata,
+ try:
+ update_database()
+ except UpdaterError:
+ raise SystemExit(1)
+ load_stats(statsdata,
encoding,
verb,
logger,
diff --git a/src/lib/Bcfg2/Server/Reports/reports/fixtures/initial_version.xml b/src/lib/Bcfg2/Server/Reports/reports/fixtures/initial_version.xml
deleted file mode 100644
index bde236989..000000000
--- a/src/lib/Bcfg2/Server/Reports/reports/fixtures/initial_version.xml
+++ /dev/null
@@ -1,43 +0,0 @@
-<?xml version='1.0' encoding='utf-8' ?>
-<django-objects version="1.0">
- <object pk="1" model="reports.internaldatabaseversion">
- <field type="IntegerField" name="version">0</field>
- <field type="DateTimeField" name="updated">2008-08-05 11:03:50</field>
- </object>
- <object pk="2" model="reports.internaldatabaseversion">
- <field type="IntegerField" name="version">1</field>
- <field type="DateTimeField" name="updated">2008-08-05 11:04:10</field>
- </object>
- <object pk="3" model="reports.internaldatabaseversion">
- <field type="IntegerField" name="version">2</field>
- <field type="DateTimeField" name="updated">2008-08-05 13:37:19</field>
- </object>
- <object pk="4" model="reports.internaldatabaseversion">
- <field type='IntegerField' name='version'>3</field>
- <field type='DateTimeField' name='updated'>2008-08-11 08:44:36</field>
- </object>
- <object pk="5" model="reports.internaldatabaseversion">
- <field type='IntegerField' name='version'>10</field>
- <field type='DateTimeField' name='updated'>2008-08-22 11:28:50</field>
- </object>
- <object pk="5" model="reports.internaldatabaseversion">
- <field type='IntegerField' name='version'>11</field>
- <field type='DateTimeField' name='updated'>2009-01-13 12:26:10</field>
- </object>
- <object pk="6" model="reports.internaldatabaseversion">
- <field type='IntegerField' name='version'>16</field>
- <field type='DateTimeField' name='updated'>2010-06-01 12:26:10</field>
- </object>
- <object pk="7" model="reports.internaldatabaseversion">
- <field type='IntegerField' name='version'>17</field>
- <field type='DateTimeField' name='updated'>2010-07-02 00:00:00</field>
- </object>
- <object pk="8" model="reports.internaldatabaseversion">
- <field type='IntegerField' name='version'>18</field>
- <field type='DateTimeField' name='updated'>2011-06-30 00:00:00</field>
- </object>
- <object pk="8" model="reports.internaldatabaseversion">
- <field type='IntegerField' name='version'>19</field>
- <field type='DateTimeField' name='updated'>2012-03-28 00:00:00</field>
- </object>
-</django-objects>
diff --git a/src/lib/Bcfg2/Server/Reports/reports/models.py b/src/lib/Bcfg2/Server/Reports/reports/models.py
index 35f2a4393..73adaaaaf 100644
--- a/src/lib/Bcfg2/Server/Reports/reports/models.py
+++ b/src/lib/Bcfg2/Server/Reports/reports/models.py
@@ -23,16 +23,13 @@ KIND_CHOICES = (
('Path', 'symlink'),
('Service', 'Service'),
)
-PING_CHOICES = (
- #These are possible ping states
- ('Up (Y)', 'Y'),
- ('Down (N)', 'N')
-)
+TYPE_GOOD = 0
TYPE_BAD = 1
TYPE_MODIFIED = 2
TYPE_EXTRA = 3
TYPE_CHOICES = (
+ (TYPE_GOOD, 'Good'),
(TYPE_BAD, 'Bad'),
(TYPE_MODIFIED, 'Modified'),
(TYPE_EXTRA, 'Extra'),
@@ -87,30 +84,9 @@ class Client(models.Model):
pass
-class Ping(models.Model):
- """Represents a ping of a client (sparsely)."""
- client = models.ForeignKey(Client, related_name="pings")
- starttime = models.DateTimeField()
- endtime = models.DateTimeField()
- status = models.CharField(max_length=4, choices=PING_CHOICES) # up/down
-
- class Meta:
- get_latest_by = 'endtime'
-
-
class InteractiveManager(models.Manager):
"""Manages interactions objects."""
- def recent_interactions_dict(self, maxdate=None, active_only=True):
- """
- Return the most recent interactions for clients as of a date.
-
- This method uses aggregated queries to return a ValuesQueryDict object.
- Faster then raw sql since this is executed as a single query.
- """
-
- return list(self.values('client').annotate(max_timestamp=Max('timestamp')).values())
-
def interaction_per_client(self, maxdate=None, active_only=True):
"""
Returns the most recent interactions for clients as of a date
@@ -154,15 +130,15 @@ class InteractiveManager(models.Manager):
cursor.execute(sql)
return [item[0] for item in cursor.fetchall()]
except:
- '''FIXME - really need some error hadling'''
+ '''FIXME - really need some error handling'''
pass
return []
class Interaction(models.Model):
"""Models each reconfiguration operation interaction between client and server."""
- client = models.ForeignKey(Client, related_name="interactions",)
- timestamp = models.DateTimeField() # Timestamp for this record
+ client = models.ForeignKey(Client, related_name="interactions")
+ timestamp = models.DateTimeField(db_index=True) # Timestamp for this record
state = models.CharField(max_length=32) # good/bad/modified/etc
repo_rev_code = models.CharField(max_length=64) # repo revision at time of interaction
goodcount = models.IntegerField() # of good config-items
@@ -270,27 +246,47 @@ class Interaction(models.Model):
class Reason(models.Model):
"""reason why modified or bad entry did not verify, or changed."""
- owner = models.TextField(max_length=128, blank=True)
- current_owner = models.TextField(max_length=128, blank=True)
- group = models.TextField(max_length=128, blank=True)
- current_group = models.TextField(max_length=128, blank=True)
- perms = models.TextField(max_length=4, blank=True) # txt fixes typing issue
- current_perms = models.TextField(max_length=4, blank=True)
- status = models.TextField(max_length=3, blank=True) # on/off/(None)
- current_status = models.TextField(max_length=1, blank=True) # on/off/(None)
- to = models.TextField(max_length=256, blank=True)
- current_to = models.TextField(max_length=256, blank=True)
- version = models.TextField(max_length=128, blank=True)
- current_version = models.TextField(max_length=128, blank=True)
+ owner = models.CharField(max_length=255, blank=True)
+ current_owner = models.CharField(max_length=255, blank=True)
+ group = models.CharField(max_length=255, blank=True)
+ current_group = models.CharField(max_length=255, blank=True)
+ perms = models.CharField(max_length=4, blank=True)
+ current_perms = models.CharField(max_length=4, blank=True)
+ status = models.CharField(max_length=128, blank=True)
+ current_status = models.CharField(max_length=128, blank=True)
+ to = models.CharField(max_length=1024, blank=True)
+ current_to = models.CharField(max_length=1024, blank=True)
+ version = models.CharField(max_length=1024, blank=True)
+ current_version = models.CharField(max_length=1024, blank=True)
current_exists = models.BooleanField() # False means its missing. Default True
- current_diff = models.TextField(max_length=1280, blank=True)
+ current_diff = models.TextField(max_length=1024*1024, blank=True)
is_binary = models.BooleanField(default=False)
is_sensitive = models.BooleanField(default=False)
- unpruned = models.TextField(max_length=1280, blank=True)
+ unpruned = models.TextField(max_length=4096, blank=True, default='')
def _str_(self):
return "Reason"
+ def short_list(self):
+ rv = []
+ if self.current_owner or self.current_group or self.current_perms:
+ rv.append("File permissions")
+ if self.current_status:
+ rv.append("Incorrect status")
+ if self.current_to:
+ rv.append("Incorrect target")
+ if self.current_version or self.version == 'auto':
+ rv.append("Wrong version")
+ if not self.current_exists:
+ rv.append("Missing")
+ if self.current_diff or self.is_sensitive:
+ rv.append("Incorrect data")
+ if self.unpruned:
+ rv.append("Directory has extra files")
+ if len(rv) == 0:
+ rv.append("Exists")
+ return rv
+
@staticmethod
@transaction.commit_on_success
def prune_orphans():
@@ -316,6 +312,9 @@ class Entries(models.Model):
cursor.execute('delete from reports_entries where not exists (select rei.id from reports_entries_interactions rei where rei.entry_id = reports_entries.id)')
transaction.set_dirty()
+ class Meta:
+ unique_together = ("name", "kind")
+
class Entries_interactions(models.Model):
"""Define the relation between the reason, the interaction and the entry."""
@@ -343,10 +342,52 @@ class Performance(models.Model):
transaction.set_dirty()
-class InternalDatabaseVersion(models.Model):
- """Object that tell us to witch version is the database."""
- version = models.IntegerField()
- updated = models.DateTimeField(auto_now_add=True)
+class Group(models.Model):
+ """
+ Groups extracted from interactions
+
+ name - The group name
+
+ TODO - Most of this is for future use
+ TODO - set a default group
+ """
+
+ name = models.CharField(max_length=255, unique=True)
+ profile = models.BooleanField(default=False)
+ public = models.BooleanField(default=False)
+ category = models.CharField(max_length=1024, blank=True)
+ comment = models.TextField(blank=True)
+
+ groups = models.ManyToManyField("self", symmetrical=False)
+ bundles = models.ManyToManyField("Bundle")
+
+ def __unicode__(self):
+ return self.name
+
+
+class Bundle(models.Model):
+ """
+ Bundles extracted from interactions
+
+ name - The bundle name
+ """
+
+ name = models.CharField(max_length=255, unique=True)
+
+ def __unicode__(self):
+ return self.name
+
+
+class InteractionMetadata(models.Model):
+ """
+ InteractionMetadata
+
+ Hold extra data associated with the client and interaction
+ """
+
+ interaction = models.OneToOneField(Interaction, primary_key=True, related_name='metadata')
+ profile = models.ForeignKey(Group, related_name="+")
+ groups = models.ManyToManyField(Group)
+ bundles = models.ManyToManyField(Bundle)
+
- def __str__(self):
- return "version %d updated the %s" % (self.version, self.updated.isoformat())
diff --git a/src/lib/Bcfg2/Server/Reports/reports/sql/client.sql b/src/lib/Bcfg2/Server/Reports/reports/sql/client.sql
deleted file mode 100644
index 28e785450..000000000
--- a/src/lib/Bcfg2/Server/Reports/reports/sql/client.sql
+++ /dev/null
@@ -1,7 +0,0 @@
-CREATE VIEW reports_current_interactions AS SELECT x.client_id AS client_id, reports_interaction.id AS interaction_id FROM (select client_id, MAX(timestamp) as timer FROM reports_interaction GROUP BY client_id) x, reports_interaction WHERE reports_interaction.client_id = x.client_id AND reports_interaction.timestamp = x.timer;
-
-create index reports_interaction_client_id on reports_interaction (client_id);
-create index reports_client_current_interaction_id on reports_client (current_interaction_id);
-create index reports_performance_interaction_performance_id on reports_performance_interaction (performance_id);
-create index reports_interaction_timestamp on reports_interaction (timestamp);
-create index reports_performance_interation_interaction_id on reports_performance_interaction (interaction_id);
diff --git a/src/lib/Bcfg2/Server/Reports/reports/templates/base-timeview.html b/src/lib/Bcfg2/Server/Reports/reports/templates/base-timeview.html
index 842de36f0..9a5ef651c 100644
--- a/src/lib/Bcfg2/Server/Reports/reports/templates/base-timeview.html
+++ b/src/lib/Bcfg2/Server/Reports/reports/templates/base-timeview.html
@@ -20,6 +20,9 @@ document.write(getCalendarStyles());
{% if not timestamp %}Rendered at {% now "Y-m-d H:i" %} | {% else %}View as of {{ timestamp|date:"Y-m-d H:i" }} | {% endif %}{% spaceless %}
<a id='cal_link' name='cal_link' href='#' onclick='showCalendar(); return false;'
>[change]</a>
- <form method='post' action='{{ path }}' id='cal_form' name='cal_form'><input id='cal_date' name='cal_date' type='hidden' value=''/></form>
+ <form method='post' action='{{ path }}' id='cal_form' name='cal_form'>
+ <input id='cal_date' name='cal_date' type='hidden' value=''/>
+ <input name='op' type='hidden' value='timeview'/>
+ </form>
{% endspaceless %}
{% endblock %}
diff --git a/src/lib/Bcfg2/Server/Reports/reports/templates/base.html b/src/lib/Bcfg2/Server/Reports/reports/templates/base.html
index f541c0d2b..3fa482a19 100644
--- a/src/lib/Bcfg2/Server/Reports/reports/templates/base.html
+++ b/src/lib/Bcfg2/Server/Reports/reports/templates/base.html
@@ -62,6 +62,7 @@
<li>Entries Configured</li>
</ul>
<ul class='menu-level2'>
+ <li><a href="{% url reports_common_problems %}">Common problems</a></li>
<li><a href="{% url reports_item_list "bad" %}">Bad</a></li>
<li><a href="{% url reports_item_list "modified" %}">Modified</a></li>
<li><a href="{% url reports_item_list "extra" %}">Extra</a></li>
@@ -87,7 +88,7 @@
<div style='clear:both'></div>
</div><!-- document -->
<div id="footer">
- <span>Bcfg2 Version 1.2.2</span>
+ <span>Bcfg2 Version 1.2.3</span>
</div>
<div id="calendar_div" style='position:absolute; visibility:hidden; background-color:white; layer-background-color:white;'></div>
diff --git a/src/lib/Bcfg2/Server/Reports/reports/templates/clients/detail.html b/src/lib/Bcfg2/Server/Reports/reports/templates/clients/detail.html
index dd4295f21..9b86b609f 100644
--- a/src/lib/Bcfg2/Server/Reports/reports/templates/clients/detail.html
+++ b/src/lib/Bcfg2/Server/Reports/reports/templates/clients/detail.html
@@ -50,6 +50,9 @@ span.history_links a {
{% if interaction.server %}
<tr><td>Served by</td><td>{{interaction.server}}</td></tr>
{% endif %}
+ {% if interaction.metadata %}
+ <tr><td>Profile</td><td>{{interaction.metadata.profile}}</td></tr>
+ {% endif %}
{% if interaction.repo_rev_code %}
<tr><td>Revision</td><td>{{interaction.repo_rev_code}}</td></tr>
{% endif %}
@@ -60,58 +63,57 @@ span.history_links a {
{% endif %}
</table>
- {% if interaction.bad_entry_count %}
+ {% if interaction.metadata.groups.count %}
<div class='entry_list'>
- <div class='entry_list_head dirty-lineitem' onclick='javascript:toggleMe("bad_table");'>
- <h3>Bad Entries &#8212; {{ interaction.bad_entry_count }}</h3>
- <div class='entry_expand_tab' id='plusminus_bad_table'>[+]</div>
+ <div class='entry_list_head' onclick='javascript:toggleMe("groups_table");'>
+ <h3>Group membership</h3>
+ <div class='entry_expand_tab' id='plusminus_groups_table'>[+]</div>
</div>
- <table id='bad_table' class='entry_list'>
- {% for e in interaction.bad|sortwell %}
+ <table id='groups_table' class='entry_list' style='display: none'>
+ {% for group in interaction.metadata.groups.all %}
<tr class='{% cycle listview,listview_alt %}'>
- <td class='entry_list_type'>{{e.entry.kind}}:</td>
- <td><a href="{% url reports_item "bad",e.id %}">
- {{e.entry.name}}</a></td>
+ <td class='entry_list_type'>{{group}}</td>
</tr>
{% endfor %}
</table>
</div>
{% endif %}
- {% if interaction.modified_entry_count %}
+ {% if interaction.metadata.bundles.count %}
<div class='entry_list'>
- <div class='entry_list_head modified-lineitem' onclick='javascript:toggleMe("modified_table");'>
- <h3>Modified Entries &#8212; {{ interaction.modified_entry_count }}</h3>
- <div class='entry_expand_tab' id='plusminus_modified_table'>[+]</div>
+ <div class='entry_list_head' onclick='javascript:toggleMe("bundles_table");'>
+ <h3>Bundle membership</h3>
+    <div class='entry_expand_tab' id='plusminus_bundles_table'>[+]</div>
</div>
- <table id='modified_table' class='entry_list'>
- {% for e in interaction.modified|sortwell %}
+ <table id='bundles_table' class='entry_list' style='display: none'>
+ {% for bundle in interaction.metadata.bundles.all %}
<tr class='{% cycle listview,listview_alt %}'>
- <td class='entry_list_type'>{{e.entry.kind}}:</td>
- <td><a href="{% url reports_item "modified",e.id %}">
- {{e.entry.name}}</a></td>
+ <td class='entry_list_type'>{{bundle}}</td>
</tr>
{% endfor %}
</table>
</div>
{% endif %}
- {% if interaction.extra_entry_count %}
+ {% for type, ei_list in ei_lists %}
+ {% if ei_list %}
<div class='entry_list'>
- <div class='entry_list_head extra-lineitem' onclick='javascript:toggleMe("extra_table");'>
- <h3>Extra Entries &#8212; {{ interaction.extra_entry_count }}</h3>
- <div class='entry_expand_tab' id='plusminus_extra_table'>[+]</div>
+ <div class='entry_list_head {{type}}-lineitem' onclick='javascript:toggleMe("{{type}}_table");'>
+ <h3>{{ type|capfirst }} Entries &#8212; {{ ei_list|length }}</h3>
+ <div class='entry_expand_tab' id='plusminus_{{type}}_table'>[+]</div>
</div>
- <table id='extra_table' class='entry_list'>
- {% for e in interaction.extra|sortwell %}
+ <table id='{{type}}_table' class='entry_list'>
+ {% for ei in ei_list %}
<tr class='{% cycle listview,listview_alt %}'>
- <td class='entry_list_type'>{{e.entry.kind}}:</td>
- <td><a href="{% url reports_item "extra",e.id %}">{{e.entry.name}}</a></td>
+ <td class='entry_list_type'>{{ei.entry.kind}}</td>
+ <td><a href="{% url reports_item type ei.id %}">
+ {{ei.entry.name}}</a></td>
</tr>
- {% endfor %}
+ {% endfor %}
</table>
</div>
{% endif %}
+ {% endfor %}
{% if entry_list %}
<div class="entry_list recent_history_wrapper">
diff --git a/src/lib/Bcfg2/Server/Reports/reports/templates/clients/detailed-list.html b/src/lib/Bcfg2/Server/Reports/reports/templates/clients/detailed-list.html
index 84ac71d92..9be59e7d2 100644
--- a/src/lib/Bcfg2/Server/Reports/reports/templates/clients/detailed-list.html
+++ b/src/lib/Bcfg2/Server/Reports/reports/templates/clients/detailed-list.html
@@ -6,18 +6,18 @@
{% block content %}
<div class='client_list_box'>
-{% if entry_list %}
{% filter_navigator %}
+{% if entry_list %}
<table cellpadding="3">
<tr id='table_list_header' class='listview'>
- <td class='left_column'>Node</td>
- <td class='right_column' style='width:75px'>State</td>
- <td class='right_column_narrow'>Good</td>
- <td class='right_column_narrow'>Bad</td>
- <td class='right_column_narrow'>Modified</td>
- <td class='right_column_narrow'>Extra</td>
- <td class='right_column'>Last Run</td>
- <td class='right_column_wide'>Server</td>
+ <td class='left_column'>{% sort_link 'client' 'Node' %}</td>
+ <td class='right_column' style='width:75px'>{% sort_link 'state' 'State' %}</td>
+ <td class='right_column_narrow'>{% sort_link '-good' 'Good' %}</td>
+ <td class='right_column_narrow'>{% sort_link '-bad' 'Bad' %}</td>
+ <td class='right_column_narrow'>{% sort_link '-modified' 'Modified' %}</td>
+ <td class='right_column_narrow'>{% sort_link '-extra' 'Extra' %}</td>
+ <td class='right_column'>{% sort_link 'timestamp' 'Last Run' %}</td>
+ <td class='right_column_wide'>{% sort_link 'server' 'Server' %}</td>
</tr>
{% for entry in entry_list %}
<tr class='{% cycle listview,listview_alt %}'>
diff --git a/src/lib/Bcfg2/Server/Reports/reports/templates/clients/index.html b/src/lib/Bcfg2/Server/Reports/reports/templates/clients/index.html
index 134e237d6..45ba20b86 100644
--- a/src/lib/Bcfg2/Server/Reports/reports/templates/clients/index.html
+++ b/src/lib/Bcfg2/Server/Reports/reports/templates/clients/index.html
@@ -9,6 +9,7 @@
{% block pagebanner %}Clients - Grid View{% endblock %}
{% block content %}
+{% filter_navigator %}
{% if inter_list %}
<table class='grid-view' align='center'>
{% for inter in inter_list %}
diff --git a/src/lib/Bcfg2/Server/Reports/reports/templates/clients/manage.html b/src/lib/Bcfg2/Server/Reports/reports/templates/clients/manage.html
index 5725ae577..443ec8ccb 100644
--- a/src/lib/Bcfg2/Server/Reports/reports/templates/clients/manage.html
+++ b/src/lib/Bcfg2/Server/Reports/reports/templates/clients/manage.html
@@ -38,8 +38,8 @@
</tr>
{% endfor %}
</table>
- </div>
{% else %}
<p>No client records are available.</p>
{% endif %}
+ </div>
{% endblock %}
diff --git a/src/lib/Bcfg2/Server/Reports/reports/templates/config_items/common.html b/src/lib/Bcfg2/Server/Reports/reports/templates/config_items/common.html
new file mode 100644
index 000000000..d6ad303fc
--- /dev/null
+++ b/src/lib/Bcfg2/Server/Reports/reports/templates/config_items/common.html
@@ -0,0 +1,42 @@
+{% extends "base-timeview.html" %}
+{% load bcfg2_tags %}
+
+{% block title %}Bcfg2 - Common Problems{% endblock %}
+
+{% block extra_header_info %}
+{% endblock%}
+
+{% block pagebanner %}Common configuration problems{% endblock %}
+
+{% block content %}
+ <div id='threshold_box'>
+ <form method='post' action='{{ request.path }}'>
+      <span>Showing items with more than {{ threshold }} entries</span>
+ <input type='text' name='threshold' value='{{ threshold }}' maxlength='5' size='5' />
+ <input type='submit' value='Change' />
+ </form>
+ </div>
+ {% for type_name, type_list in lists %}
+ <div class='entry_list'>
+ <div class='entry_list_head element_list_head' onclick='javascript:toggleMe("table_{{ type_name }}");'>
+ <h3>{{ type_name|capfirst }} entries</h3>
+ <div class='entry_expand_tab' id='plusminus_table_{{ type_name }}'>[&ndash;]</div>
+ </div>
+ {% if type_list %}
+ <table id='table_{{ type_name }}' class='entry_list'>
+ <tr style='text-align: left'><th>Type</th><th>Name</th><th>Count</th><th>Reason</th></tr>
+ {% for entry, reason, interaction in type_list %}
+ <tr class='{% cycle listview,listview_alt %}'>
+ <td>{{ entry.kind }}</td>
+ <td><a href="{% url reports_entry eid=entry.pk %}">{{ entry.name }}</a></td>
+ <td>{{ interaction|length }}</td>
+ <td><a href="{% url reports_item type=type_name pk=interaction.0 %}">{{ reason.short_list|join:"," }}</a></td>
+ </tr>
+ {% endfor %}
+ </table>
+ {% else %}
+ <p>There are currently no inconsistent {{ type_name }} configuration entries.</p>
+ {% endif %}
+ </div>
+ {% endfor %}
+{% endblock %}
diff --git a/src/lib/Bcfg2/Server/Reports/reports/templates/config_items/entry_status.html b/src/lib/Bcfg2/Server/Reports/reports/templates/config_items/entry_status.html
new file mode 100644
index 000000000..5f7579eb9
--- /dev/null
+++ b/src/lib/Bcfg2/Server/Reports/reports/templates/config_items/entry_status.html
@@ -0,0 +1,30 @@
+{% extends "base-timeview.html" %}
+{% load bcfg2_tags %}
+
+{% block title %}Bcfg2 - Entry Status{% endblock %}
+
+{% block extra_header_info %}
+{% endblock%}
+
+{% block pagebanner %}{{ entry.kind }} entry {{ entry.name }} status{% endblock %}
+
+{% block content %}
+{% filter_navigator %}
+{% if item_data %}
+ <div class='entry_list'>
+ <table class='entry_list'>
+ <tr style='text-align: left' ><th>Name</th><th>Timestamp</th><th>State</th><th>Reason</th></tr>
+ {% for ei, inter, reason in item_data %}
+ <tr class='{% cycle listview,listview_alt %}'>
+ <td><a href='{% url Bcfg2.Server.Reports.reports.views.client_detail hostname=inter.client.name, pk=inter.id %}'>{{ inter.client.name }}</a></td>
+ <td style='white-space: nowrap'><a href='{% url Bcfg2.Server.Reports.reports.views.client_detail hostname=inter.client.name, pk=inter.id %}'>{{ inter.timestamp|date:"Y-m-d\&\n\b\s\p\;H:i"|safe }}</a></td>
+ <td>{{ ei.get_type_display }}</td>
+ <td style='white-space: nowrap'><a href="{% url reports_item type=ei.get_type_display pk=ei.pk %}">{{ reason.short_list|join:"," }}</a></td>
+ </tr>
+ {% endfor %}
+ </table>
+ </div>
+{% else %}
+ <p>There are currently no hosts with this configuration entry.</p>
+{% endif %}
+{% endblock %}
diff --git a/src/lib/Bcfg2/Server/Reports/reports/templates/config_items/listing.html b/src/lib/Bcfg2/Server/Reports/reports/templates/config_items/listing.html
index 9b1026a08..0a92e7fc0 100644
--- a/src/lib/Bcfg2/Server/Reports/reports/templates/config_items/listing.html
+++ b/src/lib/Bcfg2/Server/Reports/reports/templates/config_items/listing.html
@@ -9,19 +9,21 @@
{% block pagebanner %}{{mod_or_bad|capfirst}} Element Listing{% endblock %}
{% block content %}
-{% if item_list_dict %}
- {% for kind, entries in item_list_dict.items %}
-
+{% filter_navigator %}
+{% if item_list %}
+ {% for type_name, type_data in item_list %}
<div class='entry_list'>
- <div class='entry_list_head element_list_head' onclick='javascript:toggleMe("table_{{ kind }}");'>
- <h3>{{ kind }} &#8212; {{ entries|length }}</h3>
- <div class='entry_expand_tab' id='plusminus_table_{{ kind }}'>[&ndash;]</div>
+ <div class='entry_list_head element_list_head' onclick='javascript:toggleMe("table_{{ type_name }}");'>
+ <h3>{{ type_name }} &#8212; {{ type_data|length }}</h3>
+ <div class='entry_expand_tab' id='plusminus_table_{{ type_name }}'>[&ndash;]</div>
</div>
-
- <table id='table_{{ kind }}' class='entry_list'>
- {% for e in entries %}
+ <table id='table_{{ type_name }}' class='entry_list'>
+ <tr style='text-align: left' ><th>Name</th><th>Count</th><th>Reason</th></tr>
+ {% for entry, reason, eis in type_data %}
<tr class='{% cycle listview,listview_alt %}'>
- <td><a href="{% url reports_item type=mod_or_bad,pk=e.id %}">{{e.entry.name}}</a></td>
+ <td><a href="{% url reports_entry eid=entry.pk %}">{{entry.name}}</a></td>
+ <td>{{ eis|length }}</td>
+ <td><a href="{% url reports_item type=mod_or_bad,pk=eis.0 %}">{{ reason.short_list|join:"," }}</a></td>
</tr>
{% endfor %}
</table>
diff --git a/src/lib/Bcfg2/Server/Reports/reports/templates/widgets/filter_bar.html b/src/lib/Bcfg2/Server/Reports/reports/templates/widgets/filter_bar.html
index 6fbe585ab..759415507 100644
--- a/src/lib/Bcfg2/Server/Reports/reports/templates/widgets/filter_bar.html
+++ b/src/lib/Bcfg2/Server/Reports/reports/templates/widgets/filter_bar.html
@@ -1,13 +1,25 @@
{% spaceless %}
+<div class="filter_bar">
+<form name='filter_form'>
{% if filters %}
{% for filter, filter_url in filters %}
{% if forloop.first %}
- <div class="filter_bar">Active filters (click to remove):
+ Active filters (click to remove):
{% endif %}
<a href='{{ filter_url }}'>{{ filter|capfirst }}</a>{% if not forloop.last %}, {% endif %}
{% if forloop.last %}
- </div>
+ {% if groups %}|{% endif %}
{% endif %}
{% endfor %}
{% endif %}
+{% if groups %}
+<label for="id_group">Group filter:</label>
+<select id="id_group" name="group" onchange="javascript:url=document.forms['filter_form'].group.value; if(url) { location.href=url }">
+ {% for group, group_url, selected in groups %}
+ <option label="{{group}}" value="{{group_url}}" {% if selected %}selected {% endif %}/>
+ {% endfor %}
+</select>
+{% endif %}
+</form>
+</div>
{% endspaceless %}
diff --git a/src/lib/Bcfg2/Server/Reports/reports/templatetags/bcfg2_tags.py b/src/lib/Bcfg2/Server/Reports/reports/templatetags/bcfg2_tags.py
index ac63cda3e..894353bba 100644
--- a/src/lib/Bcfg2/Server/Reports/reports/templatetags/bcfg2_tags.py
+++ b/src/lib/Bcfg2/Server/Reports/reports/templatetags/bcfg2_tags.py
@@ -1,11 +1,17 @@
import sys
+from copy import copy
from django import template
+from django.conf import settings
from django.core.urlresolvers import resolve, reverse, \
Resolver404, NoReverseMatch
+from django.template.loader import get_template, \
+ get_template_from_string,TemplateDoesNotExist
from django.utils.encoding import smart_unicode, smart_str
+from django.utils.safestring import mark_safe
from datetime import datetime, timedelta
from Bcfg2.Server.Reports.utils import filter_list
+from Bcfg2.Server.Reports.reports.models import Group
register = template.Library()
@@ -115,13 +121,27 @@ def filter_navigator(context):
filters = []
for filter in filter_list:
+ if filter == 'group':
+ continue
if filter in kwargs:
myargs = kwargs.copy()
del myargs[filter]
filters.append((filter,
reverse(view, args=args, kwargs=myargs)))
filters.sort(lambda x, y: cmp(x[0], y[0]))
- return {'filters': filters}
+
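+        # build the group drop-down: '---' clears the group filter, and each
+        # group name links back to this view with that group filter applied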
+ myargs = kwargs.copy()
+ selected=True
+ if 'group' in myargs:
+ del myargs['group']
+ selected=False
+ groups = [('---', reverse(view, args=args, kwargs=myargs), selected)]
+ for group in Group.objects.values('name'):
+ myargs['group'] = group['name']
+ groups.append((group['name'], reverse(view, args=args, kwargs=myargs),
+ group['name'] == kwargs.get('group', '')))
+
+ return {'filters': filters, 'groups': groups}
except (Resolver404, NoReverseMatch, ValueError, KeyError):
pass
return dict()
@@ -242,19 +262,6 @@ def add_url_filter(parser, token):
return AddUrlFilter(filter_name, filter_value)
-@register.filter
-def sortwell(value):
- """
- Sorts a list(or evaluates queryset to list) of bad, extra, or modified items in the best
- way for presentation
- """
-
- configItems = list(value)
- configItems.sort(lambda x, y: cmp(x.entry.name, y.entry.name))
- configItems.sort(lambda x, y: cmp(x.entry.kind, y.entry.kind))
- return configItems
-
-
class MediaTag(template.Node):
def __init__(self, filter_value):
self.filter_value = filter_value
@@ -311,3 +318,98 @@ def determine_client_state(entry):
else:
thisdirty = "very-dirty-lineitem"
return thisdirty
+
+
+@register.tag(name='qs')
+def do_qs(parser, token):
+ """
+ qs tag
+
+ accepts a name value pair and inserts or replaces it in the query string
+ """
+ try:
+ tag, name, value = token.split_contents()
+ except ValueError:
+ raise TemplateSyntaxError, "%r tag requires exactly two arguments" \
+ % token.contents.split()[0]
+ return QsNode(name, value)
+
+class QsNode(template.Node):
+ def __init__(self, name, value):
+ self.name = template.Variable(name)
+ self.value = template.Variable(value)
+
+ def render(self, context):
+ try:
+ name = self.name.resolve(context)
+ value = self.value.resolve(context)
+ request = context['request']
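+            # copy the current query string and overwrite just this key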
+ qs = copy(request.GET)
+ qs[name] = value
+ return "?%s" % qs.urlencode()
+ except template.VariableDoesNotExist:
+ return ''
+ except KeyError:
+ if settings.TEMPLATE_DEBUG:
+ raise Exception, "'qs' tag requires context['request']"
+ return ''
+ except:
+ return ''
+
+
+@register.tag
+def sort_link(parser, token):
+ '''
+ Create a sort anchor tag. Reverse it if active.
+
+ {% sort_link sort_key text %}
+ '''
+ try:
+ tag, sort_key, text = token.split_contents()
+ except ValueError:
+ raise TemplateSyntaxError("%r tag requires at least four arguments" \
+ % token.split_contents()[0])
+
+ return SortLinkNode(sort_key, text)
+
+class SortLinkNode(template.Node):
+ __TMPL__ = "{% load bcfg2_tags %}<a href='{% qs 'sort' key %}'>{{ text }}</a>"
+
+ def __init__(self, sort_key, text):
+ self.sort_key = template.Variable(sort_key)
+ self.text = template.Variable(text)
+
+ def render(self, context):
+ try:
+ try:
+ sort = context['request'].GET['sort']
+ except KeyError:
+ #fall back on this
+ sort = context.get('sort', '')
+ sort_key = self.sort_key.resolve(context)
+ text = self.text.resolve(context)
+
+ # add arrows
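+            # if the active sort is descending on this key, show a down arrow
+            # and link back to ascending; if ascending, show an up arrow and
+            # flip the link to descending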
+ try:
+ sort_base = sort_key.lstrip('-')
+ if sort[0] == '-' and sort[1:] == sort_base:
+ text = text + '&#x25BC;'
+ sort_key = sort_base
+ elif sort_base == sort:
+ text = text + '&#x25B2;'
+ sort_key = '-' + sort_base
+ except IndexError:
+ pass
+
+ context.push()
+ context['key'] = sort_key
+ context['text'] = mark_safe(text)
+ output = get_template_from_string(self.__TMPL__).render(context)
+ context.pop()
+ return output
+ except:
+ if settings.DEBUG:
+ raise
+ return ''
+
diff --git a/src/lib/Bcfg2/Server/Reports/reports/templatetags/syntax_coloring.py b/src/lib/Bcfg2/Server/Reports/reports/templatetags/syntax_coloring.py
index 36d4cf693..0d4c6501d 100644
--- a/src/lib/Bcfg2/Server/Reports/reports/templatetags/syntax_coloring.py
+++ b/src/lib/Bcfg2/Server/Reports/reports/templatetags/syntax_coloring.py
@@ -4,6 +4,8 @@ from django.utils.encoding import smart_unicode
from django.utils.html import conditional_escape
from django.utils.safestring import mark_safe
+from Bcfg2.Bcfg2Py3k import u_str
+
register = template.Library()
try:
@@ -16,14 +18,6 @@ except:
colorize = False
-# py3k compatibility
-def u_str(string):
- if sys.hexversion >= 0x03000000:
- return string
- else:
- return unicode(string)
-
-
@register.filter
def syntaxhilight(value, arg="diff", autoescape=None):
"""
diff --git a/src/lib/Bcfg2/Server/Reports/reports/urls.py b/src/lib/Bcfg2/Server/Reports/reports/urls.py
index 434ce07b7..1cfe725c2 100644
--- a/src/lib/Bcfg2/Server/Reports/reports/urls.py
+++ b/src/lib/Bcfg2/Server/Reports/reports/urls.py
@@ -17,20 +17,23 @@ urlpatterns = patterns('Bcfg2.Server.Reports.reports',
url(r'^client/(?P<hostname>[^/]+)/(?P<pk>\d+)/?$', 'views.client_detail', name='reports_client_detail_pk'),
url(r'^client/(?P<hostname>[^/]+)/?$', 'views.client_detail', name='reports_client_detail'),
url(r'^elements/(?P<type>\w+)/(?P<pk>\d+)/?$', 'views.config_item', name='reports_item'),
+ url(r'^entry/(?P<eid>\w+)/?$', 'views.entry_status', name='reports_entry'),
)
urlpatterns += patterns('Bcfg2.Server.Reports.reports',
*timeviewUrls(
- (r'^grid/?$', 'views.client_index', None, 'reports_grid_view'),
(r'^summary/?$', 'views.display_summary', None, 'reports_summary'),
(r'^timing/?$', 'views.display_timing', None, 'reports_timing'),
- (r'^elements/(?P<type>\w+)/?$', 'views.config_item_list', None, 'reports_item_list'),
+ (r'^common/(?P<threshold>\d+)/?$', 'views.common_problems', None, 'reports_common_problems'),
+ (r'^common/?$', 'views.common_problems', None, 'reports_common_problems'),
))
urlpatterns += patterns('Bcfg2.Server.Reports.reports',
*filteredUrls(*timeviewUrls(
+ (r'^grid/?$', 'views.client_index', None, 'reports_grid_view'),
(r'^detailed/?$',
- 'views.client_detailed_list', None, 'reports_detailed_list')
+ 'views.client_detailed_list', None, 'reports_detailed_list'),
+ (r'^elements/(?P<type>\w+)/?$', 'views.config_item_list', None, 'reports_item_list'),
)))
urlpatterns += patterns('Bcfg2.Server.Reports.reports',
diff --git a/src/lib/Bcfg2/Server/Reports/reports/views.py b/src/lib/Bcfg2/Server/Reports/reports/views.py
index ccd71a60e..e4c38363f 100644
--- a/src/lib/Bcfg2/Server/Reports/reports/views.py
+++ b/src/lib/Bcfg2/Server/Reports/reports/views.py
@@ -13,16 +13,41 @@ from django.http import \
from django.shortcuts import render_to_response, get_object_or_404
from django.core.urlresolvers import \
resolve, reverse, Resolver404, NoReverseMatch
-from django.db import connection
+from django.db import connection, DatabaseError
+from django.db.models import Q
from Bcfg2.Server.Reports.reports.models import *
+__SORT_FIELDS__ = ( 'client', 'state', 'good', 'bad', 'modified', 'extra', \
+ 'timestamp', 'server' )
+
class PaginationError(Exception):
"""This error is raised when pagination cannot be completed."""
pass
+def _in_bulk(model, ids):
+ """
+    Shortcut to fetch in bulk and trap database errors.  sqlite will raise
+    a "too many SQL variables" exception if this list is too long; fall back
+    to fetching manually via django if an error occurs.
+
+    Returns a dict of the form { id: <model instance> }
+ """
+
+ try:
+ return model.objects.in_bulk(ids)
+ except DatabaseError:
+ pass
+
+    # if objects.in_bulk fails, so will objects.filter(pk__in=ids)
+ bulk_dict = {}
+ [bulk_dict.__setitem__(i.id, i) \
+ for i in model.objects.all() if i.id in ids]
+ return bulk_dict
+
+
def server_error(request):
"""
500 error handler.
@@ -44,7 +69,7 @@ def timeview(fn):
"""
def _handle_timeview(request, **kwargs):
"""Send any posts back."""
- if request.method == 'POST':
+ if request.method == 'POST' and request.POST.get('op', '') == 'timeview':
cal_date = request.POST['cal_date']
try:
fmt = "%Y/%m/%d"
@@ -84,6 +109,30 @@ def timeview(fn):
return _handle_timeview
+def _handle_filters(query, **kwargs):
+ """
+ Applies standard filters to a query object
+
+ Returns an updated query object
+
+ query - query object to filter
+
+ server -- Filter interactions by server
+ state -- Filter interactions by state
+ group -- Filter interactions by group
+
+ """
+ if 'state' in kwargs and kwargs['state']:
+ query = query.filter(state__exact=kwargs['state'])
+ if 'server' in kwargs and kwargs['server']:
+ query = query.filter(server__exact=kwargs['server'])
+
+ if 'group' in kwargs and kwargs['group']:
+ group = get_object_or_404(Group, name=kwargs['group'])
+ query = query.filter(metadata__groups__id=group.pk)
+ return query
+
+
def config_item(request, pk, type="bad"):
"""
Display a single entry.
@@ -121,47 +170,138 @@ def config_item(request, pk, type="bad"):
@timeview
-def config_item_list(request, type, timestamp=None):
+def config_item_list(request, type, timestamp=None, **kwargs):
"""Render a listing of affected elements"""
mod_or_bad = type.lower()
type = convert_entry_type_to_id(type)
if type < 0:
raise Http404
- current_clients = Interaction.objects.get_interaction_per_client_ids(timestamp)
- item_list_dict = {}
- seen = dict()
- for x in Entries_interactions.objects.filter(interaction__in=current_clients,
- type=type).select_related():
- if (x.entry, x.reason) in seen:
- continue
- seen[(x.entry, x.reason)] = 1
- if item_list_dict.get(x.entry.kind, None):
- item_list_dict[x.entry.kind].append(x)
- else:
- item_list_dict[x.entry.kind] = [x]
+ current_clients = Interaction.objects.interaction_per_client(timestamp)
+ current_clients = [q['id'] for q in _handle_filters(current_clients, **kwargs).values('id')]
+
+ ldata = list(Entries_interactions.objects.filter(
+ interaction__in=current_clients, type=type).values())
+ entry_ids = set([x['entry_id'] for x in ldata])
+ reason_ids = set([x['reason_id'] for x in ldata])
- for kind in item_list_dict:
- item_list_dict[kind].sort(lambda a, b: cmp(a.entry.name, b.entry.name))
+ entries = _in_bulk(Entries, entry_ids)
+ reasons = _in_bulk(Reason, reason_ids)
+
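+    # group rows by entry kind; each (entry_id, reason_id) pair maps to the
+    # list of Entries_interactions ids that share that entry and reason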
+ kind_list = {}
+ [kind_list.__setitem__(kind, {}) for kind in set([e.kind for e in entries.values()])]
+ for x in ldata:
+ kind = entries[x['entry_id']].kind
+ data_key = (x['entry_id'], x['reason_id'])
+ try:
+ kind_list[kind][data_key].append(x['id'])
+ except KeyError:
+ kind_list[kind][data_key] = [x['id']]
+
+ lists = []
+ for kind in kind_list.keys():
+ lists.append((kind, [(entries[e[0][0]], reasons[e[0][1]], e[1])
+ for e in sorted(kind_list[kind].iteritems(), key=lambda x: entries[x[0][0]].name)]))
return render_to_response('config_items/listing.html',
- {'item_list_dict': item_list_dict,
+ {'item_list': lists,
'mod_or_bad': mod_or_bad,
'timestamp': timestamp},
context_instance=RequestContext(request))
@timeview
-def client_index(request, timestamp=None):
+def entry_status(request, eid, timestamp=None, **kwargs):
+ """Render a listing of affected elements"""
+ entry = get_object_or_404(Entries, pk=eid)
+
+ current_clients = Interaction.objects.interaction_per_client(timestamp)
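+    # map interaction id -> interaction for the current, filtered client set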
+ inters = {}
+ [inters.__setitem__(i.id, i) \
+ for i in _handle_filters(current_clients, **kwargs).select_related('client')]
+
+ eis = Entries_interactions.objects.filter(
+ interaction__in=inters.keys(), entry=entry)
+
+ reasons = _in_bulk(Reason, set([x.reason_id for x in eis]))
+
+ item_data = []
+ for ei in eis:
+ item_data.append((ei, inters[ei.interaction_id], reasons[ei.reason_id]))
+
+ return render_to_response('config_items/entry_status.html',
+ {'entry': entry,
+ 'item_data': item_data,
+ 'timestamp': timestamp},
+ context_instance=RequestContext(request))
+
+
+@timeview
+def common_problems(request, timestamp=None, threshold=None):
+ """Mine config entries"""
+
+ if request.method == 'POST':
+ try:
+ threshold = int(request.POST['threshold'])
+ view, args, kw = resolve(request.META['PATH_INFO'])
+ kw['threshold'] = threshold
+ return HttpResponseRedirect(reverse(view,
+ args=args,
+ kwargs=kw))
+ except:
+ pass
+
+ try:
+ threshold = int(threshold)
+ except:
+ threshold = 10
+
+ c_intr = Interaction.objects.get_interaction_per_client_ids(timestamp)
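+    # data_list maps each non-good entry type to a dict keyed on
+    # (entry_id, reason_id), holding the matching Entries_interactions ids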
+ data_list = {}
+ [data_list.__setitem__(t_id, {}) \
+ for t_id, t_label in TYPE_CHOICES if t_id != TYPE_GOOD]
+ ldata = list(Entries_interactions.objects.filter(
+ interaction__in=c_intr).exclude(type=TYPE_GOOD).values())
+
+ entry_ids = set([x['entry_id'] for x in ldata])
+ reason_ids = set([x['reason_id'] for x in ldata])
+ for x in ldata:
+ type = x['type']
+ data_key = (x['entry_id'], x['reason_id'])
+ try:
+ data_list[type][data_key].append(x['id'])
+ except KeyError:
+ data_list[type][data_key] = [x['id']]
+
+ entries = _in_bulk(Entries, entry_ids)
+ reasons = _in_bulk(Reason, reason_ids)
+
+ lists = []
+ for type, type_name in TYPE_CHOICES:
+ if type == TYPE_GOOD:
+ continue
+ lists.append([type_name.lower(), [(entries[e[0][0]], reasons[e[0][1]], e[1])
+ for e in sorted(data_list[type].items(), key=lambda x: len(x[1]), reverse=True)
+ if len(e[1]) > threshold]])
+
+ return render_to_response('config_items/common.html',
+ {'lists': lists,
+ 'timestamp': timestamp,
+ 'threshold': threshold},
+ context_instance=RequestContext(request))
+
+
+@timeview
+def client_index(request, timestamp=None, **kwargs):
"""
Render a grid view of active clients.
Keyword parameters:
- timestamp -- datetime objectto render from
+ timestamp -- datetime object to render from
"""
- list = Interaction.objects.interaction_per_client(timestamp).select_related()\
- .order_by("client__name").all()
+ list = _handle_filters(Interaction.objects.interaction_per_client(timestamp), **kwargs).\
+ select_related().order_by("client__name").all()
return render_to_response('clients/index.html',
{'inter_list': list,
@@ -177,8 +317,29 @@ def client_detailed_list(request, timestamp=None, **kwargs):
"""
+ try:
+ sort = request.GET['sort']
+ if sort[0] == '-':
+ sort_key = sort[1:]
+ else:
+ sort_key = sort
+ if not sort_key in __SORT_FIELDS__:
+ raise ValueError
+
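+        # translate the requested sort key into the matching ORM ordering field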
+ if sort_key == "client":
+ kwargs['orderby'] = "%s__name" % sort
+ elif sort_key == "good":
+ kwargs['orderby'] = "%scount" % sort
+ elif sort_key in ["bad", "modified", "extra"]:
+ kwargs['orderby'] = "%s_entries" % sort
+ else:
+ kwargs['orderby'] = sort
+ kwargs['sort'] = sort
+ except (ValueError, KeyError):
+ kwargs['orderby'] = "client__name"
+ kwargs['sort'] = "client"
+
kwargs['interaction_base'] = Interaction.objects.interaction_per_client(timestamp).select_related()
- kwargs['orderby'] = "client__name"
kwargs['page_limit'] = 0
return render_history_view(request, 'clients/detailed-list.html', **kwargs)
@@ -187,13 +348,25 @@ def client_detail(request, hostname=None, pk=None):
context = dict()
client = get_object_or_404(Client, name=hostname)
if(pk == None):
- context['interaction'] = client.current_interaction
- return render_history_view(request, 'clients/detail.html', page_limit=5,
- client=client, context=context)
+ inter = client.current_interaction
+ maxdate = None
else:
- context['interaction'] = client.interactions.get(pk=pk)
- return render_history_view(request, 'clients/detail.html', page_limit=5,
- client=client, maxdate=context['interaction'].timestamp, context=context)
+ inter = client.interactions.get(pk=pk)
+ maxdate = inter.timestamp
+
+ ei = Entries_interactions.objects.filter(interaction=inter).select_related('entry').order_by('entry__kind', 'entry__name')
+ #ei = Entries_interactions.objects.filter(interaction=inter).select_related('entry')
+ #ei = sorted(Entries_interactions.objects.filter(interaction=inter).select_related('entry'),
+ # key=lambda x: (x.entry.kind, x.entry.name))
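+    # split this interaction's entries into bad/modified/extra buckets for the template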
+ context['ei_lists'] = (
+ ('bad', [x for x in ei if x.type == TYPE_BAD]),
+ ('modified', [x for x in ei if x.type == TYPE_MODIFIED]),
+ ('extra', [x for x in ei if x.type == TYPE_EXTRA])
+ )
+
+ context['interaction']=inter
+ return render_history_view(request, 'clients/detail.html', page_limit=5,
+ client=client, maxdate=maxdate, context=context)
def client_manage(request):
@@ -230,9 +403,9 @@ def display_summary(request, timestamp=None):
"""
Display a summary of the bcfg2 world
"""
- query = Interaction.objects.interaction_per_client(timestamp).select_related()
- node_count = query.count()
- recent_data = query.all()
+ recent_data = Interaction.objects.interaction_per_client(timestamp) \
+ .select_related().all()
+ node_count = len(recent_data)
if not timestamp:
timestamp = datetime.now()
@@ -240,18 +413,11 @@ def display_summary(request, timestamp=None):
bad=[],
modified=[],
extra=[],
- stale=[],
- pings=[])
+ stale=[])
for node in recent_data:
if timestamp - node.timestamp > timedelta(hours=24):
collected_data['stale'].append(node)
# If stale check for uptime
- try:
- if node.client.pings.latest().status == 'N':
- collected_data['pings'].append(node)
- except Ping.DoesNotExist:
- collected_data['pings'].append(node)
- continue
if node.bad_entry_count() > 0:
collected_data['bad'].append(node)
else:
@@ -281,9 +447,6 @@ def display_summary(request, timestamp=None):
if len(collected_data['stale']) > 0:
summary_data.append(get_dict('stale',
'nodes did not run within the last 24 hours.'))
- if len(collected_data['pings']) > 0:
- summary_data.append(get_dict('pings',
- 'are down.'))
return render_to_response('displays/summary.html',
{'summary_data': summary_data, 'node_count': node_count,
@@ -299,7 +462,11 @@ def display_timing(request, timestamp=None):
for inter in inters]
for metric in Performance.objects.filter(interaction__in=list(mdict.keys())).all():
for i in metric.interaction.all():
- mdict[i][metric.metric] = metric.value
+ try:
+ mdict[i][metric.metric] = metric.value
+ except KeyError:
+ #In the unlikely event two interactions share a metric, ignore it
+ pass
return render_to_response('displays/timing.html',
{'metrics': list(mdict.values()),
'timestamp': timestamp},
@@ -324,6 +491,7 @@ def render_history_view(request, template='clients/history.html', **kwargs):
not found
server -- Filter interactions by server
state -- Filter interactions by state
+ group -- Filter interactions by group
entry_max -- Most recent interaction to display
orderby -- Sort results using this field
@@ -345,15 +513,15 @@ def render_history_view(request, template='clients/history.html', **kwargs):
# Either filter by client or limit by clients
iquery = kwargs.get('interaction_base', Interaction.objects)
if client:
- iquery = iquery.filter(client__exact=client).select_related()
+ iquery = iquery.filter(client__exact=client)
+ iquery = iquery.select_related()
if 'orderby' in kwargs and kwargs['orderby']:
iquery = iquery.order_by(kwargs['orderby'])
+ if 'sort' in kwargs:
+ context['sort'] = kwargs['sort']
- if 'state' in kwargs and kwargs['state']:
- iquery = iquery.filter(state__exact=kwargs['state'])
- if 'server' in kwargs and kwargs['server']:
- iquery = iquery.filter(server__exact=kwargs['server'])
+ iquery = _handle_filters(iquery, **kwargs)
if entry_max:
iquery = iquery.filter(timestamp__lte=entry_max)
diff --git a/src/lib/Bcfg2/Server/Reports/settings.py b/src/lib/Bcfg2/Server/Reports/settings.py
deleted file mode 100644
index 4d567f1a2..000000000
--- a/src/lib/Bcfg2/Server/Reports/settings.py
+++ /dev/null
@@ -1,161 +0,0 @@
-import django
-import sys
-
-# Compatibility import
-from Bcfg2.Bcfg2Py3k import ConfigParser
-# Django settings for bcfg2 reports project.
-c = ConfigParser.ConfigParser()
-if len(c.read(['/etc/bcfg2.conf', '/etc/bcfg2-web.conf'])) == 0:
- raise ImportError("Please check that bcfg2.conf or bcfg2-web.conf exists "
- "and is readable by your web server.")
-
-try:
- DEBUG = c.getboolean('statistics', 'web_debug')
-except:
- DEBUG = False
-
-if DEBUG:
- print("Warning: Setting web_debug to True causes extraordinary memory "
- "leaks. Only use this setting if you know what you're doing.")
-
-TEMPLATE_DEBUG = DEBUG
-
-ADMINS = (
- ('Root', 'root'),
-)
-
-MANAGERS = ADMINS
-try:
- db_engine = c.get('statistics', 'database_engine')
-except ConfigParser.NoSectionError:
- e = sys.exc_info()[1]
- raise ImportError("Failed to determine database engine: %s" % e)
-db_name = ''
-if c.has_option('statistics', 'database_name'):
- db_name = c.get('statistics', 'database_name')
-if db_engine == 'sqlite3' and db_name == '':
- db_name = "%s/etc/brpt.sqlite" % c.get('server', 'repository')
-
-DATABASES = {
- 'default': {
- 'ENGINE': "django.db.backends.%s" % db_engine,
- 'NAME': db_name
- }
-}
-
-if db_engine != 'sqlite3':
- DATABASES['default']['USER'] = c.get('statistics', 'database_user')
- DATABASES['default']['PASSWORD'] = c.get('statistics', 'database_password')
- DATABASES['default']['HOST'] = c.get('statistics', 'database_host')
- try:
- DATABASES['default']['PORT'] = c.get('statistics', 'database_port')
- except: # An empty string tells Django to use the default port.
- DATABASES['default']['PORT'] = ''
-
-if django.VERSION[0] == 1 and django.VERSION[1] < 2:
- DATABASE_ENGINE = db_engine
- DATABASE_NAME = DATABASES['default']['NAME']
- if DATABASE_ENGINE != 'sqlite3':
- DATABASE_USER = DATABASES['default']['USER']
- DATABASE_PASSWORD = DATABASES['default']['PASSWORD']
- DATABASE_HOST = DATABASES['default']['HOST']
- DATABASE_PORT = DATABASES['default']['PORT']
-
-
-# Local time zone for this installation. All choices can be found here:
-# http://docs.djangoproject.com/en/dev/ref/settings/#time-zone
-if django.VERSION[0] == 1 and django.VERSION[1] > 2:
- try:
- TIME_ZONE = c.get('statistics', 'time_zone')
- except:
- TIME_ZONE = None
-
-# Language code for this installation. All choices can be found here:
-# http://www.w3.org/TR/REC-html40/struct/dirlang.html#langcodes
-# http://blogs.law.harvard.edu/tech/stories/storyReader$15
-LANGUAGE_CODE = 'en-us'
-
-SITE_ID = 1
-
-# Absolute path to the directory that holds media.
-# Example: "/home/media/media.lawrence.com/"
-MEDIA_ROOT = ''
-
-# URL that handles the media served from MEDIA_ROOT.
-# Example: "http://media.lawrence.com"
-MEDIA_URL = '/site_media'
-if c.has_option('statistics', 'web_prefix'):
- MEDIA_URL = c.get('statistics', 'web_prefix').rstrip('/') + MEDIA_URL
-
-# URL prefix for admin media -- CSS, JavaScript and images. Make sure to use a
-# trailing slash.
-# Examples: "http://foo.com/media/", "/media/".
-ADMIN_MEDIA_PREFIX = '/media/'
-
-# Make this unique, and don't share it with anybody.
-SECRET_KEY = 'eb5+y%oy-qx*2+62vv=gtnnxg1yig_odu0se5$h0hh#pc*lmo7'
-
-# List of callables that know how to import templates from various sources.
-TEMPLATE_LOADERS = (
- 'django.template.loaders.filesystem.load_template_source',
- 'django.template.loaders.app_directories.load_template_source',
-)
-
-MIDDLEWARE_CLASSES = (
- 'django.middleware.common.CommonMiddleware',
- 'django.contrib.sessions.middleware.SessionMiddleware',
- 'django.contrib.auth.middleware.AuthenticationMiddleware',
- 'django.middleware.doc.XViewMiddleware',
-)
-
-ROOT_URLCONF = 'Bcfg2.Server.Reports.urls'
-
-# Authentication Settings
-# Use NIS authentication backend defined in backends.py
-AUTHENTICATION_BACKENDS = ('django.contrib.auth.backends.ModelBackend',
- 'Bcfg2.Server.Reports.backends.NISBackend')
-# The NIS group authorized to login to BCFG2's reportinvg system
-AUTHORIZED_GROUP = ''
-#create login url area:
-try:
- import django.contrib.auth
-except ImportError:
- raise ImportError('Import of Django module failed. Is Django installed?')
-django.contrib.auth.LOGIN_URL = '/login'
-
-SESSION_EXPIRE_AT_BROWSER_CLOSE = True
-
-
-
-TEMPLATE_DIRS = (
- # Put strings here, like "/home/html/django_templates".
- # Always use forward slashes, even on Windows.
- '/usr/share/python-support/python-django/django/contrib/admin/templates/',
- 'Bcfg2.Server.Reports.reports'
-)
-
-if django.VERSION[0] == 1 and django.VERSION[1] < 2:
- TEMPLATE_CONTEXT_PROCESSORS = (
- 'django.core.context_processors.auth',
- 'django.core.context_processors.debug',
- 'django.core.context_processors.i18n',
- 'django.core.context_processors.media',
- 'django.core.context_processors.request'
- )
-else:
- TEMPLATE_CONTEXT_PROCESSORS = (
- 'django.contrib.auth.context_processors.auth',
- 'django.core.context_processors.debug',
- 'django.core.context_processors.i18n',
- 'django.core.context_processors.media',
- 'django.core.context_processors.request'
- )
-
-INSTALLED_APPS = (
- 'django.contrib.auth',
- 'django.contrib.contenttypes',
- 'django.contrib.sessions',
- 'django.contrib.sites',
- 'django.contrib.admin',
- 'Bcfg2.Server.Reports.reports'
-)
diff --git a/src/lib/Bcfg2/Server/Reports/updatefix.py b/src/lib/Bcfg2/Server/Reports/updatefix.py
deleted file mode 100644
index 192b94b61..000000000
--- a/src/lib/Bcfg2/Server/Reports/updatefix.py
+++ /dev/null
@@ -1,281 +0,0 @@
-import Bcfg2.Server.Reports.settings
-
-from django.db import connection, DatabaseError
-import django.core.management
-import logging
-import sys
-import traceback
-from Bcfg2.Server.Reports.reports.models import InternalDatabaseVersion, \
- TYPE_BAD, TYPE_MODIFIED, TYPE_EXTRA
-logger = logging.getLogger('Bcfg2.Server.Reports.UpdateFix')
-
-
-# all update function should go here
-def _merge_database_table_entries():
- cursor = connection.cursor()
- insert_cursor = connection.cursor()
- find_cursor = connection.cursor()
- cursor.execute("""
- Select name, kind from reports_bad
- union
- select name, kind from reports_modified
- union
- select name, kind from reports_extra
- """)
- # this fetch could be better done
- entries_map = {}
- for row in cursor.fetchall():
- insert_cursor.execute("insert into reports_entries (name, kind) \
- values (%s, %s)", (row[0], row[1]))
- entries_map[(row[0], row[1])] = insert_cursor.lastrowid
-
- cursor.execute("""
- Select name, kind, reason_id, interaction_id, 1 from reports_bad
- inner join reports_bad_interactions on reports_bad.id=reports_bad_interactions.bad_id
- union
- Select name, kind, reason_id, interaction_id, 2 from reports_modified
- inner join reports_modified_interactions on reports_modified.id=reports_modified_interactions.modified_id
- union
- Select name, kind, reason_id, interaction_id, 3 from reports_extra
- inner join reports_extra_interactions on reports_extra.id=reports_extra_interactions.extra_id
- """)
- for row in cursor.fetchall():
- key = (row[0], row[1])
- if entries_map.get(key, None):
- entry_id = entries_map[key]
- else:
- find_cursor.execute("Select id from reports_entries where name=%s and kind=%s", key)
- rowe = find_cursor.fetchone()
- entry_id = rowe[0]
- insert_cursor.execute("insert into reports_entries_interactions \
- (entry_id, interaction_id, reason_id, type) values (%s, %s, %s, %s)", (entry_id, row[3], row[2], row[4]))
-
-
-def _interactions_constraint_or_idx():
- """sqlite doesn't support alter tables.. or constraints"""
- cursor = connection.cursor()
- try:
- cursor.execute('alter table reports_interaction add constraint reports_interaction_20100601 unique (client_id,timestamp)')
- except:
- cursor.execute('create unique index reports_interaction_20100601 on reports_interaction (client_id,timestamp)')
-
-
-def _remove_table_column(tbl, col):
- """sqlite doesn't support deleting a column via alter table"""
- cursor = connection.cursor()
- db_engine = Bcfg2.Server.Reports.settings.DATABASES['default']['ENGINE']
- if db_engine == 'django.db.backends.mysql':
- db_name = Bcfg2.Server.Reports.settings.DATABASES['default']['NAME']
- column_exists = cursor.execute('select * from information_schema.columns '
- 'where table_schema="%s" and '
- 'table_name="%s" '
- 'and column_name="%s";' % (db_name, tbl, col))
- if not column_exists:
- # column doesn't exist
- return
- # if column exists from previous database, remove it
- cursor.execute('alter table %s '
- 'drop column %s;' % (tbl, col))
- elif db_engine == 'django.db.backends.sqlite3':
- # check if table exists
- try:
- cursor.execute('select * from sqlite_master where name=%s and type="table";' % tbl)
- except DatabaseError:
- # table doesn't exist
- return
-
- # sqlite wants us to create a new table containing the columns we want
- # and copy into it http://www.sqlite.org/faq.html#q11
- tmptbl_name = "t_backup"
- _tmptbl_create = \
-"""create temporary table "%s" (
- "id" integer NOT NULL PRIMARY KEY,
- "client_id" integer NOT NULL REFERENCES "reports_client" ("id"),
- "timestamp" datetime NOT NULL,
- "state" varchar(32) NOT NULL,
- "repo_rev_code" varchar(64) NOT NULL,
- "goodcount" integer NOT NULL,
- "totalcount" integer NOT NULL,
- "server" varchar(256) NOT NULL,
- "bad_entries" integer NOT NULL,
- "modified_entries" integer NOT NULL,
- "extra_entries" integer NOT NULL,
- UNIQUE ("client_id", "timestamp")
-);""" % tmptbl_name
- _newtbl_create = \
-"""create table "%s" (
- "id" integer NOT NULL PRIMARY KEY,
- "client_id" integer NOT NULL REFERENCES "reports_client" ("id"),
- "timestamp" datetime NOT NULL,
- "state" varchar(32) NOT NULL,
- "repo_rev_code" varchar(64) NOT NULL,
- "goodcount" integer NOT NULL,
- "totalcount" integer NOT NULL,
- "server" varchar(256) NOT NULL,
- "bad_entries" integer NOT NULL,
- "modified_entries" integer NOT NULL,
- "extra_entries" integer NOT NULL,
- UNIQUE ("client_id", "timestamp")
-);""" % tbl
- new_cols = "id,\
- client_id,\
- timestamp,\
- state,\
- repo_rev_code,\
- goodcount,\
- totalcount,\
- server,\
- bad_entries,\
- modified_entries,\
- extra_entries"
-
- delete_col = [_tmptbl_create,
- "insert into %s select %s from %s;" % (tmptbl_name, new_cols, tbl),
- "drop table %s" % tbl,
- _newtbl_create,
- "create index reports_interaction_client_id on %s (client_id);" % tbl,
- "insert into %s select %s from %s;" % (tbl, new_cols,
- tmptbl_name),
- "drop table %s;" % tmptbl_name]
-
- for sql in delete_col:
- cursor.execute(sql)
-
-
-def _populate_interaction_entry_counts():
- '''Populate up the type totals for the interaction table'''
- cursor = connection.cursor()
- count_field = {TYPE_BAD: 'bad_entries',
- TYPE_MODIFIED: 'modified_entries',
- TYPE_EXTRA: 'extra_entries'}
-
- for type in list(count_field.keys()):
- cursor.execute("select count(type), interaction_id " +
- "from reports_entries_interactions where type = %s group by interaction_id" % type)
- updates = []
- for row in cursor.fetchall():
- updates.append(row)
- try:
- cursor.executemany("update reports_interaction set " + count_field[type] + "=%s where id = %s", updates)
- except Exception:
- e = sys.exc_info()[1]
- print(e)
- cursor.close()
-
-
-# be sure to test your upgrade query before reflecting the change in the models
-# the list of function and sql command to do should go here
-_fixes = [_merge_database_table_entries,
- # this will remove unused tables
- "drop table reports_bad;",
- "drop table reports_bad_interactions;",
- "drop table reports_extra;",
- "drop table reports_extra_interactions;",
- "drop table reports_modified;",
- "drop table reports_modified_interactions;",
- "drop table reports_repository;",
- "drop table reports_metadata;",
- "alter table reports_interaction add server varchar(256) not null default 'N/A';",
- # fix revision data type to support $VCS hashes
- "alter table reports_interaction add repo_rev_code varchar(64) default '';",
- # Performance enhancements for large sites
- 'alter table reports_interaction add column bad_entries integer not null default -1;',
- 'alter table reports_interaction add column modified_entries integer not null default -1;',
- 'alter table reports_interaction add column extra_entries integer not null default -1;',
- _populate_interaction_entry_counts,
- _interactions_constraint_or_idx,
- 'alter table reports_reason add is_binary bool NOT NULL default False;',
- 'alter table reports_reason add is_sensitive bool NOT NULL default False;',
- _remove_table_column('reports_interaction', 'client_version'),
- "alter table reports_reason add unpruned varchar(1280) not null default '';",
-]
-
-# this will calculate the last possible version of the database
-lastversion = len(_fixes)
-
-
-def rollupdate(current_version):
- """function responsible to coordinates all the updates
- need current_version as integer
- """
- ret = None
- if current_version < lastversion:
- for i in range(current_version, lastversion):
- try:
- if type(_fixes[i]) == str:
- connection.cursor().execute(_fixes[i])
- else:
- _fixes[i]()
- except:
- logger.error("Failed to perform db update %s" % (_fixes[i]),
- exc_info=1)
- # since the array starts at 0 but version
- # starts at 1 we add 1 to the normal count
- ret = InternalDatabaseVersion.objects.create(version=i + 1)
- return ret
- else:
- return None
-
-
-def dosync():
- """Function to do the syncronisation for the models"""
- # try to detect if it's a fresh new database
- try:
- cursor = connection.cursor()
- # If this table goes missing,
- # don't forget to change it to the new one
- cursor.execute("Select * from reports_client")
- # if we get here with no error then the database has existing tables
- fresh = False
- except:
- logger.debug("there was an error while detecting "
- "the freshness of the database")
- #we should get here if the database is new
- fresh = True
-
- # ensure database connections are closed
- # so that the management can do its job right
- try:
- cursor.close()
- connection.close()
- except:
- # ignore any errors from missing/invalid dbs
- pass
- # Do the syncdb according to the django version
- if "call_command" in dir(django.core.management):
- # this is available since django 1.0 alpha.
- # not yet tested for full functionnality
- django.core.management.call_command("syncdb", interactive=False, verbosity=0)
- if fresh:
- django.core.management.call_command("loaddata", 'initial_version.xml', verbosity=0)
- elif "syncdb" in dir(django.core.management):
- # this exist only for django 0.96.*
- django.core.management.syncdb(interactive=False, verbosity=0)
- if fresh:
- logger.debug("loading the initial_version fixtures")
- django.core.management.load_data(fixture_labels=['initial_version'], verbosity=0)
- else:
- logger.warning("Don't forget to run syncdb")
-
-
-def update_database():
- """method to search where we are in the revision
- of the database models and update them"""
- try:
- logger.debug("Running upgrade of models to the new one")
- dosync()
- know_version = InternalDatabaseVersion.objects.order_by('-version')
- if not know_version:
- logger.debug("No version, creating initial version")
- know_version = InternalDatabaseVersion.objects.create(version=0)
- else:
- know_version = know_version[0]
- logger.debug("Presently at %s" % know_version)
- if know_version.version < lastversion:
- new_version = rollupdate(know_version.version)
- if new_version:
- logger.debug("upgraded to %s" % new_version)
- except:
- logger.error("Error while updating the database")
- for x in traceback.format_exc().splitlines():
- logger.error(x)
diff --git a/src/lib/Bcfg2/Server/Reports/utils.py b/src/lib/Bcfg2/Server/Reports/utils.py
index e0b6ead59..c47763e39 100755
--- a/src/lib/Bcfg2/Server/Reports/utils.py
+++ b/src/lib/Bcfg2/Server/Reports/utils.py
@@ -3,7 +3,7 @@ from django.conf.urls.defaults import *
import re
"""List of filters provided by filteredUrls"""
-filter_list = ('server', 'state')
+filter_list = ('server', 'state', 'group')
class BatchFetch(object):
@@ -97,6 +97,8 @@ def filteredUrls(pattern, view, kwargs=None, name=None):
tail = mtail.group(1)
pattern = pattern[:len(pattern) - len(tail)]
for filter in ('/state/(?P<state>\w+)',
+ '/group/(?P<group>[\w\-\.]+)',
+ '/group/(?P<group>[\w\-\.]+)/(?P<state>[A-Za-z]+)',
'/server/(?P<server>[\w\-\.]+)',
'/server/(?P<server>[\w\-\.]+)/(?P<state>[A-Za-z]+)'):
results += [(pattern + filter + tail, view, kwargs)]
diff --git a/src/lib/Bcfg2/Server/SchemaUpdater/Changes/1_0_x.py b/src/lib/Bcfg2/Server/SchemaUpdater/Changes/1_0_x.py
new file mode 100644
index 000000000..ff4c24328
--- /dev/null
+++ b/src/lib/Bcfg2/Server/SchemaUpdater/Changes/1_0_x.py
@@ -0,0 +1,11 @@
+"""
+1_0_x.py
+
+This file should contain updates relevant to the 1.0.x branches ONLY.
+The updates() method must be defined and it should return an Updater object
+"""
+from Bcfg2.Server.SchemaUpdater import UnsupportedUpdate
+
+def updates():
+ return UnsupportedUpdate("1.0", 10)
+
diff --git a/src/lib/Bcfg2/Server/SchemaUpdater/Changes/1_1_x.py b/src/lib/Bcfg2/Server/SchemaUpdater/Changes/1_1_x.py
new file mode 100644
index 000000000..0d28786fd
--- /dev/null
+++ b/src/lib/Bcfg2/Server/SchemaUpdater/Changes/1_1_x.py
@@ -0,0 +1,59 @@
+"""
+1_1_x.py
+
+This file should contain updates relevant to the 1.1.x branches ONLY.
+The updates() method must be defined and it should return an Updater object
+"""
+from Bcfg2.Server.SchemaUpdater import Updater
+from Bcfg2.Server.SchemaUpdater.Routines import updatercallable
+
+from django.db import connection
+import sys
+import Bcfg2.settings
+from Bcfg2.Server.Reports.reports.models import \
+ TYPE_BAD, TYPE_MODIFIED, TYPE_EXTRA
+
+@updatercallable
+def _interactions_constraint_or_idx():
+    """SQLite doesn't support ALTER TABLE ... ADD CONSTRAINT, so fall back to a unique index"""
+ cursor = connection.cursor()
+ try:
+ cursor.execute('alter table reports_interaction add constraint reports_interaction_20100601 unique (client_id,timestamp)')
+ except:
+ cursor.execute('create unique index reports_interaction_20100601 on reports_interaction (client_id,timestamp)')
+
+
+@updatercallable
+def _populate_interaction_entry_counts():
+    '''Populate the per-type entry totals for the interaction table'''
+ cursor = connection.cursor()
+ count_field = {TYPE_BAD: 'bad_entries',
+ TYPE_MODIFIED: 'modified_entries',
+ TYPE_EXTRA: 'extra_entries'}
+
+ for type in list(count_field.keys()):
+ cursor.execute("select count(type), interaction_id " +
+ "from reports_entries_interactions where type = %s group by interaction_id" % type)
+ updates = []
+ for row in cursor.fetchall():
+ updates.append(row)
+ try:
+ cursor.executemany("update reports_interaction set " + count_field[type] + "=%s where id = %s", updates)
+ except Exception:
+ e = sys.exc_info()[1]
+ print(e)
+ cursor.close()
+
+
+def updates():
+ fixes = Updater("1.1")
+ fixes.override_base_version(12) # Do not do this in new code
+
+ fixes.add('alter table reports_interaction add column bad_entries integer not null default -1;')
+ fixes.add('alter table reports_interaction add column modified_entries integer not null default -1;')
+ fixes.add('alter table reports_interaction add column extra_entries integer not null default -1;')
+ fixes.add(_populate_interaction_entry_counts())
+ fixes.add(_interactions_constraint_or_idx())
+ fixes.add('alter table reports_reason add is_binary bool NOT NULL default False;')
+ return fixes
+
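
The Changes modules above all share one pattern: build an Updater for the release, add SQL strings or decorated callables, and return it. Purely as an illustration (the release number, column name, and helper function below are hypothetical, not part of this patch), a new module would look roughly like:

    from Bcfg2.Server.SchemaUpdater import Updater
    from Bcfg2.Server.SchemaUpdater.Routines import updatercallable

    @updatercallable
    def _example_fixup():
        # hypothetical data migration, run only when its schema step is applied
        pass

    def updates():
        fixes = Updater("1.4")  # base version 10400 (MMmm00)
        fixes.add('alter table reports_interaction '
                  'add column example integer not null default 0;')
        fixes.add(_example_fixup())  # note the call: the decorator returns a deferred routine
        return fixes
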
diff --git a/src/lib/Bcfg2/Server/SchemaUpdater/Changes/1_2_x.py b/src/lib/Bcfg2/Server/SchemaUpdater/Changes/1_2_x.py
new file mode 100644
index 000000000..024965bd5
--- /dev/null
+++ b/src/lib/Bcfg2/Server/SchemaUpdater/Changes/1_2_x.py
@@ -0,0 +1,15 @@
+"""
+1_2_x.py
+
+This file should contain updates relevant to the 1.2.x branches ONLY.
+The updates() method must be defined and it should return an Updater object
+"""
+from Bcfg2.Server.SchemaUpdater import Updater
+from Bcfg2.Server.SchemaUpdater.Routines import updatercallable
+
+def updates():
+ fixes = Updater("1.2")
+ fixes.override_base_version(18) # Do not do this in new code
+ fixes.add('alter table reports_reason add is_sensitive bool NOT NULL default False;')
+ return fixes
+
diff --git a/src/lib/Bcfg2/Server/SchemaUpdater/Changes/1_3_0.py b/src/lib/Bcfg2/Server/SchemaUpdater/Changes/1_3_0.py
new file mode 100644
index 000000000..4fc57c653
--- /dev/null
+++ b/src/lib/Bcfg2/Server/SchemaUpdater/Changes/1_3_0.py
@@ -0,0 +1,27 @@
+"""
+1_3_0.py
+
+This file should contain updates relevant to the 1.3.x branches ONLY.
+The updates() method must be defined and it should return an Updater object
+"""
+from Bcfg2.Server.SchemaUpdater import Updater, UpdaterError
+from Bcfg2.Server.SchemaUpdater.Routines import AddColumns, \
+ RemoveColumns, RebuildTable, DropTable
+
+from Bcfg2.Server.Reports.reports.models import Reason, Interaction
+
+
+def updates():
+ fixes = Updater("1.3")
+ fixes.add(RemoveColumns(Interaction, 'client_version'))
+ fixes.add(AddColumns(Reason))
+ fixes.add(RebuildTable(Reason, [
+ 'owner', 'current_owner',
+ 'group', 'current_group',
+ 'perms', 'current_perms',
+ 'status', 'current_status',
+ 'to', 'current_to']))
+ fixes.add(DropTable('reports_ping'))
+
+ return fixes
+
diff --git a/src/lib/Bcfg2/Server/SchemaUpdater/Changes/__init__.py b/src/lib/Bcfg2/Server/SchemaUpdater/Changes/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/src/lib/Bcfg2/Server/SchemaUpdater/Changes/__init__.py
diff --git a/src/lib/Bcfg2/Server/SchemaUpdater/Routines.py b/src/lib/Bcfg2/Server/SchemaUpdater/Routines.py
new file mode 100644
index 000000000..4fcf0e6bf
--- /dev/null
+++ b/src/lib/Bcfg2/Server/SchemaUpdater/Routines.py
@@ -0,0 +1,279 @@
+import logging
+import traceback
+from django.db.models.fields import NOT_PROVIDED
+from django.db import connection, DatabaseError, backend, models
+from django.core.management.color import no_style
+from django.core.management.sql import sql_create
+import django.core.management
+
+import Bcfg2.settings
+
+logger = logging.getLogger(__name__)
+
+def _quote(value):
+ """
+ Quote a string to use as a table name or column
+ """
+ return backend.DatabaseOperations().quote_name(value)
+
+
+def _rebuild_sqlite_table(model):
+    """SQLite doesn't support most ALTER TABLE statements. This streamlines the
+    rebuild process."""
+ try:
+ cursor = connection.cursor()
+ table_name = model._meta.db_table
+
+        # Build the create statement via Django
+ model._meta.db_table = "%s_temp" % table_name
+ sql, references = connection.creation.sql_create_model(model, no_style())
+ columns = ",".join([_quote(f.column) \
+ for f in model._meta.fields])
+
+ # Create a temp table
+ [cursor.execute(s) for s in sql]
+
+ # Fill the table
+ tbl_name = _quote(table_name)
+ tmp_tbl_name = _quote(model._meta.db_table)
+ # Reset this
+ model._meta.db_table = table_name
+ cursor.execute("insert into %s(%s) select %s from %s;" % (
+ tmp_tbl_name,
+ columns,
+ columns,
+ tbl_name))
+ cursor.execute("drop table %s" % tbl_name)
+
+ # Call syncdb to create the table again
+ django.core.management.call_command("syncdb", interactive=False, verbosity=0)
+ # syncdb closes our cursor
+ cursor = connection.cursor()
+ # Repopulate
+ cursor.execute('insert into %s(%s) select %s from %s;' % (tbl_name,
+ columns,
+ columns,
+ tmp_tbl_name))
+ cursor.execute('DROP TABLE %s;' % tmp_tbl_name)
+ except DatabaseError:
+ logger.error("Failed to rebuild sqlite table %s" % table_name, exc_info=1)
+ raise UpdaterRoutineException
+
+
+class UpdaterRoutineException(Exception):
+ pass
+
+
+class UpdaterRoutine(object):
+ """Base for routines."""
+ def __init__(self):
+ pass
+
+ def __str__(self):
+ return __name__
+
+ def run(self):
+ """Called to execute the action"""
+ raise UpdaterRoutineException
+
+
+
+class AddColumns(UpdaterRoutine):
+ """
+ Routine to add new columns to an existing model
+ """
+ def __init__(self, model):
+ self.model = model
+ self.model_name = model.__name__
+
+ def __str__(self):
+ return "Add new columns for model %s" % self.model_name
+
+ def run(self):
+ try:
+ cursor = connection.cursor()
+ except DatabaseError:
+ logger.error("Failed to connect to the db")
+ raise UpdaterRoutineException
+
+ try:
+ desc = {}
+ for d in connection.introspection.get_table_description(cursor,
+ self.model._meta.db_table):
+ desc[d[0]] = d
+ except DatabaseError:
+ logger.error("Failed to get table description", exc_info=1)
+ raise UpdaterRoutineException
+
+ for field in self.model._meta.fields:
+ if field.column in desc:
+ continue
+ logger.debug("Column %s does not exist yet" % field.column)
+ if field.default == NOT_PROVIDED:
+                logger.error("Cannot add a column without a default value")
+ raise UpdaterRoutineException
+
+ sql = "ALTER TABLE %s ADD %s %s NOT NULL DEFAULT " % (
+ _quote(self.model._meta.db_table),
+ _quote(field.column), field.db_type(), )
+ db_engine = Bcfg2.settings.DATABASES['default']['ENGINE']
+ if db_engine == 'django.db.backends.sqlite3':
+ sql += _quote(field.default)
+ sql_values = ()
+ else:
+ sql += '%s'
+ sql_values = (field.default, )
+ try:
+ cursor.execute(sql, sql_values)
+ logger.debug("Added column %s to %s" %
+ (field.column, self.model._meta.db_table))
+ except DatabaseError:
+ logger.error("Unable to add column %s" % field.column)
+ raise UpdaterRoutineException
+
+
+class RebuildTable(UpdaterRoutine):
+ """
+ Rebuild the table for an existing model. Use this if field types have changed.
+ """
+ def __init__(self, model, columns):
+ self.model = model
+ self.model_name = model.__name__
+
+ if type(columns) == str:
+ self.columns = [columns]
+ elif type(columns) in (tuple, list):
+ self.columns = columns
+ else:
+ logger.error("Columns must be a str, tuple, or list")
+ raise UpdaterRoutineException
+
+
+ def __str__(self):
+ return "Rebuild columns for model %s" % self.model_name
+
+ def run(self):
+ try:
+ cursor = connection.cursor()
+ except DatabaseError:
+ logger.error("Failed to connect to the db")
+ raise UpdaterRoutineException
+
+ db_engine = Bcfg2.settings.DATABASES['default']['ENGINE']
+ if db_engine == 'django.db.backends.sqlite3':
+ """ Sqlite is a special case. Altering columns is not supported. """
+ _rebuild_sqlite_table(self.model)
+ return
+
+ if db_engine == 'django.db.backends.mysql':
+ modify_cmd = 'MODIFY '
+ else:
+ modify_cmd = 'ALTER COLUMN '
+
+ col_strings = []
+ for column in self.columns:
+ col_strings.append("%s %s %s" % ( \
+ modify_cmd,
+ _quote(column),
+ self.model._meta.get_field(column).db_type()
+ ))
+
+ try:
+ cursor.execute('ALTER TABLE %s %s' %
+ (_quote(self.model._meta.db_table), ", ".join(col_strings)))
+ except DatabaseError:
+            logger.debug("Failed to modify table %s" % self.model._meta.db_table)
+ raise UpdaterRoutineException
+
+
+
+class RemoveColumns(RebuildTable):
+ """
+ Routine to remove columns from an existing model
+ """
+ def __init__(self, model, columns):
+ super(RemoveColumns, self).__init__(model, columns)
+
+
+ def __str__(self):
+ return "Remove columns from model %s" % self.model_name
+
+ def run(self):
+ try:
+ cursor = connection.cursor()
+ except DatabaseError:
+ logger.error("Failed to connect to the db")
+ raise UpdaterRoutineException
+
+ try:
+ columns = [d[0] for d in connection.introspection.get_table_description(cursor,
+ self.model._meta.db_table)]
+ except DatabaseError:
+ logger.error("Failed to get table description", exc_info=1)
+ raise UpdaterRoutineException
+
+ for column in self.columns:
+ if column not in columns:
+ logger.warning("Cannot drop column %s: does not exist" % column)
+ continue
+
+ logger.debug("Dropping column %s" % column)
+
+ db_engine = Bcfg2.settings.DATABASES['default']['ENGINE']
+ if db_engine == 'django.db.backends.sqlite3':
+ _rebuild_sqlite_table(self.model)
+ else:
+ sql = "alter table %s drop column %s" % \
+ (_quote(self.model._meta.db_table), _quote(column), )
+ try:
+ cursor.execute(sql)
+ except DatabaseError:
+ logger.debug("Failed to drop column %s from %s" %
+ (column, self.model._meta.db_table))
+ raise UpdaterRoutineException
+
+
+class DropTable(UpdaterRoutine):
+ """
+ Drop a table
+ """
+ def __init__(self, table_name):
+ self.table_name = table_name
+
+ def __str__(self):
+ return "Drop table %s" % self.table_name
+
+ def run(self):
+ try:
+ cursor = connection.cursor()
+ cursor.execute('DROP TABLE %s' % _quote(self.table_name))
+ except DatabaseError:
+ logger.error("Failed to drop table: %s" %
+ traceback.format_exc().splitlines()[-1])
+ raise UpdaterRoutineException
+
+
+class UpdaterCallable(UpdaterRoutine):
+    """Helper for routines; defers execution of the wrapped function until run() is called"""
+ def __init__(self, fn):
+ self.fn = fn
+ self.args = []
+ self.kwargs = {}
+
+ def __call__(self, *args, **kwargs):
+ self.args = args
+ self.kwargs = kwargs
+ return self
+
+ def __str__(self):
+ return self.fn.__name__
+
+ def run(self):
+ self.fn(*self.args, **self.kwargs)
+
+def updatercallable(fn):
+ """Decorator for UpdaterCallable. Use for any function passed
+ into the fixes list"""
+ return UpdaterCallable(fn)
+
+
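
The updatercallable decorator wraps a function in UpdaterCallable so that calling it merely records the arguments; nothing executes until the updater reaches that step and invokes run(). A quick sketch, with a made-up function and table name:

    from Bcfg2.Server.SchemaUpdater.Routines import updatercallable

    @updatercallable
    def _recount(table):
        # hypothetical fixup; the body does not execute at call time
        print("recomputing counts for %s" % table)

    step = _recount('reports_interaction')  # arguments stored, nothing executed yet
    print(step)                             # prints the wrapped function's name
    step.run()                              # only now does _recount actually run
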
diff --git a/src/lib/Bcfg2/Server/SchemaUpdater/__init__.py b/src/lib/Bcfg2/Server/SchemaUpdater/__init__.py
new file mode 100644
index 000000000..304b36636
--- /dev/null
+++ b/src/lib/Bcfg2/Server/SchemaUpdater/__init__.py
@@ -0,0 +1,257 @@
+from django.db import connection, DatabaseError
+from django.core.exceptions import ImproperlyConfigured
+import django.core.management
+import logging
+import pkgutil
+import re
+import sys
+import traceback
+
+from Bcfg2.Bcfg2Py3k import CmpMixin
+from Bcfg2.Server.models import InternalDatabaseVersion
+from Bcfg2.Server.SchemaUpdater.Routines import UpdaterRoutineException, \
+ UpdaterRoutine
+from Bcfg2.Server.SchemaUpdater import Changes
+
+logger = logging.getLogger(__name__)
+
+class UpdaterError(Exception):
+ pass
+
+
+class SchemaTooOldError(UpdaterError):
+ pass
+
+
+def _walk_packages(paths):
+ """Python 2.4 lacks this routine"""
+ import glob
+ submodules = []
+ for path in paths:
+ for submodule in glob.glob("%s/*.py" % path):
+ mod = '.'.join(submodule.split("/")[-1].split('.')[:-1])
+ if mod != '__init__':
+ submodules.append((None, mod, False))
+ return submodules
+
+
+def _release_to_version(release):
+ """
+ Build a release base for a version
+
+ Expects a string of the form 00.00
+
+ returns an integer of the form MMmm00
+ """
+ regex = re.compile("^(\d+)\.(\d+)$")
+ m = regex.match(release)
+ if not m:
+ logger.error("Invalid release string: %s" % release)
+ raise TypeError
+ return int("%02d%02d00" % (int(m.group(1)), int(m.group(2))))
+
+
+class Updater(CmpMixin):
+ """Database updater to standardize updates"""
+
+ def __init__(self, release):
+ CmpMixin.__init__(self)
+
+ self._cursor = None
+ self._release = release
+ try:
+ self._base_version = _release_to_version(release)
+ except:
+ err = "Invalid release string: %s" % release
+ logger.error(err)
+ raise UpdaterError(err)
+
+ self._fixes = []
+ self._version = -1
+
+ def __cmp__(self, other):
+ return self._base_version - other._base_version
+
+ @property
+ def release(self):
+ return self._release
+
+ @property
+ def version(self):
+ if self._version < 0:
+ try:
+ iv = InternalDatabaseVersion.objects.latest()
+ self._version = iv.version
+ except InternalDatabaseVersion.DoesNotExist:
+ raise UpdaterError("No database version stored internally")
+ return self._version
+
+ @property
+ def cursor(self):
+ if not self._cursor:
+ self._cursor = connection.cursor()
+ return self._cursor
+
+ @property
+ def target_version(self):
+ if(len(self._fixes) == 0):
+ return self._base_version
+ else:
+ return self._base_version + len(self._fixes) - 1
+
+
+ def add(self, update):
+ if type(update) == str or isinstance(update, UpdaterRoutine):
+ self._fixes.append(update)
+ else:
+ raise TypeError
+
+
+ def override_base_version(self, version):
+ """Override our starting point for old releases. New code should
+ not use this method"""
+ self._base_version = int(version)
+
+
+ @staticmethod
+ def get_current_version():
+ """Queries the db for the latest version. Returns 0 for a
+ fresh install"""
+
+ if "call_command" in dir(django.core.management):
+ django.core.management.call_command("syncdb", interactive=False,
+ verbosity=0)
+ else:
+            msg = "Unable to call syncdb routine"
+ logger.warning(msg)
+ raise UpdaterError(msg)
+
+ try:
+ iv = InternalDatabaseVersion.objects.latest()
+ version = iv.version
+ except InternalDatabaseVersion.DoesNotExist:
+ version = 0
+
+ return version
+
+
+ def syncdb(self):
+        """Synchronise the database with the models"""
+
+ self._version = Updater.get_current_version()
+ self._cursor = None
+
+
+ def increment(self):
+ """Increment schema version in the database"""
+ if self._version < self._base_version:
+ self._version = self._base_version
+ else:
+ self._version += 1
+ InternalDatabaseVersion.objects.create(version=self._version)
+
+ def apply(self):
+ """Apply pending schema changes"""
+
+ if self.version >= self.target_version:
+ logger.debug("No updates for release %s" % self._release)
+ return
+
+ logger.debug("Applying updates for release %s" % self._release)
+
+ if self.version < self._base_version:
+ start = 0
+ else:
+ start = self.version - self._base_version + 1
+
+ try:
+ for fix in self._fixes[start:]:
+ if type(fix) == str:
+ self.cursor.execute(fix)
+ elif isinstance(fix, UpdaterRoutine):
+ fix.run()
+ else:
+                    logger.error("Invalid schema change at %s" %
+                                 (self._version + 1))
+ self.increment()
+ logger.debug("Applied schema change number %s: %s" % \
+ (self.version, fix))
+ logger.info("Applied schema changes for release %s" % self._release)
+ except:
+ msg = "Failed to perform db update %s (%s): %s" % \
+ (self._version + 1, fix,
+ traceback.format_exc().splitlines()[-1])
+ logger.error(msg)
+ raise UpdaterError(msg)
+
+
+class UnsupportedUpdate(Updater):
+ """Handle an unsupported update"""
+
+ def __init__(self, release, version):
+ super(UnsupportedUpdate, self).__init__(release)
+ self._base_version = version
+
+ def apply(self):
+ """Raise an exception if we're too old"""
+
+ if self.version < self.target_version:
+ logger.error("Upgrade from release %s unsupported" % self._release)
+ raise SchemaTooOldError
+
+
+def update_database():
+    """Determine the current database schema revision and apply any
+    pending updates"""
+ try:
+ logger.debug("Verifying database schema")
+
+ updaters = []
+ if hasattr(pkgutil, 'walk_packages'):
+ submodules = pkgutil.walk_packages(path=Changes.__path__)
+ else:
+ #python 2.4
+ submodules = _walk_packages(Changes.__path__)
+ for loader, submodule, ispkg in submodules:
+ if ispkg:
+ continue
+ try:
+ updates = getattr(
+ __import__("%s.%s" % (Changes.__name__, submodule),
+ globals(), locals(), ['*']),
+ "updates")
+ updaters.append(updates())
+ except ImportError:
+ logger.error("Failed to import %s" % submodule)
+ except AttributeError:
+ logger.warning("Module %s does not have an updates function" %
+ submodule)
+ except:
+ msg = "Failed to build updater for %s" % submodule
+ logger.error(msg, exc_info=1)
+ raise UpdaterError(msg)
+
+ current_version = Updater.get_current_version()
+ logger.debug("Database version at %s" % current_version)
+
+ updaters.sort()
+ if current_version > 0:
+ [u.apply() for u in updaters]
+ logger.debug("Database version at %s" %
+ Updater.get_current_version())
+ else:
+ target = updaters[-1].target_version
+ InternalDatabaseVersion.objects.create(version=target)
+ logger.info("A new database was created")
+
+ except UpdaterError:
+ raise
+ except ImproperlyConfigured:
+ logger.error("Django is not properly configured: %s" %
+ traceback.format_exc().splitlines()[-1])
+ raise UpdaterError
+ except:
+ logger.error("Error while updating the database")
+ for x in traceback.format_exc().splitlines():
+ logger.error(x)
+ raise UpdaterError
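
Schema versions are plain integers: the release "MAJOR.MINOR" is packed into MMmm00, and each fix in the list counts one step above that base. A standalone sketch of the arithmetic (no Bcfg2 imports needed; the fix count is hypothetical):

    import re

    def release_to_version(release):
        # mirrors _release_to_version above: "1.3" -> 10300
        m = re.match(r"^(\d+)\.(\d+)$", release)
        return int("%02d%02d00" % (int(m.group(1)), int(m.group(2))))

    base = release_to_version("1.3")  # 10300
    fixes = 4                         # hypothetical number of schema changes in this release
    target = base + fixes - 1         # 10303, what Updater.target_version would report
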
diff --git a/src/lib/Bcfg2/Server/Snapshots/model.py b/src/lib/Bcfg2/Server/Snapshots/model.py
index 5d7973c16..0bbd206da 100644
--- a/src/lib/Bcfg2/Server/Snapshots/model.py
+++ b/src/lib/Bcfg2/Server/Snapshots/model.py
@@ -6,13 +6,7 @@ import sqlalchemy.exceptions
from sqlalchemy.orm import relation, backref
from sqlalchemy.ext.declarative import declarative_base
-
-# py3k compatibility
-def u_str(string):
- if sys.hexversion >= 0x03000000:
- return string
- else:
- return unicode(string)
+from Bcfg2.Bcfg2Py3k import u_str
class Uniquer(object):
diff --git a/src/lib/Bcfg2/Server/__init__.py b/src/lib/Bcfg2/Server/__init__.py
index 96777b0bf..f79b51dd3 100644
--- a/src/lib/Bcfg2/Server/__init__.py
+++ b/src/lib/Bcfg2/Server/__init__.py
@@ -1,4 +1,13 @@
"""This is the set of modules for Bcfg2.Server."""
+import lxml.etree
+
__all__ = ["Admin", "Core", "FileMonitor", "Plugin", "Plugins",
- "Hostbase", "Reports", "Snapshots"]
+ "Hostbase", "Reports", "Snapshots", "XMLParser",
+ "XI", "XI_NAMESPACE"]
+
+XMLParser = lxml.etree.XMLParser(remove_blank_text=True)
+
+XI = 'http://www.w3.org/2001/XInclude'
+XI_NAMESPACE = '{%s}' % XI
+
diff --git a/src/lib/Bcfg2/Server/models.py b/src/lib/Bcfg2/Server/models.py
new file mode 100644
index 000000000..effd4d298
--- /dev/null
+++ b/src/lib/Bcfg2/Server/models.py
@@ -0,0 +1,77 @@
+import sys
+import logging
+import Bcfg2.Options
+import Bcfg2.Server.Plugins
+from django.db import models
+from Bcfg2.Bcfg2Py3k import ConfigParser
+
+logger = logging.getLogger('Bcfg2.Server.models')
+
+MODELS = []
+
+def load_models(plugins=None, cfile='/etc/bcfg2.conf', quiet=True):
+ global MODELS
+
+ if plugins is None:
+ # we want to provide a different default plugin list --
+ # namely, _all_ plugins, so that the database is guaranteed to
+ # work, even if /etc/bcfg2.conf isn't set up properly
+ plugin_opt = Bcfg2.Options.SERVER_PLUGINS
+ plugin_opt.default = Bcfg2.Server.Plugins.__all__
+
+ setup = Bcfg2.Options.OptionParser(dict(plugins=plugin_opt,
+ configfile=Bcfg2.Options.CFILE),
+ quiet=quiet)
+ setup.parse([Bcfg2.Options.CFILE.cmd, cfile])
+ plugins = setup['plugins']
+
+ if MODELS:
+ # load_models() has been called once, so first unload all of
+ # the models; otherwise we might call load_models() with no
+ # arguments, end up with _all_ models loaded, and then in a
+ # subsequent call only load a subset of models
+ for model in MODELS:
+ delattr(sys.modules[__name__], model)
+ MODELS = []
+
+ for plugin in plugins:
+ try:
+ mod = getattr(__import__("Bcfg2.Server.Plugins.%s" %
+ plugin).Server.Plugins, plugin)
+ except ImportError:
+ try:
+ err = sys.exc_info()[1]
+ mod = __import__(plugin)
+ except:
+ if plugins != Bcfg2.Server.Plugins.__all__:
+ # only produce errors if the default plugin list
+ # was not used -- i.e., if the config file was set
+ # up. don't produce errors when trying to load
+ # all plugins, IOW. the error from the first
+ # attempt to import is probably more accurate than
+ # the second attempt.
+ logger.error("Failed to load plugin %s: %s" % (plugin, err))
+ continue
+ for sym in dir(mod):
+ obj = getattr(mod, sym)
+ if hasattr(obj, "__bases__") and models.Model in obj.__bases__:
+ setattr(sys.modules[__name__], sym, obj)
+ MODELS.append(sym)
+
+# basic invocation to ensure that a default set of models is loaded,
+# and thus that this module will always work.
+load_models(quiet=True)
+
+# Monitor our internal db version
+class InternalDatabaseVersion(models.Model):
+    """Object that tells us which schema version the database is at."""
+ version = models.IntegerField()
+ updated = models.DateTimeField(auto_now_add=True)
+
+ def __str__(self):
+        return "version %d updated at %s" % (self.version, self.updated.isoformat())
+
+ class Meta:
+ app_label = "reports"
+ get_latest_by = "version"
+
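
Because Meta sets get_latest_by = "version", the stored schema revision can be read back with latest(), which is what Updater.get_current_version() relies on. A minimal sketch, assuming Django has already been configured via Bcfg2.settings:

    from Bcfg2.Server.models import InternalDatabaseVersion

    try:
        current = InternalDatabaseVersion.objects.latest().version
    except InternalDatabaseVersion.DoesNotExist:
        current = 0  # fresh database, no schema version recorded yet
    print("database schema at version %s" % current)
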
diff --git a/src/lib/Bcfg2/manage.py b/src/lib/Bcfg2/manage.py
new file mode 100755
index 000000000..3e4eedc9f
--- /dev/null
+++ b/src/lib/Bcfg2/manage.py
@@ -0,0 +1,14 @@
+#!/usr/bin/env python
+from django.core.management import execute_manager
+import imp
+try:
+ imp.find_module('settings') # Assumed to be in the same directory.
+except ImportError:
+ import sys
+ sys.stderr.write("Error: Can't find the file 'settings.py' in the directory containing %r. It appears you've customized things.\nYou'll have to run django-admin.py, passing it your settings module.\n" % __file__)
+ sys.exit(1)
+
+import settings
+
+if __name__ == "__main__":
+ execute_manager(settings)
diff --git a/src/lib/Bcfg2/settings.py b/src/lib/Bcfg2/settings.py
new file mode 100644
index 000000000..05e85bb9a
--- /dev/null
+++ b/src/lib/Bcfg2/settings.py
@@ -0,0 +1,161 @@
+import os
+import sys
+import Bcfg2.Options
+
+try:
+ import django
+ has_django = True
+except:
+ has_django = False
+
+DATABASES = dict()
+
+# Django < 1.2 compat
+DATABASE_ENGINE = None
+DATABASE_NAME = None
+DATABASE_USER = None
+DATABASE_PASSWORD = None
+DATABASE_HOST = None
+DATABASE_PORT = None
+
+TIME_ZONE = None
+
+DEBUG = False
+TEMPLATE_DEBUG = DEBUG
+
+MEDIA_URL = '/site_media'
+
+# default config file is /etc/bcfg2-web.conf, UNLESS /etc/bcfg2.conf
+# exists AND /etc/bcfg2-web.conf does not exist.
+DEFAULT_CONFIG = Bcfg2.Options.WEB_CFILE.default
+if (not os.path.exists(Bcfg2.Options.WEB_CFILE.default) and
+ os.path.exists(Bcfg2.Options.CFILE.default)):
+ DEFAULT_CONFIG = Bcfg2.Options.CFILE.default
+
+def read_config(cfile=DEFAULT_CONFIG, repo=None, quiet=False):
+ global DATABASE_ENGINE, DATABASE_NAME, DATABASE_USER, DATABASE_PASSWORD, \
+ DATABASE_HOST, DATABASE_PORT, DEBUG, TEMPLATE_DEBUG, TIME_ZONE, \
+ MEDIA_URL
+
+ optinfo = Bcfg2.Options.DATABASE_COMMON_OPTIONS
+ optinfo['repo'] = Bcfg2.Options.SERVER_REPOSITORY
+ setup = Bcfg2.Options.OptionParser(optinfo, quiet=quiet)
+ setup.parse([Bcfg2.Options.WEB_CFILE.cmd, cfile])
+
+ if repo is None:
+ repo = setup['repo']
+
+ DATABASES['default'] = \
+ dict(ENGINE="django.db.backends.%s" % setup['db_engine'],
+ NAME=setup['db_name'],
+ USER=setup['db_user'],
+ PASSWORD=setup['db_password'],
+ HOST=setup['db_host'],
+ PORT=setup['db_port'])
+
+ if has_django and django.VERSION[0] == 1 and django.VERSION[1] < 2:
+ DATABASE_ENGINE = setup['db_engine']
+ DATABASE_NAME = DATABASES['default']['NAME']
+ DATABASE_USER = DATABASES['default']['USER']
+ DATABASE_PASSWORD = DATABASES['default']['PASSWORD']
+ DATABASE_HOST = DATABASES['default']['HOST']
+ DATABASE_PORT = DATABASES['default']['PORT']
+
+ # dropping the version check. This was added in 1.1.2
+ TIME_ZONE = setup['time_zone']
+
+ DEBUG = setup['django_debug']
+ TEMPLATE_DEBUG = DEBUG
+ if DEBUG:
+ print("Warning: Setting web_debug to True causes extraordinary memory "
+ "leaks. Only use this setting if you know what you're doing.")
+
+ if setup['web_prefix']:
+ MEDIA_URL = setup['web_prefix'].rstrip('/') + MEDIA_URL
+ else:
+ MEDIA_URL = '/site_media'
+
+
+# initialize settings from /etc/bcfg2.conf, or set up basic defaults.
+# this lets manage.py work in all cases
+read_config(quiet=True)
+
+ADMINS = (('Root', 'root'),)
+MANAGERS = ADMINS
+
+# Language code for this installation. All choices can be found here:
+# http://www.w3.org/TR/REC-html40/struct/dirlang.html#langcodes
+# http://blogs.law.harvard.edu/tech/stories/storyReader$15
+LANGUAGE_CODE = 'en-us'
+
+SITE_ID = 1
+
+# TODO - sanitize this
+INSTALLED_APPS = (
+ 'django.contrib.auth',
+ 'django.contrib.contenttypes',
+ 'django.contrib.sessions',
+ 'django.contrib.sites',
+ 'django.contrib.admin',
+ 'Bcfg2.Server.Reports.reports',
+ 'Bcfg2.Server'
+)
+
+# Imported from Bcfg2.Server.Reports
+MEDIA_ROOT = ''
+
+# URL prefix for admin media -- CSS, JavaScript and images. Make sure to use a
+# trailing slash.
+ADMIN_MEDIA_PREFIX = '/media/'
+
+#TODO - make this unique
+# Make this unique, and don't share it with anybody.
+SECRET_KEY = 'eb5+y%oy-qx*2+62vv=gtnnxg1yig_odu0se5$h0hh#pc*lmo7'
+
+TEMPLATE_LOADERS = (
+ 'django.template.loaders.filesystem.load_template_source',
+ 'django.template.loaders.app_directories.load_template_source',
+)
+
+#TODO - review these. auth and sessions aren't really used
+MIDDLEWARE_CLASSES = (
+ 'django.middleware.common.CommonMiddleware',
+ 'django.contrib.sessions.middleware.SessionMiddleware',
+ 'django.contrib.auth.middleware.AuthenticationMiddleware',
+ 'django.middleware.doc.XViewMiddleware',
+)
+
+# TODO - move this to a higher root and dynamically import
+ROOT_URLCONF = 'Bcfg2.Server.Reports.urls'
+
+# TODO - this isn't usable
+# Authentication Settings
+AUTHENTICATION_BACKENDS = ('django.contrib.auth.backends.ModelBackend',)
+
+LOGIN_URL = '/login'
+
+SESSION_EXPIRE_AT_BROWSER_CLOSE = True
+
+TEMPLATE_DIRS = (
+ # App loaders should take care of this.. not sure why this is here
+ '/usr/share/python-support/python-django/django/contrib/admin/templates/',
+)
+
+# TODO - sanitize this
+if has_django and django.VERSION[0] == 1 and django.VERSION[1] < 2:
+ TEMPLATE_CONTEXT_PROCESSORS = (
+ 'django.core.context_processors.auth',
+ 'django.core.context_processors.debug',
+ 'django.core.context_processors.i18n',
+ 'django.core.context_processors.media',
+ 'django.core.context_processors.request'
+ )
+else:
+ TEMPLATE_CONTEXT_PROCESSORS = (
+ 'django.contrib.auth.context_processors.auth',
+ 'django.core.context_processors.debug',
+ 'django.core.context_processors.i18n',
+ 'django.core.context_processors.media',
+ 'django.core.context_processors.request'
+ )
+
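
read_config() can also be called again later to repoint the Django layer at a specific config file; the path below is only an example:

    import Bcfg2.settings

    # re-read database settings from an explicit config file
    Bcfg2.settings.read_config(cfile='/etc/bcfg2-web.conf', quiet=True)
    print(Bcfg2.settings.DATABASES['default']['ENGINE'])
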
diff --git a/src/lib/Bcfg2/version.py b/src/lib/Bcfg2/version.py
new file mode 100644
index 000000000..ac10dac94
--- /dev/null
+++ b/src/lib/Bcfg2/version.py
@@ -0,0 +1,115 @@
+import re
+
+__version__ = "1.3.0"
+
+class Bcfg2VersionInfo(tuple):
+ v_re = re.compile(r'(\d+)(\w+)(\d+)')
+
+ def __new__(cls, vstr):
+ (major, minor, rest) = vstr.split(".")
+ match = cls.v_re.match(rest)
+ if match:
+ micro, releaselevel, serial = match.groups()
+ else:
+ micro = rest
+ releaselevel = 'final'
+ serial = 0
+ return tuple.__new__(cls, [int(major), int(minor), int(micro),
+ releaselevel, int(serial)])
+
+ def __init__(self, vstr):
+ tuple.__init__(self)
+ self.major, self.minor, self.micro, self.releaselevel, self.serial = \
+ tuple(self)
+
+ def __repr__(self):
+ return "(major=%s, minor=%s, micro=%s, releaselevel=%s, serial=%s)" % \
+ tuple(self)
+
+ def _release_cmp(self, r1, r2):
+ if r1 == r2:
+ return 0
+ elif r1 == "final":
+ return -1
+ elif r2 == "final":
+ return 1
+ elif r1 == "rc":
+ return -1
+ elif r2 == "rc":
+ return 1
+ # should never get to anything past this point
+ elif r1 == "pre":
+ return -1
+ elif r2 == "pre":
+ return 1
+ else:
+ # wtf?
+ return 0
+
+ def __gt__(self, version):
+ if version is None:
+ # older bcfg2 clients didn't report their version, so we
+ # handle this case specially and assume that any reported
+ # version is newer than any indeterminate version
+ return True
+ try:
+ for i in range(3):
+ if self[i] > version[i]:
+ return True
+ elif self[i] < version[i]:
+ return False
+ rel = self._release_cmp(self[3], version[3])
+ if rel < 0:
+ return True
+ elif rel > 0:
+ return False
+ if self[4] > version[4]:
+ return True
+ else:
+ return False
+ except TypeError:
+ return self > Bcfg2VersionInfo(version)
+
+ def __lt__(self, version):
+ if version is None:
+ # older bcfg2 clients didn't report their version, so we
+ # handle this case specially and assume that any reported
+ # version is newer than any indeterminate version
+ return False
+ try:
+ for i in range(3):
+ if self[i] < version[i]:
+ return True
+ elif self[i] > version[i]:
+ return False
+ rel = self._release_cmp(self[3], version[3])
+ if rel > 0:
+ return True
+ elif rel < 0:
+ return False
+ if self[4] < version[4]:
+ return True
+ else:
+ return False
+ except TypeError:
+ return self < Bcfg2VersionInfo(version)
+
+ def __eq__(self, version):
+ if version is None:
+ # older bcfg2 clients didn't report their version, so we
+ # handle this case specially and assume that any reported
+ # version is newer than any indeterminate version
+ return False
+ try:
+ rv = True
+ for i in range(len(self)):
+ rv &= self[i] == version[i]
+ return rv
+ except TypeError:
+ return self == Bcfg2VersionInfo(version)
+
+ def __ge__(self, version):
+ return not self < version
+
+ def __le__(self, version):
+ return not self > version
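
For illustration, a few comparisons the class supports (assuming the module is importable as Bcfg2.version):

    from Bcfg2.version import Bcfg2VersionInfo

    v = Bcfg2VersionInfo("1.3.0")
    assert v.releaselevel == 'final'
    assert v > Bcfg2VersionInfo("1.2.2")
    assert Bcfg2VersionInfo("1.3.0rc1") < v  # "rc" releases sort before "final"
    assert v > None                          # clients reporting no version count as older
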
diff --git a/src/sbin/bcfg2 b/src/sbin/bcfg2
index fb34e627b..3fbeb0a62 100755
--- a/src/sbin/bcfg2
+++ b/src/sbin/bcfg2
@@ -18,6 +18,8 @@ import Bcfg2.Client.Tools
# Compatibility imports
from Bcfg2.Bcfg2Py3k import xmlrpclib
+from Bcfg2.version import __version__
+
import Bcfg2.Proxy
import Bcfg2.Logger
@@ -27,10 +29,6 @@ def cb_sigint_handler(signum, frame):
"""Exit upon CTRL-C."""
os._exit(1)
-DECISION_LIST = Bcfg2.Options.Option('Decision List', default=False,
- cmd="--decision-list", odesc='<file>',
- long_arg=True)
-
class Client:
"""The main bcfg2 client class"""
@@ -38,46 +36,8 @@ class Client:
def __init__(self):
self.toolset = None
self.config = None
-
- optinfo = {
- # 'optname': (('-a', argdesc, optdesc),
- # env, cfpath, default, boolean)),
- 'verbose': Bcfg2.Options.VERBOSE,
- 'extra': Bcfg2.Options.CLIENT_EXTRA_DISPLAY,
- 'quick': Bcfg2.Options.CLIENT_QUICK,
- 'debug': Bcfg2.Options.DEBUG,
- 'lockfile': Bcfg2.Options.LOCKFILE,
- 'drivers': Bcfg2.Options.CLIENT_DRIVERS,
- 'dryrun': Bcfg2.Options.CLIENT_DRYRUN,
- 'paranoid': Bcfg2.Options.CLIENT_PARANOID,
- 'bundle': Bcfg2.Options.CLIENT_BUNDLE,
- 'bundle-quick': Bcfg2.Options.CLIENT_BUNDLEQUICK,
- 'indep': Bcfg2.Options.CLIENT_INDEP,
- 'file': Bcfg2.Options.CLIENT_FILE,
- 'interactive': Bcfg2.Options.INTERACTIVE,
- 'cache': Bcfg2.Options.CLIENT_CACHE,
- 'profile': Bcfg2.Options.CLIENT_PROFILE,
- 'remove': Bcfg2.Options.CLIENT_REMOVE,
- 'help': Bcfg2.Options.HELP,
- 'setup': Bcfg2.Options.CFILE,
- 'server': Bcfg2.Options.SERVER_LOCATION,
- 'user': Bcfg2.Options.CLIENT_USER,
- 'password': Bcfg2.Options.SERVER_PASSWORD,
- 'retries': Bcfg2.Options.CLIENT_RETRIES,
- 'kevlar': Bcfg2.Options.CLIENT_KEVLAR,
- 'decision-list': DECISION_LIST,
- 'encoding': Bcfg2.Options.ENCODING,
- 'omit-lock-check': Bcfg2.Options.OMIT_LOCK_CHECK,
- 'filelog': Bcfg2.Options.LOGGING_FILE_PATH,
- 'decision': Bcfg2.Options.CLIENT_DLIST,
- 'servicemode': Bcfg2.Options.CLIENT_SERVICE_MODE,
- 'key': Bcfg2.Options.CLIENT_KEY,
- 'certificate': Bcfg2.Options.CLIENT_CERT,
- 'ca': Bcfg2.Options.CLIENT_CA,
- 'serverCN': Bcfg2.Options.CLIENT_SCNS,
- 'timeout': Bcfg2.Options.CLIENT_TIMEOUT,
- }
-
+
+ optinfo = Bcfg2.Options.CLIENT_COMMON_OPTIONS
self.setup = Bcfg2.Options.OptionParser(optinfo)
self.setup.parse(sys.argv[1:])
@@ -85,38 +45,38 @@ class Client:
print("Bcfg2 takes no arguments, only options")
print(self.setup.buildHelpMessage())
raise SystemExit(1)
- level = 30
- if self.setup['verbose']:
- level = 20
if self.setup['debug']:
- level = 0
+ level = logging.DEBUG
+ elif self.setup['verbose']:
+ level = logging.INFO
+ else:
+ level = logging.WARNING
Bcfg2.Logger.setup_logging('bcfg2',
- to_syslog=False,
+ to_syslog=self.setup['syslog'],
level=level,
- to_file=self.setup['filelog'])
+ to_file=self.setup['logging'])
self.logger = logging.getLogger('bcfg2')
self.logger.debug(self.setup)
- if self.setup['bundle-quick']:
- if self.setup['bundle'] == []:
- self.logger.error("-Q option requires -b")
+ if self.setup['bundle_quick']:
+ if not self.setup['bundle'] and not self.setup['skipbundle']:
+ self.logger.error("-Q option requires -b or -B")
raise SystemExit(1)
- elif self.setup['remove'] != False:
+ elif self.setup['remove']:
self.logger.error("-Q option incompatible with -r")
raise SystemExit(1)
if 'drivers' in self.setup and self.setup['drivers'] == 'help':
self.logger.info("The following drivers are available:")
self.logger.info(Bcfg2.Client.Tools.drivers)
raise SystemExit(0)
- if self.setup['remove'] and 'services' in self.setup['remove']:
- self.logger.error("Service removal is nonsensical; removed services will only be disabled")
- if self.setup['remove'] not in [False,
- 'all',
- 'Services',
- 'Packages',
- 'services',
- 'packages']:
- self.logger.error("Got unknown argument %s for -r" % (self.setup['remove']))
- if (self.setup["file"] != False) and (self.setup["cache"] != False):
+ if self.setup['remove'] and 'services' in self.setup['remove'].lower():
+ self.logger.error("Service removal is nonsensical; "
+ "removed services will only be disabled")
+ if (self.setup['remove'] and
+ self.setup['remove'].lower() not in ['all', 'services',
+ 'packages']):
+ self.logger.error("Got unknown argument %s for -r" %
+ self.setup['remove'])
+ if self.setup["file"] and self.setup["cache"]:
print("cannot use -f and -c together")
raise SystemExit(1)
if not self.setup['server'].startswith('https://'):
@@ -138,11 +98,13 @@ class Client:
script.write(probe.text)
script.close()
os.close(scripthandle)
- os.chmod(script.name, stat.S_IRUSR | stat.S_IRGRP | stat.S_IROTH |
- stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH |
- stat.S_IWUSR) # 0755
+ os.chmod(script.name,
+ stat.S_IRUSR | stat.S_IRGRP | stat.S_IROTH |
+ stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH |
+ stat.S_IWUSR) # 0755
ret.text = os.popen(script.name).read().strip()
- self.logger.info("Probe %s has result:\n%s" % (name, ret.text))
+ self.logger.info("Probe %s has result:" % name)
+ self.logger.info(ret.text)
finally:
os.unlink(script.name)
except:
@@ -162,6 +124,8 @@ class Client:
# begin configuration
times['start'] = time.time()
+ self.logger.info("Starting Bcfg2 client run at %s" % times['start'])
+
if self.setup['file']:
# read config from file
try:
@@ -176,14 +140,17 @@ class Client:
return(1)
else:
# retrieve config from server
- proxy = Bcfg2.Proxy.ComponentProxy(self.setup['server'],
- self.setup['user'],
- self.setup['password'],
- key=self.setup['key'],
- cert=self.setup['certificate'],
- ca=self.setup['ca'],
- allowedServerCNs=self.setup['serverCN'],
- timeout=self.setup['timeout'])
+ proxy = \
+ Bcfg2.Proxy.ComponentProxy(self.setup['server'],
+ self.setup['user'],
+ self.setup['password'],
+ key=self.setup['key'],
+ cert=self.setup['certificate'],
+ ca=self.setup['ca'],
+ allowedServerCNs=self.setup['serverCN'],
+ timeout=self.setup['timeout'],
+ retries=int(self.setup['retries']),
+ delay=int(self.setup['retry_delay']))
if self.setup['profile']:
try:
@@ -195,6 +162,24 @@ class Client:
raise SystemExit(1)
try:
+ probe_data = proxy.DeclareVersion(__version__)
+ except xmlrpclib.Fault:
+ err = sys.exc_info()[1]
+ if (err.faultCode == xmlrpclib.METHOD_NOT_FOUND or
+ (err.faultCode == 7 and
+ err.faultString.startswith("Unknown method"))):
+ self.logger.debug("Server does not support declaring "
+ "client version")
+ else:
+ self.logger.error("Failed to declare version: %s" % err)
+ except (Bcfg2.Proxy.ProxyError,
+ Bcfg2.Proxy.CertificateError,
+ socket.gaierror,
+ socket.error):
+ err = sys.exc_info()[1]
+ self.logger.error("Failed to declare version: %s" % err)
+
+ try:
probe_data = proxy.GetProbes()
except (Bcfg2.Proxy.ProxyError,
Bcfg2.Proxy.CertificateError,
@@ -208,7 +193,7 @@ class Client:
times['probe_download'] = time.time()
try:
- probes = Bcfg2.Client.XML.XML(probe_data)
+ probes = Bcfg2.Client.XML.XML(str(probe_data))
except Bcfg2.Client.XML.ParseError:
syntax_error = sys.exc_info()[1]
self.fatal_error(
@@ -229,8 +214,7 @@ class Client:
try:
# upload probe responses
proxy.RecvProbeData(Bcfg2.Client.XML.tostring(probedata,
- encoding='UTF-8',
- xml_declaration=True))
+ xml_declaration=False).decode('UTF-8'))
except Bcfg2.Proxy.ProxyError:
err = sys.exc_info()[1]
self.logger.error("Failed to upload probe data: %s" % err)
@@ -282,10 +266,15 @@ class Client:
self.fatal_error("Server error: %s" % (self.config.text))
return(1)
- if self.setup['bundle-quick']:
+ if self.setup['bundle_quick']:
newconfig = Bcfg2.Client.XML.XML('<Configuration/>')
- [newconfig.append(bundle) for bundle in self.config.getchildren() if \
- bundle.tag == 'Bundle' and bundle.get('name') in self.setup['bundle']]
+ [newconfig.append(bundle)
+ for bundle in self.config.getchildren()
+ if (bundle.tag == 'Bundle' and
+ ((self.setup['bundle'] and
+ bundle.get('name') in self.setup['bundle']) or
+ (self.setup['skipbundle'] and
+ bundle.get('name') not in self.setup['skipbundle'])))]
self.config = newconfig
self.tools = Bcfg2.Client.Frame.Frame(self.config,
@@ -293,7 +282,7 @@ class Client:
times, self.setup['drivers'],
self.setup['dryrun'])
- if not self.setup['omit-lock-check']:
+ if not self.setup['omit_lock_check']:
#check lock here
try:
lockfile = open(self.setup['lockfile'], 'w')
@@ -309,7 +298,7 @@ class Client:
# execute the said configuration
self.tools.Execute()
- if not self.setup['omit-lock-check']:
+ if not self.setup['omit_lock_check']:
#unlock here
if lockfile:
try:
@@ -318,20 +307,21 @@ class Client:
except OSError:
self.logger.error("Failed to unlock lockfile %s" % lockfile.name)
- if not self.setup['file'] and not self.setup['bundle-quick']:
+ if not self.setup['file'] and not self.setup['bundle_quick']:
# upload statistics
feedback = self.tools.GenerateStats()
try:
proxy.RecvStats(Bcfg2.Client.XML.tostring(feedback,
- encoding='UTF-8',
- xml_declaration=True))
+ xml_declaration=False).decode('UTF-8'))
except Bcfg2.Proxy.ProxyError:
err = sys.exc_info()[1]
self.logger.error("Failed to upload configuration statistics: "
"%s" % err)
raise SystemExit(2)
+ self.logger.info("Finished Bcfg2 client run at %s" % time.time())
+
if __name__ == '__main__':
signal.signal(signal.SIGINT, cb_sigint_handler)
client = Client()
diff --git a/src/sbin/bcfg2-admin b/src/sbin/bcfg2-admin
index 007dd0af3..24e9eaac4 100755
--- a/src/sbin/bcfg2-admin
+++ b/src/sbin/bcfg2-admin
@@ -11,6 +11,7 @@ from Bcfg2.Bcfg2Py3k import StringIO
log = logging.getLogger('bcfg2-admin')
+
def mode_import(modename):
"""Load Bcfg2.Server.Admin.<mode>."""
modname = modename.capitalize()
@@ -18,10 +19,12 @@ def mode_import(modename):
(modname)).Server.Admin, modname)
return getattr(mod, modname)
+
def get_modes():
"""Get all available modes, except for the base mode."""
return [x.lower() for x in Bcfg2.Server.Admin.__all__ if x != 'mode']
+
def create_description():
"""Create the description string from the list of modes."""
modes = get_modes()
@@ -35,26 +38,25 @@ def create_description():
pass
return description.getvalue()
+
def main():
- optinfo = {
- 'configfile': Bcfg2.Options.CFILE,
- 'help': Bcfg2.Options.HELP,
- 'verbose': Bcfg2.Options.VERBOSE,
- 'repo': Bcfg2.Options.SERVER_REPOSITORY,
- 'plugins': Bcfg2.Options.SERVER_PLUGINS,
- 'event debug': Bcfg2.Options.DEBUG,
- 'filemonitor': Bcfg2.Options.SERVER_FILEMONITOR,
- 'password': Bcfg2.Options.SERVER_PASSWORD,
- 'encoding': Bcfg2.Options.ENCODING,
- }
+ optinfo = dict()
+ optinfo.update(Bcfg2.Options.CLI_COMMON_OPTIONS)
+ optinfo.update(Bcfg2.Options.SERVER_COMMON_OPTIONS)
setup = Bcfg2.Options.OptionParser(optinfo)
# override default help message to include description of all modes
- setup.hm = "%s\n%s" % (setup.buildHelpMessage(), create_description())
+ setup.hm = "Usage:\n\n%s\n%s" % (setup.buildHelpMessage(),
+ create_description())
setup.parse(sys.argv[1:])
- log_args = dict(to_syslog=False, to_console=logging.WARNING)
- if setup['verbose']:
- log_args['to_console'] = logging.DEBUG
+ if setup['debug']:
+ level = logging.DEBUG
+ elif setup['verbose']:
+ level = logging.INFO
+ else:
+ level = logging.WARNING
+ Bcfg2.Logger.setup_logging('bcfg2-admin', to_syslog=setup['syslog'],
+ level=level)
# Provide help if requested or no args were specified
if (not setup['args'] or len(setup['args']) < 1 or
@@ -83,8 +85,7 @@ def main():
if hasattr(mode, 'bcore'):
mode.bcore.shutdown()
else:
- log.error("Unknown mode %s" % setup['args'][0])
- print("Usage:\n %s" % setup.buildHelpMessage())
+ log.error("Error: Unknown mode '%s'\n" % setup['args'][0])
print(create_description())
raise SystemExit(1)
diff --git a/src/sbin/bcfg2-build-reports b/src/sbin/bcfg2-build-reports
index 7fa08110a..318e9de5d 100755
--- a/src/sbin/bcfg2-build-reports
+++ b/src/sbin/bcfg2-build-reports
@@ -110,7 +110,7 @@ def rss(reportxml, delivery, report):
for item in items:
channel.append(item)
- tree = tostring(rssdata, encoding='UTF-8', xml_declaration=True)
+ tree = tostring(rssdata, xml_declaration=False).decode('UTF-8')
fil.write(tree)
fil.close()
@@ -260,7 +260,7 @@ if __name__ == '__main__':
# Apply XSLT, different ones based on report type, and options
if deliverymechanism == 'null-operator': # Special Cases
- fileout(tostring(ElementTree(procnodereport).getroot(), encoding='UTF-8', xml_declaration=True), deliv)
+ fileout(tostring(ElementTree(procnodereport).getroot(), xml_declaration=False).decode('UTF-8'), deliv)
break
transform = delivtype + '-' + deliverymechanism + '.xsl'
@@ -312,7 +312,7 @@ if __name__ == '__main__':
(toastring, socket.getfqdn(), outputstring)
mail(outputstring, c) #call function to send
else:
- outputstring = tostring(stylesheet.apply(ElementTree(procnodereport)).getroot(), encoding='UTF-8', xml_declaration=True)
+ outputstring = tostring(stylesheet.apply(ElementTree(procnodereport)).getroot(), xml_declaration=False).decode('UTF-8')
if deliverymechanism == 'rss':
rss(outputstring, deliv, reprt)
else: # Must be deliverymechanism == 'www':
diff --git a/src/sbin/bcfg2-crypt b/src/sbin/bcfg2-crypt
new file mode 100755
index 000000000..9ce21da82
--- /dev/null
+++ b/src/sbin/bcfg2-crypt
@@ -0,0 +1,362 @@
+#!/usr/bin/env python
+""" helper for encrypting/decrypting Cfg and Properties files """
+
+import os
+import sys
+import logging
+import lxml.etree
+import Bcfg2.Logger
+import Bcfg2.Options
+try:
+ import Bcfg2.Encryption
+except ImportError:
+ err = sys.exc_info()[1]
+ print("Import failed '%s'. Is M2Crypto installed?" %
+ err)
+ raise SystemExit(1)
+
+LOGGER = None
+
+def get_logger(verbose=0):
+ """ set up logging according to the verbose level given on the
+ command line """
+ global LOGGER
+ if LOGGER is None:
+ LOGGER = logging.getLogger(sys.argv[0])
+ stderr = logging.StreamHandler()
+ if verbose:
+ level = logging.DEBUG
+ else:
+ level = logging.WARNING
+ LOGGER.setLevel(level)
+ LOGGER.addHandler(stderr)
+ syslog = logging.handlers.SysLogHandler("/dev/log")
+ syslog.setFormatter(logging.Formatter("%(name)s: %(message)s"))
+ LOGGER.addHandler(syslog)
+ return LOGGER
+
+
+class Encryptor(object):
+ def __init__(self, setup):
+ self.setup = setup
+ self.logger = get_logger()
+ self.passphrase = None
+ self.pname = None
+
+ def get_encrypted_filename(self, plaintext_filename):
+ return plaintext_filename
+
+ def get_plaintext_filename(self, encrypted_filename):
+ return encrypted_filename
+
+ def chunk(self, data):
+ yield data
+
+ def unchunk(self, data, original):
+ return data[0]
+
+ def set_passphrase(self):
+ if (not self.setup.cfp.has_section("encryption") or
+            len(self.setup.cfp.options("encryption")) == 0):
+ self.logger.error("No passphrases available in %s" %
+ self.setup['configfile'])
+ return False
+
+ if self.passphrase:
+ self.logger.debug("Using previously determined passphrase %s" %
+ self.pname)
+ return True
+
+ if self.setup['passphrase']:
+ self.pname = self.setup['passphrase']
+
+ if self.pname:
+ if self.setup.cfp.has_option("encryption", self.pname):
+ self.passphrase = self.setup.cfp.get("encryption",
+ self.pname)
+ self.logger.debug("Using passphrase %s specified on command "
+ "line" % self.pname)
+ return True
+ else:
+ self.logger.error("Could not find passphrase %s in %s" %
+ (self.pname, self.setup['configfile']))
+ return False
+ else:
+ pnames = self.setup.cfp.options("encryption")
+ if len(pnames) == 1:
+                self.passphrase = self.setup.cfp.get("encryption", pnames[0])
+ self.pname = pnames[0]
+ self.logger.info("Using passphrase %s" % pnames[0])
+ return True
+ self.logger.info("No passphrase could be determined")
+ return False
+
+ def encrypt(self, fname):
+ try:
+ plaintext = open(fname).read()
+ except IOError:
+ err = sys.exc_info()[1]
+ self.logger.error("Error reading %s, skipping: %s" % (fname, err))
+ return False
+
+ self.set_passphrase()
+
+ crypted = []
+ for chunk in self.chunk(plaintext):
+ try:
+ passphrase, pname = self.get_passphrase(chunk)
+ except TypeError:
+ return False
+
+ crypted.append(self._encrypt(chunk, passphrase, name=pname))
+
+ new_fname = self.get_encrypted_filename(fname)
+ try:
+ open(new_fname, "wb").write(self.unchunk(crypted, plaintext))
+ self.logger.info("Wrote encrypted data to %s" % new_fname)
+ return True
+ except IOError:
+ err = sys.exc_info()[1]
+ self.logger.error("Error writing encrypted data from %s to %s: %s" %
+ (fname, new_fname, err))
+ return False
+
+ def _encrypt(self, plaintext, passphrase, name=None):
+ return Bcfg2.Encryption.ssl_encrypt(plaintext, passphrase)
+
+ def decrypt(self, fname):
+ try:
+ crypted = open(fname).read()
+ except IOError:
+ err = sys.exc_info()[1]
+ self.logger.error("Error reading %s, skipping: %s" % (fname, err))
+ return False
+
+ self.set_passphrase()
+
+ plaintext = []
+ for chunk in self.chunk(crypted):
+ try:
+ passphrase, pname = self.get_passphrase(chunk)
+ try:
+ plaintext.append(self._decrypt(chunk, passphrase))
+ except Bcfg2.Encryption.EVPError:
+ self.logger.info("Could not decrypt %s with the specified "
+ "passphrase" % fname)
+ return False
+ except:
+ err = sys.exc_info()[1]
+ self.logger.error("Error decrypting %s: %s" % (fname, err))
+ return False
+ except TypeError:
+ pchunk = None
+ for pname in self.setup.cfp.options('encryption'):
+ self.logger.debug("Trying passphrase %s" % pname)
+ passphrase = self.setup.cfp.get('encryption', pname)
+ try:
+ pchunk = self._decrypt(chunk, passphrase)
+ break
+ except Bcfg2.Encryption.EVPError:
+ pass
+ except:
+ err = sys.exc_info()[1]
+ self.logger.error("Error decrypting %s: %s" %
+ (fname, err))
+ if pchunk is not None:
+ plaintext.append(pchunk)
+ else:
+ self.logger.error("Could not decrypt %s with any "
+ "passphrase in %s" %
+ (fname, self.setup['configfile']))
+ return False
+
+ new_fname = self.get_plaintext_filename(fname)
+ try:
+ open(new_fname, "wb").write(self.unchunk(plaintext, crypted))
+ self.logger.info("Wrote decrypted data to %s" % new_fname)
+ return True
+ except IOError:
+ err = sys.exc_info()[1]
+ self.logger.error("Error writing encrypted data from %s to %s: %s" %
+ (fname, new_fname, err))
+ return False
+
+ def get_passphrase(self, chunk):
+ pname = self._get_passphrase(chunk)
+ if not self.pname:
+ if not pname:
+ self.logger.info("No passphrase given on command line or "
+ "found in file")
+ return False
+ elif self.setup.cfp.has_option("encryption", pname):
+ passphrase = self.setup.cfp.get("encryption", pname)
+ else:
+ self.logger.error("Could not find passphrase %s in %s" %
+ (pname, self.setup['configfile']))
+ return False
+ else:
+ pname = self.pname
+ passphrase = self.passphrase
+ if self.pname != pname:
+ self.logger.warning("Passphrase given on command line (%s) "
+ "differs from passphrase embedded in "
+ "file (%s), using command-line option" %
+ (self.pname, pname))
+ return (passphrase, pname)
+
+ def _get_passphrase(self, chunk):
+ return None
+
+ def _decrypt(self, crypted, passphrase):
+ return Bcfg2.Encryption.ssl_decrypt(crypted, passphrase)
+
+
+class CfgEncryptor(Encryptor):
+ def get_encrypted_filename(self, plaintext_filename):
+ return plaintext_filename + ".crypt"
+
+ def get_plaintext_filename(self, encrypted_filename):
+ if encrypted_filename.endswith(".crypt"):
+ return encrypted_filename[:-6]
+ else:
+ return Encryptor.get_plaintext_filename(self, encrypted_filename)
+
+
+class PropertiesEncryptor(Encryptor):
+ def _encrypt(self, plaintext, passphrase, name=None):
+ # plaintext is an lxml.etree._Element
+ if name is None:
+ name = "true"
+ if plaintext.text and plaintext.text.strip():
+ plaintext.text = Bcfg2.Encryption.ssl_encrypt(plaintext.text,
+ passphrase)
+ plaintext.set("encrypted", name)
+ return plaintext
+
+ def chunk(self, data):
+ xdata = lxml.etree.XML(data)
+ if self.setup['xpath']:
+ elements = xdata.xpath(self.setup['xpath'])
+ else:
+ elements = xdata.xpath('//*[@encrypted]')
+ if not elements:
+ elements = list(xdata.getiterator())
+ # this is not a good use of a generator, but we need to
+ # generate the full list of elements in order to ensure that
+ # some exist before we know what to return
+ for elt in elements:
+ yield elt
+
+ def unchunk(self, data, original):
+ # Properties elements are modified in-place, so we don't
+ # actually need to unchunk anything
+ xdata = data[0]
+ # find root element
+ while xdata.getparent() != None:
+ xdata = xdata.getparent()
+ xdata.set("encryption", "true")
+ return lxml.etree.tostring(xdata, xml_declaration=False).decode('UTF-8')
+
+ def _get_passphrase(self, chunk):
+ pname = chunk.get("encrypted") or chunk.get("encryption")
+ if pname and pname.lower() != "true":
+ return pname
+ return None
+
+ def _decrypt(self, crypted, passphrase):
+ # crypted is in lxml.etree._Element
+ if not crypted.text or not crypted.text.strip():
+ self.logger.warning("Skipping empty element %s" % crypted.tag)
+ return crypted
+ rv = Bcfg2.Encryption.ssl_decrypt(crypted.text, passphrase)
+ crypted.text = rv
+ return crypted
+
+
+def main():
+ optinfo = dict()
+ optinfo.update(Bcfg2.Options.CRYPT_OPTIONS)
+ optinfo.update(Bcfg2.Options.CLI_COMMON_OPTIONS)
+ setup = Bcfg2.Options.OptionParser(optinfo)
+ setup.hm = " bcfg2-crypt [options] <filename>\nOptions:\n%s" % \
+ setup.buildHelpMessage()
+ setup.parse(sys.argv[1:])
+
+ if not setup['args']:
+ print(setup.hm)
+ raise SystemExit(1)
+ elif setup['encrypt'] and setup['decrypt']:
+        print("You cannot specify both --encrypt and --decrypt")
+ raise SystemExit(1)
+ elif setup['cfg'] and setup['properties']:
+ print("You cannot specify both --cfg and --properties")
+ raise SystemExit(1)
+    elif setup['cfg'] and setup['xpath']:
+ print("Specifying --xpath with --cfg is nonsensical, ignoring --xpath")
+ setup['xpath'] = Bcfg2.Options.CRYPT_XPATH.default
+ elif setup['decrypt'] and setup['remove']:
+ print("--remove cannot be used with --decrypt, ignoring")
+ setup['remove'] = Bcfg2.Options.CRYPT_REMOVE.default
+
+ logger = get_logger(setup['verbose'])
+
+ props_crypt = PropertiesEncryptor(setup)
+ cfg_crypt = CfgEncryptor(setup)
+
+ for fname in setup['args']:
+ if not os.path.exists(fname):
+ logger.error("%s does not exist, skipping" % fname)
+ continue
+
+ # figure out if we need to encrypt this as a Properties file
+ # or as a Cfg file
+ props = False
+ if setup['properties']:
+ props = True
+ elif setup['cfg']:
+ props = False
+ elif fname.endswith(".xml"):
+ try:
+ xroot = lxml.etree.parse(fname).getroot()
+ if xroot.tag == "Properties":
+ props = True
+ else:
+ props = False
+ except IOError:
+ err = sys.exc_info()[1]
+ logger.error("Error reading %s, skipping: %s" % (fname, err))
+ continue
+ except lxml.etree.XMLSyntaxError:
+ props = False
+ else:
+ props = False
+
+ if props:
+ encryptor = props_crypt
+ else:
+ encryptor = cfg_crypt
+
+ if setup['encrypt']:
+ if not encryptor.encrypt(fname):
+ print("Failed to encrypt %s, skipping" % fname)
+ elif setup['decrypt']:
+ if not encryptor.decrypt(fname):
+ print("Failed to decrypt %s, skipping" % fname)
+ else:
+ logger.info("Neither --encrypt nor --decrypt specified, "
+ "determining mode")
+ if not encryptor.decrypt(fname):
+ logger.info("Failed to decrypt %s, trying encryption" % fname)
+ if not encryptor.encrypt(fname):
+ print("Failed to encrypt %s, skipping" % fname)
+
+ if setup['remove'] and encryptor.get_encrypted_filename(fname) != fname:
+ try:
+ os.unlink(fname)
+            except OSError:
+ err = sys.exc_info()[1]
+ logger.error("Error removing %s: %s" % (fname, err))
+ continue
+
+if __name__ == '__main__':
+ sys.exit(main())
diff --git a/src/sbin/bcfg2-info b/src/sbin/bcfg2-info
index 8598a58eb..7cc361a1c 100755
--- a/src/sbin/bcfg2-info
+++ b/src/sbin/bcfg2-info
@@ -10,6 +10,7 @@ import fnmatch
import logging
import tempfile
import lxml.etree
+import traceback
from code import InteractiveConsole
try:
@@ -26,9 +27,14 @@ import Bcfg2.Logger
import Bcfg2.Options
import Bcfg2.Server.Core
import Bcfg2.Server.Plugins.Metadata
-import Bcfg2.Server.Plugins.SGenshi
import Bcfg2.Server.Plugin
+try:
+ from Bcfg2.Server.Plugins.Bundler import BundleTemplateFile
+ has_genshi = True
+except ImportError:
+ has_genshi = False
+
logger = logging.getLogger('bcfg2-info')
USAGE = """Commands:
build <hostname> <filename> - Build config for hostname, writing to filename
@@ -96,7 +102,7 @@ def getClientList(hostglobs):
""" given a host glob, get a list of clients that match it """
# special cases to speed things up:
if '*' in hostglobs:
- return list(self.metadata.clients.keys())
+ return self.metadata.clients
has_wildcards = False
for glob in hostglobs:
# check if any wildcard characters are in the string
@@ -107,7 +113,7 @@ def getClientList(hostglobs):
return hostglobs
rv = set()
- clist = set(self.metadata.clients.keys())
+ clist = set(self.metadata.clients)
for glob in hostglobs:
for client in clist:
if fnmatch.fnmatch(client, glob):
@@ -131,20 +137,50 @@ def displayTrace(trace, num=80, sort=('time', 'calls')):
stats.sort_stats('cumulative', 'calls', 'time')
stats.print_stats(200)
-class infoCore(cmd.Cmd, Bcfg2.Server.Core.Core):
+def load_interpreters():
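+    """ Build a map of the available interactive interpreters
+    (python, plus bpython and IPython when importable), with a
+    'best' alias pointing at the preferred one. """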
+ interpreters = dict(python=lambda v: InteractiveConsole(v).interact())
+ best = "python"
+ try:
+ import bpython.cli
+ interpreters["bpython"] = lambda v: bpython.cli.main(args=[], locals_=v)
+ best = "bpython"
+ except ImportError:
+ pass
+
+ try:
+ # whether ipython is actually better than bpython is
+ # up for debate, but this is the behavior that existed
+ # before --interpreter was added, so we call IPython
+ # better
+ import IPython
+ if hasattr(IPython, "Shell"):
+ interpreters["ipython"] = lambda v: \
+ IPython.Shell.IPShell(argv=[], user_ns=v).mainloop()
+ best = "ipython"
+ elif hasattr(IPython, "embed"):
+ interpreters["ipython"] = lambda v: IPython.embed(user_ns=v)
+ best = "ipython"
+ else:
+ print("Unknown IPython API version")
+ except ImportError:
+ pass
+
+ interpreters['best'] = interpreters[best]
+ return interpreters
+
+
+class infoCore(cmd.Cmd, Bcfg2.Server.Core.BaseCore):
"""Main class for bcfg2-info."""
def __init__(self, repo, plgs, passwd, encoding, event_debug,
filemonitor='default', setup=None):
cmd.Cmd.__init__(self)
try:
- Bcfg2.Server.Core.Core.__init__(self, repo, plgs, passwd,
- encoding, filemonitor=filemonitor,
- setup=setup)
+ Bcfg2.Server.Core.BaseCore.__init__(self, setup=setup)
if event_debug:
self.fam.debug = True
except Bcfg2.Server.Core.CoreInitError:
msg = sys.exc_info()[1]
- print("Core load failed because %s" % msg)
+ print("Core load failed: %s" % msg)
raise SystemExit(1)
self.prompt = '> '
self.cont = True
@@ -185,24 +221,21 @@ class infoCore(cmd.Cmd, Bcfg2.Server.Core.Core):
spath = opt[1]
elif opt[0] == '-n':
interactive = False
- sh = InteractiveConsole(locals())
if scriptmode:
+ sh = InteractiveConsole(locals())
for command in [c.strip() for c in open(spath).readlines()]:
if command:
sh.push(command)
if interactive:
- print("Dropping to python interpreter; press ^D to resume")
- try:
- import IPython
- if hasattr(IPython, "Shell"):
- shell = IPython.Shell.IPShell(argv=[], user_ns=locals())
- shell.mainloop()
- elif hasattr(IPython, "embed"):
- IPython.embed(user_ns=locals())
- else:
- raise ImportError
- except ImportError:
- sh.interact()
+ interpreters = load_interpreters()
+ if setup['interpreter'] in interpreters:
+ print("Dropping to %s interpreter; press ^D to resume" %
+ setup['interpreter'])
+ interpreters[setup['interpreter']](locals())
+ else:
+ logger.error("Invalid interpreter %s" % setup['interpreter'])
+ logger.error("Valid interpreters are: %s" %
+ ", ".join(interpeters.keys()))
def do_quit(self, _):
"""
@@ -295,7 +328,7 @@ class infoCore(cmd.Cmd, Bcfg2.Server.Core.Core):
if len(alist) > 1:
clients = getClientList(alist[1:])
else:
- clients = list(self.metadata.clients.keys())
+ clients = self.metadata.clients
for client in clients:
self.do_build("%s %s" % (client, os.path.join(destdir,
client + ".xml")))
@@ -327,7 +360,7 @@ class infoCore(cmd.Cmd, Bcfg2.Server.Core.Core):
if len(args) > 2:
clients = getClientList(args[1:])
else:
- clients = list(self.metadata.clients.keys())
+ clients = self.metadata.clients
if altsrc:
args = "--altsrc %s -f %%s %%s %%s" % altsrc
else:
@@ -362,8 +395,8 @@ class infoCore(cmd.Cmd, Bcfg2.Server.Core.Core):
try:
metadata = self.build_metadata(client)
self.Bind(entry, metadata)
- data = lxml.etree.tostring(entry, encoding="UTF-8",
- xml_declaration=True)
+ data = lxml.etree.tostring(entry,
+ xml_declaration=False).decode('UTF-8')
if outfile:
open(outfile, 'w').write(data)
else:
@@ -373,7 +406,8 @@ class infoCore(cmd.Cmd, Bcfg2.Server.Core.Core):
print("Could not write to %s: %s" % (outfile, err))
print(data)
except Exception:
- print("Failed to build entry %s for host %s" % (fname, client))
+ print("Failed to build entry %s for host %s: %s" %
+ (fname, client, traceback.format_exc().splitlines()[-1]))
raise
def do_buildbundle(self, args):
@@ -384,8 +418,9 @@ class infoCore(cmd.Cmd, Bcfg2.Server.Core.Core):
metadata = self.build_metadata(client)
if bname in self.plugins['Bundler'].entries:
bundle = self.plugins['Bundler'].entries[bname]
- if isinstance(bundle,
- Bcfg2.Server.Plugins.SGenshi.SGenshiTemplateFile):
+ if (has_genshi and
+ isinstance(bundle,
+ BundleTemplateFile)):
stream = bundle.template.generate(metadata=metadata)
print(stream.render("xml"))
else:
@@ -413,10 +448,11 @@ class infoCore(cmd.Cmd, Bcfg2.Server.Core.Core):
def do_clients(self, _):
"""Print out client info."""
data = [('Client', 'Profile')]
- clist = list(self.metadata.clients.keys())
+ clist = self.metadata.clients
clist.sort()
for client in clist:
- data.append((client, self.metadata.clients[client]))
+ imd = self.metadata.get_initial_metadata(client)
+ data.append((client, imd.profile))
printTabular(data)
def do_config(self, _):
@@ -466,22 +502,18 @@ class infoCore(cmd.Cmd, Bcfg2.Server.Core.Core):
def do_groups(self, _):
"""Print out group info."""
- data = [("Groups", "Profile", "Category", "Contains")]
+ # FIXME: Contains doesn't work. Not sure what it was used for
+ #data = [("Groups", "Profile", "Category", "Contains")]
+ data = [("Groups", "Profile", "Category")]
grouplist = list(self.metadata.groups.keys())
grouplist.sort()
for group in grouplist:
- if group in self.metadata.profiles:
+ if self.metadata.groups[group].is_profile:
prof = 'yes'
else:
prof = 'no'
- if group in self.metadata.categories:
- cat = self.metadata.categories[group]
- else:
- cat = ''
- gdata = [grp for grp in self.metadata.groups[group][1]]
- if group in gdata:
- gdata.remove(group)
- data.append((group, prof, cat, ','.join(gdata)))
+ cat = self.metadata.groups[group].category
+ data.append((group, prof, cat))
printTabular(data)
def do_showclient(self, args):
@@ -496,21 +528,34 @@ class infoCore(cmd.Cmd, Bcfg2.Server.Core.Core):
except:
print("Client %s not defined" % client)
continue
- print("Hostname:\t%s" % client_meta.hostname)
- print("Profile:\t%s" % client_meta.profile)
- print("Groups:\t\t%s" % list(client_meta.groups)[0])
- for grp in list(client_meta.groups)[1:]:
- print("\t\t%s" % grp)
+ fmt = "%-10s %s"
+ print(fmt % ("Hostname:", client_meta.hostname))
+ print(fmt % ("Profile:", client_meta.profile))
+
+ group_fmt = "%-10s %-30s %s"
+ header = False
+ for group in list(client_meta.groups):
+ category = ""
+ for cat, grp in client_meta.categories.items():
+ if grp == group:
+ category = "Category: %s" % cat
+ break
+ if not header:
+ print(group_fmt % ("Groups:", group, category))
+ header = True
+ else:
+ print(group_fmt % ("", group, category))
+
if client_meta.bundles:
- print("Bundles:\t%s" % list(client_meta.bundles)[0])
+ print(fmt % ("Bundles:", list(client_meta.bundles)[0]))
for bnd in list(client_meta.bundles)[1:]:
- print("\t\t%s" % bnd)
+ print(fmt % ("", bnd))
if client_meta.connectors:
print("Connector data")
print("=" * 80)
for conn in client_meta.connectors:
if getattr(client_meta, conn):
- print("%s:\t%s" % (conn, getattr(client_meta, conn)))
+ print(fmt % (conn + ":", getattr(client_meta, conn)))
print("=" * 80)
def do_mappings(self, args):
@@ -568,6 +613,9 @@ class infoCore(cmd.Cmd, Bcfg2.Server.Core.Core):
print("Usage: packageresolve <hostname> <package> [<package>...]")
return
+ if 'Packages' not in self.plugins:
+ print("Packages plugin not enabled")
+ return
hostname = arglist[0]
initial = arglist[1:]
metadata = self.build_metadata(hostname)
@@ -585,42 +633,28 @@ class infoCore(cmd.Cmd, Bcfg2.Server.Core.Core):
print(" %s" % "\n ".join(unknown))
def do_packagesources(self, args):
+ if not args:
+ print("Usage: packagesources <hostname>")
+ return
+ if 'Packages' not in self.plugins:
+ print("Packages plugin not enabled")
+ return
try:
metadata = self.build_metadata(args)
except Bcfg2.Server.Plugins.Metadata.MetadataConsistencyError:
print("Unable to build metadata for host %s" % args)
return
collection = self.plugins['Packages']._get_collection(metadata)
- for source in collection.sources:
- # get_urls() loads url_map as a side-effect
- source.get_urls()
- for url_map in source.url_map:
- for arch in url_map['arches']:
- # make sure client is in all the proper arch groups
- if arch not in metadata.groups:
- continue
- reponame = source.get_repo_name(url_map)
- print("Name: %s" % reponame)
- print(" Type: %s" % source.ptype)
- if url_map['url'] != '':
- print(" URL: %s" % url_map['url'])
- elif url_map['rawurl'] != '':
- print(" RAWURL: %s" % url_map['rawurl'])
- if source.gpgkeys:
- print(" GPG Key(s): %s" % ", ".join(source.gpgkeys))
- else:
- print(" GPG Key(s): None")
- if len(source.blacklist):
- print(" Blacklist: %s" % ", ".join(source.blacklist))
- if len(source.whitelist):
- print(" Whitelist: %s" % ", ".join(source.whitelist))
- print("")
+ print(collection.sourcelist())
def do_profile(self, arg):
"""."""
if not have_profile:
print("Profiling functionality not available.")
return
+ if len(arg) == 0:
+ print("Usage: profile <command> <args>")
+ return
tracefname = tempfile.mktemp()
p = profile.Profile()
p.runcall(self.onecmd, arg)
@@ -635,34 +669,27 @@ class infoCore(cmd.Cmd, Bcfg2.Server.Core.Core):
self.do_loop()
if __name__ == '__main__':
- Bcfg2.Logger.setup_logging('bcfg2-info', to_syslog=False)
- optinfo = {
- 'configfile': Bcfg2.Options.CFILE,
- 'help': Bcfg2.Options.HELP,
- 'event debug': Bcfg2.Options.DEBUG,
- 'profile': Bcfg2.Options.CORE_PROFILE,
- 'encoding': Bcfg2.Options.ENCODING,
- # Server options
- 'repo': Bcfg2.Options.SERVER_REPOSITORY,
- 'plugins': Bcfg2.Options.SERVER_PLUGINS,
- 'password': Bcfg2.Options.SERVER_PASSWORD,
- 'mconnect': Bcfg2.Options.SERVER_MCONNECT,
- 'filemonitor': Bcfg2.Options.SERVER_FILEMONITOR,
- 'location': Bcfg2.Options.SERVER_LOCATION,
- 'static': Bcfg2.Options.SERVER_STATIC,
- 'key': Bcfg2.Options.SERVER_KEY,
- 'cert': Bcfg2.Options.SERVER_CERT,
- 'ca': Bcfg2.Options.SERVER_CA,
- 'password': Bcfg2.Options.SERVER_PASSWORD,
- 'protocol': Bcfg2.Options.SERVER_PROTOCOL,
- # More options
- 'logging': Bcfg2.Options.LOGGING_FILE_PATH
- }
+ optinfo = dict(profile=Bcfg2.Options.CORE_PROFILE,
+ mconnect=Bcfg2.Options.SERVER_MCONNECT,
+ interactive=Bcfg2.Options.INTERACTIVE,
+ interpreter=Bcfg2.Options.INTERPRETER)
+ optinfo.update(Bcfg2.Options.CLI_COMMON_OPTIONS)
+ optinfo.update(Bcfg2.Options.SERVER_COMMON_OPTIONS)
setup = Bcfg2.Options.OptionParser(optinfo)
- setup.hm = "Usage:\n %s\n%s" % (setup.buildHelpMessage(),
- USAGE)
+ setup.hm = "\n".join([" bcfg2-info [options] [command <command args>]",
+ "Options:",
+ setup.buildHelpMessage(),
+ USAGE])
setup.parse(sys.argv[1:])
+ if setup['debug']:
+ level = logging.DEBUG
+ elif setup['verbose']:
+ level = logging.INFO
+ else:
+ level = logging.WARNING
+ Bcfg2.Logger.setup_logging('bcfg2-info', to_syslog=False,
+ level=level)
if setup['args'] and setup['args'][0] == 'help':
print(setup.hm)
sys.exit(0)
@@ -670,14 +697,14 @@ if __name__ == '__main__':
prof = profile.Profile()
loop = prof.runcall(infoCore, setup['repo'], setup['plugins'],
setup['password'], setup['encoding'],
- setup['event debug'], setup['filemonitor'],
+ setup['debug'], setup['filemonitor'],
setup)
displayTrace(prof)
else:
if setup['profile']:
print("Profiling functionality not available.")
loop = infoCore(setup['repo'], setup['plugins'], setup['password'],
- setup['encoding'], setup['event debug'],
+ setup['encoding'], setup['debug'],
setup['filemonitor'], setup)
loop.Run(setup['args'])
diff --git a/src/sbin/bcfg2-lint b/src/sbin/bcfg2-lint
index 78b833f02..1038beca8 100755
--- a/src/sbin/bcfg2-lint
+++ b/src/sbin/bcfg2-lint
@@ -61,51 +61,32 @@ def get_errorhandler(config):
def load_server(setup):
""" load server """
- core = Bcfg2.Server.Core.Core(setup['repo'], setup['plugins'],
- setup['password'], setup['encoding'],
- filemonitor=setup['filemonitor'],
- setup=setup)
- if setup['event debug']:
- core.fam.debug = True
+ core = Bcfg2.Server.Core.BaseCore(setup)
core.fam.handle_events_in_interval(4)
return core
+def load_plugin(module, obj_name=None):
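+    """ Import the given module and return obj_name from it; if
+    obj_name is not given, the last component of the module path is
+    used. """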
+ parts = module.split(".")
+ if obj_name is None:
+ obj_name = parts[-1]
+
+ mod = __import__(module)
+ for p in parts[1:]:
+ mod = getattr(mod, p)
+ return getattr(mod, obj_name)
+
if __name__ == '__main__':
- optinfo = {
- 'configfile': Bcfg2.Options.CFILE,
- 'help': Bcfg2.Options.HELP,
- 'verbose': Bcfg2.Options.VERBOSE,
- 'event debug': Bcfg2.Options.DEBUG,
- 'encoding': Bcfg2.Options.ENCODING,
- # Server options
- 'repo': Bcfg2.Options.SERVER_REPOSITORY,
- 'plugins': Bcfg2.Options.SERVER_PLUGINS,
- 'mconnect': Bcfg2.Options.SERVER_MCONNECT,
- 'filemonitor': Bcfg2.Options.SERVER_FILEMONITOR,
- 'location': Bcfg2.Options.SERVER_LOCATION,
- 'static': Bcfg2.Options.SERVER_STATIC,
- 'key': Bcfg2.Options.SERVER_KEY,
- 'cert': Bcfg2.Options.SERVER_CERT,
- 'ca': Bcfg2.Options.SERVER_CA,
- 'password': Bcfg2.Options.SERVER_PASSWORD,
- 'protocol': Bcfg2.Options.SERVER_PROTOCOL,
- # More options
- 'logging': Bcfg2.Options.LOGGING_FILE_PATH,
- 'stdin': Bcfg2.Options.FILES_ON_STDIN,
- 'schema': Bcfg2.Options.SCHEMA_PATH,
- 'config': Bcfg2.Options.Option('Specify bcfg2-lint configuration file',
- '/etc/bcfg2-lint.conf',
- cmd='--lint-config',
- odesc='<conffile>',
- long_arg=True),
- 'showerrors': Bcfg2.Options.Option('Show error handling', False,
- cmd='--list-errors',
- long_arg=True),
- }
+ optinfo = dict(config=Bcfg2.Options.LINT_CONFIG,
+ showerrors=Bcfg2.Options.LINT_SHOW_ERRORS,
+ stdin=Bcfg2.Options.LINT_FILES_ON_STDIN,
+ schema=Bcfg2.Options.SCHEMA_PATH,
+ plugins=Bcfg2.Options.SERVER_PLUGINS)
+ optinfo.update(Bcfg2.Options.CLI_COMMON_OPTIONS)
+ optinfo.update(Bcfg2.Options.SERVER_COMMON_OPTIONS)
setup = Bcfg2.Options.OptionParser(optinfo)
setup.parse(sys.argv[1:])
- log_args = dict(to_syslog=False, to_console=logging.WARNING)
+ log_args = dict(to_syslog=setup['syslog'], to_console=logging.WARNING)
if setup['verbose']:
log_args['to_console'] = logging.DEBUG
Bcfg2.Logger.setup_logging('bcfg2-info', **log_args)
@@ -116,36 +97,40 @@ if __name__ == '__main__':
# get list of plugins to run
if setup['args']:
- allplugins = setup['args']
+ plugin_list = setup['args']
elif "bcfg2-repo-validate" in sys.argv[0]:
- allplugins = 'Duplicates,RequiredAttrs,Validate'.split(',')
+ plugin_list = 'Duplicates,RequiredAttrs,Validate'.split(',')
else:
try:
- allplugins = config.get('lint', 'plugins').split(',')
+ plugin_list = config.get('lint', 'plugins').split(',')
except (ConfigParser.NoOptionError, ConfigParser.NoSectionError):
- allplugins = Bcfg2.Server.Lint.__all__
+ plugin_list = Bcfg2.Server.Lint.__all__
if setup['stdin']:
files = [s.strip() for s in sys.stdin.readlines()]
else:
files = None
- # load plugins
- serverplugins = {}
- serverlessplugins = {}
- for plugin_name in allplugins:
+ allplugins = dict()
+ for plugin in plugin_list:
try:
- mod = getattr(__import__("Bcfg2.Server.Lint.%s" %
- (plugin_name)).Server.Lint, plugin_name)
+ allplugins[plugin] = load_plugin("Bcfg2.Server.Lint." + plugin)
except ImportError:
try:
- mod = __import__(plugin_name)
- except Exception:
- err = sys.exc_info()[1]
- logger.error("Failed to load plugin %s: %s" % (plugin_name,
- err))
- raise SystemExit(1)
- plugin = getattr(mod, plugin_name)
+ allplugins[plugin] = \
+ load_plugin("Bcfg2.Server.Plugins." + plugin,
+ obj_name=plugin + "Lint")
+ except (ImportError, AttributeError):
+ err = sys.exc_info()[1]
+ logger.error("Failed to load plugin %s: %s" % (plugin + "Lint",
+ err))
+ except AttributeError:
+ err = sys.exc_info()[1]
+ logger.error("Failed to load plugin %s: %s" % (obj_name, err))
+
+ serverplugins = dict()
+ serverlessplugins = dict()
+ for plugin_name, plugin in allplugins.items():
if [c for c in inspect.getmro(plugin)
if c == Bcfg2.Server.Lint.ServerPlugin]:
serverplugins[plugin_name] = plugin
diff --git a/src/sbin/bcfg2-ping-sweep b/src/sbin/bcfg2-ping-sweep
deleted file mode 100755
index be8994be3..000000000
--- a/src/sbin/bcfg2-ping-sweep
+++ /dev/null
@@ -1,70 +0,0 @@
-#!/usr/bin/env python
-#GenerateHostInfo - Joey Hagedorn - hagedorn@mcs.anl.gov
-
-"""Generates hostinfo.xml at a regular interval."""
-
-from os import dup2, execl, fork, uname, wait
-import sys
-import time
-import lxml.etree
-
-import Bcfg2.Options
-
-if __name__ == '__main__':
- opts = {'repo': Bcfg2.Options.SERVER_REPOSITORY,
- 'configfile': Bcfg2.Options.CFILE}
- setup = Bcfg2.Options.OptionParser(opts)
- setup.parse(sys.argv[1:])
-
- cfpath = setup['configfile']
- clientdatapath = "%s/Metadata/clients.xml" % setup['repo']
-
- clientElement = lxml.etree.parse(clientdatapath)
- hostlist = [client.get('name')
- for client in clientElement.findall("Client")]
-
- pids = {}
- null = open('/dev/null', 'w+')
-
- #use uname to detect OS and use -t for darwin and -w for linux
- #/bin/ping on linux /sbin/ping on os x
- osname = uname()[0]
-
- while hostlist or pids:
- if hostlist and len(list(pids.keys())) < 15:
- host = hostlist.pop()
- pid = fork()
- if pid == 0:
- # in child
- dup2(null.fileno(), sys.__stdin__.fileno())
- dup2(null.fileno(), sys.__stdout__.fileno())
- dup2(null.fileno(), sys.__stderr__.fileno())
- if osname == 'Linux':
- execl('/bin/ping', 'ping', '-w', '5', '-c', '1', host)
- elif osname in ['Darwin', 'FreeBSD']:
- execl('/sbin/ping', 'ping', '-t', '5', '-c', '1', host)
- elif osname == 'SunOS':
- execl('/usr/sbin/ping', 'ping', host, '56', '1')
- else: # default
- execl('/bin/ping', 'ping', '-w', '5', '-c', '1', host)
- else:
- pids[pid] = host
- else:
- try:
- (cpid, status) = wait()
- except OSError:
- continue
- chost = pids[cpid]
- del pids[cpid]
- elm = clientElement.xpath("//Client[@name='%s']" % chost)[0]
- if status == 0:
- elm.set("pingable", 'Y')
- elm.set("pingtime", str(time.time()))
- else:
- elm.set("pingable", 'N')
-
- fout = open(clientdatapath, 'w')
- fout.write(lxml.etree.tostring(clientElement.getroot(),
- encoding='UTF-8',
- xml_declaration=True))
- fout.close()
diff --git a/src/sbin/bcfg2-reports b/src/sbin/bcfg2-reports
index 1f101b9a7..cb553c0ba 100755
--- a/src/sbin/bcfg2-reports
+++ b/src/sbin/bcfg2-reports
@@ -3,6 +3,9 @@
import os
import sys
+import datetime
+from optparse import OptionParser, OptionGroup, make_option
+from Bcfg2.Bcfg2Py3k import ConfigParser
try:
import Bcfg2.Server.Reports.settings
@@ -20,376 +23,277 @@ sys.path.pop()
# Set DJANGO_SETTINGS_MODULE appropriately.
os.environ['DJANGO_SETTINGS_MODULE'] = '%s.settings' % project_name
-from Bcfg2.Server.Reports.reports.models import Client
-import getopt
-import datetime
-import fileinput
-
-usage = """Usage: bcfg2-reports [option] ...
-
-Options and arguments (and corresponding environment variables):
--a : shows all hosts, including expired hosts
--b NAME : single-host mode - shows bad entries from the
- current interaction of NAME
--c : shows only clean hosts
--d : shows only dirty hosts
--e NAME : single-host mode - shows extra entries from the
- current interaction of NAME
--h : shows help and usage info about bcfg2-reports
--m NAME : single-host mode - shows modified entries from the
- current interaction of NAME
--s NAME : single-host mode - shows bad, modified, and extra
- entries from the current interaction of NAME
--t NAME : single-host mode - shows total number of managed and
- good entries from the current interaction of NAME
--x NAME : toggles expired/unexpired state of NAME
---badentry=KIND,NAME : shows only hosts whose current interaction has bad
- entries in of KIND kind and NAME name; if a single
- argument ARG1 is given, then KIND,NAME pairs will be
- read from a file of name ARG1
---modifiedentry=KIND,NAME : shows only hosts whose current interaction has
- modified entries in of KIND kind and NAME name; if a
- single argument ARG1 is given, then KIND,NAME pairs
- will be read from a file of name ARG1
---extraentry=KIND,NAME : shows only hosts whose current interaction has extra
- entries in of KIND kind and NAME name; if a single
- argument ARG1 is given, then KIND,NAME pairs will be
- read from a file of name ARG1
---fields=ARG1,ARG2,... : only displays the fields ARG1,ARG2,...
- (name,time,state)
---sort=ARG1,ARG2,... : sorts output on ARG1,ARG2,... (name,time,state)
---stale : shows hosts which haven't run in the last 24 hours
-"""
-
-def timecompare(client1, client2):
- """Compares two clients by their timestamps."""
- return cmp(client1.current_interaction.timestamp, \
- client2.current_interaction.timestamp)
-
-def namecompare(client1, client2):
- """Compares two clients by their names."""
- return cmp(client1.name, client2.name)
-
-def statecompare(client1, client2):
- """Compares two clients by their states."""
- clean1 = client1.current_interaction.isclean()
- clean2 = client2.current_interaction.isclean()
-
- if clean1 and not clean2:
- return -1
- elif clean2 and not clean1:
- return 1
- else:
- return 0
-
-def totalcompare(client1, client2):
- """Compares two clients by their total entry counts."""
- return cmp(client2.current_interaction.totalcount, \
- client1.current_interaction.totalcount)
-
-def goodcompare(client1, client2):
- """Compares two clients by their good entry counts."""
- return cmp(client2.current_interaction.goodcount, \
- client1.current_interaction.goodcount)
+from Bcfg2.Server.Reports.reports.models import (Client, Entries_interactions,
+ Entries, TYPE_CHOICES)
-def badcompare(client1, client2):
- """Compares two clients by their bad entry counts."""
- return cmp(client2.current_interaction.totalcount - \
- client2.current_interaction.goodcount, \
- client1.current_interaction.totalcount - \
- client1.current_interaction.goodcount)
+def hosts_by_entry_type(clients, etype, entryspec):
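+    """ Return the clients whose current interaction has entries of
+    the given type (bad, modified, extra) matching any (kind, name)
+    pair in entryspec. """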
+ result = []
+ for entry in entryspec:
+ for client in clients:
+ items = getattr(client.current_interaction, etype)()
+ for item in items:
+ if (item.entry.kind == entry[0] and
+ item.entry.name == entry[1]):
+ result.append(client)
+ return result
-def crit_compare(criterion, client1, client2):
- """Compares two clients by the criteria provided in criterion."""
- for crit in criterion:
- comp = 0
- if crit == 'name':
- comp = namecompare(client1, client2)
- elif crit == 'state':
- comp = statecompare(client1, client2)
- elif crit == 'time':
- comp = timecompare(client1, client2)
- elif crit == 'total':
- comp = totalcompare(client1, client2)
- elif crit == 'good':
- comp = goodcompare(client1, client2)
- elif crit == 'bad':
- comp = badcompare(client1, client2)
-
- if comp != 0:
- return comp
-
- return 0
-
-def print_fields(fields, cli, max_name, entrydict):
+def print_fields(fields, client, fmt, extra=None):
"""
-    Prints the fields specified in fields of cli, max_name
-    specifies the column width of the name column.
+    Prints the fields specified in fields of client, using the
+    pre-computed format string fmt; extra supplies additional field data.
"""
- fmt = ''
- for field in fields:
- if field == 'name':
- fmt += ("%%-%ds " % (max_name))
- else:
- fmt += "%s "
fdata = []
+ if extra is None:
+ extra = dict()
for field in fields:
if field == 'time':
- fdata.append(str(cli.current_interaction.timestamp))
+ fdata.append(str(client.current_interaction.timestamp))
elif field == 'state':
- if cli.current_interaction.isclean():
+ if client.current_interaction.isclean():
fdata.append("clean")
else:
fdata.append("dirty")
elif field == 'total':
- fdata.append("%5d" % cli.current_interaction.totalcount)
+ fdata.append(client.current_interaction.totalcount)
elif field == 'good':
- fdata.append("%5d" % cli.current_interaction.goodcount)
+ fdata.append(client.current_interaction.goodcount)
+ elif field == 'modified':
+ fdata.append(client.current_interaction.modified_entry_count())
+ elif field == 'extra':
+ fdata.append(client.current_interaction.extra_entry_count())
elif field == 'bad':
- fdata.append("%5d" % cli.current_interaction.totalcount \
- - cli.current_interaction.goodcount)
+ fdata.append((client.current_interaction.badcount()))
else:
try:
- fdata.append(getattr(cli, field))
+ fdata.append(getattr(client, field))
except:
- fdata.append("N/A")
+ fdata.append(extra.get(field, "N/A"))
- display = fmt % tuple(fdata)
- if len(entrydict) > 0:
- display += " "
- display += str(entrydict[cli])
- print(display)
+ print(fmt % tuple(fdata))
-def print_entry(item, max_name):
- fmt = ("%%-%ds " % (max_name))
- fdata = item.entry.kind + ":" + item.entry.name
- display = fmt % (fdata)
- print(display)
-
-fields = ""
-sort = ""
-badentry = ""
-modifiedentry = ""
-extraentry = ""
-expire = ""
-singlehost = ""
+def print_entries(interaction, etype):
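+    """ Print the kind:name of each entry of the given type in the
+    interaction. """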
+ items = getattr(interaction, etype)()
+ for item in items:
+ print("%-70s %s" % (item.entry.kind + ":" + item.entry.name, etype))
-c_list = Client.objects.all()
+def main():
+ parser = OptionParser(usage="%prog [options] <mode> [arg]")
-result = list()
-entrydict = dict()
+ # single host modes
+ multimodes = []
+ singlemodes = []
+ multimodes.append(make_option("-b", "--bad", action="store_true",
+ default=False,
+ help="Show bad entries from HOST"))
+ multimodes.append(make_option("-e", "--extra", action="store_true",
+ default=False,
+ help="Show extra entries from HOST"))
+ multimodes.append(make_option("-m", "--modified", action="store_true",
+ default=False,
+ help="Show modified entries from HOST"))
+ multimodes.append(make_option("-s", "--show", action="store_true",
+ default=False,
+ help="Equivalent to --bad --extra --modified"))
+ singlemodes.append(make_option("-t", "--total", action="store_true",
+ default=False,
+ help="Show total number of managed and good "
+ "entries from HOST"))
+ singlemodes.append(make_option("-x", "--expire", action="store_true",
+ default=False,
+ help="Toggle expired/unexpired state of "
+ "HOST"))
+ hostmodes = \
+ OptionGroup(parser, "Single-Host Modes",
+ "The following mode flags require a single HOST argument")
+ hostmodes.add_options(multimodes)
+ hostmodes.add_options(singlemodes)
+ parser.add_option_group(hostmodes)
-args = sys.argv[1:]
-try:
- opts, pargs = getopt.getopt(args, 'ab:cde:hm:s:t:x:',
- ['stale',
- 'sort=',
- 'fields=',
- 'badentry=',
- 'modifiedentry=',
- 'extraentry='])
-except getopt.GetoptError:
- msg = sys.exc_info()[1]
- print(msg)
- print(usage)
- sys.exit(2)
+ # all host modes
+ allhostmodes = OptionGroup(parser, "Host Selection Modes",
+ "The following mode flags require no arguments")
+ allhostmodes.add_option("-a", "--all", action="store_true", default=False,
+ help="Show all hosts, including expired hosts")
+ allhostmodes.add_option("-c", "--clean", action="store_true", default=False,
+ help="Show only clean hosts")
+ allhostmodes.add_option("-d", "--dirty", action="store_true", default=False,
+ help="Show only dirty hosts")
+ allhostmodes.add_option("--stale", action="store_true", default=False,
+ help="Show hosts that haven't run in the last 24 "
+ "hours")
+ parser.add_option_group(allhostmodes)
+
+ # entry modes
+ entrymodes = \
+ OptionGroup(parser, "Entry Modes",
+ "The following mode flags require either any number of "
+ "TYPE:NAME arguments describing entries, or the --file "
+ "option")
+ entrymodes.add_option("--badentry", action="store_true", default=False,
+ help="Show hosts that have bad entries that match "
+ "the argument")
+ entrymodes.add_option("--modifiedentry", action="store_true", default=False,
+ help="Show hosts that have modified entries that "
+ "match the argument")
+ entrymodes.add_option("--extraentry", action="store_true", default=False,
+ help="Show hosts that have extra entries that match "
+ "the argument")
+ entrymodes.add_option("--entrystatus", action="store_true", default=False,
+ help="Show the status of the named entry on all "
+ "hosts. Only supports a single entry.")
+ parser.add_option_group(entrymodes)
+
+ # entry options
+ entryopts = OptionGroup(parser, "Entry Options",
+ "Options that can be used with entry modes")
+ entryopts.add_option("--fields", metavar="FIELD,FIELD,...",
+ help="Only display the listed fields",
+ default='name,time,state')
+ entryopts.add_option("--file", metavar="FILE",
+ help="Read TYPE:NAME pairs from the specified file "
+ "instead of the command line")
+ parser.add_option_group(entryopts)
-for option in opts:
- if len(option) > 0:
- if option[0] == '--fields':
- fields = option[1]
- if option[0] == '--sort':
- sort = option[1]
- if option[0] == '--badentry':
- badentry = option[1]
- if option[0] == '--modifiedentry':
- modifiedentry = option[1]
- if option[0] == '--extraentry':
- extraentry = option[1]
- if option[0] == '-x':
- expire = option[1]
- if option[0] == '-s' or \
- option[0] == '-t' or \
- option[0] == '-b' or \
- option[0] == '-m' or \
- option[0] == '-e':
- singlehost = option[1]
+ options, args = parser.parse_args()
-if expire != "":
- for c_inst in c_list:
- if expire == c_inst.name:
- if c_inst.expiration == None:
- c_inst.expiration = datetime.datetime.now()
+ # make sure we've specified exactly one mode
+ mode_family = None
+ mode = None
+ for opt in allhostmodes.option_list + entrymodes.option_list + \
+ singlemodes:
+ if getattr(options, opt.dest):
+ if mode is not None:
+ parser.error("Only one mode can be specified; found %s and %s" %
+ (mode.get_opt_string(), opt.get_opt_string()))
+ mode = opt
+ mode_family = parser.get_option_group(opt.get_opt_string())
+
+ # you can specify more than one of --bad, --extra, --modified, --show, so
+ # consider single-host options separately
+ if not mode_family:
+ for opt in multimodes:
+ if getattr(options, opt.dest):
+ mode_family = parser.get_option_group(opt.get_opt_string())
+ break
+
+ if not mode_family:
+ parser.error("You must specify a mode")
+
+ if mode_family == hostmodes:
+ try:
+ cname = args.pop()
+ client = Client.objects.select_related().get(name=cname)
+ except IndexError:
+ parser.error("%s require a single HOST argument" % hostmodes.title)
+ except Client.DoesNotExist:
+ print("No such host: %s" % cname)
+ return 2
+
+ if options.expire:
+ if client.expiration == None:
+ client.expiration = datetime.datetime.now()
print("Host expired.")
else:
- c_inst.expiration = None
+ client.expiration = None
print("Host un-expired.")
- c_inst.save()
+            client.save()
+        elif options.total:
+            managed = client.current_interaction.totalcount
+            good = client.current_interaction.goodcount
+            print("Total managed entries: %d (good: %d)" % (managed, good))
+        else:
+            if options.bad or options.show:
+                print_entries(client.current_interaction, "bad")
-elif '-h' in args:
- print(usage)
-elif singlehost != "":
- for c_inst in c_list:
- if singlehost == c_inst.name:
- if '-t' in args:
- managed = c_inst.current_interaction.totalcount
- good = c_inst.current_interaction.goodcount
- print("Total managed entries: %d (good: %d)" % (managed, good))
- baditems = c_inst.current_interaction.bad()
- if len(baditems) > 0 and ('-b' in args or '-s' in args):
- print("Bad Entries:")
- max_name = -1
- for item in baditems:
- if len(item.entry.name) > max_name:
- max_name = len(item.entry.name)
- for item in baditems:
- print_entry(item, max_name)
- modifieditems = c_inst.current_interaction.modified()
- if len(modifieditems) > 0 and ('-m' in args or '-s' in args):
- print "Modified Entries:"
- max_name = -1
- for item in modifieditems:
- if len(item.entry.name) > max_name:
- max_name = len(item.entry.name)
- for item in modifieditems:
- print_entry(item, max_name)
- extraitems = c_inst.current_interaction.extra()
- if len(extraitems) > 0 and ('-e' in args or '-s' in args):
- print("Extra Entries:")
- max_name = -1
- for item in extraitems:
- if len(item.entry.name) > max_name:
- max_name = len(item.entry.name)
- for item in extraitems:
- print_entry(item, max_name)
-
+            if options.modified or options.show:
+                print_entries(client.current_interaction, "modified")
-else:
- if fields == "":
- fields = ['name', 'time', 'state']
+            if options.extra or options.show:
+                print_entries(client.current_interaction, "extra")
else:
- fields = fields.split(',')
-
- if sort != "":
- sort = sort.split(',')
+ clients = Client.objects.exclude(current_interaction__isnull=True)
+ result = list()
+ edata = dict()
+ fields = options.fields.split(',')
- if badentry != "":
- badentry = badentry.split(',')
+ if mode_family == allhostmodes:
+ if args:
+ print("%s do not take any arguments, ignoring" %
+ allhostmodes.title)
- if modifiedentry != "":
- modifiedentry = modifiedentry.split(',')
+ for client in clients:
+ interaction = client.current_interaction
+ if (options.all or
+ (options.stale and interaction.isstale()) or
+ (options.clean and interaction.isclean()) or
+ (options.dirty and not interaction.isclean())):
+ result.append(client)
+ else:
+ # entry query modes
+ if options.file:
+ try:
+ entries = [l.strip().split(":")
+ for l in open(options.file)]
+            except IOError:
+                err = sys.exc_info()[1]
+                print("Cannot read entries from %s: %s" % (options.file,
+                                                            err))
+ return 2
+ elif args:
+ entries = [a.split(":") for a in args]
+ else:
+ parser.error("%s require either a list of entries on the "
+ "command line or the --file options" %
+ mode_family.title)
+
+ if options.badentry:
+ result = hosts_by_entry_type(clients, "bad", entries)
+ elif options.modifiedentry:
+ result = hosts_by_entry_type(clients, "modified", entries)
+ elif options.extraentry:
+ result = hosts_by_entry_type(clients, "extra", entries)
+ elif options.entrystatus:
+ if 'state' in fields:
+ fields.remove('state')
+ fields.append("entry state")
+ try:
+ entry_obj = Entries.objects.get(
+ kind=entries[0][0],
+ name=entries[0][1])
+ except Entries.DoesNotExist:
+ print("No entry %s found" % ":".join(entries[0]))
+ return 2
- if extraentry != "":
- extraentry = extraentry.split(',')
-
- # stale hosts
- if '--stale' in args:
- for c_inst in c_list:
- if c_inst.current_interaction.isstale():
- result.append(c_inst)
- # clean hosts
- elif '-c' in args:
- for c_inst in c_list:
- if c_inst.current_interaction.isclean():
- result.append(c_inst)
- # dirty hosts
- elif '-d' in args:
- for c_inst in c_list:
- if not c_inst.current_interaction.isclean():
- result.append(c_inst)
+ for client in clients:
+ try:
+ entry = \
+ Entries_interactions.objects.select_related().get(
+ interaction=client.current_interaction,
+ entry=entry_obj)
+ edata[client] = \
+ {"entry state":dict(TYPE_CHOICES)[entry.type],
+ "reason":entry.reason}
+ result.append(client)
+ except Entries_interactions.DoesNotExist:
+ pass
- elif badentry != "":
- if len(badentry) == 1:
- fileread = fileinput.input(badentry[0])
- try:
- for line in fileread:
- badentry = line.strip().split(',')
- for c_inst in c_list:
- baditems = c_inst.current_interaction.bad()
- for item in baditems:
- if item.entry.name == badentry[1] and item.entry.kind == badentry[0]:
- result.append(c_inst)
- if c_inst in entrydict:
- entrydict.get(c_inst).append(badentry[1])
- else:
- entrydict[c_inst] = [badentry[1]]
- break
- except IOError:
- e = sys.exc_info()[1]
- print("Cannot read %s: %s" % (e.filename, e.strerror))
- else:
- for c_inst in c_list:
- baditems = c_inst.current_interaction.bad()
- for item in baditems:
- if item.entry.name == badentry[1] and item.entry.kind == badentry[0]:
- result.append(c_inst)
- break
- elif modifiedentry != "":
- if len(modifiedentry) == 1:
- fileread = fileinput.input(modifiedentry[0])
- try:
- for line in fileread:
- modifiedentry = line.strip().split(',')
- for c_inst in c_list:
- modifieditems = c_inst.current_interaction.modified()
- for item in modifieditems:
- if item.entry.name == modifiedentry[1] and item.entry.kind == modifiedentry[0]:
- result.append(c_inst)
- if c_inst in entrydict:
- entrydict.get(c_inst).append(modifiedentry[1])
- else:
- entrydict[c_inst] = [modifiedentry[1]]
- break
- except IOError:
- e = sys.exc_info()[1]
- print("Cannot read %s: %s" % (e.filename, e.strerror))
- else:
- for c_inst in c_list:
- modifieditems = c_inst.current_interaction.modified()
- for item in modifieditems:
- if item.entry.name == modifiedentry[1] and item.entry.kind == modifiedentry[0]:
- result.append(c_inst)
- break
- elif extraentry != "":
- if len(extraentry) == 1:
- fileread = fileinput.input(extraentry[0])
- try:
- for line in fileread:
- extraentry = line.strip().split(',')
- for c_inst in c_list:
- extraitems = c_inst.current_interaction.extra()
- for item in extraitems:
- if item.entry.name == extraentry[1] and item.entry.kind == extraentry[0]:
- result.append(c_inst)
- if c_inst in entrydict:
- entrydict.get(c_inst).append(extraentry[1])
- else:
- entrydict[c_inst] = [extraentry[1]]
- break
- except IOError:
- e = sys.exc_info()[1]
- print("Cannot read %s: %s" % (e.filename, e.strerror))
- else:
- for c_inst in c_list:
- extraitems = c_inst.current_interaction.extra()
- for item in extraitems:
- if item.entry.name == extraentry[1] and item.entry.kind == extraentry[0]:
- result.append(c_inst)
- break
- else:
- for c_inst in c_list:
- result.append(c_inst)
- max_name = -1
- if 'name' in fields:
- for c_inst in result:
- if len(c_inst.name) > max_name:
- max_name = len(c_inst.name)
+ if 'name' not in fields:
+ fields.insert(0, "name")
+ max_name = max(len(c.name) for c in result)
+ ffmt = []
+ for field in fields:
+ if field == "name":
+ ffmt.append("%%-%ds" % max_name)
+ elif field == "time":
+ ffmt.append("%-19s")
+ else:
+ ffmt.append("%%-%ds" % len(field))
+ fmt = " ".join(ffmt)
+ print(fmt % tuple(f.title() for f in fields))
+ for client in result:
+ if not client.expiration:
+ print_fields(fields, client, fmt,
+ extra=edata.get(client, None))
- if sort != "":
- result.sort(lambda x, y: crit_compare(sort, x, y))
-
- if fields != "":
- for c_inst in result:
- if '-a' in args or c_inst.expiration == None:
- print_fields(fields, c_inst, max_name, entrydict)
+if __name__ == "__main__":
+ sys.exit(main())
diff --git a/src/sbin/bcfg2-server b/src/sbin/bcfg2-server
index 757172464..32c97b63d 100755
--- a/src/sbin/bcfg2-server
+++ b/src/sbin/bcfg2-server
@@ -2,74 +2,46 @@
"""The XML-RPC Bcfg2 server."""
-import logging
-import os.path
+import os
import sys
-
+import logging
import Bcfg2.Logger
import Bcfg2.Options
-import Bcfg2.Component
-import Bcfg2.Server.Plugins.Metadata
from Bcfg2.Server.Core import CoreInitError
logger = logging.getLogger('bcfg2-server')
if __name__ == '__main__':
- OPTINFO = {
- 'configfile': Bcfg2.Options.CFILE,
- 'daemon' : Bcfg2.Options.DAEMON,
- 'debug' : Bcfg2.Options.DEBUG,
- 'help' : Bcfg2.Options.HELP,
- 'verbose' : Bcfg2.Options.VERBOSE,
- 'to_file' : Bcfg2.Options.LOGGING_FILE_PATH,
- 'repo' : Bcfg2.Options.SERVER_REPOSITORY,
- 'plugins' : Bcfg2.Options.SERVER_PLUGINS,
- 'password' : Bcfg2.Options.SERVER_PASSWORD,
- 'fm' : Bcfg2.Options.SERVER_FILEMONITOR,
- 'key' : Bcfg2.Options.SERVER_KEY,
- 'cert' : Bcfg2.Options.SERVER_CERT,
- 'ca' : Bcfg2.Options.SERVER_CA,
- 'listen_all': Bcfg2.Options.SERVER_LISTEN_ALL,
- 'location' : Bcfg2.Options.SERVER_LOCATION,
- 'passwd' : Bcfg2.Options.SERVER_PASSWORD,
- 'static' : Bcfg2.Options.SERVER_STATIC,
- 'encoding' : Bcfg2.Options.ENCODING,
- 'filelog' : Bcfg2.Options.LOGGING_FILE_PATH,
- 'protocol' : Bcfg2.Options.SERVER_PROTOCOL,
- }
-
- setup = Bcfg2.Options.OptionParser(OPTINFO)
+ optinfo = dict()
+ optinfo.update(Bcfg2.Options.CLI_COMMON_OPTIONS)
+ optinfo.update(Bcfg2.Options.SERVER_COMMON_OPTIONS)
+ optinfo.update(Bcfg2.Options.DAEMON_COMMON_OPTIONS)
+ setup = Bcfg2.Options.OptionParser(optinfo)
setup.parse(sys.argv[1:])
+ # check whether the specified bcfg2.conf exists
+ if not os.path.exists(setup['configfile']):
+ print("Could not read %s" % setup['configfile'])
+ sys.exit(1)
+
+ if setup['backend'] not in ['best', 'cherrypy', 'builtin']:
+ print("Unknown server backend %s, using 'best'" % setup['backend'])
+ setup['backend'] = 'best'
+ if setup['backend'] == 'cherrypy':
+ try:
+ from Bcfg2.Server.CherryPyCore import Core
+ except ImportError:
+ err = sys.exc_info()[1]
+ print("Unable to import CherryPy server core: %s" % err)
+ raise
+ elif setup['backend'] == 'builtin' or setup['backend'] == 'best':
+ from Bcfg2.Server.BuiltinCore import Core
+
try:
- # check whether the specified bcfg2.conf exists
- if not os.path.exists(setup['configfile']):
- print("Could not read %s" % setup['configfile'])
- sys.exit(1)
- Bcfg2.Component.run_component(Bcfg2.Server.Core.Core,
- listen_all=setup['listen_all'],
- location=setup['location'],
- daemon=setup['daemon'],
- pidfile_name=setup['daemon'],
- protocol=setup['protocol'],
- to_file=setup['to_file'],
- cfile=setup['configfile'],
- register=False,
- cls_kwargs={'repo':setup['repo'],
- 'plugins':setup['plugins'],
- 'password':setup['password'],
- 'encoding':setup['encoding'],
- 'ca':setup['ca'],
- 'filemonitor':setup['fm'],
- 'start_fam_thread':True,
- 'setup':setup},
- keyfile=setup['key'],
- certfile=setup['cert'],
- ca=setup['ca']
- )
+ core = Core(setup, start_fam_thread=True)
+ core.run()
except CoreInitError:
msg = sys.exc_info()[1]
logger.error(msg)
- logger.error("exiting")
sys.exit(1)
except KeyboardInterrupt:
sys.exit(1)
diff --git a/src/sbin/bcfg2-test b/src/sbin/bcfg2-test
index e3cfd27cc..8323eeb22 100755
--- a/src/sbin/bcfg2-test
+++ b/src/sbin/bcfg2-test
@@ -61,18 +61,11 @@ class ClientTest(TestCase):
id = __str__
def main():
- optinfo = {
- 'configfile': Bcfg2.Options.CFILE,
- 'help': Bcfg2.Options.HELP,
- 'encoding': Bcfg2.Options.ENCODING,
- 'repo': Bcfg2.Options.SERVER_REPOSITORY,
- 'plugins': Bcfg2.Options.SERVER_PLUGINS,
- 'password': Bcfg2.Options.SERVER_PASSWORD,
- 'verbose': Bcfg2.Options.VERBOSE,
- 'noseopts': Bcfg2.Options.TEST_NOSEOPTS,
- 'ignore': Bcfg2.Options.TEST_IGNORE,
- 'validate': Bcfg2.Options.CFG_VALIDATION,
- }
+ optinfo = dict(noseopts=Bcfg2.Options.TEST_NOSEOPTS,
+ test_ignore=Bcfg2.Options.TEST_IGNORE,
+ validate=Bcfg2.Options.CFG_VALIDATION)
+ optinfo.update(Bcfg2.Options.CLI_COMMON_OPTIONS)
+ optinfo.update(Bcfg2.Options.SERVER_COMMON_OPTIONS)
setup = Bcfg2.Options.OptionParser(optinfo)
setup.hm = \
"bcfg2-test [options] [client] [client] [...]\nOptions:\n %s" % \
@@ -80,19 +73,12 @@ def main():
setup.parse(sys.argv[1:])
if setup['verbose']:
- Bcfg2.Logger.setup_logging("bcfg2-test", to_syslog=False)
+ Bcfg2.Logger.setup_logging("bcfg2-test", to_syslog=setup['syslog'])
- core = Bcfg2.Server.Core.Core(
- setup['repo'],
- setup['plugins'],
- setup['password'],
- setup['encoding'],
- filemonitor='pseudo',
- setup=setup
- )
+ core = Bcfg2.Server.Core.BaseCore(setup)
ignore = dict()
- for entry in setup['ignore']:
+ for entry in setup['test_ignore']:
tag, name = entry.split(":")
try:
ignore[tag].append(name)
diff --git a/src/sbin/bcfg2-yum-helper b/src/sbin/bcfg2-yum-helper
index 2da7c6336..a0698cc90 100755
--- a/src/sbin/bcfg2-yum-helper
+++ b/src/sbin/bcfg2-yum-helper
@@ -9,8 +9,7 @@ import os
import sys
import yum
import logging
-import Bcfg2.Logger
-from optparse import OptionParser, OptionError
+from optparse import OptionParser
try:
import json
@@ -37,6 +36,24 @@ def get_logger(verbose=0):
LOGGER.addHandler(syslog)
return LOGGER
+def pkg_to_tuple(package):
+ """ json doesn't distinguish between tuples and lists, but yum
+ does, so we convert a package in list format to one in tuple
+ format """
+ if isinstance(package, list):
+ return tuple(package)
+ else:
+ return package
+
+def pkgtup_to_string(package):
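+    """ Render a yum package tuple (name, arch, epoch, version,
+    release) as a human-readable name-epoch:version-release.arch
+    string. """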
+ rv = [package[0], "-"]
+ if package[2]:
+ rv.extend([package[2], ':'])
+ rv.extend([package[3], '-', package[4]])
+ if package[1]:
+ rv.extend(['.', package[1]])
+ return ''.join(str(e) for e in rv)
+
class DepSolver(object):
def __init__(self, cfgfile, verbose=1):
@@ -64,27 +81,28 @@ class DepSolver(object):
def is_package(self, package):
if isinstance(package, tuple):
if package[1] is None and package[2] == (None, None, None):
- package = package[0]
- else:
- return None
-
- return bool(self.get_package_object(package, silent=True))
+ pkgtup = (package[0], None, None, None, None)
+ elif len(package) == 5:
+ pkgtup = package
+ else:
+ pkgtup = (package, None, None, None, None)
+ return bool(self.get_package_object(pkgtup, silent=True))
def is_virtual_package(self, package):
return bool(self.get_provides(package, silent=True))
- def get_package_object(self, package, silent=False):
+ def get_package_object(self, pkgtup, silent=False):
try:
- matches = self.yumbase.pkgSack.returnNewestByName(name=package)
+ matches = yum.packageSack.packagesNewestByName(self.yumbase.pkgSack.searchPkgTuple(pkgtup))
except yum.Errors.PackageSackError:
if not silent:
self.logger.warning("Package '%s' not found" %
- self.get_package_name(package))
+ self.get_package_name(pkgtup))
matches = []
except yum.Errors.RepoError:
err = sys.exc_info()[1]
self.logger.error("Temporary failure loading metadata for %s: %s" %
- (self.get_package_name(package), err))
+ (self.get_package_name(pkgtup), err))
matches = []
pkgs = self._filter_arch(matches)
@@ -100,7 +118,7 @@ class DepSolver(object):
deps = set(pkg.requires)
# filter out things the package itself provides
deps.difference_update([dep for dep in deps
- if pkg.checkPrco('provides', dep)])
+ if pkg.checkPrco('provides', dep)])
else:
self.logger.error("No package available: %s" %
self.get_package_name(package))
@@ -120,7 +138,7 @@ class DepSolver(object):
return []
if prov and not all:
- prov = self._filter_provides(required, prov)
+ prov = self._filter_provides(prov)
elif not prov and not silent:
self.logger.error("No package provides %s" %
self.get_package_name(required))
@@ -134,7 +152,7 @@ class DepSolver(object):
if self.yumbase.comps.has_group(group):
group = self.yumbase.comps.return_group(group)
else:
- self.logger.warning("%s is not a valid group" % group)
+ self.logger.error("%s is not a valid group" % group)
return []
except yum.Errors.GroupsError:
err = sys.exc_info()[1]
@@ -155,7 +173,7 @@ class DepSolver(object):
self.logger.warning("Unknown group package type '%s'" % ptype)
return []
- def _filter_provides(self, package, providers):
+ def _filter_provides(self, providers):
providers = [pkg for pkg in self._filter_arch(providers)]
if len(providers) > 1:
# go through each provider and make sure it's the newest
@@ -174,7 +192,7 @@ class DepSolver(object):
# provider of perl(lib).
rv = []
for pkg in providers:
- found = self.get_package_object(pkg.name)
+ found = self.get_package_object(pkg.pkgtup)
if found == pkg or found.pkgtup == pkg.pkgtup:
rv.append(pkg)
else:
@@ -182,7 +200,7 @@ class DepSolver(object):
(pkg, found))
else:
rv = providers
- return [p.name for p in rv]
+ return rv
def _filter_arch(self, packages):
matching = []
@@ -204,115 +222,38 @@ class DepSolver(object):
""" get the name of a package or virtual package from the
internal representation used by this Collection class """
if isinstance(package, tuple):
- return yum.misc.prco_tuple_to_string(package)
+ if len(package) == 3:
+ return yum.misc.prco_tuple_to_string(package)
+ else:
+ return pkgtup_to_string(package)
else:
return str(package)
def complete(self, packagelist):
packages = set()
- pkgs = set(packagelist)
- requires = set()
- satisfied = set()
unknown = set()
- final_pass = False
-
- while requires or pkgs:
- # infinite loop protection
- start_reqs = len(requires)
-
- while pkgs:
- package = pkgs.pop()
- if package in packages:
- continue
-
- if not self.is_package(package):
- # try this package out as a requirement
- self.logger.debug("Adding requirement %s" % package)
- requires.add((package, None, (None, None, None)))
- continue
-
- packages.add(package)
- reqs = set(self.get_deps(package)).difference(satisfied)
- if reqs:
- self.logger.debug("Adding requirements for %s: %s" %
- (package,
- ",".join([self.get_package_name(r)
- for r in reqs])))
- requires.update(reqs)
-
- reqs_satisfied = set()
- for req in requires:
- if req in satisfied:
- reqs_satisfied.add(req)
- continue
-
- if req[1] is None and self.is_package(req[0]):
- if req[0] not in packages:
- pkgs.add(req[0])
- reqs_satisfied.add(req)
- continue
-
- self.logger.debug("Handling requirement '%s'" %
- self.get_package_name(req))
- providers = list(set(self.get_provides(req)))
- if len(providers) > 1:
- # hopefully one of the providing packages is already
- # included
- best = [p for p in providers if p in packages]
- if best:
- providers = best
- else:
- # pick a provider whose name matches the requirement
- best = [p for p in providers if p == req[0]]
- if len(best) == 1:
- providers = best
- elif not final_pass:
- self.logger.debug("%s has multiple providers: %s" %
- (self.get_package_name(req),
- providers))
- self.logger.debug("No provider is obviously the "
- "best; deferring")
- providers = None
- else:
- # found no "best" package, but it's the
- # final pass, so include them all
- self.logger.debug("Found multiple providers for %s,"
- "including all" %
- self.get_package_name(req))
-
- if providers:
- self.logger.debug("Requirement '%s' satisfied by %s" %
- (self.get_package_name(req),
- ",".join([self.get_package_name(p)
- for p in providers])))
- newpkgs = set(providers).difference(packages)
- if newpkgs:
- for package in newpkgs:
- if self.is_package(package):
- pkgs.add(package)
- else:
- unknown.add(package)
- reqs_satisfied.add(req)
- elif providers is not None:
- # nothing provided this requirement at all
- self.logger.debug("Nothing provides %s" %
- self.get_package_name(req))
- unknown.add(req)
- reqs_satisfied.add(req)
- # else, defer
- requires.difference_update(reqs_satisfied)
-
- # infinite loop protection
- if len(requires) == start_reqs and len(pkgs) == 0:
- final_pass = True
-
- if final_pass and requires:
- unknown.update(requires)
- requires = set()
-
- unknown = [self.get_package_name(p) for p in unknown]
+ for pkg in packagelist:
+ if isinstance(pkg, tuple):
+ pkgtup = pkg
+ else:
+ pkgtup = (pkg, None, None, None, None)
+ po = self.get_package_object(pkgtup)
+ if not po:
+ self.logger.debug("Unknown package %s" %
+ self.get_package_name(pkg))
+ unknown.add(pkg)
+ else:
+ if self.yumbase.tsInfo.exists(pkgtup=po.pkgtup):
+ self.logger.debug("%s added to transaction multiple times" %
+ po)
+ else:
+ self.logger.debug("Adding %s to transaction" % po)
+ self.yumbase.tsInfo.addInstall(po)
+ self.yumbase.resolveDeps()
- return packages, unknown
+ for txmbr in self.yumbase.tsInfo:
+ packages.add(txmbr.pkgtup)
+ return list(packages), list(unknown)
def clean_cache(self):
for mdtype in ["Headers", "Packages", "Sqlite", "Metadata",
@@ -345,29 +286,41 @@ def main():
depsolver = DepSolver(options.config, options.verbose)
if cmd == "clean":
depsolver.clean_cache()
- print json.dumps(True)
+ print(json.dumps(True))
elif cmd == "complete":
data = json.loads(sys.stdin.read())
depsolver.groups = data['groups']
- (packages, unknown) = depsolver.complete(data['packages'])
- print json.dumps(dict(packages=list(packages),
- unknown=list(unknown)))
+ (packages, unknown) = depsolver.complete([pkg_to_tuple(p)
+ for p in data['packages']])
+ print(json.dumps(dict(packages=list(packages),
+ unknown=list(unknown))))
elif cmd == "is_virtual_package":
- package = json.loads(sys.stdin.read())
- print json.dumps(bool(depsolver.get_provides(package, silent=True)))
+ package = pkg_to_tuple(json.loads(sys.stdin.read()))
+ print(json.dumps(bool(depsolver.get_provides(package, silent=True))))
elif cmd == "get_deps" or cmd == "get_provides":
- package = json.loads(sys.stdin.read())
- print json.dumps(list(getattr(depsolver, cmd)(package)))
+ package = pkg_to_tuple(json.loads(sys.stdin.read()))
+ print(json.dumps([p.name for p in getattr(depsolver, cmd)(package)]))
elif cmd == "get_group":
data = json.loads(sys.stdin.read())
if "type" in data:
packages = depsolver.get_group(data['group'], ptype=data['type'])
else:
packages = depsolver.get_group(data['group'])
- print json.dumps(list(packages))
+ print(json.dumps(list(packages)))
+ elif cmd == "get_groups":
+ data = json.loads(sys.stdin.read())
+ rv = dict()
+ for gdata in data:
+ if "type" in gdata:
+ packages = depsolver.get_group(gdata['group'],
+ ptype=gdata['type'])
+ else:
+ packages = depsolver.get_group(gdata['group'])
+ rv[gdata['group']] = list(packages)
+ print(json.dumps(rv))
elif cmd == "is_package":
- package = json.loads(sys.stdin.read())
- print json.dumps(getattr(depsolver, cmd)(package))
+ package = pkg_to_tuple(json.loads(sys.stdin.read()))
+ print(json.dumps(getattr(depsolver, cmd)(package)))
if __name__ == '__main__':
diff --git a/testsuite/Testlib/TestClient/TestTools/TestPOSIX/TestDevice.py b/testsuite/Testlib/TestClient/TestTools/TestPOSIX/TestDevice.py
new file mode 100644
index 000000000..fb80991d7
--- /dev/null
+++ b/testsuite/Testlib/TestClient/TestTools/TestPOSIX/TestDevice.py
@@ -0,0 +1,144 @@
+import os
+import sys
+import copy
+import lxml.etree
+from mock import Mock, MagicMock, patch
+from Bcfg2.Client.Tools.POSIX.Device import *
+
+# add all parent testsuite directories to sys.path to allow (most)
+# relative imports in python 2.4
+path = os.path.dirname(__file__)
+while path != "/":
+ if os.path.basename(path).lower().startswith("test"):
+ sys.path.append(path)
+ if os.path.basename(path) == "testsuite":
+ break
+ path = os.path.dirname(path)
+from Test__init import get_posix_object
+from Testbase import TestPOSIXTool
+from common import XI_NAMESPACE, XI, inPy3k, call, builtins, u, can_skip, \
+ skip, skipIf, skipUnless, Bcfg2TestCase, DBModelTestCase, syncdb, \
+ patchIf, datastore
+
+class TestPOSIXDevice(TestPOSIXTool):
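+    """ Tests for the POSIX tool that handles device Path entries """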
+ test_obj = POSIXDevice
+
+ def test_fully_specified(self):
+ ptool = self.get_obj()
+ orig_entry = lxml.etree.Element("Path", name="/test", type="device",
+ dev_type="fifo")
+ self.assertTrue(ptool.fully_specified(orig_entry))
+ for dtype in ["block", "char"]:
+ for attr in ["major", "minor"]:
+ entry = copy.deepcopy(orig_entry)
+ entry.set("dev_type", dtype)
+ entry.set(attr, "0")
+ self.assertFalse(ptool.fully_specified(entry))
+ entry = copy.deepcopy(orig_entry)
+ entry.set("dev_type", dtype)
+ entry.set("major", "0")
+ entry.set("minor", "0")
+ self.assertTrue(ptool.fully_specified(entry))
+
+ @patch("os.major")
+ @patch("os.minor")
+ @patch("Bcfg2.Client.Tools.POSIX.base.POSIXTool._exists")
+ @patch("Bcfg2.Client.Tools.POSIX.base.POSIXTool.verify")
+ def test_verify(self, mock_verify, mock_exists, mock_minor, mock_major):
+ entry = lxml.etree.Element("Path", name="/test", type="device",
+ perms='0644', owner='root', group='root',
+ dev_type="block", major="0", minor="10")
+ ptool = self.get_obj()
+
+ def reset():
+ mock_exists.reset_mock()
+ mock_verify.reset_mock()
+ mock_minor.reset_mock()
+ mock_major.reset_mock()
+
+ mock_exists.return_value = False
+ self.assertFalse(ptool.verify(entry, []))
+ mock_exists.assert_called_with(entry)
+
+ reset()
+ mock_exists.return_value = MagicMock()
+ mock_major.return_value = 0
+ mock_minor.return_value = 10
+ mock_verify.return_value = True
+ self.assertTrue(ptool.verify(entry, []))
+ mock_verify.assert_called_with(ptool, entry, [])
+ mock_exists.assert_called_with(entry)
+ mock_major.assert_called_with(mock_exists.return_value.st_rdev)
+ mock_minor.assert_called_with(mock_exists.return_value.st_rdev)
+
+ reset()
+ mock_exists.return_value = MagicMock()
+ mock_major.return_value = 0
+ mock_minor.return_value = 10
+ mock_verify.return_value = False
+ self.assertFalse(ptool.verify(entry, []))
+ mock_verify.assert_called_with(ptool, entry, [])
+ mock_exists.assert_called_with(entry)
+ mock_major.assert_called_with(mock_exists.return_value.st_rdev)
+ mock_minor.assert_called_with(mock_exists.return_value.st_rdev)
+
+ reset()
+ mock_verify.return_value = True
+ entry = lxml.etree.Element("Path", name="/test", type="device",
+ perms='0644', owner='root', group='root',
+ dev_type="fifo")
+ self.assertTrue(ptool.verify(entry, []))
+ mock_exists.assert_called_with(entry)
+ mock_verify.assert_called_with(ptool, entry, [])
+ self.assertFalse(mock_major.called)
+ self.assertFalse(mock_minor.called)
+
+ @patch("os.makedev")
+ @patch("os.mknod")
+ @patch("Bcfg2.Client.Tools.POSIX.Device.%s._exists" % test_obj.__name__)
+ @patch("Bcfg2.Client.Tools.POSIX.base.POSIXTool.install")
+ def test_install(self, mock_install, mock_exists, mock_mknod, mock_makedev):
+ entry = lxml.etree.Element("Path", name="/test", type="device",
+ perms='0644', owner='root', group='root',
+ dev_type="block", major="0", minor="10")
+ ptool = self.get_obj()
+
+ mock_exists.return_value = False
+ mock_makedev.return_value = Mock()
+ mock_install.return_value = True
+ self.assertTrue(ptool.install(entry))
+ mock_exists.assert_called_with(entry, remove=True)
+ mock_makedev.assert_called_with(0, 10)
+ mock_mknod.assert_called_with(entry.get("name"), # 0o644
+ device_map[entry.get("dev_type")] | 420,
+ mock_makedev.return_value)
+ mock_install.assert_called_with(ptool, entry)
+
+ mock_makedev.reset_mock()
+ mock_mknod.reset_mock()
+ mock_exists.reset_mock()
+ mock_install.reset_mock()
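+        # a failure from os.makedev should cause install() to fail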
+ mock_makedev.side_effect = OSError
+ self.assertFalse(ptool.install(entry))
+
+ mock_makedev.reset_mock()
+ mock_mknod.reset_mock()
+ mock_exists.reset_mock()
+ mock_install.reset_mock()
+ mock_mknod.side_effect = OSError
+ self.assertFalse(ptool.install(entry))
+
+ mock_makedev.reset_mock()
+ mock_mknod.reset_mock()
+ mock_exists.reset_mock()
+ mock_install.reset_mock()
+ mock_mknod.side_effect = None
+ entry = lxml.etree.Element("Path", name="/test", type="device",
+ perms='0644', owner='root', group='root',
+ dev_type="fifo")
+
+ self.assertTrue(ptool.install(entry))
+ mock_exists.assert_called_with(entry, remove=True)
+ mock_mknod.assert_called_with(entry.get("name"), # 0o644
+ device_map[entry.get("dev_type")] | 420)
+ mock_install.assert_called_with(ptool, entry)
diff --git a/testsuite/Testlib/TestClient/TestTools/TestPOSIX/TestDirectory.py b/testsuite/Testlib/TestClient/TestTools/TestPOSIX/TestDirectory.py
new file mode 100644
index 000000000..e01bd7453
--- /dev/null
+++ b/testsuite/Testlib/TestClient/TestTools/TestPOSIX/TestDirectory.py
@@ -0,0 +1,159 @@
+import os
+import sys
+import stat
+import copy
+import lxml.etree
+from mock import Mock, MagicMock, patch
+from Bcfg2.Client.Tools.POSIX.Directory import *
+
+# add all parent testsuite directories to sys.path to allow (most)
+# relative imports in python 2.4
+path = os.path.dirname(__file__)
+while path != "/":
+ if os.path.basename(path).lower().startswith("test"):
+ sys.path.append(path)
+ if os.path.basename(path) == "testsuite":
+ break
+ path = os.path.dirname(path)
+from Test__init import get_posix_object
+from Testbase import TestPOSIXTool
+from common import XI_NAMESPACE, XI, inPy3k, call, builtins, u, can_skip, \
+ skip, skipIf, skipUnless, Bcfg2TestCase, DBModelTestCase, syncdb, \
+ patchIf, datastore
+
+class TestPOSIXDirectory(TestPOSIXTool):
+ test_obj = POSIXDirectory
+
+ @patch("os.listdir")
+ @patch("Bcfg2.Client.Tools.POSIX.base.POSIXTool.verify")
+ @patch("Bcfg2.Client.Tools.POSIX.Directory.%s._exists" % test_obj.__name__)
+ def test_verify(self, mock_exists, mock_verify, mock_listdir):
+ entry = lxml.etree.Element("Path", name="/test", type="directory",
+ perms='0644', owner='root', group='root')
+
+ mock_exists.return_value = False
+ self.assertFalse(self.ptool.verify(entry, []))
+ mock_exists.assert_called_with(entry)
+
+ mock_exists.reset_mock()
+ exists_rv = MagicMock()
+ exists_rv.__getitem__.return_value = stat.S_IFREG | 420 # 0o644
+ mock_exists.return_value = exists_rv
+ self.assertFalse(self.ptool.verify(entry, []))
+ mock_exists.assert_called_with(entry)
+
+ mock_exists.reset_mock()
+ mock_verify.return_value = False
+ exists_rv.__getitem__.return_value = stat.S_IFDIR | 420 # 0o644
+ self.assertFalse(self.ptool.verify(entry, []))
+ mock_exists.assert_called_with(entry)
+ mock_verify.assert_called_with(self.ptool, entry, [])
+
+ mock_exists.reset_mock()
+ mock_verify.reset_mock()
+ mock_verify.return_value = True
+ self.assertTrue(self.ptool.verify(entry, []))
+ mock_exists.assert_called_with(entry)
+ mock_verify.assert_called_with(self.ptool, entry, [])
+
+ mock_exists.reset_mock()
+ mock_verify.reset_mock()
+ entry.set("prune", "true")
+ orig_entry = copy.deepcopy(entry)
+
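+        # with prune set, paths not in the modlist should be flagged as Prune entries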
+ entries = ["foo", "bar", "bar/baz"]
+ mock_listdir.return_value = entries
+ modlist = [os.path.join(entry.get("name"), entries[0])]
+ self.assertFalse(self.ptool.verify(entry, modlist))
+ mock_exists.assert_called_with(entry)
+ mock_verify.assert_called_with(self.ptool, entry, modlist)
+ mock_listdir.assert_called_with(entry.get("name"))
+ expected = [os.path.join(entry.get("name"), e)
+ for e in entries
+ if os.path.join(entry.get("name"), e) not in modlist]
+ actual = [e.get("path") for e in entry.findall("Prune")]
+ self.assertItemsEqual(expected, actual)
+
+ mock_verify.reset_mock()
+ mock_exists.reset_mock()
+ mock_listdir.reset_mock()
+ entry = copy.deepcopy(orig_entry)
+ modlist = [os.path.join(entry.get("name"), e)
+ for e in entries]
+ self.assertTrue(self.ptool.verify(entry, modlist))
+ mock_exists.assert_called_with(entry)
+ mock_verify.assert_called_with(self.ptool, entry, modlist)
+ mock_listdir.assert_called_with(entry.get("name"))
+ self.assertEqual(len(entry.findall("Prune")), 0)
+
+ @patch("os.unlink")
+ @patch("os.path.isdir")
+ @patch("shutil.rmtree")
+ @patch("Bcfg2.Client.Tools.POSIX.base.POSIXTool.install")
+ @patch("Bcfg2.Client.Tools.POSIX.Directory.%s._exists" % test_obj.__name__)
+ @patch("Bcfg2.Client.Tools.POSIX.Directory.%s._makedirs" %
+ test_obj.__name__)
+ def test_install(self, mock_makedirs, mock_exists, mock_install,
+ mock_rmtree, mock_isdir, mock_unlink):
+ entry = lxml.etree.Element("Path", name="/test/foo/bar",
+ type="directory", perms='0644',
+ owner='root', group='root')
+
+ def reset():
+ mock_exists.reset_mock()
+ mock_install.reset_mock()
+ mock_unlink.reset_mock()
+ mock_rmtree.reset_mock()
+            mock_makedirs.reset_mock()
+
+ mock_makedirs.return_value = True
+ mock_exists.return_value = False
+ mock_install.return_value = True
+ self.assertTrue(self.ptool.install(entry))
+ mock_exists.assert_called_with(entry)
+ mock_install.assert_called_with(self.ptool, entry)
+ mock_makedirs.assert_called_with(entry)
+
+ reset()
+ exists_rv = MagicMock()
+ exists_rv.__getitem__.return_value = stat.S_IFREG | 420 # 0o644
+ mock_exists.return_value = exists_rv
+ self.assertTrue(self.ptool.install(entry))
+ mock_unlink.assert_called_with(entry.get("name"))
+ mock_exists.assert_called_with(entry)
+ mock_makedirs.assert_called_with(entry)
+ mock_install.assert_called_with(self.ptool, entry)
+
+ reset()
+ exists_rv.__getitem__.return_value = stat.S_IFDIR | 420 # 0o644
+ mock_install.return_value = True
+ self.assertTrue(self.ptool.install(entry))
+ mock_exists.assert_called_with(entry)
+ mock_install.assert_called_with(self.ptool, entry)
+
+ reset()
+ mock_install.return_value = False
+ self.assertFalse(self.ptool.install(entry))
+ mock_install.assert_called_with(self.ptool, entry)
+
+ entry.set("prune", "true")
+ prune = ["/test/foo/bar/prune1", "/test/foo/bar/prune2"]
+ for path in prune:
+ lxml.etree.SubElement(entry, "Prune", path=path)
+
+ reset()
+ mock_install.return_value = True
+
+ def isdir_rv(path):
+ if path.endswith("prune2"):
+ return True
+ else:
+ return False
+ mock_isdir.side_effect = isdir_rv
+ self.assertTrue(self.ptool.install(entry))
+ mock_exists.assert_called_with(entry)
+ mock_install.assert_called_with(self.ptool, entry)
+ self.assertItemsEqual(mock_isdir.call_args_list,
+ [call(p) for p in prune])
+ mock_unlink.assert_called_with("/test/foo/bar/prune1")
+ mock_rmtree.assert_called_with("/test/foo/bar/prune2")
diff --git a/testsuite/Testlib/TestClient/TestTools/TestPOSIX/TestFile.py b/testsuite/Testlib/TestClient/TestTools/TestPOSIX/TestFile.py
new file mode 100644
index 000000000..5b6d3b1dc
--- /dev/null
+++ b/testsuite/Testlib/TestClient/TestTools/TestPOSIX/TestFile.py
@@ -0,0 +1,447 @@
+# -*- coding: utf-8 -*-
+import os
+import sys
+import copy
+import difflib
+import binascii
+import lxml.etree
+from Bcfg2.Bcfg2Py3k import b64encode, b64decode, u_str
+from mock import Mock, MagicMock, patch
+from Bcfg2.Client.Tools.POSIX.File import *
+
+# add all parent testsuite directories to sys.path to allow (most)
+# relative imports in python 2.4
+path = os.path.dirname(__file__)
+while path != "/":
+ if os.path.basename(path).lower().startswith("test"):
+ sys.path.append(path)
+ if os.path.basename(path) == "testsuite":
+ break
+ path = os.path.dirname(path)
+from Test__init import get_posix_object
+from Testbase import TestPOSIXTool
+from common import XI_NAMESPACE, XI, inPy3k, call, builtins, u, can_skip, \
+ skip, skipIf, skipUnless, Bcfg2TestCase, DBModelTestCase, syncdb, \
+ patchIf, datastore
+
+def get_file_object(posix=None):
+ if posix is None:
+ posix = get_posix_object()
+ return POSIXFile(posix.logger, posix.setup, posix.config)
+
+class TestPOSIXFile(TestPOSIXTool):
+ test_obj = POSIXFile
+
+ def test_fully_specified(self):
+ entry = lxml.etree.Element("Path", name="/test", type="file")
+ self.assertFalse(self.ptool.fully_specified(entry))
+
+ entry.set("empty", "true")
+ self.assertTrue(self.ptool.fully_specified(entry))
+
+ entry.set("empty", "false")
+ entry.text = "text"
+ self.assertTrue(self.ptool.fully_specified(entry))
+
+ def test_is_string(self):
+ for char in list(range(8)) + list(range(14, 32)):
+ self.assertFalse(self.ptool._is_string("foo" + chr(char) + "bar",
+ 'UTF-8'))
+ for char in list(range(9, 14)) + list(range(33, 128)):
+ self.assertTrue(self.ptool._is_string("foo" + chr(char) + "bar",
+ 'UTF-8'))
+ ustr = 'é'
+ self.assertTrue(self.ptool._is_string(ustr, 'UTF-8'))
+ if not inPy3k:
+ self.assertFalse(self.ptool._is_string("foo" + chr(128) + "bar",
+ 'ascii'))
+ self.assertFalse(self.ptool._is_string(ustr, 'ascii'))
+
+ def test_get_data(self):
+ orig_entry = lxml.etree.Element("Path", name="/test", type="file")
+ setup = dict(encoding="ascii", ppath='/', max_copies=5)
+ ptool = self.get_obj(posix=get_posix_object(setup=setup))
+
+ entry = copy.deepcopy(orig_entry)
+ entry.text = b64encode("test")
+ entry.set("encoding", "base64")
+ self.assertEqual(ptool._get_data(entry), ("test", True))
+
+ entry = copy.deepcopy(orig_entry)
+ entry.set("empty", "true")
+ self.assertEqual(ptool._get_data(entry), ("", False))
+
+ entry = copy.deepcopy(orig_entry)
+ entry.text = "test"
+ self.assertEqual(ptool._get_data(entry), ("test", False))
+
+ if inPy3k:
+ ustr = 'é'
+ else:
+ ustr = u_str('é', 'UTF-8')
+ entry = copy.deepcopy(orig_entry)
+ entry.text = ustr
+ self.assertEqual(ptool._get_data(entry), (ustr, False))
+
+ @patch("%s.open" % builtins)
+ @patch("Bcfg2.Client.Tools.POSIX.base.POSIXTool.verify")
+ @patch("Bcfg2.Client.Tools.POSIX.File.%s._exists" % test_obj.__name__)
+ @patch("Bcfg2.Client.Tools.POSIX.File.%s._get_data" % test_obj.__name__)
+ @patch("Bcfg2.Client.Tools.POSIX.File.%s._get_diffs" % test_obj.__name__)
+ def test_verify(self, mock_get_diffs, mock_get_data, mock_exists,
+ mock_verify, mock_open):
+ entry = lxml.etree.Element("Path", name="/test", type="file")
+ setup = dict(interactive=False, ppath='/', max_copies=5)
+ ptool = self.get_obj(posix=get_posix_object(setup=setup))
+
+ def reset():
+ mock_get_diffs.reset_mock()
+ mock_get_data.reset_mock()
+ mock_exists.reset_mock()
+ mock_verify.reset_mock()
+ mock_open.reset_mock()
+
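+        # file does not exist: verify() fails and passes empty on-disk content
+        # to _get_diffs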
+ mock_get_data.return_value = ("test", False)
+ mock_exists.return_value = False
+ mock_verify.return_value = True
+ self.assertFalse(ptool.verify(entry, []))
+ mock_exists.assert_called_with(entry)
+ mock_verify.assert_called_with(ptool, entry, [])
+ mock_get_diffs.assert_called_with(entry, interactive=False,
+ sensitive=False,
+ is_binary=False,
+ content="")
+
+ reset()
+ exists_rv = MagicMock()
+ exists_rv.__getitem__.return_value = 5
+ mock_exists.return_value = exists_rv
+ mock_get_data.return_value = ("test", True)
+ self.assertFalse(ptool.verify(entry, []))
+ mock_exists.assert_called_with(entry)
+ mock_verify.assert_called_with(ptool, entry, [])
+ mock_get_diffs.assert_called_with(entry, interactive=False,
+ sensitive=False,
+ is_binary=True,
+ content=None)
+
+ reset()
+ mock_get_data.return_value = ("test", False)
+ exists_rv.__getitem__.return_value = 4
+ entry.set("sensitive", "true")
+ mock_open.return_value.read.return_value = "tart"
+ self.assertFalse(ptool.verify(entry, []))
+ mock_exists.assert_called_with(entry)
+ mock_verify.assert_called_with(ptool, entry, [])
+ mock_open.assert_called_with(entry.get("name"))
+ mock_open.return_value.read.assert_called_with()
+ mock_get_diffs.assert_called_with(entry, interactive=False,
+ sensitive=True,
+ is_binary=False,
+ content="tart")
+
+ reset()
+ mock_open.return_value.read.return_value = "test"
+ self.assertTrue(ptool.verify(entry, []))
+ mock_exists.assert_called_with(entry)
+ mock_verify.assert_called_with(ptool, entry, [])
+ mock_open.assert_called_with(entry.get("name"))
+ mock_open.return_value.read.assert_called_with()
+ self.assertFalse(mock_get_diffs.called)
+
+ reset()
+ mock_open.side_effect = IOError
+ self.assertFalse(ptool.verify(entry, []))
+ mock_exists.assert_called_with(entry)
+ mock_open.assert_called_with(entry.get("name"))
+
+ @patch("os.fdopen")
+ @patch("tempfile.mkstemp")
+ @patch("Bcfg2.Client.Tools.POSIX.File.%s._get_data" % test_obj.__name__)
+ def test_write_tmpfile(self, mock_get_data, mock_mkstemp, mock_fdopen):
+ entry = lxml.etree.Element("Path", name="/test", type="file",
+ perms='0644', owner='root', group='root')
+ newfile = "/foo/bar"
+
+ def reset():
+ mock_get_data.reset_mock()
+ mock_mkstemp.reset_mock()
+ mock_fdopen.reset_mock()
+
+ mock_get_data.return_value = ("test", False)
+ mock_mkstemp.return_value = (5, newfile)
+ self.assertEqual(self.ptool._write_tmpfile(entry), newfile)
+ mock_get_data.assert_called_with(entry)
+ mock_mkstemp.assert_called_with(prefix='test', dir='/')
+ mock_fdopen.assert_called_with(5, 'w')
+ mock_fdopen.return_value.write.assert_called_with("test")
+
+ reset()
+ mock_mkstemp.side_effect = OSError
+ self.assertFalse(self.ptool._write_tmpfile(entry))
+ mock_mkstemp.assert_called_with(prefix='test', dir='/')
+
+ reset()
+ mock_mkstemp.side_effect = None
+ mock_fdopen.side_effect = OSError
+ self.assertFalse(self.ptool._write_tmpfile(entry))
+ mock_mkstemp.assert_called_with(prefix='test', dir='/')
+ mock_get_data.assert_called_with(entry)
+ mock_fdopen.assert_called_with(5, 'w')
+
+ @patch("os.rename")
+ @patch("os.unlink")
+ def test_rename_tmpfile(self, mock_unlink, mock_rename):
+ entry = lxml.etree.Element("Path", name="/test", type="file",
+ perms='0644', owner='root', group='root')
+ newfile = "/foo/bar"
+
+ self.assertTrue(self.ptool._rename_tmpfile(newfile, entry))
+ mock_rename.assert_called_with(newfile, entry.get("name"))
+
+ mock_rename.reset_mock()
+ mock_unlink.reset_mock()
+ mock_rename.side_effect = OSError
+ self.assertFalse(self.ptool._rename_tmpfile(newfile, entry))
+ mock_rename.assert_called_with(newfile, entry.get("name"))
+ mock_unlink.assert_called_with(newfile)
+
+ # even if the unlink fails, return false gracefully
+ mock_rename.reset_mock()
+ mock_unlink.reset_mock()
+ mock_unlink.side_effect = OSError
+ self.assertFalse(self.ptool._rename_tmpfile(newfile, entry))
+ mock_rename.assert_called_with(newfile, entry.get("name"))
+ mock_unlink.assert_called_with(newfile)
+
+ @patch("%s.open" % builtins)
+ @patch("Bcfg2.Client.Tools.POSIX.File.%s._diff" % test_obj.__name__)
+ @patch("Bcfg2.Client.Tools.POSIX.File.%s._get_data" % test_obj.__name__)
+ @patch("Bcfg2.Client.Tools.POSIX.File.%s._is_string" % test_obj.__name__)
+ def test__get_diffs(self, mock_is_string, mock_get_data, mock_diff,
+ mock_open):
+ orig_entry = lxml.etree.Element("Path", name="/test", type="file",
+ perms='0644', owner='root',
+ group='root')
+ orig_entry.text = "test"
+ ondisk = "test2"
+ setup = dict(encoding="utf-8", ppath='/', max_copies=5)
+ ptool = self.get_obj(posix=get_posix_object(setup=setup))
+
+ def reset():
+ mock_is_string.reset_mock()
+ mock_get_data.reset_mock()
+ mock_diff.reset_mock()
+ mock_open.reset_mock()
+ return copy.deepcopy(orig_entry)
+
+ mock_is_string.return_value = True
+ mock_get_data.return_value = (orig_entry.text, False)
+ mock_open.return_value.read.return_value = ondisk
+ mock_diff.return_value = ["-test2", "+test"]
+
+ # binary data in the entry
+ entry = reset()
+ ptool._get_diffs(entry, is_binary=True)
+ mock_open.assert_called_with(entry.get("name"))
+ mock_open.return_value.read.assert_any_call()
+ self.assertFalse(mock_diff.called)
+ self.assertEqual(entry.get("current_bfile"), b64encode(ondisk))
+
+ # binary data on disk
+ entry = reset()
+ mock_is_string.return_value = False
+ ptool._get_diffs(entry, content=ondisk)
+ self.assertFalse(mock_open.called)
+ self.assertFalse(mock_diff.called)
+ self.assertEqual(entry.get("current_bfile"), b64encode(ondisk))
+
+ # sensitive, non-interactive -- do nothing
+ entry = reset()
+ mock_is_string.return_value = True
+ ptool._get_diffs(entry, sensitive=True, interactive=False)
+ self.assertFalse(mock_open.called)
+ self.assertFalse(mock_diff.called)
+ self.assertXMLEqual(entry, orig_entry)
+
+ # sensitive, interactive
+ entry = reset()
+ ptool._get_diffs(entry, sensitive=True, interactive=True)
+ mock_open.assert_called_with(entry.get("name"))
+ mock_open.return_value.read.assert_any_call()
+ mock_diff.assert_called_with(ondisk, entry.text, difflib.unified_diff,
+ filename=entry.get("name"))
+ self.assertIsNotNone(entry.get("qtext"))
+ del entry.attrib['qtext']
+ self.assertItemsEqual(orig_entry.attrib, entry.attrib)
+
+ # non-sensitive, non-interactive
+ entry = reset()
+ ptool._get_diffs(entry, content=ondisk)
+ self.assertFalse(mock_open.called)
+ mock_diff.assert_called_with(ondisk, entry.text, difflib.ndiff,
+ filename=entry.get("name"))
+ self.assertIsNone(entry.get("qtext"))
+ self.assertEqual(entry.get("current_bdiff"),
+ b64encode("\n".join(mock_diff.return_value)))
+ del entry.attrib["current_bdiff"]
+ self.assertItemsEqual(orig_entry.attrib, entry.attrib)
+
+ # non-sensitive, interactive -- do everything. also test
+ # appending to qtext
+ entry = reset()
+ entry.set("qtext", "test")
+ ptool._get_diffs(entry, interactive=True)
+ mock_open.assert_called_with(entry.get("name"))
+ mock_open.return_value.read.assert_any_call()
+ self.assertItemsEqual(mock_diff.call_args_list,
+ [call(ondisk, entry.text, difflib.unified_diff,
+ filename=entry.get("name")),
+ call(ondisk, entry.text, difflib.ndiff,
+ filename=entry.get("name"))])
+ self.assertIsNotNone(entry.get("qtext"))
+ self.assertTrue(entry.get("qtext").startswith("test\n"))
+ self.assertEqual(entry.get("current_bdiff"),
+ b64encode("\n".join(mock_diff.return_value)))
+ del entry.attrib['qtext']
+ del entry.attrib["current_bdiff"]
+ self.assertItemsEqual(orig_entry.attrib, entry.attrib)
+
+ # non-sensitive, interactive with unicode data
+ entry = reset()
+ entry.text = u("tëst")
+ encoded = entry.text.encode(setup['encoding'])
+ mock_get_data.return_value = (encoded, False)
+ ptool._get_diffs(entry, interactive=True)
+ mock_open.assert_called_with(entry.get("name"))
+ mock_open.return_value.read.assert_any_call()
+ self.assertItemsEqual(mock_diff.call_args_list,
+ [call(ondisk, encoded, difflib.unified_diff,
+ filename=entry.get("name")),
+ call(ondisk, encoded, difflib.ndiff,
+ filename=entry.get("name"))])
+ self.assertIsNotNone(entry.get("qtext"))
+ self.assertEqual(entry.get("current_bdiff"),
+ b64encode("\n".join(mock_diff.return_value)))
+ del entry.attrib['qtext']
+ del entry.attrib["current_bdiff"]
+ self.assertItemsEqual(orig_entry.attrib, entry.attrib)
+
+ @patch("os.path.exists")
+ @patch("Bcfg2.Client.Tools.POSIX.base.POSIXTool.install")
+ @patch("Bcfg2.Client.Tools.POSIX.File.%s._makedirs" % test_obj.__name__)
+ @patch("Bcfg2.Client.Tools.POSIX.File.%s._set_perms" % test_obj.__name__)
+ @patch("Bcfg2.Client.Tools.POSIX.File.%s._write_tmpfile" %
+ test_obj.__name__)
+ @patch("Bcfg2.Client.Tools.POSIX.File.%s._rename_tmpfile" %
+ test_obj.__name__)
+ def test_install(self, mock_rename, mock_write, mock_set_perms,
+ mock_makedirs, mock_install, mock_exists):
+ entry = lxml.etree.Element("Path", name="/test", type="file",
+ perms='0644', owner='root', group='root')
+
+ def reset():
+ mock_rename.reset_mock()
+ mock_write.reset_mock()
+ mock_set_perms.reset_mock()
+ mock_makedirs.reset_mock()
+ mock_install.reset_mock()
+ mock_exists.reset_mock()
+
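+        # parent directory missing and _makedirs fails: install() should fail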
+ mock_exists.return_value = False
+ mock_makedirs.return_value = False
+ self.assertFalse(self.ptool.install(entry))
+ mock_exists.assert_called_with("/")
+ mock_makedirs.assert_called_with(entry, path="/")
+
+ reset()
+ mock_makedirs.return_value = True
+ mock_write.return_value = False
+ self.assertFalse(self.ptool.install(entry))
+ mock_exists.assert_called_with("/")
+ mock_makedirs.assert_called_with(entry, path="/")
+ mock_write.assert_called_with(entry)
+
+ reset()
+ newfile = '/test.X987yS'
+ mock_write.return_value = newfile
+ mock_set_perms.return_value = False
+ mock_rename.return_value = False
+ self.assertFalse(self.ptool.install(entry))
+ mock_exists.assert_called_with("/")
+ mock_makedirs.assert_called_with(entry, path="/")
+ mock_write.assert_called_with(entry)
+ mock_set_perms.assert_called_with(entry, path=newfile)
+ mock_rename.assert_called_with(newfile, entry)
+
+ reset()
+ mock_rename.return_value = True
+ mock_install.return_value = False
+ self.assertFalse(self.ptool.install(entry))
+ mock_exists.assert_called_with("/")
+ mock_makedirs.assert_called_with(entry, path="/")
+ mock_write.assert_called_with(entry)
+ mock_set_perms.assert_called_with(entry, path=newfile)
+ mock_rename.assert_called_with(newfile, entry)
+ mock_install.assert_called_with(self.ptool, entry)
+
+ reset()
+ mock_install.return_value = True
+ self.assertFalse(self.ptool.install(entry))
+ mock_exists.assert_called_with("/")
+ mock_makedirs.assert_called_with(entry, path="/")
+ mock_write.assert_called_with(entry)
+ mock_set_perms.assert_called_with(entry, path=newfile)
+ mock_rename.assert_called_with(newfile, entry)
+ mock_install.assert_called_with(self.ptool, entry)
+
+ reset()
+ mock_set_perms.return_value = True
+ self.assertTrue(self.ptool.install(entry))
+ mock_exists.assert_called_with("/")
+ mock_makedirs.assert_called_with(entry, path="/")
+ mock_write.assert_called_with(entry)
+ mock_set_perms.assert_called_with(entry, path=newfile)
+ mock_rename.assert_called_with(newfile, entry)
+ mock_install.assert_called_with(self.ptool, entry)
+
+ reset()
+ mock_exists.return_value = True
+ self.assertTrue(self.ptool.install(entry))
+ mock_exists.assert_called_with("/")
+ self.assertFalse(mock_makedirs.called)
+ mock_write.assert_called_with(entry)
+ mock_set_perms.assert_called_with(entry, path=newfile)
+ mock_rename.assert_called_with(newfile, entry)
+ mock_install.assert_called_with(self.ptool, entry)
+
+ @patch("time.time")
+ def test_diff(self, mock_time):
+ content1 = "line1\nline2"
+ content2 = "line3"
+
+ self.now = 1345640723
+ def time_rv():
+ self.now += 1
+ return self.now
+ mock_time.side_effect = time_rv
+
+ rv = ["line1", "line2", "line3"]
+ func = Mock()
+ func.return_value = rv
+ self.assertItemsEqual(self.ptool._diff(content1, content2, func), rv)
+ func.assert_called_with(["line1", "line2"], ["line3"])
+
+ func.reset_mock()
+ mock_time.reset_mock()
+ def time_rv():
+ self.now += 5
+ return self.now
+ mock_time.side_effect = time_rv
+
+ def slow_diff(content1, content2):
+ for i in range(1, 10):
+ yield "line%s" % i
+ func.side_effect = slow_diff
+        self.assertFalse(self.ptool._diff(content1, content2, func))
+ func.assert_called_with(["line1", "line2"], ["line3"])
diff --git a/testsuite/Testlib/TestClient/TestTools/TestPOSIX/TestHardlink.py b/testsuite/Testlib/TestClient/TestTools/TestPOSIX/TestHardlink.py
new file mode 100644
index 000000000..d68e15837
--- /dev/null
+++ b/testsuite/Testlib/TestClient/TestTools/TestPOSIX/TestHardlink.py
@@ -0,0 +1,85 @@
+import os
+import sys
+import copy
+import lxml.etree
+from mock import Mock, MagicMock, patch
+from Bcfg2.Client.Tools.POSIX.Hardlink import *
+
+# add all parent testsuite directories to sys.path to allow (most)
+# relative imports in python 2.4
+path = os.path.dirname(__file__)
+while path != "/":
+ if os.path.basename(path).lower().startswith("test"):
+ sys.path.append(path)
+ if os.path.basename(path) == "testsuite":
+ break
+ path = os.path.dirname(path)
+from Test__init import get_posix_object
+from Testbase import TestPOSIXTool
+from common import XI_NAMESPACE, XI, inPy3k, call, builtins, u, can_skip, \
+ skip, skipIf, skipUnless, Bcfg2TestCase, DBModelTestCase, syncdb, \
+ patchIf, datastore
+
+class TestPOSIXHardlink(TestPOSIXTool):
+ test_obj = POSIXHardlink
+
+ @patch("os.path.samefile")
+ @patch("Bcfg2.Client.Tools.POSIX.base.POSIXTool.verify")
+ def test_verify(self, mock_verify, mock_samefile):
+ entry = lxml.etree.Element("Path", name="/test", type="hardlink",
+ to="/dest")
+ ptool = self.get_obj()
+
+ mock_samefile.return_value = True
+ mock_verify.return_value = False
+ self.assertFalse(ptool.verify(entry, []))
+ mock_samefile.assert_called_with(entry.get("name"),
+ entry.get("to"))
+ mock_verify.assert_called_with(ptool, entry, [])
+
+ mock_samefile.reset_mock()
+ mock_verify.reset_mock()
+ mock_verify.return_value = True
+ self.assertTrue(ptool.verify(entry, []))
+ mock_samefile.assert_called_with(entry.get("name"),
+ entry.get("to"))
+ mock_verify.assert_called_with(ptool, entry, [])
+
+ mock_samefile.reset_mock()
+ mock_verify.reset_mock()
+ mock_samefile.return_value = False
+ self.assertFalse(ptool.verify(entry, []))
+ mock_samefile.assert_called_with(entry.get("name"),
+ entry.get("to"))
+ mock_verify.assert_called_with(ptool, entry, [])
+
+ mock_samefile.reset_mock()
+ mock_verify.reset_mock()
+ mock_samefile.side_effect = OSError
+ self.assertFalse(ptool.verify(entry, []))
+ mock_samefile.assert_called_with(entry.get("name"),
+ entry.get("to"))
+
+ @patch("os.link")
+ @patch("Bcfg2.Client.Tools.POSIX.base.POSIXTool.install")
+ @patch("Bcfg2.Client.Tools.POSIX.Hardlink.%s._exists" % test_obj.__name__)
+ def test_install(self, mock_exists, mock_install, mock_link):
+ entry = lxml.etree.Element("Path", name="/test", type="hardlink",
+ to="/dest")
+ ptool = self.get_obj()
+
+ mock_exists.return_value = False
+ mock_install.return_value = True
+ self.assertTrue(ptool.install(entry))
+ mock_exists.assert_called_with(entry, remove=True)
+ mock_link.assert_called_with(entry.get("to"), entry.get("name"))
+ mock_install.assert_called_with(ptool, entry)
+
+ mock_link.reset_mock()
+ mock_exists.reset_mock()
+ mock_install.reset_mock()
+ mock_link.side_effect = OSError
+ self.assertFalse(ptool.install(entry))
+ mock_exists.assert_called_with(entry, remove=True)
+ mock_link.assert_called_with(entry.get("to"), entry.get("name"))
+ mock_install.assert_called_with(ptool, entry)
diff --git a/testsuite/Testlib/TestClient/TestTools/TestPOSIX/TestNonexistent.py b/testsuite/Testlib/TestClient/TestTools/TestPOSIX/TestNonexistent.py
new file mode 100644
index 000000000..375ff00eb
--- /dev/null
+++ b/testsuite/Testlib/TestClient/TestTools/TestPOSIX/TestNonexistent.py
@@ -0,0 +1,91 @@
+import os
+import sys
+import copy
+import lxml.etree
+from mock import Mock, MagicMock, patch
+from Bcfg2.Client.Tools.POSIX.Nonexistent import *
+
+# add all parent testsuite directories to sys.path to allow (most)
+# relative imports in python 2.4
+path = os.path.dirname(__file__)
+while path != "/":
+ if os.path.basename(path).lower().startswith("test"):
+ sys.path.append(path)
+ if os.path.basename(path) == "testsuite":
+ break
+ path = os.path.dirname(path)
+from Test__init import get_config, get_posix_object
+from Testbase import TestPOSIXTool
+from common import XI_NAMESPACE, XI, inPy3k, call, builtins, u, can_skip, \
+ skip, skipIf, skipUnless, Bcfg2TestCase, DBModelTestCase, syncdb, \
+ patchIf, datastore
+
+class TestPOSIXNonexistent(TestPOSIXTool):
+ test_obj = POSIXNonexistent
+
+ @patch("os.path.lexists")
+ def test_verify(self, mock_lexists):
+ entry = lxml.etree.Element("Path", name="/test", type="nonexistent")
+
+ for val in [True, False]:
+ mock_lexists.reset_mock()
+ mock_lexists.return_value = val
+ self.assertEqual(self.ptool.verify(entry, []), not val)
+ mock_lexists.assert_called_with(entry.get("name"))
+
+ @patch("os.rmdir")
+ @patch("os.remove")
+ @patch("os.path.isdir")
+ @patch("shutil.rmtree")
+ def test_install(self, mock_rmtree, mock_isdir, mock_remove, mock_rmdir):
+ entry = lxml.etree.Element("Path", name="/test", type="nonexistent")
+
+ def reset():
+ mock_isdir.reset_mock()
+ mock_remove.reset_mock()
+ mock_rmdir.reset_mock()
+ mock_rmtree.reset_mock()
+
+ mock_isdir.return_value = False
+ self.assertTrue(self.ptool.install(entry))
+ mock_remove.assert_called_with(entry.get("name"))
+
+ reset()
+ mock_remove.side_effect = OSError
+ self.assertFalse(self.ptool.install(entry))
+ mock_remove.assert_called_with(entry.get("name"))
+
+ reset()
+ mock_isdir.return_value = True
+ self.assertTrue(self.ptool.install(entry))
+ mock_rmdir.assert_called_with(entry.get("name"))
+
+ reset()
+ mock_rmdir.side_effect = OSError
+ self.assertFalse(self.ptool.install(entry))
+ mock_rmdir.assert_called_with(entry.get("name"))
+
+ reset()
+ entry.set("recursive", "true")
+ self.assertTrue(self.ptool.install(entry))
+ mock_rmtree.assert_called_with(entry.get("name"))
+
+ reset()
+ mock_rmtree.side_effect = OSError
+ self.assertFalse(self.ptool.install(entry))
+ mock_rmtree.assert_called_with(entry.get("name"))
+
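+        # recursive removal should succeed when other entries under the path
+        # are also nonexistent, but be refused when a managed child entry of
+        # another type exists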
+ reset()
+ child_entry = lxml.etree.Element("Path", name="/test/foo",
+ type="nonexistent")
+ ptool = self.get_obj(posix=get_posix_object(config=get_config([child_entry])))
+ mock_rmtree.side_effect = None
+ self.assertTrue(ptool.install(entry))
+ mock_rmtree.assert_called_with(entry.get("name"))
+
+ reset()
+ child_entry = lxml.etree.Element("Path", name="/test/foo",
+ type="file")
+ ptool = self.get_obj(posix=get_posix_object(config=get_config([child_entry])))
+ mock_rmtree.side_effect = None
+ self.assertFalse(ptool.install(entry))
diff --git a/testsuite/Testlib/TestClient/TestTools/TestPOSIX/TestPermissions.py b/testsuite/Testlib/TestClient/TestTools/TestPOSIX/TestPermissions.py
new file mode 100644
index 000000000..565857437
--- /dev/null
+++ b/testsuite/Testlib/TestClient/TestTools/TestPOSIX/TestPermissions.py
@@ -0,0 +1,5 @@
+from Bcfg2.Client.Tools.POSIX.Permissions import *
+from Testbase import TestPOSIXTool
+
+class TestPOSIXPermissions(TestPOSIXTool):
+ test_obj = POSIXPermissions
diff --git a/testsuite/Testlib/TestClient/TestTools/TestPOSIX/TestSymlink.py b/testsuite/Testlib/TestClient/TestTools/TestPOSIX/TestSymlink.py
new file mode 100644
index 000000000..c825e5476
--- /dev/null
+++ b/testsuite/Testlib/TestClient/TestTools/TestPOSIX/TestSymlink.py
@@ -0,0 +1,81 @@
+import os
+import sys
+import copy
+import lxml.etree
+from mock import Mock, MagicMock, patch
+from Bcfg2.Client.Tools.POSIX.Symlink import *
+
+# add all parent testsuite directories to sys.path to allow (most)
+# relative imports in python 2.4
+path = os.path.dirname(__file__)
+while path != "/":
+ if os.path.basename(path).lower().startswith("test"):
+ sys.path.append(path)
+ if os.path.basename(path) == "testsuite":
+ break
+ path = os.path.dirname(path)
+from Test__init import get_posix_object
+from Testbase import TestPOSIXTool
+from common import XI_NAMESPACE, XI, inPy3k, call, builtins, u, can_skip, \
+ skip, skipIf, skipUnless, Bcfg2TestCase, DBModelTestCase, syncdb, \
+ patchIf, datastore
+
+class TestPOSIXSymlink(TestPOSIXTool):
+ test_obj = POSIXSymlink
+
+ @patch("os.readlink")
+ @patch("Bcfg2.Client.Tools.POSIX.base.POSIXTool.verify")
+ def test_verify(self, mock_verify, mock_readlink):
+ entry = lxml.etree.Element("Path", name="/test", type="symlink",
+ to="/dest")
+ ptool = self.get_obj()
+
+ mock_readlink.return_value = entry.get("to")
+ mock_verify.return_value = False
+ self.assertFalse(ptool.verify(entry, []))
+ mock_readlink.assert_called_with(entry.get("name"))
+ mock_verify.assert_called_with(ptool, entry, [])
+
+ mock_readlink.reset_mock()
+ mock_verify.reset_mock()
+ mock_verify.return_value = True
+ self.assertTrue(ptool.verify(entry, []))
+ mock_readlink.assert_called_with(entry.get("name"))
+ mock_verify.assert_called_with(ptool, entry, [])
+
+ mock_readlink.reset_mock()
+ mock_verify.reset_mock()
+ mock_readlink.return_value = "/bogus"
+ self.assertFalse(ptool.verify(entry, []))
+ mock_readlink.assert_called_with(entry.get("name"))
+ mock_verify.assert_called_with(ptool, entry, [])
+
+ mock_readlink.reset_mock()
+ mock_verify.reset_mock()
+ mock_readlink.side_effect = OSError
+ self.assertFalse(ptool.verify(entry, []))
+ mock_readlink.assert_called_with(entry.get("name"))
+
+ @patch("os.symlink")
+ @patch("Bcfg2.Client.Tools.POSIX.base.POSIXTool.install")
+ @patch("Bcfg2.Client.Tools.POSIX.Symlink.%s._exists" % test_obj.__name__)
+ def test_install(self, mock_exists, mock_install, mock_symlink):
+ entry = lxml.etree.Element("Path", name="/test", type="symlink",
+ to="/dest")
+ ptool = self.get_obj()
+
+ mock_exists.return_value = False
+ mock_install.return_value = True
+ self.assertTrue(ptool.install(entry))
+ mock_exists.assert_called_with(entry, remove=True)
+ mock_symlink.assert_called_with(entry.get("to"), entry.get("name"))
+ mock_install.assert_called_with(ptool, entry)
+
+ mock_symlink.reset_mock()
+ mock_exists.reset_mock()
+ mock_install.reset_mock()
+ mock_symlink.side_effect = OSError
+ self.assertFalse(ptool.install(entry))
+ mock_exists.assert_called_with(entry, remove=True)
+ mock_symlink.assert_called_with(entry.get("to"), entry.get("name"))
+ mock_install.assert_called_with(ptool, entry)
diff --git a/testsuite/Testlib/TestClient/TestTools/TestPOSIX/Test__init.py b/testsuite/Testlib/TestClient/TestTools/TestPOSIX/Test__init.py
new file mode 100644
index 000000000..14a2520df
--- /dev/null
+++ b/testsuite/Testlib/TestClient/TestTools/TestPOSIX/Test__init.py
@@ -0,0 +1,252 @@
+import os
+import sys
+import lxml.etree
+from mock import Mock, MagicMock, patch
+import Bcfg2.Client.Tools
+import Bcfg2.Client.Tools.POSIX
+
+# add all parent testsuite directories to sys.path to allow (most)
+# relative imports in python 2.4
+path = os.path.dirname(__file__)
+while path != "/":
+ if os.path.basename(path).lower().startswith("test"):
+ sys.path.append(path)
+ if os.path.basename(path) == "testsuite":
+ break
+ path = os.path.dirname(path)
+from common import XI_NAMESPACE, XI, inPy3k, call, builtins, u, can_skip, \
+ skip, skipIf, skipUnless, Bcfg2TestCase, DBModelTestCase, syncdb, \
+ patchIf, datastore
+
+def get_config(entries):
+ config = lxml.etree.Element("Configuration")
+ bundle = lxml.etree.SubElement(config, "Bundle", name="test")
+ bundle.extend(entries)
+ return config
+
+def get_posix_object(logger=None, setup=None, config=None):
+ if config is None:
+ config = lxml.etree.Element("Configuration")
+ if not logger:
+ def print_msg(msg):
+ print(msg)
+ logger = Mock()
+ logger.error = Mock(side_effect=print_msg)
+ logger.warning = Mock(side_effect=print_msg)
+ logger.info = Mock(side_effect=print_msg)
+ logger.debug = Mock(side_effect=print_msg)
+ if not setup:
+ setup = MagicMock()
+ return Bcfg2.Client.Tools.POSIX.POSIX(logger, setup, config)
+
+
+class TestPOSIX(Bcfg2TestCase):
+ def setUp(self):
+ self.posix = get_posix_object()
+
+ def tearDown(self):
+ # just to guarantee that we start fresh each time
+ self.posix = None
+
+ def test__init(self):
+ entries = [lxml.etree.Element("Path", name="test", type="file")]
+ posix = get_posix_object(config=get_config(entries))
+ self.assertIsInstance(posix, Bcfg2.Client.Tools.Tool)
+ self.assertIsInstance(posix, Bcfg2.Client.Tools.POSIX.POSIX)
+ self.assertIn('Path', posix.__req__)
+ self.assertGreater(len(posix.__req__['Path']), 0)
+ self.assertGreater(len(posix.__handles__), 0)
+ self.assertItemsEqual(posix.handled, entries)
+
+ @patch("Bcfg2.Client.Tools.Tool.canVerify")
+ def test_canVerify(self, mock_canVerify):
+ entry = lxml.etree.Element("Path", name="test", type="file")
+
+ # first, test superclass canVerify failure
+ mock_canVerify.return_value = False
+ self.assertFalse(self.posix.canVerify(entry))
+ mock_canVerify.assert_called_with(self.posix, entry)
+
+ # next, test fully_specified failure
+ self.posix.logger.error.reset_mock()
+ mock_canVerify.reset_mock()
+ mock_canVerify.return_value = True
+ mock_fully_spec = Mock()
+ mock_fully_spec.return_value = False
+ self.posix._handlers[entry.get("type")].fully_specified = \
+ mock_fully_spec
+ self.assertFalse(self.posix.canVerify(entry))
+ mock_canVerify.assert_called_with(self.posix, entry)
+ mock_fully_spec.assert_called_with(entry)
+ self.assertTrue(self.posix.logger.error.called)
+
+ # finally, test success
+ self.posix.logger.error.reset_mock()
+ mock_canVerify.reset_mock()
+ mock_fully_spec.reset_mock()
+ mock_fully_spec.return_value = True
+ self.assertTrue(self.posix.canVerify(entry))
+ mock_canVerify.assert_called_with(self.posix, entry)
+ mock_fully_spec.assert_called_with(entry)
+ self.assertFalse(self.posix.logger.error.called)
+
+ @patch("Bcfg2.Client.Tools.Tool.canInstall")
+ def test_canInstall(self, mock_canInstall):
+ entry = lxml.etree.Element("Path", name="test", type="file")
+
+ # first, test superclass canInstall failure
+ mock_canInstall.return_value = False
+ self.assertFalse(self.posix.canInstall(entry))
+ mock_canInstall.assert_called_with(self.posix, entry)
+
+ # next, test fully_specified failure
+ self.posix.logger.error.reset_mock()
+ mock_canInstall.reset_mock()
+ mock_canInstall.return_value = True
+ mock_fully_spec = Mock()
+ mock_fully_spec.return_value = False
+ self.posix._handlers[entry.get("type")].fully_specified = \
+ mock_fully_spec
+ self.assertFalse(self.posix.canInstall(entry))
+ mock_canInstall.assert_called_with(self.posix, entry)
+ mock_fully_spec.assert_called_with(entry)
+ self.assertTrue(self.posix.logger.error.called)
+
+ # finally, test success
+ self.posix.logger.error.reset_mock()
+ mock_canInstall.reset_mock()
+ mock_fully_spec.reset_mock()
+ mock_fully_spec.return_value = True
+ self.assertTrue(self.posix.canInstall(entry))
+ mock_canInstall.assert_called_with(self.posix, entry)
+ mock_fully_spec.assert_called_with(entry)
+ self.assertFalse(self.posix.logger.error.called)
+
+ def test_InstallPath(self):
+ entry = lxml.etree.Element("Path", name="test", type="file")
+
+ mock_install = Mock()
+ mock_install.return_value = True
+ self.posix._handlers[entry.get("type")].install = mock_install
+ self.assertTrue(self.posix.InstallPath(entry))
+ mock_install.assert_called_with(entry)
+
+ def test_VerifyPath(self):
+ entry = lxml.etree.Element("Path", name="test", type="file")
+ modlist = []
+
+ mock_verify = Mock()
+ mock_verify.return_value = True
+ self.posix._handlers[entry.get("type")].verify = mock_verify
+ self.assertTrue(self.posix.VerifyPath(entry, modlist))
+ mock_verify.assert_called_with(entry, modlist)
+
+ mock_verify.reset_mock()
+ mock_verify.return_value = False
+ self.posix.setup.__getitem__.return_value = True
+ self.assertFalse(self.posix.VerifyPath(entry, modlist))
+ self.assertIsNotNone(entry.get('qtext'))
+
+ @patch('os.remove')
+ def test_prune_old_backups(self, mock_remove):
+ entry = lxml.etree.Element("Path", name="/etc/foo", type="file")
+ setup = dict(ppath='/', max_copies=5, paranoid=True)
+ posix = get_posix_object(setup=setup)
+
+ remove = ["_etc_foo_2012-07-20T04:13:22.364989",
+ "_etc_foo_2012-07-31T04:13:23.894958",
+ "_etc_foo_2012-07-17T04:13:22.493316",]
+ keep = ["_etc_foo_bar_2011-08-07T04:13:22.519978",
+ "_etc_foo_2012-08-04T04:13:22.519978",
+ "_etc_Foo_2011-08-07T04:13:22.519978",
+ "_etc_foo_2012-08-06T04:13:22.519978",
+ "_etc_foo_2012-08-03T04:13:22.191895",
+ "_etc_test_2011-08-07T04:13:22.519978",
+ "_etc_foo_2012-08-07T04:13:22.519978",]
+
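+        # patch os.listdir only inside the nested helper below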
+ @patch('os.listdir')
+ def inner(mock_listdir):
+ mock_listdir.side_effect = OSError
+ posix._prune_old_backups(entry)
+ self.assertTrue(posix.logger.error.called)
+ self.assertFalse(mock_remove.called)
+ mock_listdir.assert_called_with(setup['ppath'])
+
+ mock_listdir.reset_mock()
+ mock_remove.reset_mock()
+ mock_listdir.side_effect = None
+ mock_listdir.return_value = keep + remove
+
+ posix._prune_old_backups(entry)
+ mock_listdir.assert_called_with(setup['ppath'])
+ self.assertItemsEqual(mock_remove.call_args_list,
+ [call(os.path.join(setup['ppath'], p))
+ for p in remove])
+
+ mock_listdir.reset_mock()
+ mock_remove.reset_mock()
+ mock_remove.side_effect = OSError
+ posix.logger.error.reset_mock()
+ # test to ensure that we call os.remove() for all files that
+ # need to be removed even if we get an error
+ posix._prune_old_backups(entry)
+ mock_listdir.assert_called_with(setup['ppath'])
+ self.assertItemsEqual(mock_remove.call_args_list,
+ [call(os.path.join(setup['ppath'], p))
+ for p in remove])
+ self.assertTrue(posix.logger.error.called)
+
+ inner()
+
+ @patch("shutil.copy")
+ @patch("os.path.isdir")
+ @patch("Bcfg2.Client.Tools.POSIX.POSIX._prune_old_backups")
+ def test_paranoid_backup(self, mock_prune, mock_isdir, mock_copy):
+ entry = lxml.etree.Element("Path", name="/etc/foo", type="file")
+ setup = dict(ppath='/', max_copies=5, paranoid=False)
+ posix = get_posix_object(setup=setup)
+
+ # paranoid false globally
+ posix._paranoid_backup(entry)
+ self.assertFalse(mock_prune.called)
+ self.assertFalse(mock_copy.called)
+
+ # paranoid false on the entry
+ mock_prune.reset_mock()
+ setup['paranoid'] = True
+ posix = get_posix_object(setup=setup)
+ posix._paranoid_backup(entry)
+ self.assertFalse(mock_prune.called)
+ self.assertFalse(mock_copy.called)
+
+ # entry does not exist on filesystem
+ mock_prune.reset_mock()
+ entry.set("paranoid", "true")
+ entry.set("current_exists", "false")
+ posix._paranoid_backup(entry)
+ self.assertFalse(mock_prune.called)
+ self.assertFalse(mock_copy.called)
+
+ # entry is a directory on the filesystem
+ mock_prune.reset_mock()
+ entry.set("current_exists", "true")
+ mock_isdir.return_value = True
+ posix._paranoid_backup(entry)
+ self.assertFalse(mock_prune.called)
+ self.assertFalse(mock_copy.called)
+ mock_isdir.assert_called_with(entry.get("name"))
+
+ # test the actual backup now
+ mock_prune.reset_mock()
+ mock_isdir.return_value = False
+ posix._paranoid_backup(entry)
+ mock_isdir.assert_called_with(entry.get("name"))
+ mock_prune.assert_called_with(entry)
+ # it's basically impossible to test the shutil.copy() call
+ # exactly because the destination includes microseconds, so we
+        # just test it well enough
+ self.assertEqual(mock_copy.call_args[0][0],
+ entry.get("name"))
+ bkupnam = os.path.join(setup['ppath'],
+ entry.get('name').replace('/', '_')) + '_'
+ self.assertEqual(bkupnam, mock_copy.call_args[0][1][:len(bkupnam)])
diff --git a/testsuite/Testlib/TestClient/TestTools/TestPOSIX/Testbase.py b/testsuite/Testlib/TestClient/TestTools/TestPOSIX/Testbase.py
new file mode 100644
index 000000000..b447ab642
--- /dev/null
+++ b/testsuite/Testlib/TestClient/TestTools/TestPOSIX/Testbase.py
@@ -0,0 +1,991 @@
+import os
+import sys
+import copy
+import stat
+import lxml.etree
+from mock import Mock, MagicMock, patch
+import Bcfg2.Client.Tools
+from Bcfg2.Client.Tools.POSIX.base import *
+
+# add all parent testsuite directories to sys.path to allow (most)
+# relative imports in python 2.4
+path = os.path.dirname(__file__)
+while path != "/":
+ if os.path.basename(path).lower().startswith("test"):
+ sys.path.append(path)
+ if os.path.basename(path) == "testsuite":
+ break
+ path = os.path.dirname(path)
+from Test__init import get_posix_object
+from common import XI_NAMESPACE, XI, inPy3k, call, builtins, u, can_skip, \
+ skip, skipIf, skipUnless, Bcfg2TestCase, DBModelTestCase, syncdb, \
+ patchIf, datastore
+
+try:
+ import selinux
+ has_selinux = True
+except ImportError:
+ has_selinux = False
+
+try:
+ import posix1e
+ has_acls = True
+except ImportError:
+ has_acls = False
+
+class TestPOSIXTool(Bcfg2TestCase):
+ test_obj = POSIXTool
+
+ def get_obj(self, posix=None):
+ if posix is None:
+ posix = get_posix_object()
+ return self.test_obj(posix.logger, posix.setup, posix.config)
+
+ def setUp(self):
+ self.ptool = self.get_obj()
+
+ def tearDown(self):
+ # just to guarantee that we start fresh each time
+ self.ptool = None
+
+ def test_fully_specified(self):
+ # fully_specified should do no checking on the abstract
+ # POSIXTool object
+ self.assertTrue(self.ptool.fully_specified(Mock()))
+
+ @patch('os.stat')
+ @patch('os.walk')
+ @patch("Bcfg2.Client.Tools.POSIX.base.%s._verify_metadata" %
+ test_obj.__name__)
+ def test_verify(self, mock_verify, mock_walk, mock_stat):
+ entry = lxml.etree.Element("Path", name="/test", type="file")
+
+ mock_stat.return_value = MagicMock()
+ mock_verify.return_value = False
+ self.assertFalse(self.ptool.verify(entry, []))
+ mock_verify.assert_called_with(entry)
+
+ mock_verify.reset_mock()
+ mock_verify.return_value = True
+ self.assertTrue(self.ptool.verify(entry, []))
+ mock_verify.assert_called_with(entry)
+
+ mock_verify.reset_mock()
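+        # recursive verification should check every path returned by os.walk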
+ entry.set("recursive", "true")
+ walk_rv = [("/", ["dir1", "dir2"], ["file1", "file2"]),
+ ("/dir1", ["dir3"], []),
+ ("/dir2", [], ["file3", "file4"])]
+ mock_walk.return_value = walk_rv
+ self.assertTrue(self.ptool.verify(entry, []))
+ mock_walk.assert_called_with(entry.get("name"))
+ all_verifies = [call(entry)]
+ for root, dirs, files in walk_rv:
+ all_verifies.extend([call(entry, path=os.path.join(root, p))
+ for p in dirs + files])
+ self.assertItemsEqual(mock_verify.call_args_list, all_verifies)
+
+ @patch('os.walk')
+ @patch("Bcfg2.Client.Tools.POSIX.base.%s._set_perms" % test_obj.__name__)
+ def test_install(self, mock_set_perms, mock_walk):
+ entry = lxml.etree.Element("Path", name="/test", type="file")
+
+ mock_set_perms.return_value = True
+ self.assertTrue(self.ptool.install(entry))
+ mock_set_perms.assert_called_with(entry)
+
+ mock_set_perms.reset_mock()
+ entry.set("recursive", "true")
+ walk_rv = [("/", ["dir1", "dir2"], ["file1", "file2"]),
+ ("/dir1", ["dir3"], []),
+ ("/dir2", [], ["file3", "file4"])]
+ mock_walk.return_value = walk_rv
+
+ mock_set_perms.return_value = True
+ self.assertTrue(self.ptool.install(entry))
+ mock_walk.assert_called_with(entry.get("name"))
+ all_set_perms = [call(entry)]
+ for root, dirs, files in walk_rv:
+ all_set_perms.extend([call(entry, path=os.path.join(root, p))
+ for p in dirs + files])
+ self.assertItemsEqual(mock_set_perms.call_args_list,
+ all_set_perms)
+
+ mock_walk.reset_mock()
+ mock_set_perms.reset_mock()
+
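+        # a single _set_perms failure during recursion fails the install, but
+        # every path is still attempted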
+ def set_perms_rv(entry, path=None):
+ if path == '/dir2/file3':
+ return False
+ else:
+ return True
+ mock_set_perms.side_effect = set_perms_rv
+
+ self.assertFalse(self.ptool.install(entry))
+ mock_walk.assert_called_with(entry.get("name"))
+ self.assertItemsEqual(mock_set_perms.call_args_list, all_set_perms)
+
+ @patch('os.lstat')
+ @patch("os.unlink")
+ @patch("os.path.isdir")
+ @patch("shutil.rmtree")
+ def test_exists(self, mock_rmtree, mock_isdir, mock_unlink, mock_lstat):
+ entry = lxml.etree.Element("Path", name="/etc/foo", type="file")
+
+ mock_lstat.side_effect = OSError
+ self.assertFalse(self.ptool._exists(entry))
+ mock_lstat.assert_called_with(entry.get('name'))
+ self.assertFalse(mock_unlink.called)
+
+ mock_lstat.reset_mock()
+ mock_unlink.reset_mock()
+ rv = MagicMock()
+ mock_lstat.return_value = rv
+ mock_lstat.side_effect = None
+ self.assertEqual(self.ptool._exists(entry), rv)
+ mock_lstat.assert_called_with(entry.get('name'))
+ self.assertFalse(mock_unlink.called)
+
+ mock_lstat.reset_mock()
+ mock_unlink.reset_mock()
+ mock_isdir.return_value = False
+ self.assertFalse(self.ptool._exists(entry, remove=True))
+ mock_isdir.assert_called_with(entry.get('name'))
+ mock_lstat.assert_called_with(entry.get('name'))
+ mock_unlink.assert_called_with(entry.get('name'))
+ self.assertFalse(mock_rmtree.called)
+
+ mock_lstat.reset_mock()
+ mock_isdir.reset_mock()
+ mock_unlink.reset_mock()
+ mock_rmtree.reset_mock()
+ mock_isdir.return_value = True
+ self.assertFalse(self.ptool._exists(entry, remove=True))
+ mock_isdir.assert_called_with(entry.get('name'))
+ mock_lstat.assert_called_with(entry.get('name'))
+ mock_rmtree.assert_called_with(entry.get('name'))
+ self.assertFalse(mock_unlink.called)
+
+ mock_isdir.reset_mock()
+ mock_lstat.reset_mock()
+ mock_unlink.reset_mock()
+ mock_rmtree.reset_mock()
+ mock_rmtree.side_effect = OSError
+ self.assertEqual(self.ptool._exists(entry, remove=True), rv)
+ mock_isdir.assert_called_with(entry.get('name'))
+ mock_lstat.assert_called_with(entry.get('name'))
+ mock_rmtree.assert_called_with(entry.get('name'))
+ self.assertFalse(mock_unlink.called)
+
+ @patch("os.chown")
+ @patch("os.chmod")
+ @patch("os.utime")
+ @patch("Bcfg2.Client.Tools.POSIX.base.%s._norm_entry_uid" %
+ test_obj.__name__)
+ @patch("Bcfg2.Client.Tools.POSIX.base.%s._norm_entry_gid" %
+ test_obj.__name__)
+ @patch("Bcfg2.Client.Tools.POSIX.base.%s._set_acls" % test_obj.__name__)
+ @patch("Bcfg2.Client.Tools.POSIX.base.%s._set_secontext" %
+ test_obj.__name__)
+ def test_set_perms(self, mock_set_secontext, mock_set_acls, mock_norm_gid,
+ mock_norm_uid, mock_utime, mock_chmod, mock_chown):
+ def reset():
+ mock_set_secontext.reset_mock()
+ mock_set_acls.reset_mock()
+ mock_norm_gid.reset_mock()
+ mock_norm_uid.reset_mock()
+ mock_chmod.reset_mock()
+ mock_chown.reset_mock()
+ mock_utime.reset_mock()
+
+ entry = lxml.etree.Element("Path", name="/etc/foo", to="/etc/bar",
+ type="symlink")
+ mock_set_acls.return_value = True
+ mock_set_secontext.return_value = True
+ self.assertTrue(self.ptool._set_perms(entry))
+ mock_set_secontext.assert_called_with(entry, path=entry.get("name"))
+ mock_set_acls.assert_called_with(entry, path=entry.get("name"))
+
+ entry = lxml.etree.Element("Path", name="/etc/foo", owner="owner",
+ group="group", perms="644", type="file")
+ mock_norm_uid.return_value = 10
+ mock_norm_gid.return_value = 100
+
+ reset()
+ self.assertTrue(self.ptool._set_perms(entry))
+ mock_norm_uid.assert_called_with(entry)
+ mock_norm_gid.assert_called_with(entry)
+ mock_chown.assert_called_with(entry.get("name"), 10, 100)
+ mock_chmod.assert_called_with(entry.get("name"),
+ int(entry.get("perms"), 8))
+ self.assertFalse(mock_utime.called)
+ mock_set_secontext.assert_called_with(entry, path=entry.get("name"))
+ mock_set_acls.assert_called_with(entry, path=entry.get("name"))
+
+ reset()
+ mtime = 1344459042
+ entry.set("mtime", str(mtime))
+ self.assertTrue(self.ptool._set_perms(entry))
+ mock_norm_uid.assert_called_with(entry)
+ mock_norm_gid.assert_called_with(entry)
+ mock_chown.assert_called_with(entry.get("name"), 10, 100)
+ mock_chmod.assert_called_with(entry.get("name"),
+ int(entry.get("perms"), 8))
+ mock_utime.assert_called_with(entry.get("name"), (mtime, mtime))
+ mock_set_secontext.assert_called_with(entry, path=entry.get("name"))
+ mock_set_acls.assert_called_with(entry, path=entry.get("name"))
+
+ reset()
+ self.assertTrue(self.ptool._set_perms(entry, path='/etc/bar'))
+ mock_norm_uid.assert_called_with(entry)
+ mock_norm_gid.assert_called_with(entry)
+ mock_chown.assert_called_with('/etc/bar', 10, 100)
+ mock_chmod.assert_called_with('/etc/bar', int(entry.get("perms"), 8))
+ mock_utime.assert_called_with(entry.get("name"), (mtime, mtime))
+ mock_set_secontext.assert_called_with(entry, path='/etc/bar')
+ mock_set_acls.assert_called_with(entry, path='/etc/bar')
+
+ # test dev_type modification of perms, failure of chown
+ reset()
+ def chown_rv(path, owner, group):
+ if owner == 0 and group == 0:
+ return True
+ else:
+ raise KeyError
+        mock_chown.side_effect = chown_rv
+ entry.set("type", "device")
+ entry.set("dev_type", list(device_map.keys())[0])
+ self.assertFalse(self.ptool._set_perms(entry))
+ mock_norm_uid.assert_called_with(entry)
+ mock_norm_gid.assert_called_with(entry)
+ mock_chown.assert_called_with(entry.get("name"), 0, 0)
+ mock_chmod.assert_called_with(entry.get("name"),
+ int(entry.get("perms"), 8) | list(device_map.values())[0])
+ mock_utime.assert_called_with(entry.get("name"), (mtime, mtime))
+ mock_set_secontext.assert_called_with(entry, path=entry.get("name"))
+ mock_set_acls.assert_called_with(entry, path=entry.get("name"))
+
+ # test failure of chmod
+ reset()
+        mock_chown.side_effect = None
+        mock_chmod.side_effect = OSError
+ entry.set("type", "file")
+ del entry.attrib["dev_type"]
+ self.assertFalse(self.ptool._set_perms(entry))
+ mock_norm_uid.assert_called_with(entry)
+ mock_norm_gid.assert_called_with(entry)
+ mock_chown.assert_called_with(entry.get("name"), 10, 100)
+ mock_chmod.assert_called_with(entry.get("name"),
+ int(entry.get("perms"), 8))
+ mock_utime.assert_called_with(entry.get("name"), (mtime, mtime))
+ mock_set_secontext.assert_called_with(entry, path=entry.get("name"))
+ mock_set_acls.assert_called_with(entry, path=entry.get("name"))
+
+ # test that even when everything fails, we try to do it all.
+ # e.g., when chmod fails, we still try to apply acls, set
+ # selinux context, etc.
+ reset()
+        mock_chown.side_effect = OSError
+        mock_utime.side_effect = OSError
+ mock_set_acls.return_value = False
+ mock_set_secontext.return_value = False
+ self.assertFalse(self.ptool._set_perms(entry))
+ mock_norm_uid.assert_called_with(entry)
+ mock_norm_gid.assert_called_with(entry)
+ mock_chown.assert_called_with(entry.get("name"), 10, 100)
+ mock_chmod.assert_called_with(entry.get("name"),
+ int(entry.get("perms"), 8))
+ mock_utime.assert_called_with(entry.get("name"), (mtime, mtime))
+ mock_set_secontext.assert_called_with(entry, path=entry.get("name"))
+ mock_set_acls.assert_called_with(entry, path=entry.get("name"))
+
+ @skipUnless(has_acls, "ACLS not found, skipping")
+ @patchIf(has_acls, "posix1e.ACL")
+ @patchIf(has_acls, "posix1e.Entry")
+ @patch("os.path.isdir")
+ @patch("Bcfg2.Client.Tools.POSIX.base.%s._norm_uid" % test_obj.__name__)
+ @patch("Bcfg2.Client.Tools.POSIX.base.%s._norm_gid" % test_obj.__name__)
+ @patch("Bcfg2.Client.Tools.POSIX.base.%s._list_entry_acls" %
+ test_obj.__name__)
+ def test_set_acls(self, mock_list_entry_acls, mock_norm_gid, mock_norm_uid,
+ mock_isdir, mock_Entry, mock_ACL):
+ entry = lxml.etree.Element("Path", name="/etc/foo", type="file")
+
+ # disable acls for the initial test
+ Bcfg2.Client.Tools.POSIX.base.has_acls = False
+ self.assertTrue(self.ptool._set_acls(entry))
+ Bcfg2.Client.Tools.POSIX.base.has_acls = True
+
+ # build a set of file ACLs to return from posix1e.ACL(file=...)
+ file_acls = []
+ acl = Mock()
+ acl.tag_type = posix1e.ACL_USER
+ acl.name = "remove"
+ file_acls.append(acl)
+ acl = Mock()
+ acl.tag_type = posix1e.ACL_GROUP
+ acl.name = "remove"
+ file_acls.append(acl)
+ acl = Mock()
+ acl.tag_type = posix1e.ACL_MASK
+ acl.name = "keep"
+ file_acls.append(acl)
+ remove_acls = [a for a in file_acls if a.name == "remove"]
+
+ # build a set of ACLs listed on the entry as returned by
+ # _list_entry_acls()
+ entry_acls = {("default", posix1e.ACL_USER, "user"): 7,
+ ("access", posix1e.ACL_GROUP, "group"): 5}
+ mock_list_entry_acls.return_value = entry_acls
+ mock_norm_uid.return_value = 10
+ mock_norm_gid.return_value = 100
+
+ # set up the unreasonably complex return value for
+ # posix1e.ACL(), which has three separate uses
+ fileacl_rv = MagicMock()
+ fileacl_rv.valid.return_value = True
+ fileacl_rv.__iter__.return_value = iter(file_acls)
+ filedef_rv = MagicMock()
+ filedef_rv.valid.return_value = True
+ filedef_rv.__iter__.return_value = iter(file_acls)
+ acl_rv = MagicMock()
+ def mock_acl_rv(file=None, filedef=None, acl=None):
+ if file:
+ return fileacl_rv
+ elif filedef:
+ return filedef_rv
+ elif acl:
+ return acl_rv
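+ # the mock returns fileacl_rv for ACL(file=...) (a file's access ACL),
+ # filedef_rv for ACL(filedef=...) (a directory's default ACL), and
+ # acl_rv for ACL(acl=...) (a copy of an existing ACL)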
+
+ # set up the equally unreasonably complex return value for
+ # posix1e.Entry, which returns a new entry and adds it to
+ # an ACL, so we have to track the Mock objects it returns.
+ # why can't they just have an acl.add_entry() method?!?
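+ # each mock_entry_rv() call returns a fresh MagicMock and records it
+ # in acl_entries so we can later check which Entry objects were
+ # attached to which ACL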
+ acl_entries = []
+ def mock_entry_rv(acl):
+ rv = MagicMock()
+ rv.acl = acl
+ rv.permset = set()
+ acl_entries.append(rv)
+ return rv
+ mock_Entry.side_effect = mock_entry_rv
+
+ def reset():
+ mock_isdir.reset_mock()
+ mock_ACL.reset_mock()
+ mock_Entry.reset_mock()
+ fileacl_rv.reset_mock()
+
+ # test a filesystem mounted without ACL support (noacl)
+ mock_ACL.side_effect = IOError(95, "Operation not supported")
+ self.assertFalse(self.ptool._set_acls(entry))
+
+ # test other error
+ reset()
+ mock_ACL.side_effect = IOError
+ self.assertFalse(self.ptool._set_acls(entry))
+
+ reset()
+ mock_ACL.side_effect = mock_acl_rv
+ mock_isdir.return_value = True
+ self.assertTrue(self.ptool._set_acls(entry))
+ self.assertItemsEqual(mock_ACL.call_args_list,
+ [call(file=entry.get("name")),
+ call(filedef=entry.get("name"))])
+ self.assertItemsEqual(fileacl_rv.delete_entry.call_args_list,
+ [call(a) for a in remove_acls])
+ self.assertItemsEqual(filedef_rv.delete_entry.call_args_list,
+ [call(a) for a in remove_acls])
+ mock_list_entry_acls.assert_called_with(entry)
+ mock_norm_uid.assert_called_with("user")
+ mock_norm_gid.assert_called_with("group")
+ fileacl_rv.calc_mask.assert_any_call()
+ fileacl_rv.applyto.assert_called_with(entry.get("name"),
+ posix1e.ACL_TYPE_ACCESS)
+ filedef_rv.calc_mask.assert_any_call()
+ filedef_rv.applyto.assert_called_with(entry.get("name"),
+ posix1e.ACL_TYPE_DEFAULT)
+
+ # build tuples of the Entry objects that were added to the access
+ # and default ACLs so they're easier to compare for equality
+ added_acls = []
+ for acl in acl_entries:
+ added_acls.append((acl.acl, acl.tag_type, acl.qualifier,
+ sum(acl.permset)))
+ self.assertItemsEqual(added_acls,
+ [(filedef_rv, posix1e.ACL_USER, 10, 7),
+ (fileacl_rv, posix1e.ACL_GROUP, 100, 5)])
+
+ reset()
+ # have to reassign these because they're iterators, and
+ # they've already been iterated over once
+ fileacl_rv.__iter__.return_value = iter(file_acls)
+ filedef_rv.__iter__.return_value = iter(file_acls)
+ mock_list_entry_acls.reset_mock()
+ mock_norm_uid.reset_mock()
+ mock_norm_gid.reset_mock()
+ mock_isdir.return_value = False
+ acl_entries = []
+ self.assertTrue(self.ptool._set_acls(entry, path="/bin/bar"))
+ mock_ACL.assert_called_with(file="/bin/bar")
+ self.assertItemsEqual(fileacl_rv.delete_entry.call_args_list,
+ [call(a) for a in remove_acls])
+ mock_list_entry_acls.assert_called_with(entry)
+ mock_norm_gid.assert_called_with("group")
+ fileacl_rv.calc_mask.assert_any_call()
+ fileacl_rv.applyto.assert_called_with("/bin/bar",
+ posix1e.ACL_TYPE_ACCESS)
+
+ added_acls = []
+ for acl in acl_entries:
+ added_acls.append((acl.acl, acl.tag_type, acl.qualifier,
+ sum(acl.permset)))
+ self.assertItemsEqual(added_acls,
+ [(fileacl_rv, posix1e.ACL_GROUP, 100, 5)])
+
+ @skipUnless(has_selinux, "SELinux not found, skipping")
+ @patchIf(has_selinux, "selinux.restorecon")
+ @patchIf(has_selinux, "selinux.lsetfilecon")
+ def test_set_secontext(self, mock_lsetfilecon, mock_restorecon):
+ entry = lxml.etree.Element("Path", name="/etc/foo", type="file")
+
+ # disable selinux for the initial test
+ Bcfg2.Client.Tools.POSIX.base.has_selinux = False
+ self.assertTrue(self.ptool._set_secontext(entry))
+ Bcfg2.Client.Tools.POSIX.base.has_selinux = True
+
+ # no context given
+ self.assertTrue(self.ptool._set_secontext(entry))
+ self.assertFalse(mock_restorecon.called)
+ self.assertFalse(mock_lsetfilecon.called)
+
+ mock_restorecon.reset_mock()
+ mock_lsetfilecon.reset_mock()
+ entry.set("secontext", "__default__")
+ self.assertTrue(self.ptool._set_secontext(entry))
+ mock_restorecon.assert_called_with(entry.get("name"))
+ self.assertFalse(mock_lsetfilecon.called)
+
+ mock_restorecon.reset_mock()
+ mock_lsetfilecon.reset_mock()
+ mock_lsetfilecon.return_value = 0
+ entry.set("secontext", "foo_t")
+ self.assertTrue(self.ptool._set_secontext(entry))
+ self.assertFalse(mock_restorecon.called)
+ mock_lsetfilecon.assert_called_with(entry.get("name"), "foo_t")
+
+ mock_restorecon.reset_mock()
+ mock_lsetfilecon.reset_mock()
+ mock_lsetfilecon.return_value = 1
+ self.assertFalse(self.ptool._set_secontext(entry))
+ self.assertFalse(mock_restorecon.called)
+ mock_lsetfilecon.assert_called_with(entry.get("name"), "foo_t")
+
+ @patch("grp.getgrnam")
+ def test_norm_gid(self, mock_getgrnam):
+ self.assertEqual(5, self.ptool._norm_gid("5"))
+ self.assertFalse(mock_getgrnam.called)
+
+ mock_getgrnam.reset_mock()
+ mock_getgrnam.return_value = ("group", "x", 5, [])
+ self.assertEqual(5, self.ptool._norm_gid("group"))
+ mock_getgrnam.assert_called_with("group")
+
+ @patch("Bcfg2.Client.Tools.POSIX.base.%s._norm_gid" % test_obj.__name__)
+ def test_norm_entry_gid(self, mock_norm_gid):
+ entry = lxml.etree.Element("Path", name="/test", type="file",
+ group="group", owner="user")
+ mock_norm_gid.return_value = 10
+ self.assertEqual(10, self.ptool._norm_entry_gid(entry))
+ mock_norm_gid.assert_called_with(entry.get("group"))
+
+ mock_norm_gid.reset_mock()
+ mock_norm_gid.side_effect = KeyError
+ self.assertEqual(0, self.ptool._norm_entry_gid(entry))
+ mock_norm_gid.assert_called_with(entry.get("group"))
+
+ @patch("pwd.getpwnam")
+ def test_norm_uid(self, mock_getpwnam):
+ self.assertEqual(5, self.ptool._norm_uid("5"))
+ self.assertFalse(mock_getpwnam.called)
+
+ mock_getpwnam.reset_mock()
+ mock_getpwnam.return_value = ("user", "x", 5, 5, "User", "/home/user",
+ "/bin/zsh")
+ self.assertEqual(5, self.ptool._norm_uid("user"))
+ mock_getpwnam.assert_called_with("user")
+
+ @patch("Bcfg2.Client.Tools.POSIX.base.%s._norm_uid" % test_obj.__name__)
+ def test_norm_entry_uid(self, mock_norm_uid):
+ entry = lxml.etree.Element("Path", name="/test", type="file",
+ group="group", owner="user")
+ mock_norm_uid.return_value = 10
+ self.assertEqual(10, self.ptool._norm_entry_uid(entry))
+ mock_norm_uid.assert_called_with(entry.get("owner"))
+
+ mock_norm_uid.reset_mock()
+ mock_norm_uid.side_effect = KeyError
+ self.assertEqual(0, self.ptool._norm_entry_uid(entry))
+ mock_norm_uid.assert_called_with(entry.get("owner"))
+
+ def test_norm_acl_perms(self):
+ # there's basically no reasonable way to test the Permset
+ # object parsing feature without writing our own Mock object
+ # that re-implements Permset.test(), since pylibacl won't let
+ # us create standalone Entry or Permset objects.
+ self.assertEqual(5, self.ptool._norm_acl_perms("5"))
+ self.assertEqual(0, self.ptool._norm_acl_perms("55"))
+ self.assertEqual(5, self.ptool._norm_acl_perms("rx"))
+ self.assertEqual(5, self.ptool._norm_acl_perms("r-x"))
+ self.assertEqual(6, self.ptool._norm_acl_perms("wr-"))
+ self.assertEqual(0, self.ptool._norm_acl_perms("rwrw"))
+ self.assertEqual(0, self.ptool._norm_acl_perms("-"))
+ self.assertEqual(0, self.ptool._norm_acl_perms("a"))
+ self.assertEqual(6, self.ptool._norm_acl_perms("rwa"))
+ self.assertEqual(4, self.ptool._norm_acl_perms("rr"))
+
+ @patch('os.stat')
+ def test__gather_data(self, mock_stat):
+ path = '/test'
+ mock_stat.side_effect = OSError
+ self.assertFalse(self.ptool._gather_data(path)[0])
+ mock_stat.assert_called_with(path)
+
+ mock_stat.reset_mock()
+ mock_stat.side_effect = None
+ # create a return value
+ stat_rv = MagicMock()
+ def stat_getitem(key):
+ if int(key) == stat.ST_UID:
+ return 0
+ elif int(key) == stat.ST_GID:
+ return 10
+ elif int(key) == stat.ST_MODE:
+ # return extra bits in the mode to emulate a device
+ # and ensure that they're stripped
+ return int('060660', 8)
+ stat_rv.__getitem__ = Mock(side_effect=stat_getitem)
+ mock_stat.return_value = stat_rv
+
+ # disable selinux and acls for this call -- we test them
+ # separately so that we can skip those tests as appropriate
+ states = (Bcfg2.Client.Tools.POSIX.base.has_selinux,
+ Bcfg2.Client.Tools.POSIX.base.has_acls)
+ Bcfg2.Client.Tools.POSIX.base.has_selinux = False
+ Bcfg2.Client.Tools.POSIX.base.has_acls = False
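+ # _gather_data() returns (stat, owner, group, perms, secontext, acls);
+ # with SELinux and ACLs disabled the last two are None, and the device
+ # bits in the 060660 mode are stripped, leaving 0660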
+ self.assertEqual(self.ptool._gather_data(path),
+ (stat_rv, '0', '10', '0660', None, None))
+ Bcfg2.Client.Tools.POSIX.base.has_selinux, \
+ Bcfg2.Client.Tools.POSIX.base.has_acls = states
+ mock_stat.assert_called_with(path)
+
+ @skipUnless(has_selinux, "SELinux not found, skipping")
+ def test__gather_data_selinux(self):
+ context = 'system_u:object_r:root_t:s0'
+ path = '/test'
+
+ @patch('os.stat')
+ @patchIf(has_selinux, "selinux.getfilecon")
+ def inner(mock_getfilecon, mock_stat):
+ mock_getfilecon.return_value = [len(context) + 1, context]
+ mock_stat.return_value = MagicMock()
+ # disable acls for this call and test them separately
+ state = Bcfg2.Client.Tools.POSIX.base.has_acls
+ Bcfg2.Client.Tools.POSIX.base.has_acls = False
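+ # the full context is "system_u:object_r:root_t:s0"; _gather_data()
+ # should report only the type component, "root_t"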
+ self.assertEqual(self.ptool._gather_data(path)[4], 'root_t')
+ Bcfg2.Client.Tools.POSIX.base.has_acls = state
+ mock_getfilecon.assert_called_with(path)
+
+ inner()
+
+ @skipUnless(has_acls, "ACLS not found, skipping")
+ @patch('os.stat')
+ @patch("Bcfg2.Client.Tools.POSIX.base.%s._list_file_acls" %
+ test_obj.__name__)
+ def test__gather_data_acls(self, mock_list_file_acls, mock_stat):
+ acls = {("default", posix1e.ACL_USER, "testuser"): "rwx",
+ ("access", posix1e.ACL_GROUP, "testgroup"): "rx"}
+ mock_list_file_acls.return_value = acls
+ path = '/test'
+ mock_stat.return_value = MagicMock()
+ # disable selinux for this call and test it separately
+ state = Bcfg2.Client.Tools.POSIX.base.has_selinux
+ Bcfg2.Client.Tools.POSIX.base.has_selinux = False
+ self.assertItemsEqual(self.ptool._gather_data(path)[5], acls)
+ Bcfg2.Client.Tools.POSIX.base.has_selinux = state
+ mock_list_file_acls.assert_called_with(path)
+
+ @patchIf(has_selinux, "selinux.matchpathcon")
+ @patch("Bcfg2.Client.Tools.POSIX.base.%s._verify_acls" % test_obj.__name__)
+ @patch("Bcfg2.Client.Tools.POSIX.base.%s._gather_data" % test_obj.__name__)
+ @patch("Bcfg2.Client.Tools.POSIX.base.%s._norm_entry_uid" %
+ test_obj.__name__)
+ @patch("Bcfg2.Client.Tools.POSIX.base.%s._norm_entry_gid" %
+ test_obj.__name__)
+ def test_verify_metadata(self, mock_norm_gid, mock_norm_uid,
+ mock_gather_data, mock_verify_acls,
+ mock_matchpathcon):
+ entry = lxml.etree.Element("Path", name="/test", type="file",
+ group="group", owner="user", perms="664",
+ secontext='etc_t')
+ # _verify_metadata() mutates the entry, so keep a backup that lets
+ # us start fresh each time
+ orig_entry = copy.deepcopy(entry)
+
+ def reset():
+ mock_gather_data.reset_mock()
+ mock_verify_acls.reset_mock()
+ mock_norm_uid.reset_mock()
+ mock_norm_gid.reset_mock()
+ return copy.deepcopy(orig_entry)
+
+ # test nonexistent file
+ mock_gather_data.return_value = (False, None, None, None, None, None)
+ self.assertFalse(self.ptool._verify_metadata(entry))
+ self.assertEqual(entry.get("current_exists", "").lower(), "false")
+ mock_gather_data.assert_called_with(entry.get("name"))
+
+ # expected data: tuples of (entry attribute, index into the
+ # _gather_data() return value, expected value)
+ expected = [('current_owner', 1, '0'),
+ ('current_group', 2, '10'),
+ ('current_perms', 3, '0664'),
+ ('current_secontext', 4, 'etc_t')]
+ mock_norm_uid.return_value = 0
+ mock_norm_gid.return_value = 10
+ gather_data_rv = [MagicMock(), None, None, None, None, []]
+ for attr, idx, val in expected:
+ gather_data_rv[idx] = val
+
+ entry = reset()
+ mock_gather_data.return_value = tuple(gather_data_rv)
+ self.assertTrue(self.ptool._verify_metadata(entry))
+ mock_gather_data.assert_called_with(entry.get("name"))
+ mock_verify_acls.assert_called_with(entry, path=entry.get("name"))
+ self.assertEqual(entry.get("current_exists", 'true'), 'true')
+ for attr, idx, val in expected:
+ self.assertEqual(entry.get(attr), val)
+
+ # test when secontext is None
+ entry = reset()
+ gather_data_rv[4] = None
+ sestate = Bcfg2.Client.Tools.POSIX.base.has_selinux
+ Bcfg2.Client.Tools.POSIX.base.has_selinux = False
+ mock_gather_data.return_value = tuple(gather_data_rv)
+ self.assertTrue(self.ptool._verify_metadata(entry))
+ mock_gather_data.assert_called_with(entry.get("name"))
+ mock_verify_acls.assert_called_with(entry, path=entry.get("name"))
+ self.assertEqual(entry.get("current_exists", 'true'), 'true')
+ for attr, idx, val in expected:
+ if attr != 'current_secontext':
+ self.assertEqual(entry.get(attr), val)
+ Bcfg2.Client.Tools.POSIX.base.has_selinux = sestate
+
+ gather_data_rv = [MagicMock(), None, None, None, None, []]
+ for attr, idx, val in expected:
+ gather_data_rv[idx] = val
+ mock_gather_data.return_value = tuple(gather_data_rv)
+
+ mtime = 1344430414
+ entry = reset()
+ entry.set("mtime", str(mtime))
+ stat_rv = MagicMock()
+ stat_rv.__getitem__.return_value = mtime
+ gather_data_rv[0] = stat_rv
+ mock_gather_data.return_value = tuple(gather_data_rv)
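+ # stat_rv returns mtime for any index lookup, so the recorded
+ # current_mtime should match the entry's mtime and verification
+ # should succeed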
+ self.assertTrue(self.ptool._verify_metadata(entry))
+ mock_gather_data.assert_called_with(entry.get("name"))
+ mock_verify_acls.assert_called_with(entry, path=entry.get("name"))
+ self.assertEqual(entry.get("current_exists", 'true'), 'true')
+ for attr, idx, val in expected:
+ self.assertEqual(entry.get(attr), val)
+ self.assertEqual(entry.get("current_mtime"), str(mtime))
+
+ # failure modes for each checked datum: tuples of (changed
+ # attribute, index into the return value, new failing value)
+ failures = [('current_owner', 1, '10'),
+ ('current_group', 2, '100'),
+ ('current_perms', 3, '0660')]
+ if has_selinux:
+ failures.append(('current_secontext', 4, 'root_t'))
+
+ for fail_attr, fail_idx, fail_val in failures:
+ entry = reset()
+ entry.set("mtime", str(mtime))
+ gather_data_rv = [stat_rv, None, None, None, None, []]
+ for attr, idx, val in expected:
+ gather_data_rv[idx] = val
+ gather_data_rv[fail_idx] = fail_val
+ mock_gather_data.return_value = tuple(gather_data_rv)
+ self.assertFalse(self.ptool._verify_metadata(entry))
+ mock_gather_data.assert_called_with(entry.get("name"))
+ mock_verify_acls.assert_called_with(entry, path=entry.get("name"))
+ self.assertEqual(entry.get("current_exists", 'true'), 'true')
+ self.assertEqual(entry.get(fail_attr), fail_val)
+ for attr, idx, val in expected:
+ if attr != fail_attr:
+ self.assertEqual(entry.get(attr), val)
+ self.assertEqual(entry.get("current_mtime"), str(mtime))
+
+ # failure mode for mtime
+ fail_mtime = 1344431162
+ entry = reset()
+ entry.set("mtime", str(mtime))
+ fail_stat_rv = MagicMock()
+ fail_stat_rv.__getitem__.return_value = fail_mtime
+ gather_data_rv = [fail_stat_rv, None, None, None, None, []]
+ for attr, idx, val in expected:
+ gather_data_rv[idx] = val
+ mock_gather_data.return_value = tuple(gather_data_rv)
+ self.assertFalse(self.ptool._verify_metadata(entry))
+ mock_gather_data.assert_called_with(entry.get("name"))
+ mock_verify_acls.assert_called_with(entry, path=entry.get("name"))
+ self.assertEqual(entry.get("current_exists", 'true'), 'true')
+ for attr, idx, val in expected:
+ self.assertEqual(entry.get(attr), val)
+ self.assertEqual(entry.get("current_mtime"), str(fail_mtime))
+
+ if has_selinux:
+ # test success and failure for __default__ secontext
+ entry = reset()
+ entry.set("mtime", str(mtime))
+ entry.set("secontext", "__default__")
+
+ context1 = "system_u:object_r:etc_t:s0"
+ context2 = "system_u:object_r:root_t:s0"
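+ # with secontext="__default__", the file's actual context type
+ # (etc_t, from the expected data above) is compared against the
+ # type in the matchpathcon result, so context1 should verify and
+ # context2 should not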
+ mock_matchpathcon.return_value = [1 + len(context1),
+ context1]
+ gather_data_rv = [stat_rv, None, None, None, None, []]
+ for attr, idx, val in expected:
+ gather_data_rv[idx] = val
+ mock_gather_data.return_value = tuple(gather_data_rv)
+ self.assertTrue(self.ptool._verify_metadata(entry))
+ mock_gather_data.assert_called_with(entry.get("name"))
+ mock_verify_acls.assert_called_with(entry,
+ path=entry.get("name"))
+ mock_matchpathcon.assert_called_with(entry.get("name"), 0)
+ self.assertEqual(entry.get("current_exists", 'true'), 'true')
+ for attr, idx, val in expected:
+ self.assertEqual(entry.get(attr), val)
+ self.assertEqual(entry.get("current_mtime"), str(mtime))
+
+ entry = reset()
+ entry.set("mtime", str(mtime))
+ entry.set("secontext", "__default__")
+ mock_matchpathcon.return_value = [1 + len(context2),
+ context2]
+ self.assertFalse(self.ptool._verify_metadata(entry))
+ mock_gather_data.assert_called_with(entry.get("name"))
+ mock_verify_acls.assert_called_with(entry,
+ path=entry.get("name"))
+ mock_matchpathcon.assert_called_with(entry.get("name"), 0)
+ self.assertEqual(entry.get("current_exists", 'true'), 'true')
+ for attr, idx, val in expected:
+ self.assertEqual(entry.get(attr), val)
+ self.assertEqual(entry.get("current_mtime"), str(mtime))
+
+ @skipUnless(has_acls, "ACLS not found, skipping")
+ def test_list_entry_acls(self):
+ entry = lxml.etree.Element("Path", name="/test", type="file")
+ lxml.etree.SubElement(entry, "ACL", scope="user", type="default",
+ user="user", perms="rwx")
+ lxml.etree.SubElement(entry, "ACL", scope="group", type="access",
+ group="group", perms="5")
+ self.assertItemsEqual(self.ptool._list_entry_acls(entry),
+ {("default", posix1e.ACL_USER, "user"): 7,
+ ("access", posix1e.ACL_GROUP, "group"): 5})
+
+ @skipUnless(has_acls, "ACLS not found, skipping")
+ @patch("pwd.getpwuid")
+ @patch("grp.getgrgid")
+ @patch("os.path.isdir")
+ def test_list_file_acls(self, mock_isdir, mock_getgrgid, mock_getpwuid,
+ mock_ACL):
+ path = '/test'
+
+ # build a set of file ACLs to return from posix1e.ACL(file=...)
+ file_acls = []
+ acl = Mock()
+ acl.tag_type = posix1e.ACL_USER
+ acl.qualifier = 10
+ # yes, this is a bogus permset. thanks to _norm_acl_perms
+ # it works and is easier than many of the alternatives.
+ acl.permset = 'rwx'
+ file_acls.append(acl)
+ acl = Mock()
+ acl.tag_type = posix1e.ACL_GROUP
+ acl.qualifier = 100
+ acl.permset = 'rx'
+ file_acls.append(acl)
+ acl = Mock()
+ acl.tag_type = posix1e.ACL_MASK
+ file_acls.append(acl)
+ acls = {("access", posix1e.ACL_USER, "user"): 7,
+ ("access", posix1e.ACL_GROUP, "group"): 5}
+
+ # set up the unreasonably complex return value for
+ # posix1e.ACL(), which has two separate uses
+ fileacl_rv = MagicMock()
+ fileacl_rv.valid.return_value = True
+ fileacl_rv.__iter__.return_value = iter(file_acls)
+ filedef_rv = MagicMock()
+ filedef_rv.valid.return_value = True
+ filedef_rv.__iter__.return_value = iter(file_acls)
+ def mock_acl_rv(file=None, filedef=None):
+ if file:
+ return fileacl_rv
+ elif filedef:
+ return filedef_rv
+ # other return values
+ mock_isdir.return_value = False
+ mock_getgrgid.return_value = ("group", "x", 5, [])
+ mock_getpwuid.return_value = ("user", "x", 5, 5, "User",
+ "/home/user", "/bin/zsh")
+
+ def reset():
+ mock_isdir.reset_mock()
+ mock_getgrgid.reset_mock()
+ mock_getpwuid.reset_mock()
+ mock_ACL.reset_mock()
+
+ mock_ACL.side_effect = IOError(95, "Operation not supported")
+ self.assertItemsEqual(self.ptool._list_file_acls(path), dict())
+
+ reset()
+ mock_ACL.side_effect = IOError
+ self.assertItemsEqual(self.ptool._list_file_acls(path), dict())
+
+ reset()
+ mock_ACL.side_effect = mock_acl_rv
+ self.assertItemsEqual(self.ptool._list_file_acls(path), acls)
+ mock_isdir.assert_called_with(path)
+ mock_getgrgid.assert_called_with(100)
+ mock_getpwuid.assert_called_with(10)
+ mock_ACL.assert_called_with(file=path)
+
+ reset()
+ mock_isdir.return_value = True
+ fileacl_rv.__iter__.return_value = iter(file_acls)
+ filedef_rv.__iter__.return_value = iter(file_acls)
+
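+ # for a directory, _list_file_acls() should also read the default
+ # ACL, so every access entry appears again under a 'default' scope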
+ defacls = dict(acls)
+ for akey, perms in acls.items():
+ defacls[('default', akey[1], akey[2])] = perms
+ self.assertItemsEqual(self.ptool._list_file_acls(path), defacls)
+ mock_isdir.assert_called_with(path)
+ self.assertItemsEqual(mock_getgrgid.call_args_list,
+ [call(100), call(100)])
+ self.assertItemsEqual(mock_getpwuid.call_args_list,
+ [call(10), call(10)])
+ self.assertItemsEqual(mock_ACL.call_args_list,
+ [call(file=path), call(filedef=path)])
+
+ if has_acls:
+ # python 2.6 applies decorators at compile-time, not at
+ # run-time, so we can't do these as decorators because
+ # pylibacl might not be installed. (If it's not, this test
+ # will be skipped, so as long as this is done at run-time
+ # we're safe.)
+ test_list_file_acls = patch("posix1e.ACL")(test_list_file_acls)
+
+ @skipUnless(has_acls, "ACLS not found, skipping")
+ @patch("Bcfg2.Client.Tools.POSIX.base.%s._list_file_acls" %
+ test_obj.__name__)
+ @patch("Bcfg2.Client.Tools.POSIX.base.%s._list_entry_acls" %
+ test_obj.__name__)
+ def test_verify_acls(self, mock_list_entry_acls, mock_list_file_acls):
+ entry = lxml.etree.Element("Path", name="/test", type="file")
+ # we can't verify that errors get sorted correctly into
+ # (missing, extra, wrong) without refactoring the _verify_acls
+ # code, so for now we just test that failures are detected at
+ # all.
+
+ acls = {("access", posix1e.ACL_USER, "user"): 7,
+ ("default", posix1e.ACL_GROUP, "group"): 5}
+ extra_acls = copy.deepcopy(acls)
+ extra_acls[("access", posix1e.ACL_USER, "user2")] = 4
+
+ mock_list_entry_acls.return_value = acls
+ mock_list_file_acls.return_value = acls
+ self.assertTrue(self.ptool._verify_acls(entry))
+ mock_list_entry_acls.assert_called_with(entry)
+ mock_list_file_acls.assert_called_with(entry.get("name"))
+
+ # test missing
+ mock_list_entry_acls.reset_mock()
+ mock_list_file_acls.reset_mock()
+ mock_list_file_acls.return_value = extra_acls
+ self.assertFalse(self.ptool._verify_acls(entry))
+ mock_list_entry_acls.assert_called_with(entry)
+ mock_list_file_acls.assert_called_with(entry.get("name"))
+
+ # test extra
+ mock_list_entry_acls.reset_mock()
+ mock_list_file_acls.reset_mock()
+ mock_list_entry_acls.return_value = extra_acls
+ mock_list_file_acls.return_value = acls
+ self.assertFalse(self.ptool._verify_acls(entry))
+ mock_list_entry_acls.assert_called_with(entry)
+ mock_list_file_acls.assert_called_with(entry.get("name"))
+
+ # test wrong
+ wrong_acls = copy.deepcopy(extra_acls)
+ wrong_acls[("access", posix1e.ACL_USER, "user2")] = 5
+ mock_list_entry_acls.reset_mock()
+ mock_list_file_acls.reset_mock()
+ mock_list_entry_acls.return_value = extra_acls
+ mock_list_file_acls.return_value = wrong_acls
+ self.assertFalse(self.ptool._verify_acls(entry))
+ mock_list_entry_acls.assert_called_with(entry)
+ mock_list_file_acls.assert_called_with(entry.get("name"))
+
+ @patch("os.makedirs")
+ @patch("os.path.exists")
+ @patch("Bcfg2.Client.Tools.POSIX.base.%s._set_perms" % test_obj.__name__)
+ def test_makedirs(self, mock_set_perms, mock_exists, mock_makedirs):
+ entry = lxml.etree.Element("Path", name="/test/foo/bar",
+ type="directory")
+
+ def reset():
+ mock_exists.reset_mock()
+ mock_set_perms.reset_mock()
+ mock_makedirs.reset_mock()
+
+ mock_set_perms.return_value = True
+ def path_exists_rv(path):
+ if path == "/test":
+ return True
+ else:
+ return False
+ mock_exists.side_effect = path_exists_rv
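+ # only "/test" already exists, so _makedirs() should create the
+ # missing path components and set perms on each of them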
+ self.assertTrue(self.ptool._makedirs(entry))
+ self.assertItemsEqual(mock_exists.call_args_list,
+ [call("/test"), call("/test/foo"),
+ call("/test/foo/bar")])
+ self.assertItemsEqual(mock_set_perms.call_args_list,
+ [call(entry, path="/test/foo"),
+ call(entry, path="/test/foo/bar")])
+ mock_makedirs.assert_called_with(entry.get("name"))
+
+ reset()
+ mock_makedirs.side_effect = OSError
+ self.assertFalse(self.ptool._makedirs(entry))
+ self.assertItemsEqual(mock_set_perms.call_args_list,
+ [call(entry, path="/test/foo"),
+ call(entry, path="/test/foo/bar")])
+
+ reset()
+ mock_makedirs.side_effect = None
+ def set_perms_rv(entry, path=None):
+ if path == '/test/foo':
+ return False
+ else:
+ return True
+ mock_set_perms.side_effect = set_perms_rv
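+ # a failure setting perms on /test/foo should make _makedirs()
+ # return False, but it should still handle /test/foo/bar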
+ self.assertFalse(self.ptool._makedirs(entry))
+ self.assertItemsEqual(mock_exists.call_args_list,
+ [call("/test"), call("/test/foo"),
+ call("/test/foo/bar")])
+ self.assertItemsEqual(mock_set_perms.call_args_list,
+ [call(entry, path="/test/foo"),
+ call(entry, path="/test/foo/bar")])
+ mock_makedirs.assert_called_with(entry.get("name"))
diff --git a/testsuite/Testlib/TestClient/TestTools/TestPOSIX/__init__.py b/testsuite/Testlib/TestClient/TestTools/TestPOSIX/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/testsuite/Testlib/TestClient/TestTools/TestPOSIX/__init__.py
diff --git a/testsuite/Testlib/TestClient/TestTools/__init__.py b/testsuite/Testlib/TestClient/TestTools/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/testsuite/Testlib/TestClient/TestTools/__init__.py
diff --git a/testsuite/Testlib/TestClient/__init__.py b/testsuite/Testlib/TestClient/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/testsuite/Testlib/TestClient/__init__.py
diff --git a/testsuite/Testlib/TestOptions.py b/testsuite/Testlib/TestOptions.py
index f5833a54a..acb5c9bfc 100644
--- a/testsuite/Testlib/TestOptions.py
+++ b/testsuite/Testlib/TestOptions.py
@@ -1,39 +1,161 @@
import os
import sys
-import unittest
-from mock import Mock, patch
-import Bcfg2.Options
+from mock import Mock, MagicMock, patch
+from Bcfg2.Options import *
+from Bcfg2.Bcfg2Py3k import ConfigParser
-class TestOption(unittest.TestCase):
+# add all parent testsuite directories to sys.path to allow (most)
+# relative imports in python 2.4
+path = os.path.dirname(__file__)
+while path != '/':
+ if os.path.basename(path).lower().startswith("test"):
+ sys.path.append(path)
+ if os.path.basename(path) == "testsuite":
+ break
+ path = os.path.dirname(path)
+from common import XI_NAMESPACE, XI, inPy3k, call, builtins, u, can_skip, \
+ skip, skipIf, skipUnless, Bcfg2TestCase, DBModelTestCase, syncdb, \
+ patchIf, datastore
+
+class TestDefaultConfigParser(Bcfg2TestCase):
+ @patch("%s.ConfigParser.get" % ConfigParser.__name__)
+ def test_get(self, mock_get):
+ dcp = DefaultConfigParser()
+ mock_get.return_value = "foo"
+ self.assertEqual(dcp.get("section", "option"), "foo")
+ mock_get.assert_called_with(dcp, "section", "option")
+
+ mock_get.reset_mock()
+ self.assertEqual(dcp.get("section", "option",
+ default="bar", other="test"), "foo")
+ mock_get.assert_called_with(dcp, "section", "option", other="test")
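+ # the default keyword is consumed by DefaultConfigParser.get() and
+ # not passed on to ConfigParser.get(), while other keyword args are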
+
+ for etype, err in [(ConfigParser.NoOptionError,
+ ConfigParser.NoOptionError(None, None)),
+ (ConfigParser.NoSectionError,
+ ConfigParser.NoSectionError(None))]:
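+ # when the underlying parser raises, the default is returned if
+ # one was given; otherwise the error propagates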
+ mock_get.side_effect = err
+ mock_get.reset_mock()
+ self.assertEqual(dcp.get("section", "option", default="bar"), "bar")
+ mock_get.assert_called_with(dcp, "section", "option")
+
+ mock_get.reset_mock()
+ self.assertRaises(etype, dcp.get, "section", "option")
+ mock_get.assert_called_with(dcp, "section", "option")
+
+ @patch("%s.ConfigParser.getboolean" % ConfigParser.__name__)
+ def test_getboolean(self, mock_getboolean):
+ dcp = DefaultConfigParser()
+ mock_getboolean.return_value = True
+ self.assertEqual(dcp.getboolean("section", "option"), True)
+ mock_getboolean.assert_called_with(dcp, "section", "option")
+
+ mock_getboolean.reset_mock()
+ self.assertEqual(dcp.getboolean("section", "option",
+ default=False, other="test"), True)
+ mock_getboolean.assert_called_with(dcp, "section", "option",
+ other="test")
+
+ for etype, err in [(ConfigParser.NoOptionError,
+ ConfigParser.NoOptionError(None, None)),
+ (ConfigParser.NoSectionError,
+ ConfigParser.NoSectionError(None))]:
+ mock_getboolean.side_effect = err
+ mock_getboolean.reset_mock()
+ self.assertEqual(dcp.getboolean("section", "option", default=False),
+ False)
+ mock_getboolean.assert_called_with(dcp, "section", "option")
+
+ mock_getboolean.reset_mock()
+ self.assertRaises(etype, dcp.getboolean, "section", "option")
+ mock_getboolean.assert_called_with(dcp, "section", "option")
+
+
+class TestOption(Bcfg2TestCase):
def test__init(self):
- self.assertRaises(Bcfg2.Options.OptionFailure,
- Bcfg2.Options.Option,
+ self.assertRaises(OptionFailure,
+ Option,
'foo', False, cmd='f')
- self.assertRaises(Bcfg2.Options.OptionFailure,
- Bcfg2.Options.Option,
+ self.assertRaises(OptionFailure,
+ Option,
'foo', False, cmd='--f')
- self.assertRaises(Bcfg2.Options.OptionFailure,
- Bcfg2.Options.Option,
+ self.assertRaises(OptionFailure,
+ Option,
'foo', False, cmd='-foo')
- self.assertRaises(Bcfg2.Options.OptionFailure,
- Bcfg2.Options.Option,
+ self.assertRaises(OptionFailure,
+ Option,
'foo', False, cmd='-foo', long_arg=True)
+ opt = Option('foo', False)
+ self.assertTrue(opt.boolean)
+ opt = Option('foo', False, odesc='<val>')
+ self.assertFalse(opt.boolean)
+ opt = Option('foo', False, cook=get_bool)
+ self.assertFalse(opt.boolean)
+ opt = Option('foo', "foo")
+ self.assertFalse(opt.boolean)
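+ # an Option is only treated as boolean when its default is a bool
+ # and neither odesc nor cook is given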
+
+ def test_get_cooked_value(self):
+ opt = Option('foo', False)
+ opt.boolean = True
+ self.assertTrue(opt.get_cooked_value("anything"))
- @patch('ConfigParser.ConfigParser')
- @patch('__builtin__.open')
- def test_getCFP(self, mock_open, mock_cp):
- mock_cp.return_value = Mock()
- o = Bcfg2.Options.Option('foo', False, cmd='-f')
- self.assertFalse(o._Option__cfp)
- o.getCFP()
- mock_cp.assert_any_call()
- mock_open.assert_any_call(Bcfg2.Options.DEFAULT_CONFIG_LOCATION)
- self.assertTrue(mock_cp.return_value.readfp.called)
+ opt = Option('foo', 'foo')
+ opt.boolean = False
+ opt.cook = False
+ self.assertEqual("foo", opt.get_cooked_value("foo"))
- @patch('Bcfg2.Options.Option.cfp')
- def test_parse(self, mock_cfp):
+ opt = Option('foo', 'foo')
+ opt.boolean = False
+ opt.cook = Mock()
+ self.assertEqual(opt.cook.return_value, opt.get_cooked_value("foo"))
+ opt.cook.assert_called_with("foo")
+
+ def test_buildHelpMessage(self):
+ opt = Option('foo', False)
+ self.assertEqual(opt.buildHelpMessage(), '')
+
+ opt = Option('foo', False, '-f')
+ self.assertEqual(opt.buildHelpMessage().split(),
+ ["-f", "foo"])
+
+ opt = Option('foo', False, cmd="--foo", long_arg=True)
+ self.assertEqual(opt.buildHelpMessage().split(),
+ ["--foo", "foo"])
+
+ opt = Option('foo', False, cmd="-f", odesc='<val>')
+ self.assertEqual(opt.buildHelpMessage().split(),
+ ["-f", "<val>", "foo"])
+
+ opt = Option('foo', False, cmd="--foo", long_arg=True, odesc='<val>')
+ self.assertEqual(opt.buildHelpMessage().split(),
+ ["--foo=<val>", "foo"])
+
+ def test_buildGetopt(self):
+ opt = Option('foo', False)
+ self.assertEqual(opt.buildGetopt(), '')
+
+ opt = Option('foo', False, '-f')
+ self.assertEqual(opt.buildGetopt(), "f")
+
+ opt = Option('foo', False, cmd="--foo", long_arg=True)
+ self.assertEqual(opt.buildGetopt(), '')
+
+ opt = Option('foo', False, cmd="-f", odesc='<val>')
+ self.assertEqual(opt.buildGetopt(), 'f:')
+
+ opt = Option('foo', False, cmd="--foo", long_arg=True, odesc='<val>')
+ self.assertEqual(opt.buildGetopt(), '')
+
+ def test_buildLongGetopt(self):
+ opt = Option('foo', False, cmd="--foo", long_arg=True)
+ self.assertEqual(opt.buildLongGetopt(), 'foo')
+
+ opt = Option('foo', False, cmd="--foo", long_arg=True, odesc='<val>')
+ self.assertEqual(opt.buildLongGetopt(), 'foo=')
+
+ def test_parse(self):
cf = ('communication', 'password')
- o = Bcfg2.Options.Option('foo', 'test4', cmd='-F', env='TEST2',
+ o = Option('foo', default='test4', cmd='-F', env='TEST2',
odesc='bar', cf=cf)
o.parse([], ['-F', 'test'])
self.assertEqual(o.value, 'test')
@@ -45,80 +167,72 @@ class TestOption(unittest.TestCase):
self.assertEqual(o.value, 'test3')
del os.environ['TEST2']
- mock_cfp.get = Mock()
- mock_cfp.get.return_value = 'test5'
- o.parse([], [])
+ cfp = DefaultConfigParser()
+ cfp.get = Mock()
+ cfp.get.return_value = 'test5'
+ o.parse([], [], configparser=cfp)
+ cfp.get.assert_any_call(*cf)
self.assertEqual(o.value, 'test5')
- mock_cfp.get.assert_any_call(*cf)
o.cf = False
o.parse([], [])
assert o.value == 'test4'
- def test_cook(self):
- # check that default value isn't cooked
- o1 = Bcfg2.Options.Option('foo', 'test4', cook=Bcfg2.Options.bool_cook)
- o1.parse([], [])
- assert o1.value == 'test4'
- o2 = Bcfg2.Options.Option('foo', False, cmd='-F')
- o2.parse([('-F', '')], [])
- assert o2.value == True
-
-class TestOptionSet(unittest.TestCase):
+class TestOptionSet(Bcfg2TestCase):
def test_buildGetopt(self):
- opts = [('foo', Bcfg2.Options.Option('foo', 'test1', cmd='-G')),
- ('bar', Bcfg2.Options.Option('foo', 'test2')),
- ('baz', Bcfg2.Options.Option('foo', 'test1', cmd='-H',
+ opts = [('foo', Option('foo', 'test1', cmd='-G')),
+ ('bar', Option('foo', 'test2')),
+ ('baz', Option('foo', 'test1', cmd='-H',
odesc='1'))]
- oset = Bcfg2.Options.OptionSet(opts)
+ oset = OptionSet(opts)
res = oset.buildGetopt()
self.assertIn('H:', res)
self.assertIn('G', res)
self.assertEqual(len(res), 3)
def test_buildLongGetopt(self):
- opts = [('foo', Bcfg2.Options.Option('foo', 'test1', cmd='-G')),
- ('bar', Bcfg2.Options.Option('foo', 'test2')),
- ('baz', Bcfg2.Options.Option('foo', 'test1', cmd='--H',
+ opts = [('foo', Option('foo', 'test1', cmd='-G')),
+ ('bar', Option('foo', 'test2')),
+ ('baz', Option('foo', 'test1', cmd='--H',
odesc='1', long_arg=True))]
- oset = Bcfg2.Options.OptionSet(opts)
+ oset = OptionSet(opts)
res = oset.buildLongGetopt()
self.assertIn('H=', res)
self.assertEqual(len(res), 1)
def test_parse(self):
- opts = [('foo', Bcfg2.Options.Option('foo', 'test1', cmd='-G')),
- ('bar', Bcfg2.Options.Option('foo', 'test2')),
- ('baz', Bcfg2.Options.Option('foo', 'test1', cmd='-H',
+ opts = [('foo', Option('foo', 'test1', cmd='-G')),
+ ('bar', Option('foo', 'test2')),
+ ('baz', Option('foo', 'test1', cmd='-H',
odesc='1'))]
- oset = Bcfg2.Options.OptionSet(opts)
+ oset = OptionSet(opts)
self.assertRaises(SystemExit,
oset.parse,
['-G', '-H'])
- oset2 = Bcfg2.Options.OptionSet(opts)
+ oset2 = OptionSet(opts)
self.assertRaises(SystemExit,
oset2.parse,
['-h'])
- oset3 = Bcfg2.Options.OptionSet(opts)
+ oset3 = OptionSet(opts)
oset3.parse(['-G'])
self.assertTrue(oset3['foo'])
-class TestOptionParser(unittest.TestCase):
+class TestOptionParser(Bcfg2TestCase):
def test__init(self):
- opts = [('foo', Bcfg2.Options.Option('foo', 'test1', cmd='-h')),
- ('bar', Bcfg2.Options.Option('foo', 'test2')),
- ('baz', Bcfg2.Options.Option('foo', 'test1', cmd='-H',
+ opts = [('foo', Option('foo', 'test1', cmd='-h')),
+ ('bar', Option('foo', 'test2')),
+ ('baz', Option('foo', 'test1', cmd='-H',
odesc='1'))]
- oset1 = Bcfg2.Options.OptionParser(opts)
- self.assertEqual(Bcfg2.Options.Option.cfpath,
- Bcfg2.Options.DEFAULT_CONFIG_LOCATION)
+ oset1 = OptionParser(opts)
+ self.assertEqual(oset1.cfile,
+ DEFAULT_CONFIG_LOCATION)
sys.argv = ['foo', '-C', '/usr/local/etc/bcfg2.conf']
- oset2 = Bcfg2.Options.OptionParser(opts)
- self.assertEqual(Bcfg2.Options.Option.cfpath,
+ oset2 = OptionParser(opts)
+ self.assertEqual(oset2.cfile,
'/usr/local/etc/bcfg2.conf')
sys.argv = []
- oset3 = Bcfg2.Options.OptionParser(opts)
- self.assertEqual(Bcfg2.Options.Option.cfpath,
- Bcfg2.Options.DEFAULT_CONFIG_LOCATION)
+ oset3 = OptionParser(opts)
+ self.assertEqual(oset3.cfile,
+ DEFAULT_CONFIG_LOCATION)
diff --git a/testsuite/Testlib/TestServer/TestPlugin.py b/testsuite/Testlib/TestServer/TestPlugin.py
new file mode 100644
index 000000000..c2dee9962
--- /dev/null
+++ b/testsuite/Testlib/TestServer/TestPlugin.py
@@ -0,0 +1,2296 @@
+import os
+import re
+import sys
+import copy
+import logging
+import lxml.etree
+import Bcfg2.Server
+from Bcfg2.Bcfg2Py3k import reduce
+from mock import Mock, MagicMock, patch
+from Bcfg2.Server.Plugin import *
+
+# add all parent testsuite directories to sys.path to allow (most)
+# relative imports in python 2.4
+path = os.path.dirname(__file__)
+while path != '/':
+ if os.path.basename(path).lower().startswith("test"):
+ sys.path.append(path)
+ if os.path.basename(path) == "testsuite":
+ break
+ path = os.path.dirname(path)
+from common import XI_NAMESPACE, XI, inPy3k, call, builtins, u, can_skip, \
+ skip, skipIf, skipUnless, Bcfg2TestCase, DBModelTestCase, syncdb, \
+ patchIf, datastore
+
+
+try:
+ re_type = re._pattern_type
+except AttributeError:
+ re_type = type(re.compile(""))
+
+def tostring(el):
+ return lxml.etree.tostring(el, xml_declaration=False).decode('UTF-8')
+
+
+class FakeElementTree(lxml.etree._ElementTree):
+ xinclude = Mock()
+
+
+class TestFunctions(Bcfg2TestCase):
+ def test_bind_info(self):
+ entry = lxml.etree.Element("Path", name="/test")
+ metadata = Mock()
+ default = dict(test1="test1", test2="test2")
+ # test without infoxml
+ bind_info(entry, metadata, default=default)
+ self.assertItemsEqual(entry.attrib,
+ dict(test1="test1",
+ test2="test2",
+ name="/test"))
+
+ # test with bogus infoxml
+ entry = lxml.etree.Element("Path", name="/test")
+ infoxml = Mock()
+ self.assertRaises(PluginExecutionError,
+ bind_info,
+ entry, metadata, infoxml=infoxml)
+ infoxml.pnode.Match.assert_called_with(metadata, dict(), entry=entry)
+
+ # test with valid infoxml
+ entry = lxml.etree.Element("Path", name="/test")
+ infoxml.reset_mock()
+ infodata = {None: {"test3": "test3", "test4": "test4"}}
+ def infoxml_rv(metadata, rv, entry=None):
+ rv['Info'] = infodata
+ infoxml.pnode.Match.side_effect = infoxml_rv
+ bind_info(entry, metadata, infoxml=infoxml, default=default)
+ # mock objects don't properly track the called-with value of
+ # arguments that the function mutates, so Match() appears to have
+ # been called with the final value of the mdata arg rather than the
+ # initial value, which makes this assertion less meaningful than it
+ # looks.
+ infoxml.pnode.Match.assert_called_with(metadata, dict(Info=infodata),
+ entry=entry)
+ self.assertItemsEqual(entry.attrib,
+ dict(test1="test1",
+ test2="test2",
+ test3="test3",
+ test4="test4",
+ name="/test"))
+
+
+class TestPluginInitError(Bcfg2TestCase):
+ """ placeholder for future tests """
+ pass
+
+
+class TestPluginExecutionError(Bcfg2TestCase):
+ """ placeholder for future tests """
+ pass
+
+
+class TestDebuggable(Bcfg2TestCase):
+ test_obj = Debuggable
+
+ def get_obj(self):
+ return self.test_obj()
+
+ def test__init(self):
+ d = self.get_obj()
+ self.assertIsInstance(d.logger, logging.Logger)
+ self.assertFalse(d.debug_flag)
+
+ @patch("Bcfg2.Server.Plugin.%s.debug_log" % test_obj.__name__)
+ def test_toggle_debug(self, mock_debug):
+ d = self.get_obj()
+ orig = d.debug_flag
+ d.toggle_debug()
+ self.assertNotEqual(orig, d.debug_flag)
+ self.assertTrue(mock_debug.called)
+
+ mock_debug.reset_mock()
+
+ changed = d.debug_flag
+ d.toggle_debug()
+ self.assertNotEqual(changed, d.debug_flag)
+ self.assertEqual(orig, d.debug_flag)
+ self.assertTrue(mock_debug.called)
+
+ def test_debug_log(self):
+ d = self.get_obj()
+ d.logger = Mock()
+ d.debug_flag = False
+ d.debug_log("test")
+ self.assertFalse(d.logger.error.called)
+
+ d.logger.reset_mock()
+ d.debug_log("test", flag=True)
+ self.assertTrue(d.logger.error.called)
+
+ d.logger.reset_mock()
+ d.debug_flag = True
+ d.debug_log("test")
+ self.assertTrue(d.logger.error.called)
+
+
+class TestPlugin(TestDebuggable):
+ test_obj = Plugin
+
+ def get_obj(self, core=None):
+ if core is None:
+ core = Mock()
+ return self.test_obj(core, datastore)
+
+ def test__init(self):
+ core = Mock()
+ p = self.get_obj(core=core)
+ self.assertEqual(p.data, os.path.join(datastore, p.name))
+ self.assertEqual(p.core, core)
+ self.assertIsInstance(p, Debuggable)
+
+ @patch("os.makedirs")
+ def test_init_repo(self, mock_makedirs):
+ self.test_obj.init_repo(datastore)
+ mock_makedirs.assert_called_with(os.path.join(datastore,
+ self.test_obj.name))
+
+
+class TestDatabaseBacked(TestPlugin):
+ test_obj = DatabaseBacked
+
+ @skipUnless(has_django, "Django not found")
+ def test__use_db(self):
+ core = Mock()
+ core.setup.cfp.getboolean.return_value = True
+ db = self.get_obj(core)
+ self.assertTrue(db._use_db)
+
+ core = Mock()
+ core.setup.cfp.getboolean.return_value = False
+ db = self.get_obj(core)
+ self.assertFalse(db._use_db)
+
+ Bcfg2.Server.Plugin.has_django = False
+ core = Mock()
+ db = self.get_obj(core)
+ self.assertFalse(db._use_db)
+
+ core = Mock()
+ core.setup.cfp.getboolean.return_value = True
+ db = self.get_obj(core)
+ self.assertFalse(db._use_db)
+ Bcfg2.Server.Plugin.has_django = True
+
+
+class TestPluginDatabaseModel(Bcfg2TestCase):
+ """ placeholder for future tests """
+ pass
+
+
+class TestGenerator(Bcfg2TestCase):
+ test_obj = Generator
+
+ def test_HandlesEntry(self):
+ pass
+
+ def test_HandleEntry(self):
+ pass
+
+
+class TestStructure(Bcfg2TestCase):
+ test_obj = Structure
+
+ def get_obj(self):
+ return self.test_obj()
+
+ def test_BuildStructures(self):
+ s = self.get_obj()
+ self.assertRaises(NotImplementedError,
+ s.BuildStructures, None)
+
+
+class TestMetadata(Bcfg2TestCase):
+ test_obj = Metadata
+
+ def get_obj(self):
+ return self.test_obj()
+
+ def test_get_initial_metadata(self):
+ m = self.get_obj()
+ self.assertRaises(NotImplementedError,
+ m.get_initial_metadata, None)
+
+ def test_merge_additional_data(self):
+ m = self.get_obj()
+ self.assertRaises(NotImplementedError,
+ m.merge_additional_data, None, None, None)
+
+ def test_merge_additional_groups(self):
+ m = self.get_obj()
+ self.assertRaises(NotImplementedError,
+ m.merge_additional_groups, None, None)
+
+
+class TestConnector(Bcfg2TestCase):
+ """ placeholder """
+ def test_get_additional_groups(self):
+ pass
+
+ def test_get_additional_data(self):
+ pass
+
+
+class TestProbing(Bcfg2TestCase):
+ """ placeholder """
+ def test_GetProbes(self):
+ pass
+
+ def test_ReceiveData(self):
+ pass
+
+
+class TestStatistics(TestPlugin):
+ test_obj = Statistics
+
+ def test_process_statistics(self):
+ pass
+
+
+class TestThreadedStatistics(TestStatistics):
+ test_obj = ThreadedStatistics
+ data = [("foo.example.com", "<foo/>"),
+ ("bar.example.com", "<bar/>")]
+
+ @patch("threading.Thread.start")
+ def test__init(self, mock_start):
+ core = Mock()
+ ts = self.get_obj(core)
+ mock_start.assert_any_call()
+
+ @patch("%s.open" % builtins)
+ @patch("%s.dump" % cPickle.__name__)
+ @patch("Bcfg2.Server.Plugin.ThreadedStatistics.run", Mock())
+ def test_save(self, mock_dump, mock_open):
+ core = Mock()
+ ts = self.get_obj(core)
+ queue = Mock()
+ queue.empty = Mock(side_effect=Empty)
+ ts.work_queue = queue
+
+ mock_open.side_effect = OSError
+ # test that save does _not_ raise an exception even when
+ # everything goes pear-shaped
+ ts.save()
+ queue.empty.assert_any_call()
+ mock_open.assert_called_with(ts.pending_file, 'w')
+
+ queue.reset_mock()
+ mock_open.reset_mock()
+
+ queue.data = []
+ for hostname, xml in self.data:
+ md = Mock()
+ md.hostname = hostname
+ queue.data.append((md, lxml.etree.XML(xml)))
+ queue.empty.side_effect = lambda: len(queue.data) == 0
+ queue.get_nowait = Mock(side_effect=lambda: queue.data.pop())
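+ # get_nowait() drains queue.data one item at a time and empty()
+ # reports True once it's exhausted, so save() should pickle every
+ # pending (metadata, xml) pair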
+ mock_open.side_effect = None
+
+ ts.save()
+ queue.empty.assert_any_call()
+ queue.get_nowait.assert_any_call()
+ mock_open.assert_called_with(ts.pending_file, 'w')
+ mock_open.return_value.close.assert_any_call()
+ # the order of the queue data gets changed, so we have to
+ # verify this call in an ugly way
+ self.assertItemsEqual(mock_dump.call_args[0][0], self.data)
+ self.assertEqual(mock_dump.call_args[0][1], mock_open.return_value)
+
+ @patch("os.unlink")
+ @patch("os.path.exists")
+ @patch("%s.open" % builtins)
+ @patch("lxml.etree.XML")
+ @patch("%s.load" % cPickle.__name__)
+ @patch("Bcfg2.Server.Plugin.ThreadedStatistics.run", Mock())
+ def test_load(self, mock_load, mock_XML, mock_open, mock_exists,
+ mock_unlink):
+ core = Mock()
+ core.terminate.isSet.return_value = False
+ ts = self.get_obj(core)
+
+ ts.work_queue = Mock()
+ ts.work_queue.data = []
+ def reset():
+ core.reset_mock()
+ mock_open.reset_mock()
+ mock_exists.reset_mock()
+ mock_unlink.reset_mock()
+ mock_load.reset_mock()
+ mock_XML.reset_mock()
+ ts.work_queue.reset_mock()
+ ts.work_queue.data = []
+
+ mock_exists.return_value = False
+ self.assertTrue(ts.load())
+ mock_exists.assert_called_with(ts.pending_file)
+
+ reset()
+ mock_exists.return_value = True
+ mock_open.side_effect = OSError
+ self.assertFalse(ts.load())
+ mock_exists.assert_called_with(ts.pending_file)
+ mock_open.assert_called_with(ts.pending_file, 'r')
+
+ reset()
+ mock_open.side_effect = None
+ mock_load.return_value = self.data
+ ts.work_queue.put_nowait.side_effect = Full
+ self.assertTrue(ts.load())
+ mock_exists.assert_called_with(ts.pending_file)
+ mock_open.assert_called_with(ts.pending_file, 'r')
+ mock_open.return_value.close.assert_any_call()
+ mock_load.assert_called_with(mock_open.return_value)
+
+ reset()
+ core.build_metadata.side_effect = lambda x: x
+ mock_XML.side_effect = lambda x, parser=None: x
+ ts.work_queue.put_nowait.side_effect = None
+ self.assertTrue(ts.load())
+ mock_exists.assert_called_with(ts.pending_file)
+ mock_open.assert_called_with(ts.pending_file, 'r')
+ mock_open.return_value.close.assert_any_call()
+ mock_load.assert_called_with(mock_open.return_value)
+ self.assertItemsEqual(mock_XML.call_args_list,
+ [call(x, parser=Bcfg2.Server.XMLParser)
+ for h, x in self.data])
+ self.assertItemsEqual(ts.work_queue.put_nowait.call_args_list,
+ [call((h, x)) for h, x in self.data])
+ mock_unlink.assert_called_with(ts.pending_file)
+
+ @patch("threading.Thread.start", Mock())
+ @patch("Bcfg2.Server.Plugin.ThreadedStatistics.load")
+ @patch("Bcfg2.Server.Plugin.ThreadedStatistics.save")
+ @patch("Bcfg2.Server.Plugin.ThreadedStatistics.handle_statistic")
+ def test_run(self, mock_handle, mock_save, mock_load):
+ core = Mock()
+ ts = self.get_obj(core)
+ mock_load.return_value = True
+ ts.work_queue = Mock()
+
+ def reset():
+ mock_handle.reset_mock()
+ mock_save.reset_mock()
+ mock_load.reset_mock()
+ core.reset_mock()
+ ts.work_queue.reset_mock()
+ ts.work_queue.data = self.data[:]
+ ts.work_queue.get_calls = 0
+
+ reset()
+
+ def get_rv(**kwargs):
+ ts.work_queue.get_calls += 1
+ try:
+ return ts.work_queue.data.pop()
+ except IndexError:
+ raise Empty
+ ts.work_queue.get.side_effect = get_rv
+ def terminate_isset():
+ # this lets the loop go on a few iterations with an empty
+ # queue to test that it doesn't error out
+ return ts.work_queue.get_calls > 3
+ core.terminate.isSet.side_effect = terminate_isset
+
+ ts.work_queue.empty.return_value = False
+ ts.run()
+ mock_load.assert_any_call()
+ self.assertGreaterEqual(ts.work_queue.get.call_count, len(self.data))
+ self.assertItemsEqual(mock_handle.call_args_list,
+ [call(h, x) for h, x in self.data])
+ mock_save.assert_any_call()
+
+ @patch("copy.copy", Mock(side_effect=lambda x: x))
+ @patch("Bcfg2.Server.Plugin.ThreadedStatistics.run", Mock())
+ def test_process_statistics(self):
+ TestStatistics.test_process_statistics(self)
+
+ core = Mock()
+ ts = self.get_obj(core)
+ ts.work_queue = Mock()
+ ts.process_statistics(*self.data[0])
+ ts.work_queue.put_nowait.assert_called_with(self.data[0])
+
+ ts.work_queue.reset_mock()
+ ts.work_queue.put_nowait.side_effect = Full
+ # test that no exception is thrown
+ ts.process_statistics(*self.data[0])
+
+
+class TestPullSource(Bcfg2TestCase):
+ def test_GetCurrentEntry(self):
+ ps = PullSource()
+ self.assertRaises(NotImplementedError,
+ ps.GetCurrentEntry, None, None, None)
+
+
+class TestPullTarget(Bcfg2TestCase):
+ def test_AcceptChoices(self):
+ pt = PullTarget()
+ self.assertRaises(NotImplementedError,
+ pt.AcceptChoices, None, None)
+
+ def test_AcceptPullData(self):
+ pt = PullTarget()
+ self.assertRaises(NotImplementedError,
+ pt.AcceptPullData, None, None, None)
+
+
+class TestDecision(Bcfg2TestCase):
+ """ placeholder for future tests """
+ pass
+
+
+class TestValidationError(Bcfg2TestCase):
+ """ placeholder for future tests """
+ pass
+
+
+class TestStructureValidator(Bcfg2TestCase):
+ def test_validate_structures(self):
+ sv = StructureValidator()
+ self.assertRaises(NotImplementedError,
+ sv.validate_structures, None, None)
+
+
+class TestGoalValidator(Bcfg2TestCase):
+ def test_validate_goals(self):
+ gv = GoalValidator()
+ self.assertRaises(NotImplementedError,
+ gv.validate_goals, None, None)
+
+
+class TestVersion(Bcfg2TestCase):
+ """ placeholder for future tests """
+ pass
+
+
+class TestClientRunHooks(Bcfg2TestCase):
+ """ placeholder for future tests """
+ pass
+
+
+class TestFileBacked(Bcfg2TestCase):
+ test_obj = FileBacked
+ path = os.path.join(datastore, "test")
+
+ def get_obj(self, path=None, fam=None):
+ if path is None:
+ path = self.path
+ return self.test_obj(path, fam=fam)
+
+ @patch("%s.open" % builtins)
+ def test_HandleEvent(self, mock_open):
+ fb = self.get_obj()
+ fb.Index = Mock()
+
+ def reset():
+ fb.Index.reset_mock()
+ mock_open.reset_mock()
+
+ for evt in ["exists", "changed", "created"]:
+ reset()
+ event = Mock()
+ event.code2str.return_value = evt
+ fb.HandleEvent(event)
+ mock_open.assert_called_with(self.path)
+ mock_open.return_value.read.assert_any_call()
+ fb.Index.assert_any_call()
+
+ reset()
+ event = Mock()
+ event.code2str.return_value = "endExist"
+ fb.HandleEvent(event)
+ self.assertFalse(mock_open.called)
+ self.assertFalse(fb.Index.called)
+
+
+class TestDirectoryBacked(Bcfg2TestCase):
+ test_obj = DirectoryBacked
+ testpaths = {1: '',
+ 2: '/foo',
+ 3: '/foo/bar',
+ 4: '/foo/bar/baz',
+ 5: 'quux',
+ 6: 'xyzzy/',
+ 7: 'xyzzy/plugh/'}
+ testfiles = ['foo', 'bar/baz.txt', 'plugh.py']
+ ignore = [] # ignore no events
+ badevents = [] # DirectoryBacked handles all files, so there's no
+ # such thing as a bad event
+
+ def test_child_interface(self):
+ # ensure that the child object has the correct interface
+ self.assertTrue(hasattr(self.test_obj.__child__, "HandleEvent"))
+
+ @patch("Bcfg2.Server.Plugin.%s.add_directory_monitor" % test_obj.__name__,
+ Mock())
+ def get_obj(self, fam=None):
+ if fam is None:
+ fam = Mock()
+ return self.test_obj(os.path.join(datastore, self.test_obj.__name__),
+ fam)
+
+ @patch("Bcfg2.Server.Plugin.%s.add_directory_monitor" % test_obj.__name__)
+ def test__init(self, mock_add_monitor):
+ db = self.test_obj(datastore, Mock())
+ mock_add_monitor.assert_called_with('')
+
+ def test__getitem(self):
+ db = self.get_obj()
+ db.entries.update(dict(a=1, b=2, c=3))
+ self.assertEqual(db['a'], 1)
+ self.assertEqual(db['b'], 2)
+ expected = KeyError
+ try:
+ db['d']
+ except expected:
+ pass
+ except:
+ err = sys.exc_info()[1]
+ self.assertFalse(True, "%s raised instead of %s" %
+ (err.__class__.__name__,
+ expected.__name__))
+ else:
+ self.assertFalse(True,
+ "%s not raised" % expected.__class__.__name__)
+
+ def test__iter(self):
+ db = self.get_obj()
+ db.entries.update(dict(a=1, b=2, c=3))
+ self.assertEqual([i for i in db],
+ [i for i in db.entries.items()])
+
+ @patch("os.path.isdir")
+ def test_add_directory_monitor(self, mock_isdir):
+ db = self.get_obj()
+ db.fam = Mock()
+ db.fam.rv = 0
+
+ def reset():
+ db.fam.rv += 1
+ db.fam.AddMonitor.return_value = db.fam.rv
+ db.fam.reset_mock()
+ mock_isdir.reset_mock()
+
+ mock_isdir.return_value = True
+ for path in self.testpaths.values():
+ reset()
+ db.add_directory_monitor(path)
+ db.fam.AddMonitor.assert_called_with(os.path.join(db.data, path),
+ db)
+ self.assertIn(db.fam.rv, db.handles)
+ self.assertEqual(db.handles[db.fam.rv], path)
+
+ reset()
+ # test duplicate adds
+ for path in self.testpaths.values():
+ reset()
+ db.add_directory_monitor(path)
+ self.assertFalse(db.fam.AddMonitor.called)
+
+ reset()
+ mock_isdir.return_value = False
+ db.add_directory_monitor('bogus')
+ self.assertFalse(db.fam.AddMonitor.called)
+ self.assertNotIn(db.fam.rv, db.handles)
+
+ def test_add_entry(self):
+ db = self.get_obj()
+ db.fam = Mock()
+ class MockChild(Mock):
+ def __init__(self, path, fam, **kwargs):
+ Mock.__init__(self, **kwargs)
+ self.path = path
+ self.fam = fam
+ self.HandleEvent = Mock()
+ db.__child__ = MockChild
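+ # MockChild stands in for __child__ so we can check that add_entry()
+ # constructs it with the full path and the fam, and delivers the event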
+
+ for path in self.testpaths.values():
+ event = Mock()
+ db.add_entry(path, event)
+ self.assertIn(path, db.entries)
+ self.assertEqual(db.entries[path].path,
+ os.path.join(db.data, path))
+ self.assertEqual(db.entries[path].fam, db.fam)
+ db.entries[path].HandleEvent.assert_called_with(event)
+
+ @patch("os.path.isdir")
+ @patch("Bcfg2.Server.Plugin.%s.add_entry" % test_obj.__name__)
+ @patch("Bcfg2.Server.Plugin.%s.add_directory_monitor" % test_obj.__name__)
+ def test_HandleEvent(self, mock_add_monitor, mock_add_entry, mock_isdir):
+ db = self.get_obj()
+ # a path with a leading / should never get into
+ # DirectoryBacked.handles, so strip the leading slash from that
+ # test case
+ for rid, path in self.testpaths.items():
+ path = path.lstrip('/')
+ db.handles[rid] = path
+
+ def reset():
+ mock_isdir.reset_mock()
+ mock_add_entry.reset_mock()
+ mock_add_monitor.reset_mock()
+
+ def get_event(filename, action, requestID):
+ event = Mock()
+ event.code2str.return_value = action
+ event.filename = filename
+ event.requestID = requestID
+ return event
+
+ # test events on the data directory itself
+ reset()
+ mock_isdir.return_value = True
+ event = get_event(db.data, "exists", 1)
+ db.HandleEvent(event)
+ mock_add_monitor.assert_called_with("")
+
+ # test events on paths that aren't handled
+ reset()
+ mock_isdir.return_value = False
+ event = get_event('/' + self.testfiles[0], 'created',
+ max(self.testpaths.keys()) + 1)
+ db.HandleEvent(event)
+ self.assertFalse(mock_add_monitor.called)
+ self.assertFalse(mock_add_entry.called)
+
+ for req_id, path in self.testpaths.items():
+ # a path with a leading / should never get into
+ # DirectoryBacked.handles, so strip the leading slash from that
+ # test case
+ path = path.lstrip('/')
+ basepath = os.path.join(datastore, path)
+ for fname in self.testfiles:
+ relpath = os.path.join(path, fname)
+ abspath = os.path.join(basepath, fname)
+
+ # test endExist does nothing
+ reset()
+ event = get_event(fname, 'endExist', req_id)
+ db.HandleEvent(event)
+ self.assertFalse(mock_add_monitor.called)
+ self.assertFalse(mock_add_entry.called)
+
+ mock_isdir.return_value = True
+ for evt in ["created", "exists", "changed"]:
+ # test that creating or changing a directory works
+ reset()
+ event = get_event(fname, evt, req_id)
+ db.HandleEvent(event)
+ mock_add_monitor.assert_called_with(relpath)
+ self.assertFalse(mock_add_entry.called)
+
+ mock_isdir.return_value = False
+ for evt in ["created", "exists"]:
+ # test that creating a file works
+ reset()
+ event = get_event(fname, evt, req_id)
+ db.HandleEvent(event)
+ mock_add_entry.assert_called_with(relpath, event)
+ self.assertFalse(mock_add_monitor.called)
+ db.entries[relpath] = MagicMock()
+
+ # test that changing a file that already exists works
+ reset()
+ event = get_event(fname, "changed", req_id)
+ db.HandleEvent(event)
+ db.entries[relpath].HandleEvent.assert_called_with(event)
+ self.assertFalse(mock_add_monitor.called)
+ self.assertFalse(mock_add_entry.called)
+
+ # test that deleting an entry works
+ reset()
+ event = get_event(fname, "deleted", req_id)
+ db.HandleEvent(event)
+ self.assertNotIn(relpath, db.entries)
+
+ # test that changing a file that doesn't exist works
+ reset()
+ event = get_event(fname, "changed", req_id)
+ db.HandleEvent(event)
+ mock_add_entry.assert_called_with(relpath, event)
+ self.assertFalse(mock_add_monitor.called)
+ db.entries[relpath] = MagicMock()
+
+ # test that deleting a directory works. this is a little
+ # strange because the _parent_ directory has to handle the
+ # deletion
+ reset()
+ event = get_event('quux', "deleted", 1)
+ db.HandleEvent(event)
+ for key in db.entries.keys():
+ self.assertFalse(key.startswith('quux'))
+
+ # test bad events
+ for fname in self.badevents:
+ reset()
+ event = get_event(fname, "created", 1)
+ db.HandleEvent(event)
+ self.assertFalse(mock_add_entry.called)
+ self.assertFalse(mock_add_monitor.called)
+
+ # test ignored events
+ for fname in self.ignore:
+ reset()
+ event = get_event(fname, "created", 1)
+ db.HandleEvent(event)
+ self.assertFalse(mock_isdir.called,
+ msg="Failed to ignore %s" % fname)
+ self.assertFalse(mock_add_entry.called,
+ msg="Failed to ignore %s" % fname)
+ self.assertFalse(mock_add_monitor.called,
+ msg="Failed to ignore %s" % fname)
+
+
+class TestXMLFileBacked(TestFileBacked):
+ test_obj = XMLFileBacked
+ path = os.path.join(datastore, "test", "test1.xml")
+
+ def get_obj(self, path=None, fam=None, should_monitor=False):
+ if path is None:
+ path = self.path
+ return self.test_obj(path, fam=fam, should_monitor=should_monitor)
+
+ def test__init(self):
+ fam = Mock()
+ xfb = self.get_obj()
+ self.assertIsNone(xfb.fam)
+
+ xfb = self.get_obj(fam=fam)
+ self.assertFalse(fam.AddMonitor.called)
+
+ fam.reset_mock()
+ xfb = self.get_obj(fam=fam, should_monitor=True)
+ fam.AddMonitor.assert_called_with(self.path, xfb)
+
+ @patch("os.path.exists")
+ @patch("lxml.etree.parse")
+ def test_follow_xincludes(self, mock_parse, mock_exists):
+ xfb = self.get_obj()
+ xfb.add_monitor = Mock()
+
+ def reset():
+ xfb.add_monitor.reset_mock()
+ mock_parse.reset_mock()
+ mock_exists.reset_mock()
+ xfb.extras = []
+
+ mock_exists.return_value = True
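+        # map filenames to pre-built element trees; the mocked
+        # lxml.etree.parse looks each file up in this dict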
+ xdata = dict()
+ mock_parse.side_effect = lambda p: xdata[p]
+
+ # basic functionality
+ xdata['/test/test2.xml'] = lxml.etree.Element("Test").getroottree()
+ xfb._follow_xincludes(xdata=xdata['/test/test2.xml'])
+ self.assertFalse(xfb.add_monitor.called)
+
+ if (not hasattr(self.test_obj, "xdata") or
+ not isinstance(self.test_obj.xdata, property)):
+ # if xdata is settable, test that method of getting data
+ # to _follow_xincludes
+ reset()
+ xfb.xdata = xdata['/test/test2.xml'].getroot()
+ xfb._follow_xincludes()
+ self.assertFalse(xfb.add_monitor.called)
+ xfb.xdata = None
+
+ reset()
+ xfb._follow_xincludes(fname="/test/test2.xml")
+ self.assertFalse(xfb.add_monitor.called)
+
+ # test one level of xinclude
+ xdata[self.path] = lxml.etree.Element("Test").getroottree()
+ lxml.etree.SubElement(xdata[self.path].getroot(),
+ Bcfg2.Server.XI_NAMESPACE + "include",
+ href="/test/test2.xml")
+ reset()
+ xfb._follow_xincludes(fname=self.path)
+ xfb.add_monitor.assert_called_with("/test/test2.xml")
+ self.assertItemsEqual(mock_parse.call_args_list,
+ [call(f) for f in xdata.keys()])
+ mock_exists.assert_called_with("/test/test2.xml")
+
+ reset()
+ xfb._follow_xincludes(fname=self.path, xdata=xdata[self.path])
+ xfb.add_monitor.assert_called_with("/test/test2.xml")
+ self.assertItemsEqual(mock_parse.call_args_list,
+ [call(f) for f in xdata.keys()
+ if f != self.path])
+ mock_exists.assert_called_with("/test/test2.xml")
+
+ # test two-deep level of xinclude, with some files in another
+ # directory
+ xdata["/test/test3.xml"] = \
+ lxml.etree.Element("Test").getroottree()
+ lxml.etree.SubElement(xdata["/test/test3.xml"].getroot(),
+ Bcfg2.Server.XI_NAMESPACE + "include",
+ href="/test/test_dir/test4.xml")
+ xdata["/test/test_dir/test4.xml"] = \
+ lxml.etree.Element("Test").getroottree()
+ lxml.etree.SubElement(xdata["/test/test_dir/test4.xml"].getroot(),
+ Bcfg2.Server.XI_NAMESPACE + "include",
+ href="/test/test_dir/test5.xml")
+ xdata['/test/test_dir/test5.xml'] = \
+ lxml.etree.Element("Test").getroottree()
+ xdata['/test/test_dir/test6.xml'] = \
+ lxml.etree.Element("Test").getroottree()
+ # relative includes
+ lxml.etree.SubElement(xdata[self.path].getroot(),
+ Bcfg2.Server.XI_NAMESPACE + "include",
+ href="test3.xml")
+ lxml.etree.SubElement(xdata["/test/test3.xml"].getroot(),
+ Bcfg2.Server.XI_NAMESPACE + "include",
+ href="test_dir/test6.xml")
+
+ reset()
+ xfb._follow_xincludes(fname=self.path)
+ self.assertItemsEqual(xfb.add_monitor.call_args_list,
+ [call(f) for f in xdata.keys() if f != self.path])
+ self.assertItemsEqual(mock_parse.call_args_list,
+ [call(f) for f in xdata.keys()])
+ self.assertItemsEqual(mock_exists.call_args_list,
+ [call(f) for f in xdata.keys() if f != self.path])
+
+ reset()
+ xfb._follow_xincludes(fname=self.path, xdata=xdata[self.path])
+ self.assertItemsEqual(xfb.add_monitor.call_args_list,
+ [call(f) for f in xdata.keys() if f != self.path])
+ self.assertItemsEqual(mock_parse.call_args_list,
+ [call(f) for f in xdata.keys() if f != self.path])
+ self.assertItemsEqual(mock_exists.call_args_list,
+ [call(f) for f in xdata.keys() if f != self.path])
+
+ @patch("lxml.etree._ElementTree", FakeElementTree)
+ @patch("Bcfg2.Server.Plugin.%s._follow_xincludes" % test_obj.__name__)
+ def test_Index(self, mock_follow):
+ xfb = self.get_obj()
+
+ def reset():
+ mock_follow.reset_mock()
+ FakeElementTree.xinclude.reset_mock()
+ xfb.extras = []
+ xfb.xdata = None
+
+ # syntax error
+ xfb.data = "<"
+ self.assertRaises(PluginInitError, xfb.Index)
+
+ # no xinclude
+ reset()
+ xdata = lxml.etree.Element("Test", name="test")
+ children = [lxml.etree.SubElement(xdata, "Foo"),
+ lxml.etree.SubElement(xdata, "Bar", name="bar")]
+ xfb.data = tostring(xdata)
+ xfb.Index()
+ mock_follow.assert_any_call()
+ try:
+ self.assertEqual(xfb.xdata.base, self.path)
+ except AttributeError:
+ # python 2.4 and/or lxml 2.0 don't store the base_url in
+ # .base -- no idea where it's stored.
+ pass
+ self.assertItemsEqual([tostring(e) for e in xfb.entries],
+ [tostring(e) for e in children])
+
+ # with xincludes
+ reset()
+ mock_follow.side_effect = \
+ lambda: xfb.extras.extend(["/test/test2.xml",
+ "/test/test_dir/test3.xml"])
+ children.extend([
+ lxml.etree.SubElement(xdata,
+ Bcfg2.Server.XI_NAMESPACE + "include",
+ href="/test/test2.xml"),
+ lxml.etree.SubElement(xdata,
+ Bcfg2.Server.XI_NAMESPACE + "include",
+ href="/test/test_dir/test3.xml")])
+ test2 = lxml.etree.Element("Test", name="test2")
+ lxml.etree.SubElement(test2, "Baz")
+ test3 = lxml.etree.Element("Test", name="test3")
+ replacements = {"/test/test2.xml": test2,
+ "/test/test_dir/test3.xml": test3}
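+
+        # stand-in for lxml's xinclude(): swap each xi:include element for
+        # its pre-built replacement tree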
+ def xinclude():
+ for el in xfb.xdata.findall('//%sinclude' %
+ Bcfg2.Server.XI_NAMESPACE):
+ xfb.xdata.replace(el, replacements[el.get("href")])
+ FakeElementTree.xinclude.side_effect = xinclude
+
+ xfb.data = tostring(xdata)
+ xfb.Index()
+ mock_follow.assert_any_call()
+        FakeElementTree.xinclude.assert_any_call()
+ try:
+ self.assertEqual(xfb.xdata.base, self.path)
+ except AttributeError:
+ pass
+ self.assertItemsEqual([tostring(e) for e in xfb.entries],
+ [tostring(e) for e in children])
+
+ def test_add_monitor(self):
+ xfb = self.get_obj()
+ xfb.add_monitor("/test/test2.xml")
+ self.assertIn("/test/test2.xml", xfb.extras)
+
+ fam = Mock()
+ xfb = self.get_obj(fam=fam)
+ fam.reset_mock()
+ xfb.add_monitor("/test/test3.xml")
+ self.assertFalse(fam.AddMonitor.called)
+ self.assertIn("/test/test3.xml", xfb.extras)
+
+ fam.reset_mock()
+ xfb = self.get_obj(fam=fam, should_monitor=True)
+ xfb.add_monitor("/test/test4.xml")
+ fam.AddMonitor.assert_called_with("/test/test4.xml", xfb)
+ self.assertIn("/test/test4.xml", xfb.extras)
+
+
+class TestStructFile(TestXMLFileBacked):
+ test_obj = StructFile
+
+ def _get_test_data(self):
+ """ build a very complex set of test data """
+ # top-level group and client elements
+ groups = dict()
+ # group and client elements that are descendents of other group or
+ # client elements
+ subgroups = dict()
+ # children of elements in `groups' that should be included in
+ # match results
+ children = dict()
+ # children of elements in `subgroups' that should be included in
+ # match results
+ subchildren = dict()
+ # top-level tags that are not group elements
+ standalone = []
+ xdata = lxml.etree.Element("Test", name="test")
+ groups[0] = lxml.etree.SubElement(xdata, "Group", name="group1",
+ include="true")
+ children[0] = [lxml.etree.SubElement(groups[0], "Child", name="c1"),
+ lxml.etree.SubElement(groups[0], "Child", name="c2")]
+ subgroups[0] = [lxml.etree.SubElement(groups[0], "Group",
+ name="subgroup1", include="true"),
+ lxml.etree.SubElement(groups[0],
+ "Client", name="client1",
+ include="false")]
+ subchildren[0] = \
+ [lxml.etree.SubElement(subgroups[0][0], "Child", name="sc1"),
+ lxml.etree.SubElement(subgroups[0][0], "Child", name="sc2",
+ attr="some attr"),
+ lxml.etree.SubElement(subgroups[0][0], "Child", name="sc3")]
+ lxml.etree.SubElement(subchildren[0][-1], "SubChild", name="subchild")
+ lxml.etree.SubElement(subgroups[0][1], "Child", name="sc4")
+
+ groups[1] = lxml.etree.SubElement(xdata, "Group", name="group2",
+ include="false")
+ children[1] = []
+ subgroups[1] = []
+ subchildren[1] = []
+ lxml.etree.SubElement(groups[1], "Child", name="c3")
+ lxml.etree.SubElement(groups[1], "Child", name="c4")
+
+ standalone.append(lxml.etree.SubElement(xdata, "Standalone", name="s1"))
+
+ groups[2] = lxml.etree.SubElement(xdata, "Client", name="client2",
+ include="false")
+ children[2] = []
+ subgroups[2] = []
+ subchildren[2] = []
+ lxml.etree.SubElement(groups[2], "Child", name="c5")
+ lxml.etree.SubElement(groups[2], "Child", name="c6")
+
+ standalone.append(lxml.etree.SubElement(xdata, "Standalone", name="s2",
+ attr="some attr"))
+
+ groups[3] = lxml.etree.SubElement(xdata, "Client", name="client3",
+ include="true")
+ children[3] = [lxml.etree.SubElement(groups[3], "Child", name="c7",
+ attr="some_attr"),
+ lxml.etree.SubElement(groups[3], "Child", name="c8")]
+ subgroups[3] = []
+ subchildren[3] = []
+ lxml.etree.SubElement(children[3][-1], "SubChild", name="subchild")
+
+ standalone.append(lxml.etree.SubElement(xdata, "Standalone", name="s3"))
+ lxml.etree.SubElement(standalone[-1], "SubStandalone", name="sub1")
+
+ children[4] = standalone
+ return (xdata, groups, subgroups, children, subchildren, standalone)
+
+ def test_include_element(self):
+ sf = self.get_obj()
+ metadata = Mock()
+ metadata.groups = ["group1", "group2"]
+ metadata.hostname = "foo.example.com"
+
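+        # helper: build an element with the given tag and attributes and run
+        # it through _include_element against the mock metadata above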
+ inc = lambda tag, **attrs: \
+ sf._include_element(lxml.etree.Element(tag, **attrs), metadata)
+
+ self.assertFalse(sf._include_element(lxml.etree.Comment("test"),
+ metadata))
+
+ self.assertFalse(inc("Group", name="group3"))
+ self.assertFalse(inc("Group", name="group2", negate="true"))
+ self.assertFalse(inc("Group", name="group2", negate="tRuE"))
+ self.assertTrue(inc("Group", name="group2"))
+ self.assertTrue(inc("Group", name="group2", negate="false"))
+ self.assertTrue(inc("Group", name="group2", negate="faLSe"))
+ self.assertTrue(inc("Group", name="group3", negate="true"))
+ self.assertTrue(inc("Group", name="group3", negate="tRUe"))
+
+ self.assertFalse(inc("Client", name="bogus.example.com"))
+ self.assertFalse(inc("Client", name="foo.example.com", negate="true"))
+ self.assertFalse(inc("Client", name="foo.example.com", negate="tRuE"))
+ self.assertTrue(inc("Client", name="foo.example.com"))
+ self.assertTrue(inc("Client", name="foo.example.com", negate="false"))
+ self.assertTrue(inc("Client", name="foo.example.com", negate="faLSe"))
+ self.assertTrue(inc("Client", name="bogus.example.com", negate="true"))
+ self.assertTrue(inc("Client", name="bogus.example.com", negate="tRUe"))
+
+ self.assertTrue(inc("Other"))
+
+ @patch("Bcfg2.Server.Plugin.%s._include_element" % test_obj.__name__)
+ def test__match(self, mock_include):
+ sf = self.get_obj()
+ metadata = Mock()
+
+ (xdata, groups, subgroups, children, subchildren, standalone) = \
+ self._get_test_data()
+
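+        # Group/Client elements count as included only when include="true";
+        # every other tag is always included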
+ mock_include.side_effect = \
+ lambda x, _: (x.tag not in ['Client', 'Group'] or
+ x.get("include") == "true")
+
+ for i, group in groups.items():
+ actual = sf._match(group, metadata)
+ expected = children[i] + subchildren[i]
+ self.assertEqual(len(actual), len(expected))
+ # easiest way to compare the values is actually to make
+ # them into an XML document and let assertXMLEqual compare
+ # them
+ xactual = lxml.etree.Element("Container")
+ xactual.extend(actual)
+ xexpected = lxml.etree.Element("Container")
+ xexpected.extend(expected)
+ self.assertXMLEqual(xactual, xexpected)
+
+ for el in standalone:
+ self.assertXMLEqual(el, sf._match(el, metadata)[0])
+
+ @patch("Bcfg2.Server.Plugin.%s._match" % test_obj.__name__)
+ def test_Match(self, mock_match):
+ sf = self.get_obj()
+ metadata = Mock()
+
+ (xdata, groups, subgroups, children, subchildren, standalone) = \
+ self._get_test_data()
+ sf.entries.extend(copy.deepcopy(xdata).getchildren())
+
+ def match_rv(el, _):
+ if el.tag not in ['Client', 'Group']:
+ return [el]
+            elif el.get("include") == "true":
+ return el.getchildren()
+ else:
+ return []
+ mock_match.side_effect = match_rv
+ actual = sf.Match(metadata)
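+        # flatten the per-group lists of children and subgroups into a single
+        # expected list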
+ expected = reduce(lambda x, y: x + y,
+ list(children.values()) + list(subgroups.values()))
+ self.assertEqual(len(actual), len(expected))
+ # easiest way to compare the values is actually to make
+ # them into an XML document and let assertXMLEqual compare
+ # them
+ xactual = lxml.etree.Element("Container")
+ xactual.extend(actual)
+ xexpected = lxml.etree.Element("Container")
+ xexpected.extend(expected)
+ self.assertXMLEqual(xactual, xexpected)
+
+ @patch("Bcfg2.Server.Plugin.%s._include_element" % test_obj.__name__)
+ def test__xml_match(self, mock_include):
+ sf = self.get_obj()
+ metadata = Mock()
+
+ (xdata, groups, subgroups, children, subchildren, standalone) = \
+ self._get_test_data()
+
+ mock_include.side_effect = \
+ lambda x, _: (x.tag not in ['Client', 'Group'] or
+ x.get("include") == "true")
+
+ actual = copy.deepcopy(xdata)
+ for el in actual.getchildren():
+ sf._xml_match(el, metadata)
+ expected = lxml.etree.Element(xdata.tag, **dict(xdata.attrib))
+ expected.text = xdata.text
+ expected.extend(reduce(lambda x, y: x + y,
+ list(children.values()) + list(subchildren.values())))
+ expected.extend(standalone)
+ self.assertXMLEqual(actual, expected)
+
+ @patch("Bcfg2.Server.Plugin.%s._xml_match" % test_obj.__name__)
+    def test_XMLMatch(self, mock_xml_match):
+ sf = self.get_obj()
+ metadata = Mock()
+
+ (sf.xdata, groups, subgroups, children, subchildren, standalone) = \
+ self._get_test_data()
+
+ sf.XMLMatch(metadata)
+ actual = []
+ for call in mock_xml_match.call_args_list:
+ actual.append(call[0][0])
+ self.assertEqual(call[0][1], metadata)
+ expected = list(groups.values()) + standalone
+ # easiest way to compare the values is actually to make
+ # them into an XML document and let assertXMLEqual compare
+ # them
+ xactual = lxml.etree.Element("Container")
+ xactual.extend(actual)
+ xexpected = lxml.etree.Element("Container")
+ xexpected.extend(expected)
+ self.assertXMLEqual(xactual, xexpected)
+
+
+class TestINode(Bcfg2TestCase):
+ test_obj = INode
+
+ # INode.__init__ and INode._load_children() call each other
+ # recursively, which makes this class kind of a nightmare to test.
+ # we have to first patch INode._load_children so that we can
+ # create an INode object with no children loaded, then we unpatch
+ # INode._load_children and patch INode.__init__ so that child
+ # objects aren't actually created. but in order to test things
+ # atomically, we do this umpteen times in order to test with
+ # different data. this convenience method makes this a little
+ # easier. fun fun fun.
+ @patch("Bcfg2.Server.Plugin.%s._load_children" % test_obj.__name__, Mock())
+ def _get_inode(self, data, idict):
+ return self.test_obj(data, idict)
+
+ def test_raw_predicates(self):
+ metadata = Mock()
+ metadata.groups = ["group1", "group2"]
+ metadata.hostname = "foo.example.com"
+ entry = None
+
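+        # raw/nraw are string templates for positive/negated predicates; the
+        # parent node's predicate is supplied as "predicate" in the eval
+        # namespace and is AND'ed into the result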
+ parent_predicate = lambda m, e: True
+ pred = eval(self.test_obj.raw['Client'] % dict(name="foo.example.com"),
+ dict(predicate=parent_predicate))
+ self.assertTrue(pred(metadata, entry))
+ pred = eval(self.test_obj.raw['Client'] % dict(name="bar.example.com"),
+ dict(predicate=parent_predicate))
+ self.assertFalse(pred(metadata, entry))
+
+ pred = eval(self.test_obj.raw['Group'] % dict(name="group1"),
+ dict(predicate=parent_predicate))
+ self.assertTrue(pred(metadata, entry))
+ pred = eval(self.test_obj.raw['Group'] % dict(name="group3"),
+ dict(predicate=parent_predicate))
+ self.assertFalse(pred(metadata, entry))
+
+ pred = eval(self.test_obj.nraw['Client'] % dict(name="foo.example.com"),
+ dict(predicate=parent_predicate))
+ self.assertFalse(pred(metadata, entry))
+ pred = eval(self.test_obj.nraw['Client'] % dict(name="bar.example.com"),
+ dict(predicate=parent_predicate))
+ self.assertTrue(pred(metadata, entry))
+
+ pred = eval(self.test_obj.nraw['Group'] % dict(name="group1"),
+ dict(predicate=parent_predicate))
+ self.assertFalse(pred(metadata, entry))
+ pred = eval(self.test_obj.nraw['Group'] % dict(name="group3"),
+ dict(predicate=parent_predicate))
+ self.assertTrue(pred(metadata, entry))
+
+ parent_predicate = lambda m, e: False
+ pred = eval(self.test_obj.raw['Client'] % dict(name="foo.example.com"),
+ dict(predicate=parent_predicate))
+ self.assertFalse(pred(metadata, entry))
+ pred = eval(self.test_obj.raw['Group'] % dict(name="group1"),
+ dict(predicate=parent_predicate))
+ self.assertFalse(pred(metadata, entry))
+ pred = eval(self.test_obj.nraw['Client'] % dict(name="bar.example.com"),
+ dict(predicate=parent_predicate))
+ self.assertFalse(pred(metadata, entry))
+ pred = eval(self.test_obj.nraw['Group'] % dict(name="group3"),
+ dict(predicate=parent_predicate))
+ self.assertFalse(pred(metadata, entry))
+
+ self.assertItemsEqual(self.test_obj.containers,
+ self.test_obj.raw.keys())
+ self.assertItemsEqual(self.test_obj.containers,
+ self.test_obj.nraw.keys())
+
+ @patch("Bcfg2.Server.Plugin.INode._load_children")
+ def test__init(self, mock_load_children):
+ data = lxml.etree.Element("Bogus")
+ # called with no parent, should not raise an exception; it's a
+ # top-level tag in an XML file and so is not expected to be a
+ # proper predicate
+ INode(data, dict())
+ self.assertRaises(PluginExecutionError,
+ INode, data, dict(), Mock())
+
+ data = lxml.etree.Element("Client", name="foo.example.com")
+ idict = dict()
+ inode = INode(data, idict)
+ mock_load_children.assert_called_with(data, idict)
+ self.assertTrue(inode.predicate(Mock(), Mock()))
+
+ parent = Mock()
+ parent.predicate = lambda m, e: True
+ metadata = Mock()
+ metadata.groups = ["group1", "group2"]
+ metadata.hostname = "foo.example.com"
+ entry = None
+
+ # test setting predicate with parent object
+ mock_load_children.reset_mock()
+ inode = INode(data, idict, parent=parent)
+ mock_load_children.assert_called_with(data, idict)
+ self.assertTrue(inode.predicate(metadata, entry))
+
+ # test negation
+ data = lxml.etree.Element("Client", name="foo.example.com",
+ negate="true")
+ mock_load_children.reset_mock()
+ inode = INode(data, idict, parent=parent)
+ mock_load_children.assert_called_with(data, idict)
+ self.assertFalse(inode.predicate(metadata, entry))
+
+ # test failure of a matching predicate (client names do not match)
+ data = lxml.etree.Element("Client", name="foo.example.com")
+ metadata.hostname = "bar.example.com"
+ mock_load_children.reset_mock()
+ inode = INode(data, idict, parent=parent)
+ mock_load_children.assert_called_with(data, idict)
+ self.assertFalse(inode.predicate(metadata, entry))
+
+ # test that parent predicate is AND'ed in correctly
+ parent.predicate = lambda m, e: False
+ metadata.hostname = "foo.example.com"
+ mock_load_children.reset_mock()
+ inode = INode(data, idict, parent=parent)
+ mock_load_children.assert_called_with(data, idict)
+ self.assertFalse(inode.predicate(metadata, entry))
+
+ def test_load_children(self):
+ data = lxml.etree.Element("Parent")
+ child1 = lxml.etree.SubElement(data, "Client", name="foo.example.com")
+ child2 = lxml.etree.SubElement(data, "Group", name="bar", negate="true")
+ idict = dict()
+
+ inode = self._get_inode(data, idict)
+
+ @patch("Bcfg2.Server.Plugin.%s.__init__" % inode.__class__.__name__)
+ def inner(mock_init):
+ mock_init.return_value = None
+ inode._load_children(data, idict)
+ self.assertItemsEqual(mock_init.call_args_list,
+ [call(child1, idict, inode),
+ call(child2, idict, inode)])
+ self.assertEqual(idict, dict())
+ self.assertItemsEqual(inode.contents, dict())
+
+ inner()
+
+ data = lxml.etree.Element("Parent")
+ child1 = lxml.etree.SubElement(data, "Data", name="child1",
+ attr="some attr")
+ child1.text = "text"
+ subchild1 = lxml.etree.SubElement(child1, "SubChild", name="subchild")
+ child2 = lxml.etree.SubElement(data, "Group", name="bar", negate="true")
+ idict = dict()
+
+ inode = self._get_inode(data, idict)
+
+ @patch("Bcfg2.Server.Plugin.%s.__init__" % inode.__class__.__name__)
+ def inner2(mock_init):
+ mock_init.return_value = None
+ inode._load_children(data, idict)
+ mock_init.assert_called_with(child2, idict, inode)
+ tag = child1.tag
+ name = child1.get("name")
+ self.assertEqual(idict, dict(Data=[name]))
+ self.assertIn(tag, inode.contents)
+ self.assertIn(name, inode.contents[tag])
+ self.assertItemsEqual(inode.contents[tag][name],
+ dict(name=name,
+ attr=child1.get('attr'),
+ __text__=child1.text,
+ __children__=[subchild1]))
+
+ inner2()
+
+ # test ignore. no ignore is set on INode by default, so we
+ # have to set one
+ old_ignore = copy.copy(self.test_obj.ignore)
+ self.test_obj.ignore.append("Data")
+ idict = dict()
+
+ inode = self._get_inode(data, idict)
+
+ @patch("Bcfg2.Server.Plugin.%s.__init__" % inode.__class__.__name__)
+ def inner3(mock_init):
+ mock_init.return_value = None
+ inode._load_children(data, idict)
+ mock_init.assert_called_with(child2, idict, inode)
+ self.assertEqual(idict, dict())
+ self.assertItemsEqual(inode.contents, dict())
+
+ inner3()
+ self.test_obj.ignore = old_ignore
+
+ def test_Match(self):
+ idata = lxml.etree.Element("Parent")
+ contents = lxml.etree.SubElement(idata, "Data", name="contents",
+ attr="some attr")
+ child = lxml.etree.SubElement(idata, "Group", name="bar", negate="true")
+
+ inode = INode(idata, dict())
+ inode.predicate = Mock()
+ inode.predicate.return_value = False
+
+ metadata = Mock()
+ metadata.groups = ['foo']
+ data = dict()
+ entry = child
+
+ inode.Match(metadata, data, entry=child)
+ self.assertEqual(data, dict())
+ inode.predicate.assert_called_with(metadata, child)
+
+ inode.predicate.reset_mock()
+ inode.Match(metadata, data)
+ self.assertEqual(data, dict())
+ # can't easily compare XML args without the original
+ # object, and we're testing that Match() works without an
+ # XML object passed in, so...
+ self.assertEqual(inode.predicate.call_args[0][0],
+ metadata)
+ self.assertXMLEqual(inode.predicate.call_args[0][1],
+ lxml.etree.Element("None"))
+
+ inode.predicate.reset_mock()
+ inode.predicate.return_value = True
+ inode.Match(metadata, data, entry=child)
+ self.assertEqual(data, inode.contents)
+ inode.predicate.assert_called_with(metadata, child)
+
+
+class TestInfoNode(TestINode):
+ __test__ = True
+ test_obj = InfoNode
+
+ def test_raw_predicates(self):
+ TestINode.test_raw_predicates(self)
+ metadata = Mock()
+ entry = lxml.etree.Element("Path", name="/tmp/foo",
+ realname="/tmp/bar")
+
+ parent_predicate = lambda m, d: True
+ pred = eval(self.test_obj.raw['Path'] % dict(name="/tmp/foo"),
+ dict(predicate=parent_predicate))
+ self.assertTrue(pred(metadata, entry))
+ pred = eval(InfoNode.raw['Path'] % dict(name="/tmp/bar"),
+ dict(predicate=parent_predicate))
+ self.assertTrue(pred(metadata, entry))
+ pred = eval(InfoNode.raw['Path'] % dict(name="/tmp/bogus"),
+ dict(predicate=parent_predicate))
+ self.assertFalse(pred(metadata, entry))
+
+ pred = eval(self.test_obj.nraw['Path'] % dict(name="/tmp/foo"),
+ dict(predicate=parent_predicate))
+ self.assertFalse(pred(metadata, entry))
+ pred = eval(InfoNode.nraw['Path'] % dict(name="/tmp/bar"),
+ dict(predicate=parent_predicate))
+ self.assertFalse(pred(metadata, entry))
+ pred = eval(InfoNode.nraw['Path'] % dict(name="/tmp/bogus"),
+ dict(predicate=parent_predicate))
+ self.assertTrue(pred(metadata, entry))
+
+ parent_predicate = lambda m, d: False
+ pred = eval(self.test_obj.raw['Path'] % dict(name="/tmp/foo"),
+ dict(predicate=parent_predicate))
+ self.assertFalse(pred(metadata, entry))
+ pred = eval(InfoNode.raw['Path'] % dict(name="/tmp/bar"),
+ dict(predicate=parent_predicate))
+ self.assertFalse(pred(metadata, entry))
+ pred = eval(InfoNode.nraw['Path'] % dict(name="/tmp/bogus"),
+ dict(predicate=parent_predicate))
+ self.assertFalse(pred(metadata, entry))
+
+
+class TestXMLSrc(TestXMLFileBacked):
+ test_obj = XMLSrc
+
+ def test_node_interface(self):
+ # ensure that the node object has the necessary interface
+ self.assertTrue(hasattr(self.test_obj.__node__, "Match"))
+
+ @patch("%s.open" % builtins)
+ def test_HandleEvent(self, mock_open):
+ xdata = lxml.etree.Element("Test")
+ lxml.etree.SubElement(xdata, "Path", name="path", attr="whatever")
+
+ xsrc = self.get_obj("/test/foo.xml")
+ xsrc.__node__ = Mock()
+ mock_open.return_value.read.return_value = tostring(xdata)
+
+ if xsrc.__priority_required__:
+ # test with no priority at all
+ self.assertRaises(PluginExecutionError,
+ xsrc.HandleEvent, Mock())
+
+ # test with bogus priority
+ xdata.set("priority", "cow")
+ mock_open.return_value.read.return_value = tostring(xdata)
+ self.assertRaises(PluginExecutionError,
+ xsrc.HandleEvent, Mock())
+
+ # assign a priority to use in future tests
+ xdata.set("priority", "10")
+ mock_open.return_value.read.return_value = tostring(xdata)
+
+ mock_open.reset_mock()
+ xsrc = self.get_obj("/test/foo.xml")
+ xsrc.__node__ = Mock()
+ xsrc.HandleEvent(Mock())
+ mock_open.assert_called_with("/test/foo.xml")
+ mock_open.return_value.read.assert_any_call()
+ self.assertXMLEqual(xsrc.__node__.call_args[0][0], xdata)
+ self.assertEqual(xsrc.__node__.call_args[0][1], dict())
+ self.assertEqual(xsrc.pnode, xsrc.__node__.return_value)
+ self.assertEqual(xsrc.cache, None)
+
+ @patch("Bcfg2.Server.Plugin.XMLSrc.HandleEvent")
+ def test_Cache(self, mock_HandleEvent):
+ xsrc = self.get_obj("/test/foo.xml")
+ metadata = Mock()
+ xsrc.Cache(metadata)
+ mock_HandleEvent.assert_any_call()
+
+ xsrc.pnode = Mock()
+ xsrc.Cache(metadata)
+ xsrc.pnode.Match.assert_called_with(metadata, xsrc.__cacheobj__())
+ self.assertEqual(xsrc.cache[0], metadata)
+
+ xsrc.pnode.reset_mock()
+ xsrc.Cache(metadata)
+        self.assertFalse(xsrc.pnode.Match.called)
+ self.assertEqual(xsrc.cache[0], metadata)
+
+        xsrc.cache = ("bogus", )
+ xsrc.Cache(metadata)
+ xsrc.pnode.Match.assert_called_with(metadata, xsrc.__cacheobj__())
+ self.assertEqual(xsrc.cache[0], metadata)
+
+
+class TestInfoXML(TestXMLSrc):
+ test_obj = InfoXML
+
+
+class TestXMLDirectoryBacked(TestDirectoryBacked):
+ test_obj = XMLDirectoryBacked
+ testfiles = ['foo.xml', 'bar/baz.xml', 'plugh.plugh.xml']
+ badpaths = ["foo", "foo.txt", "foo.xsd", "xml"]
+
+
+class TestPrioDir(TestPlugin, TestGenerator, TestXMLDirectoryBacked):
+ test_obj = PrioDir
+
+ @patch("Bcfg2.Server.Plugin.%s.add_directory_monitor" % test_obj.__name__,
+ Mock())
+ def get_obj(self, core=None):
+ if core is None:
+ core = Mock()
+ return self.test_obj(core, datastore)
+
+ def test_HandleEvent(self):
+ TestXMLDirectoryBacked.test_HandleEvent(self)
+
+ @patch("Bcfg2.Server.Plugin.XMLDirectoryBacked.HandleEvent", Mock())
+ def inner():
+ pd = self.get_obj()
+ test1 = Mock()
+ test1.items = dict(Path=["/etc/foo.conf", "/etc/bar.conf"])
+ test2 = Mock()
+ test2.items = dict(Path=["/etc/baz.conf"],
+ Package=["quux", "xyzzy"])
+ pd.entries = {"/test1.xml": test1,
+ "/test2.xml": test2}
+ pd.HandleEvent(Mock())
+ self.assertItemsEqual(pd.Entries,
+ dict(Path={"/etc/foo.conf": pd.BindEntry,
+ "/etc/bar.conf": pd.BindEntry,
+ "/etc/baz.conf": pd.BindEntry},
+ Package={"quux": pd.BindEntry,
+ "xyzzy": pd.BindEntry}))
+
+ inner()
+
+ def test__matches(self):
+ pd = self.get_obj()
+ self.assertTrue(pd._matches(lxml.etree.Element("Test",
+ name="/etc/foo.conf"),
+ Mock(),
+ {"/etc/foo.conf": pd.BindEntry,
+ "/etc/bar.conf": pd.BindEntry}))
+ self.assertFalse(pd._matches(lxml.etree.Element("Test",
+ name="/etc/baz.conf"),
+ Mock(),
+ {"/etc/foo.conf": pd.BindEntry,
+ "/etc/bar.conf": pd.BindEntry}))
+
+ def test_BindEntry(self):
+ pd = self.get_obj()
+ pd.get_attrs = Mock(return_value=dict(test1="test1", test2="test2"))
+ entry = lxml.etree.Element("Path", name="/etc/foo.conf", test1="bogus")
+ metadata = Mock()
+ pd.BindEntry(entry, metadata)
+ pd.get_attrs.assert_called_with(entry, metadata)
+ self.assertItemsEqual(entry.attrib,
+ dict(name="/etc/foo.conf",
+ test1="test1", test2="test2"))
+
+ def test_get_attrs(self):
+ pd = self.get_obj()
+ entry = lxml.etree.Element("Path", name="/etc/foo.conf")
+ children = [lxml.etree.Element("Child")]
+ metadata = Mock()
+ pd.entries = dict()
+
+ def reset():
+ metadata.reset_mock()
+ for src in pd.entries.values():
+ src.reset_mock()
+ src.cache = None
+
+ # test with no matches
+ self.assertRaises(PluginExecutionError,
+ pd.get_attrs, entry, metadata)
+
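+        # helper: register a mock source whose Cache() fills in .cache with
+        # the supplied data at the given priority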
+ def add_entry(name, data, prio=10):
+ path = os.path.join(pd.data, name)
+ pd.entries[path] = Mock()
+ pd.entries[path].priority = prio
+ def do_Cache(metadata):
+ pd.entries[path].cache = (metadata, data)
+ pd.entries[path].Cache.side_effect = do_Cache
+
+ add_entry('test1.xml',
+ dict(Path={'/etc/foo.conf': dict(attr="attr1",
+ __children__=children),
+ '/etc/bar.conf': dict()}))
+ add_entry('test2.xml',
+ dict(Path={'/etc/bar.conf': dict(__text__="text",
+ attr="attr1")},
+ Package={'quux': dict(),
+ 'xyzzy': dict()}),
+ prio=20)
+ add_entry('test3.xml',
+ dict(Path={'/etc/baz.conf': dict()},
+ Package={'xyzzy': dict()}),
+ prio=20)
+
+ # test with exactly one match, __children__
+ reset()
+ self.assertItemsEqual(pd.get_attrs(entry, metadata),
+ dict(attr="attr1"))
+ for src in pd.entries.values():
+ src.Cache.assert_called_with(metadata)
+ self.assertEqual(len(entry.getchildren()), 1)
+ self.assertXMLEqual(entry.getchildren()[0], children[0])
+
+ # test with multiple matches with different priorities, __text__
+ reset()
+ entry = lxml.etree.Element("Path", name="/etc/bar.conf")
+ self.assertItemsEqual(pd.get_attrs(entry, metadata),
+ dict(attr="attr1"))
+ for src in pd.entries.values():
+ src.Cache.assert_called_with(metadata)
+ self.assertEqual(entry.text, "text")
+
+ # test with multiple matches with identical priorities
+ reset()
+ entry = lxml.etree.Element("Package", name="xyzzy")
+ self.assertRaises(PluginExecutionError,
+ pd.get_attrs, entry, metadata)
+
+
+class TestSpecificityError(Bcfg2TestCase):
+ """ placeholder for future tests """
+ pass
+
+
+class TestSpecificity(Bcfg2TestCase):
+ test_obj = Specificity
+
+ def get_obj(self, **kwargs):
+ return self.test_obj(**kwargs)
+
+ def test_matches(self):
+ metadata = Mock()
+ metadata.hostname = "foo.example.com"
+ metadata.groups = ["group1", "group2"]
+ self.assertTrue(self.get_obj(all=True).matches(metadata))
+ self.assertTrue(self.get_obj(group="group1").matches(metadata))
+ self.assertTrue(self.get_obj(hostname="foo.example.com").matches(metadata))
+ self.assertFalse(self.get_obj().matches(metadata))
+ self.assertFalse(self.get_obj(group="group3").matches(metadata))
+ self.assertFalse(self.get_obj(hostname="bar.example.com").matches(metadata))
+
+ def test__cmp(self):
+ specs = [self.get_obj(all=True),
+ self.get_obj(group="group1", prio=10),
+ self.get_obj(group="group1", prio=20),
+ self.get_obj(hostname="foo.example.com")]
+
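+        # specs is ordered least- to most-specific; a more specific entry
+        # should sort first, i.e. compare as less than a less specific one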
+ for i in range(len(specs)):
+ for j in range(len(specs)):
+ if i == j:
+ self.assertEqual(0, specs[i].__cmp__(specs[j]))
+ self.assertEqual(0, specs[j].__cmp__(specs[i]))
+ elif i > j:
+ self.assertEqual(-1, specs[i].__cmp__(specs[j]))
+ self.assertEqual(1, specs[j].__cmp__(specs[i]))
+ elif i < j:
+ self.assertEqual(1, specs[i].__cmp__(specs[j]))
+ self.assertEqual(-1, specs[j].__cmp__(specs[i]))
+
+ def test_cmp(self):
+ """ test __lt__/__gt__/__eq__ """
+ specs = [self.get_obj(all=True),
+ self.get_obj(group="group1", prio=10),
+ self.get_obj(group="group1", prio=20),
+ self.get_obj(hostname="foo.example.com")]
+
+ for i in range(len(specs)):
+ for j in range(len(specs)):
+ if i < j:
+ self.assertGreater(specs[i], specs[j])
+ self.assertLess(specs[j], specs[i])
+ self.assertGreaterEqual(specs[i], specs[j])
+ self.assertLessEqual(specs[j], specs[i])
+ elif i == j:
+ self.assertEqual(specs[i], specs[j])
+ self.assertEqual(specs[j], specs[i])
+ self.assertLessEqual(specs[i], specs[j])
+ self.assertGreaterEqual(specs[j], specs[i])
+ elif i > j:
+ self.assertLess(specs[i], specs[j])
+ self.assertGreater(specs[j], specs[i])
+ self.assertLessEqual(specs[i], specs[j])
+ self.assertGreaterEqual(specs[j], specs[i])
+
+
+class TestSpecificData(Bcfg2TestCase):
+ test_obj = SpecificData
+ path = os.path.join(datastore, "test.txt")
+
+ def get_obj(self, name=None, specific=None, encoding=None):
+ if name is None:
+ name = self.path
+ if specific is None:
+ specific = Mock()
+ return self.test_obj(name, specific, encoding)
+
+ @patch("%s.open" % builtins)
+ def test_handle_event(self, mock_open):
+ event = Mock()
+ event.code2str.return_value = 'deleted'
+ sd = self.get_obj()
+ sd.handle_event(event)
+ self.assertFalse(mock_open.called)
+ if hasattr(sd, 'data'):
+ self.assertIsNone(sd.data)
+ else:
+ self.assertFalse(hasattr(sd, 'data'))
+
+ event = Mock()
+ mock_open.return_value.read.return_value = "test"
+ sd.handle_event(event)
+ mock_open.assert_called_with(self.path)
+ mock_open.return_value.read.assert_any_call()
+ self.assertEqual(sd.data, "test")
+
+
+class TestEntrySet(TestDebuggable):
+ test_obj = EntrySet
+ # filenames that should be matched successfully by the EntrySet
+ # 'specific' regex. these are filenames alone -- a specificity
+ # will be added to these
+ basenames = ["test", "test.py", "test with spaces.txt",
+ "test.multiple.dots.py", "test_underscores.and.dots",
+ "really_misleading.G10_test",
+ "name$with*regex(special){chars}",
+ "misleading.H_hostname.test.com"]
+ # filenames that do not match any of the basenames (or the
+ # basename regex, if applicable)
+ bogus_names = ["bogus"]
+ # filenames that should be ignored
+ ignore = ["foo~", ".#foo", ".foo.swp", ".foo.swx",
+ "test.txt.genshi_include", "test.G_foo.genshi_include"]
+
+
+ def get_obj(self, basename="test", path=datastore, entry_type=MagicMock(),
+ encoding=None):
+ return self.test_obj(basename, path, entry_type, encoding)
+
+ def test__init(self):
+ for basename in self.basenames:
+ eset = self.get_obj(basename=basename)
+ self.assertIsInstance(eset.specific, re_type)
+ self.assertTrue(eset.specific.match(os.path.join(datastore,
+ basename)))
+ ppath = os.path.join(datastore, "Plugin", basename)
+ self.assertTrue(eset.specific.match(ppath))
+ self.assertTrue(eset.specific.match(ppath + ".G20_foo"))
+ self.assertTrue(eset.specific.match(ppath + ".G1_foo"))
+ self.assertTrue(eset.specific.match(ppath + ".G32768_foo"))
+ # a group named '_'
+ self.assertTrue(eset.specific.match(ppath + ".G10__"))
+ self.assertTrue(eset.specific.match(ppath + ".H_hostname"))
+ self.assertTrue(eset.specific.match(ppath + ".H_fqdn.subdomain.example.com"))
+ self.assertTrue(eset.specific.match(ppath + ".G20_group_with_underscores"))
+
+ self.assertFalse(eset.specific.match(ppath + ".G20_group with spaces"))
+ self.assertFalse(eset.specific.match(ppath + ".G_foo"))
+ self.assertFalse(eset.specific.match(ppath + ".G_"))
+ self.assertFalse(eset.specific.match(ppath + ".G20_"))
+ self.assertFalse(eset.specific.match(ppath + ".H_"))
+
+ for bogus in self.bogus_names:
+ self.assertFalse(eset.specific.match(os.path.join(datastore,
+ "Plugin",
+ bogus)))
+
+ for ignore in self.ignore:
+ self.assertTrue(eset.ignore.match(ignore))
+
+ self.assertFalse(eset.ignore.match(basename))
+ self.assertFalse(eset.ignore.match(basename + ".G20_foo"))
+ self.assertFalse(eset.ignore.match(basename + ".G1_foo"))
+ self.assertFalse(eset.ignore.match(basename + ".G32768_foo"))
+ self.assertFalse(eset.ignore.match(basename + ".G10__"))
+ self.assertFalse(eset.ignore.match(basename + ".H_hostname"))
+ self.assertFalse(eset.ignore.match(basename + ".H_fqdn.subdomain.example.com"))
+ self.assertFalse(eset.ignore.match(basename + ".G20_group_with_underscores"))
+
+ def test_get_matching(self):
+ items = {0: Mock(), 1: Mock(), 2: Mock(), 3: Mock(), 4: Mock(),
+ 5: Mock()}
+ items[0].specific.matches.return_value = False
+ items[1].specific.matches.return_value = True
+ items[2].specific.matches.return_value = False
+ items[3].specific.matches.return_value = False
+ items[4].specific.matches.return_value = True
+ items[5].specific.matches.return_value = True
+ metadata = Mock()
+ eset = self.get_obj()
+ eset.entries = items
+ self.assertItemsEqual(eset.get_matching(metadata),
+ [items[1], items[4], items[5]])
+ for i in items.values():
+ i.specific.matches.assert_called_with(metadata)
+
+ @patch("Bcfg2.Server.Plugin.%s.get_matching" % test_obj.__name__)
+ def test_best_matching(self, mock_get_matching):
+ eset = self.get_obj()
+ metadata = Mock()
+ matching = []
+
+ def reset():
+ mock_get_matching.reset_mock()
+ metadata.reset_mock()
+ for m in matching:
+ m.reset_mock()
+
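+        # helper: build a mock entry whose .specific is a real Specificity so
+        # that best_matching() can rank the candidates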
+ def specific(all=False, group=False, prio=None, hostname=False):
+ spec = Mock()
+ spec.specific = Specificity(all=all, group=group, prio=prio,
+ hostname=hostname)
+ return spec
+
+ self.assertRaises(PluginExecutionError,
+ eset.best_matching, metadata, matching=[])
+
+ reset()
+ mock_get_matching.return_value = matching
+ self.assertRaises(PluginExecutionError,
+ eset.best_matching, metadata)
+ mock_get_matching.assert_called_with(metadata)
+
+ # test with a single file for all
+ reset()
+ expected = specific(all=True)
+ matching.append(expected)
+ mock_get_matching.return_value = matching
+ self.assertEqual(eset.best_matching(metadata), expected)
+ mock_get_matching.assert_called_with(metadata)
+
+ # test with a single group-specific file
+ reset()
+ expected = specific(group=True, prio=10)
+ matching.append(expected)
+ mock_get_matching.return_value = matching
+ self.assertEqual(eset.best_matching(metadata), expected)
+ mock_get_matching.assert_called_with(metadata)
+
+ # test with multiple group-specific files
+ reset()
+ expected = specific(group=True, prio=20)
+ matching.append(expected)
+ mock_get_matching.return_value = matching
+ self.assertEqual(eset.best_matching(metadata), expected)
+ mock_get_matching.assert_called_with(metadata)
+
+ # test with host-specific file
+ reset()
+ expected = specific(hostname=True)
+ matching.append(expected)
+ mock_get_matching.return_value = matching
+ self.assertEqual(eset.best_matching(metadata), expected)
+ mock_get_matching.assert_called_with(metadata)
+
+ @patch("Bcfg2.Server.Plugin.%s.entry_init" % test_obj.__name__)
+ @patch("Bcfg2.Server.Plugin.%s.reset_metadata" % test_obj.__name__)
+ @patch("Bcfg2.Server.Plugin.%s.update_metadata" % test_obj.__name__)
+ def test_handle_event(self, mock_update_md, mock_reset_md, mock_init):
+ def reset():
+ mock_update_md.reset_mock()
+ mock_reset_md.reset_mock()
+ mock_init.reset_mock()
+
+ eset = self.get_obj()
+ for fname in ["info", "info.xml", ":info"]:
+ for evt in ["exists", "created", "changed"]:
+ reset()
+ event = Mock()
+ event.code2str.return_value = evt
+ event.filename = fname
+ eset.handle_event(event)
+ mock_update_md.assert_called_with(event)
+ self.assertFalse(mock_init.called)
+ self.assertFalse(mock_reset_md.called)
+
+ reset()
+ event = Mock()
+ event.code2str.return_value = "deleted"
+ event.filename = fname
+ eset.handle_event(event)
+ mock_reset_md.assert_called_with(event)
+ self.assertFalse(mock_init.called)
+ self.assertFalse(mock_update_md.called)
+
+ for evt in ["exists", "created", "changed"]:
+ reset()
+ event = Mock()
+ event.code2str.return_value = evt
+ event.filename = "test.txt"
+ eset.handle_event(event)
+ mock_init.assert_called_with(event)
+ self.assertFalse(mock_reset_md.called)
+ self.assertFalse(mock_update_md.called)
+
+ reset()
+ entry = Mock()
+ eset.entries["test.txt"] = entry
+ event = Mock()
+ event.code2str.return_value = "changed"
+ event.filename = "test.txt"
+ eset.handle_event(event)
+ entry.handle_event.assert_called_with(event)
+ self.assertFalse(mock_init.called)
+ self.assertFalse(mock_reset_md.called)
+ self.assertFalse(mock_update_md.called)
+
+ reset()
+ entry = Mock()
+ eset.entries["test.txt"] = entry
+ event = Mock()
+ event.code2str.return_value = "deleted"
+ event.filename = "test.txt"
+ eset.handle_event(event)
+ self.assertNotIn("test.txt", eset.entries)
+
+ @patch("Bcfg2.Server.Plugin.%s.specificity_from_filename" %
+ test_obj.__name__)
+ def test_entry_init(self, mock_spec):
+ eset = self.get_obj()
+
+ def reset():
+ eset.entry_type.reset_mock()
+ mock_spec.reset_mock()
+
+ event = Mock()
+ event.code2str.return_value = "created"
+ event.filename = "test.txt"
+ eset.entry_init(event)
+ mock_spec.assert_called_with("test.txt", specific=None)
+ eset.entry_type.assert_called_with(os.path.join(eset.path, "test.txt"),
+ mock_spec.return_value, None)
+ eset.entry_type.return_value.handle_event.assert_called_with(event)
+ self.assertIn("test.txt", eset.entries)
+
+ # test duplicate add
+ reset()
+ eset.entry_init(event)
+ self.assertFalse(mock_spec.called)
+ self.assertFalse(eset.entry_type.called)
+ eset.entries["test.txt"].handle_event.assert_called_with(event)
+
+ # test keyword args
+ etype = Mock()
+ specific = Mock()
+ event = Mock()
+ event.code2str.return_value = "created"
+ event.filename = "test2.txt"
+ eset.entry_init(event, entry_type=etype, specific=specific)
+ mock_spec.assert_called_with("test2.txt", specific=specific)
+ etype.assert_called_with(os.path.join(eset.path, "test2.txt"),
+ mock_spec.return_value, None)
+ etype.return_value.handle_event.assert_called_with(event)
+ self.assertIn("test2.txt", eset.entries)
+
+ # test specificity error
+ event = Mock()
+ event.code2str.return_value = "created"
+ event.filename = "test3.txt"
+ mock_spec.side_effect = SpecificityError
+ eset.entry_init(event)
+ mock_spec.assert_called_with("test3.txt", specific=None)
+ self.assertFalse(eset.entry_type.called)
+
+ @patch("Bcfg2.Server.Plugin.Specificity")
+ def test_specificity_from_filename(self, mock_spec):
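+        # helper: assert that specificity_from_filename() builds a
+        # Specificity with exactly the given keyword arguments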
+ def test(eset, fname, **kwargs):
+ mock_spec.reset_mock()
+ if "specific" in kwargs:
+ specific = kwargs['specific']
+ del kwargs['specific']
+ else:
+ specific = None
+ self.assertEqual(eset.specificity_from_filename(fname,
+ specific=specific),
+ mock_spec.return_value)
+ mock_spec.assert_called_with(**kwargs)
+
+ def fails(eset, fname, specific=None):
+ mock_spec.reset_mock()
+ self.assertRaises(SpecificityError,
+ eset.specificity_from_filename, fname,
+ specific=specific)
+
+ for basename in self.basenames:
+ eset = self.get_obj(basename=basename)
+ ppath = os.path.join(datastore, "Plugin", basename)
+ test(eset, ppath, all=True)
+ test(eset, ppath + ".G20_foo", group="foo", prio=20)
+ test(eset, ppath + ".G1_foo", group="foo", prio=1)
+ test(eset, ppath + ".G32768_foo", group="foo", prio=32768)
+ test(eset, ppath + ".G10__", group="_", prio=10)
+ test(eset, ppath + ".H_hostname", hostname="hostname")
+ test(eset, ppath + ".H_fqdn.subdomain.example.com",
+ hostname="fqdn.subdomain.example.com")
+ test(eset, ppath + ".G20_group_with_underscores",
+ group="group_with_underscores", prio=20)
+
+ for bogus in self.bogus_names:
+ fails(eset, bogus)
+ fails(eset, ppath + ".G_group with spaces")
+ fails(eset, ppath + ".G_foo")
+ fails(eset, ppath + ".G_")
+ fails(eset, ppath + ".G20_")
+ fails(eset, ppath + ".H_")
+
+ @patch("%s.open" % builtins)
+ @patch("Bcfg2.Server.Plugin.InfoXML")
+ def test_update_metadata(self, mock_InfoXML, mock_open):
+ eset = self.get_obj()
+
+ # add info.xml
+ event = Mock()
+ event.filename = "info.xml"
+ eset.update_metadata(event)
+ mock_InfoXML.assert_called_with(os.path.join(eset.path, "info.xml"))
+ mock_InfoXML.return_value.HandleEvent.assert_called_with(event)
+ self.assertEqual(eset.infoxml, mock_InfoXML.return_value)
+
+ # modify info.xml
+ mock_InfoXML.reset_mock()
+ eset.update_metadata(event)
+ self.assertFalse(mock_InfoXML.called)
+ eset.infoxml.HandleEvent.assert_called_with(event)
+
+ for fname in [':info', 'info']:
+ event = Mock()
+ event.filename = fname
+
+ idata = ["owner:owner",
+ "group: GROUP",
+ "perms: 775",
+ "important: true",
+ "bogus: line"]
+ mock_open.return_value.readlines.return_value = idata
+ eset.update_metadata(event)
+ expected = default_file_metadata.copy()
+ expected['owner'] = 'owner'
+ expected['group'] = 'GROUP'
+ expected['perms'] = '0775'
+ expected['important'] = 'true'
+ self.assertItemsEqual(eset.metadata,
+ expected)
+
+ def test_reset_metadata(self):
+ eset = self.get_obj()
+
+ # test info.xml
+ event = Mock()
+ event.filename = "info.xml"
+ eset.infoxml = Mock()
+ eset.reset_metadata(event)
+ self.assertIsNone(eset.infoxml)
+
+ for fname in [':info', 'info']:
+ event = Mock()
+ event.filename = fname
+ eset.metadata = Mock()
+ eset.reset_metadata(event)
+ self.assertItemsEqual(eset.metadata, default_file_metadata)
+
+ @patch("Bcfg2.Server.Plugin.bind_info")
+ def test_bind_info_to_entry(self, mock_bind_info):
+ eset = self.get_obj()
+ entry = Mock()
+ metadata = Mock()
+ eset.bind_info_to_entry(entry, metadata)
+ mock_bind_info.assert_called_with(entry, metadata,
+ infoxml=eset.infoxml,
+ default=eset.metadata)
+
+ @patch("Bcfg2.Server.Plugin.%s.best_matching" % test_obj.__name__)
+ @patch("Bcfg2.Server.Plugin.%s.bind_info_to_entry" % test_obj.__name__)
+ def test_bind_entry(self, mock_bind_info, mock_best_matching):
+ eset = self.get_obj()
+ entry = Mock()
+ metadata = Mock()
+ eset.bind_entry(entry, metadata)
+ mock_bind_info.assert_called_with(entry, metadata)
+ mock_best_matching.assert_called_with(metadata)
+ mock_best_matching.return_value.bind_entry.assert_called_with(entry,
+ metadata)
+
+
+class TestGroupSpool(TestPlugin, TestGenerator):
+ test_obj = GroupSpool
+
+ @patch("Bcfg2.Server.Plugin.%s.AddDirectoryMonitor" % test_obj.__name__)
+ def get_obj(self, core=None):
+ return TestPlugin.get_obj(self, core=core)
+
+ @patch("Bcfg2.Server.Plugin.%s.AddDirectoryMonitor" % test_obj.__name__)
+ def test__init(self, mock_Add):
+ core = Mock()
+ gs = self.test_obj(core, datastore)
+ mock_Add.assert_called_with('')
+ self.assertItemsEqual(gs.Entries, {gs.entry_type: {}})
+
+ @patch("os.path.isdir")
+ @patch("os.path.isfile")
+ @patch("Bcfg2.Server.Plugin.%s.event_id" % test_obj.__name__)
+ @patch("Bcfg2.Server.Plugin.%s.event_path" % test_obj.__name__)
+ @patch("Bcfg2.Server.Plugin.%s.AddDirectoryMonitor" % test_obj.__name__)
+ def test_add_entry(self, mock_Add, mock_event_path, mock_event_id,
+ mock_isfile, mock_isdir):
+ gs = self.get_obj()
+ gs.es_cls = Mock()
+ gs.es_child_cls = Mock()
+
+ def reset():
+ gs.es_cls.reset_mock()
+ gs.es_child_cls.reset_mock()
+ mock_Add.reset_mock()
+ mock_event_path.reset_mock()
+ mock_event_id.reset_mock()
+ mock_isfile.reset_mock()
+ mock_isdir.reset_mock()
+
+ # directory
+ event = Mock()
+ event.filename = "foo"
+ basedir = "test"
+ epath = os.path.join(gs.data, basedir, event.filename)
+ ident = os.path.join(basedir, event.filename)
+ mock_event_path.return_value = epath
+ mock_event_id.return_value = ident
+ mock_isdir.return_value = True
+ mock_isfile.return_value = False
+ gs.add_entry(event)
+ mock_Add.assert_called_with(os.path.join("/" + basedir, event.filename))
+ self.assertNotIn(ident, gs.entries)
+ mock_isdir.assert_called_with(epath)
+
+ # file that is not in self.entries
+ reset()
+ event = Mock()
+ event.filename = "foo"
+ basedir = "test/foo/"
+ epath = os.path.join(gs.data, basedir, event.filename)
+ ident = basedir[:-1]
+ mock_event_path.return_value = epath
+ mock_event_id.return_value = ident
+ mock_isdir.return_value = False
+ mock_isfile.return_value = True
+ gs.add_entry(event)
+ self.assertFalse(mock_Add.called)
+ gs.es_cls.assert_called_with(gs.filename_pattern,
+ gs.data + ident,
+ gs.es_child_cls,
+ gs.encoding)
+ self.assertIn(ident, gs.entries)
+ self.assertEqual(gs.entries[ident], gs.es_cls.return_value)
+ self.assertIn(ident, gs.Entries[gs.entry_type])
+ self.assertEqual(gs.Entries[gs.entry_type][ident],
+ gs.es_cls.return_value.bind_entry)
+ gs.entries[ident].handle_event.assert_called_with(event)
+ mock_isfile.assert_called_with(epath)
+
+ # file that is in self.entries
+ reset()
+ gs.add_entry(event)
+ self.assertFalse(mock_Add.called)
+ self.assertFalse(gs.es_cls.called)
+ gs.entries[ident].handle_event.assert_called_with(event)
+
+ def test_event_path(self):
+ gs = self.get_obj()
+ gs.handles[1] = "/var/lib/foo/"
+ gs.handles[2] = "/etc/foo/"
+ gs.handles[3] = "/usr/share/foo/"
+ event = Mock()
+ event.filename = "foo"
+ for i in range(1, 4):
+ event.requestID = i
+ self.assertEqual(gs.event_path(event),
+ os.path.join(datastore, gs.name,
+ gs.handles[event.requestID].lstrip('/'),
+ event.filename))
+
+ @patch("os.path.isdir")
+ @patch("Bcfg2.Server.Plugin.%s.event_path" % test_obj.__name__)
+ def test_event_id(self, mock_event_path, mock_isdir):
+ gs = self.get_obj()
+
+ def reset():
+ mock_event_path.reset_mock()
+ mock_isdir.reset_mock()
+
+ gs.handles[1] = "/var/lib/foo/"
+ gs.handles[2] = "/etc/foo/"
+ gs.handles[3] = "/usr/share/foo/"
+ event = Mock()
+ event.filename = "foo"
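+        # event_id() maps a directory event to the relative path of the new
+        # directory and a file event to its containing handle directory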
+ for i in range(1, 4):
+ event.requestID = i
+ reset()
+ mock_isdir.return_value = True
+ self.assertEqual(gs.event_id(event),
+ os.path.join(gs.handles[event.requestID].lstrip('/'),
+ event.filename))
+ mock_isdir.assert_called_with(mock_event_path.return_value)
+
+ reset()
+ mock_isdir.return_value = False
+ self.assertEqual(gs.event_id(event),
+ gs.handles[event.requestID].rstrip('/'))
+ mock_isdir.assert_called_with(mock_event_path.return_value)
+
+ def test_toggle_debug(self):
+ gs = self.get_obj()
+ gs.entries = {"/foo": Mock(),
+ "/bar": Mock(),
+ "/baz/quux": Mock()}
+
+ @patch("Bcfg2.Server.Plugin.Plugin.toggle_debug")
+ def inner(mock_debug):
+ gs.toggle_debug()
+ mock_debug.assert_called_with(gs)
+ for entry in gs.entries.values():
+ entry.toggle_debug.assert_any_call()
+
+ inner()
+
+ TestPlugin.test_toggle_debug(self)
+
+ def test_HandleEvent(self):
+ gs = self.get_obj()
+ gs.entries = {"/foo": Mock(),
+ "/bar": Mock(),
+ "/baz": Mock(),
+ "/baz/quux": Mock()}
+ for path in gs.entries.keys():
+ gs.Entries[gs.entry_type] = {path: Mock()}
+ gs.handles = {1: "/foo/",
+ 2: "/bar/",
+ 3: "/baz/",
+ 4: "/baz/quux"}
+
+ gs.add_entry = Mock()
+ gs.event_id = Mock()
+
+ def reset():
+ gs.add_entry.reset_mock()
+ gs.event_id.reset_mock()
+ for entry in gs.entries.values():
+ entry.reset_mock()
+
+ # test event creation, changing entry that doesn't exist
+ for evt in ["exists", "created", "changed"]:
+ reset()
+ event = Mock()
+ event.filename = "foo"
+ event.requestID = 1
+ event.code2str.return_value = evt
+ gs.HandleEvent(event)
+ gs.event_id.assert_called_with(event)
+ gs.add_entry.assert_called_with(event)
+
+ # test deleting entry, changing entry that does exist
+ for evt in ["changed", "deleted"]:
+ reset()
+ event = Mock()
+ event.filename = "quux"
+ event.requestID = 4
+ event.code2str.return_value = evt
+ gs.event_id.return_value = "/baz/quux"
+ gs.HandleEvent(event)
+ gs.event_id.assert_called_with(event)
+ self.assertIn(gs.event_id.return_value, gs.entries)
+ gs.entries[gs.event_id.return_value].handle_event.assert_called_with(event)
+ self.assertFalse(gs.add_entry.called)
+
+ # test deleting directory
+ reset()
+ event = Mock()
+ event.filename = "quux"
+ event.requestID = 3
+ event.code2str.return_value = "deleted"
+ gs.event_id.return_value = "/baz/quux"
+ gs.HandleEvent(event)
+ gs.event_id.assert_called_with(event)
+ self.assertNotIn("/baz/quux", gs.entries)
+ self.assertNotIn("/baz/quux", gs.Entries[gs.entry_type])
+
+
+
diff --git a/testsuite/Testlib/TestServer/TestPlugins/TestMetadata.py b/testsuite/Testlib/TestServer/TestPlugins/TestMetadata.py
index 455731d00..2ff0af78e 100644
--- a/testsuite/Testlib/TestServer/TestPlugins/TestMetadata.py
+++ b/testsuite/Testlib/TestServer/TestPlugins/TestMetadata.py
@@ -1,17 +1,31 @@
import os
+import sys
import copy
import time
import socket
-import unittest
import lxml.etree
-from mock import Mock, patch
+import Bcfg2.Server
import Bcfg2.Server.Plugin
from Bcfg2.Server.Plugins.Metadata import *
+from mock import Mock, patch
-XI_NAMESPACE = "http://www.w3.org/2001/XInclude"
-XI = "{%s}" % XI_NAMESPACE
-
-clients_test_tree = lxml.etree.XML('''
+# add all parent testsuite directories to sys.path to allow (most)
+# relative imports in python 2.4
+path = os.path.dirname(__file__)
+while path != "/":
+ if os.path.basename(path).lower().startswith("test"):
+ sys.path.append(path)
+ if os.path.basename(path) == "testsuite":
+ break
+ path = os.path.dirname(path)
+from common import XI_NAMESPACE, XI, inPy3k, call, builtins, u, can_skip, \
+ skip, skipIf, skipUnless, Bcfg2TestCase, DBModelTestCase, syncdb, \
+ patchIf, datastore
+from TestPlugin import TestXMLFileBacked, TestMetadata as _TestMetadata, \
+ TestStatistics, TestDatabaseBacked
+
+def get_clients_test_tree():
+ return lxml.etree.XML('''
<Clients>
<Client name="client1" address="1.2.3.1" auth="cert"
location="floating" password="password2" profile="group1"/>
@@ -30,10 +44,19 @@ clients_test_tree = lxml.etree.XML('''
<Client name="client8" profile="group1" auth="cert+password"
address="1.2.3.5"/>
<Client name="client9" profile="group2" secure="true" password="password3"/>
+ <Client name="client10" profile="group1" floating="true"/>
</Clients>''').getroottree()
-groups_test_tree = lxml.etree.XML('''
+def get_groups_test_tree():
+ return lxml.etree.XML('''
<Groups xmlns:xi="http://www.w3.org/2001/XInclude">
+ <Client name="client8">
+ <Group name="group8"/>
+ </Client>
+ <Client name="client9">
+ <Group name="group8"/>
+ </Client>
+
<Group name="group1" default="true" profile="true" public="true"
category="category1"/>
<Group name="group2" profile="true" public="true" category="category1">
@@ -53,103 +76,246 @@ groups_test_tree = lxml.etree.XML('''
</Group>
<Group name="group8">
<Group name="group9"/>
+ <Client name="client9">
+ <Group name="group11"/>
+ <Group name="group9" negate="true"/>
+ </Client>
+ <Group name="group1">
+ <Group name="group10"/>
+ </Group>
</Group>
</Groups>''').getroottree()
-datastore = "/"
-def get_metadata_object(core=None, watch_clients=False):
+def get_metadata_object(core=None, watch_clients=False, use_db=False):
if core is None:
core = Mock()
- metadata = Metadata(core, datastore, watch_clients=watch_clients)
- #metadata.logger = Mock()
- return metadata
+ core.setup.cfp.getboolean = Mock(return_value=use_db)
+ return Metadata(core, datastore, watch_clients=watch_clients)
+
+
+class TestMetadataDB(DBModelTestCase):
+ if has_django:
+ models = [MetadataClientModel]
+
+
+if has_django or can_skip:
+ class TestClientVersions(Bcfg2TestCase):
+ test_clients = dict(client1="1.2.0",
+ client2="1.2.2",
+ client3="1.3.0pre1",
+ client4="1.1.0",
+ client5=None,
+ client6=None)
+
+ @skipUnless(has_django, "Django not found")
+ def setUp(self):
+ syncdb(TestMetadataDB)
+ for client, version in self.test_clients.items():
+ MetadataClientModel(hostname=client, version=version).save()
+
+ def test__contains(self):
+ v = ClientVersions()
+ self.assertIn("client1", v)
+ self.assertIn("client5", v)
+ self.assertNotIn("client__contains", v)
+
+ def test_keys(self):
+ v = ClientVersions()
+ self.assertItemsEqual(self.test_clients.keys(), v.keys())
+
+ def test__setitem(self):
+ v = ClientVersions()
+
+ # test setting version of existing client
+ v["client1"] = "1.2.3"
+ self.assertIn("client1", v)
+ self.assertEqual(v['client1'], "1.2.3")
+ client = MetadataClientModel.objects.get(hostname="client1")
+ self.assertEqual(client.version, "1.2.3")
+
+ # test adding new client
+ new = "client__setitem"
+ v[new] = "1.3.0"
+ self.assertIn(new, v)
+ self.assertEqual(v[new], "1.3.0")
+ client = MetadataClientModel.objects.get(hostname=new)
+ self.assertEqual(client.version, "1.3.0")
+
+ # test adding new client with no version
+ new2 = "client__setitem_2"
+ v[new2] = None
+ self.assertIn(new2, v)
+ self.assertEqual(v[new2], None)
+ client = MetadataClientModel.objects.get(hostname=new2)
+ self.assertEqual(client.version, None)
+
+        def test__getitem(self):
+            v = ClientVersions()
+
+            # test getting existing client
+            self.assertEqual(v['client2'], "1.2.2")
+            self.assertIsNone(v['client5'])
+
+            # test exception on nonexistent client
+            expected = KeyError
+            try:
+                v['clients__getitem']
+            except expected:
+                pass
+            except:
+                err = sys.exc_info()[1]
+                self.assertFalse(True, "%s raised instead of %s" %
+                                 (err.__class__.__name__,
+                                  expected.__name__))
+            else:
+                self.assertFalse(True,
+                                 "%s not raised" % expected.__name__)
+
+        def test__len(self):
+            v = ClientVersions()
+            self.assertEqual(len(v), MetadataClientModel.objects.count())
-class TestXMLMetadataConfig(unittest.TestCase):
- def get_config_object(self, basefile="clients.xml", core=None,
- watch_clients=False):
+        def test__iter(self):
+            v = ClientVersions()
+            self.assertItemsEqual([h for h in iter(v)], v.keys())
+
+        def test__delitem(self):
+            v = ClientVersions()
+
+            # test adding new client
+            new = "client__delitem"
+            v[new] = "1.3.0"
+
+            del v[new]
+            self.assertIn(new, v)
+            self.assertIsNone(v[new])
+
+
+class TestXMLMetadataConfig(TestXMLFileBacked):
+ test_obj = XMLMetadataConfig
+
+ def get_obj(self, basefile="clients.xml", core=None, watch_clients=False):
self.metadata = get_metadata_object(core=core,
watch_clients=watch_clients)
return XMLMetadataConfig(self.metadata, watch_clients, basefile)
+ def test__init(self):
+ xmc = self.get_obj()
+ self.assertEqual(self.metadata.core.fam, xmc.fam)
+ self.assertFalse(xmc.fam.AddMonitor.called)
+
def test_xdata(self):
- config = self.get_config_object()
- # we can't use assertRaises here because xdata is a property
+ config = self.get_obj()
+ expected = Bcfg2.Server.Plugin.MetadataRuntimeError
try:
config.xdata
- except MetadataRuntimeError:
+ except expected:
pass
except:
- assert False
+ err = sys.exc_info()[1]
+ self.assertFalse(True, "%s raised instead of %s" %
+ (err.__class__.__name__,
+ expected.__name__))
+ else:
+ self.assertFalse(True,
+ "%s not raised" % expected.__name__)
+
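+ # (equivalently, since xdata is a property, one could write
+ #  self.assertRaises(expected, getattr, config, "xdata") -- shown
+ #  only as an illustrative alternative, not what this test relies on)
+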
config.data = "<test/>"
self.assertEqual(config.xdata, "<test/>")
def test_base_xdata(self):
- config = self.get_config_object()
+ config = self.get_obj()
# we can't use assertRaises here because base_xdata is a property
+ expected = Bcfg2.Server.Plugin.MetadataRuntimeError
try:
config.base_xdata
- except MetadataRuntimeError:
+ except expected:
pass
except:
- assert False
+ err = sys.exc_info()[1]
+ self.assertFalse(True, "%s raised instead of %s" %
+ (err.__class__.__name__,
+ expected.__name__))
+ else:
+ self.assertFalse(True,
+ "%s not raised" % expected.__name__)
+
config.basedata = "<test/>"
self.assertEqual(config.base_xdata, "<test/>")
def test_add_monitor(self):
core = Mock()
- config = self.get_config_object(core=core)
+ config = self.get_obj(core=core)
fname = "test.xml"
- fpath = os.path.join(self.metadata.data, "test.xml")
+ fpath = os.path.join(self.metadata.data, fname)
config.extras = []
- config.add_monitor(fpath, fname)
+ config.add_monitor(fpath)
self.assertFalse(core.fam.AddMonitor.called)
- self.assertEqual(config.extras, [])
+ self.assertEqual(config.extras, [fpath])
+
+ config = self.get_obj(core=core, watch_clients=True)
+ config.add_monitor(fpath)
+ core.fam.AddMonitor.assert_called_with(fpath, config.metadata)
+ self.assertItemsEqual(config.extras, [fpath])
- config = self.get_config_object(core=core, watch_clients=True)
- config.add_monitor(fpath, fname)
- core.fam.AddMonitor.assert_called_with(fpath, self.metadata)
- self.assertItemsEqual(config.extras, [fname])
+ def test_Index(self):
+ # Index() isn't used on XMLMetadataConfig objects
+ pass
- @patch("Bcfg2.Server.Plugins.Metadata.XMLMetadataConfig.add_monitor")
@patch("lxml.etree.parse")
- def test_load_xml(self, mock_parse, mock_add_monitor):
- config = self.get_config_object("clients.xml")
+ @patch("Bcfg2.Server.Plugins.Metadata.XMLMetadataConfig._follow_xincludes")
+ def test_load_xml(self, mock_follow, mock_parse):
+ config = self.get_obj("clients.xml")
+
+ def reset():
+ mock_parse.reset_mock()
+ mock_follow.reset_mock()
+ config.data = None
+ config.basedata = None
+
+ reset()
+ config.load_xml()
+ mock_follow.assert_called_with(xdata=mock_parse.return_value)
+ mock_parse.assert_called_with(os.path.join(config.basedir,
+ "clients.xml"),
+ parser=Bcfg2.Server.XMLParser)
+ self.assertFalse(mock_parse.return_value.xinclude.called)
+ self.assertEqual(config.data, mock_parse.return_value)
+ self.assertIsNotNone(config.basedata)
+
+ reset()
mock_parse.side_effect = lxml.etree.XMLSyntaxError(None, None, None,
None)
config.load_xml()
+ mock_parse.assert_called_with(os.path.join(config.basedir,
+ "clients.xml"),
+ parser=Bcfg2.Server.XMLParser)
self.assertIsNone(config.data)
self.assertIsNone(config.basedata)
- config.data = None
- config.basedata = None
+ reset()
mock_parse.side_effect = None
- mock_parse.return_value.findall = Mock(return_value=[])
+ def follow_xincludes(xdata=None):
+ config.extras = [Mock(), Mock()]
+ mock_follow.side_effect = follow_xincludes
config.load_xml()
- self.assertIsNotNone(config.data)
+ mock_follow.assert_called_with(xdata=mock_parse.return_value)
+ mock_parse.assert_called_with(os.path.join(config.basedir,
+ "clients.xml"),
+ parser=Bcfg2.Server.XMLParser)
+ mock_parse.return_value.xinclude.assert_any_call()
+ self.assertEqual(config.data, mock_parse.return_value)
self.assertIsNotNone(config.basedata)
- config.data = None
- config.basedata = None
-
- def side_effect(*args):
- def second_call(*args):
- return []
- mock_parse.return_value.findall.side_effect = second_call
- return [lxml.etree.Element(XI + "include", href="more.xml"),
- lxml.etree.Element(XI + "include", href="evenmore.xml")]
-
- mock_parse.return_value.findall = Mock(side_effect=side_effect)
- config.load_xml()
- mock_add_monitor.assert_any_call("more.xml")
- mock_add_monitor.assert_any_call("evenmore.xml")
@patch("Bcfg2.Server.Plugins.Metadata.XMLMetadataConfig.write_xml")
def test_write(self, mock_write_xml):
- config = self.get_config_object("clients.xml")
+ config = self.get_obj("clients.xml")
config.basedata = "<test/>"
config.write()
mock_write_xml.assert_called_with(os.path.join(self.metadata.data,
@@ -158,7 +324,7 @@ class TestXMLMetadataConfig(unittest.TestCase):
@patch('Bcfg2.Server.Plugins.Metadata.locked', Mock(return_value=False))
@patch('fcntl.lockf', Mock())
- @patch('__builtin__.open')
+ @patch('%s.open' % builtins)
@patch('os.unlink')
@patch('os.rename')
@patch('os.path.islink')
@@ -166,14 +332,14 @@ class TestXMLMetadataConfig(unittest.TestCase):
def test_write_xml(self, mock_readlink, mock_islink, mock_rename,
mock_unlink, mock_open):
fname = "clients.xml"
- config = self.get_config_object(fname)
+ config = self.get_obj(fname)
fpath = os.path.join(self.metadata.data, fname)
tmpfile = "%s.new" % fpath
linkdest = os.path.join(self.metadata.data, "client-link.xml")
mock_islink.return_value = False
- config.write_xml(fpath, clients_test_tree)
+ config.write_xml(fpath, get_clients_test_tree())
mock_open.assert_called_with(tmpfile, "w")
self.assertTrue(mock_open.return_value.write.called)
mock_islink.assert_called_with(fpath)
@@ -181,41 +347,42 @@ class TestXMLMetadataConfig(unittest.TestCase):
mock_islink.return_value = True
mock_readlink.return_value = linkdest
- config.write_xml(fpath, clients_test_tree)
+ config.write_xml(fpath, get_clients_test_tree())
mock_rename.assert_called_with(tmpfile, linkdest)
mock_rename.side_effect = OSError
- self.assertRaises(MetadataRuntimeError,
- config.write_xml, fpath, clients_test_tree)
+ self.assertRaises(Bcfg2.Server.Plugin.MetadataRuntimeError,
+ config.write_xml, fpath, get_clients_test_tree())
mock_open.return_value.write.side_effect = IOError
- self.assertRaises(MetadataRuntimeError,
- config.write_xml, fpath, clients_test_tree)
+ self.assertRaises(Bcfg2.Server.Plugin.MetadataRuntimeError,
+ config.write_xml, fpath, get_clients_test_tree())
mock_unlink.assert_called_with(tmpfile)
mock_open.side_effect = IOError
- self.assertRaises(MetadataRuntimeError,
- config.write_xml, fpath, clients_test_tree)
+ self.assertRaises(Bcfg2.Server.Plugin.MetadataRuntimeError,
+ config.write_xml, fpath, get_clients_test_tree())
@patch("Bcfg2.Server.Plugins.Metadata.XMLMetadataConfig.load_xml", Mock())
@patch('lxml.etree.parse')
def test_find_xml_for_xpath(self, mock_parse):
- config = self.get_config_object("groups.xml")
- config.basedata = groups_test_tree
+ config = self.get_obj("groups.xml")
+ config.basedata = get_groups_test_tree()
xpath = "//Group[@name='group1']"
self.assertItemsEqual(config.find_xml_for_xpath(xpath),
dict(filename=os.path.join(self.metadata.data,
"groups.xml"),
- xmltree=groups_test_tree,
- xquery=groups_test_tree.xpath(xpath)))
+ xmltree=get_groups_test_tree(),
+ xquery=get_groups_test_tree().xpath(xpath)))
self.assertEqual(config.find_xml_for_xpath("//boguselement"), dict())
- config.extras = ["foo.xml", "bar.xml", "clients.xml"]
+ config.extras = [os.path.join(self.metadata.data, p)
+ for p in ["foo.xml", "bar.xml", "clients.xml"]]
- def parse_side_effect(fname):
+ def parse_side_effect(fname, parser=Bcfg2.Server.XMLParser):
if fname == os.path.join(self.metadata.data, "clients.xml"):
- return clients_test_tree
+ return get_clients_test_tree()
else:
return lxml.etree.XML("<null/>").getroottree()
@@ -224,12 +391,12 @@ class TestXMLMetadataConfig(unittest.TestCase):
self.assertItemsEqual(config.find_xml_for_xpath(xpath),
dict(filename=os.path.join(self.metadata.data,
"clients.xml"),
- xmltree=clients_test_tree,
- xquery=clients_test_tree.xpath(xpath)))
+ xmltree=get_clients_test_tree(),
+ xquery=get_clients_test_tree().xpath(xpath)))
@patch("Bcfg2.Server.Plugins.Metadata.XMLMetadataConfig.load_xml")
def test_HandleEvent(self, mock_load_xml):
- config = self.get_config_object("groups.xml")
+ config = self.get_obj("groups.xml")
evt = Mock()
evt.filename = os.path.join(self.metadata.data, "groups.xml")
evt.code2str = Mock(return_value="changed")
@@ -237,27 +404,55 @@ class TestXMLMetadataConfig(unittest.TestCase):
mock_load_xml.assert_called_with()
-class TestClientMetadata(unittest.TestCase):
+class TestClientMetadata(Bcfg2TestCase):
def test_inGroup(self):
cm = ClientMetadata("client1", "group1", ["group1", "group2"],
- ["bundle1"], [], [], [], None, None, None)
+ ["bundle1"], [], [], [], None, None, None, None)
self.assertTrue(cm.inGroup("group1"))
self.assertFalse(cm.inGroup("group3"))
-class TestMetadata(unittest.TestCase):
- def test__init_no_fam(self):
+class TestMetadata(_TestMetadata, TestStatistics, TestDatabaseBacked):
+ test_obj = Metadata
+ use_db = False
+
+ def get_obj(self, core=None, watch_clients=False):
+ return get_metadata_object(core=core, watch_clients=watch_clients,
+ use_db=self.use_db)
+
+ @skipUnless(has_django, "Django not found")
+ def test__use_db(self):
+ # with the way we've set up our metadata tests, it's unwieldy
+ # to test _use_db. however, given the way get_obj works, if
+ # there was a bug in _use_db it'd be almost certain to shake
+ # out in the rest of the testing.
+ pass
+
+ def get_nonexistent_client(self, metadata, prefix="newclient"):
+ if metadata is None:
+ metadata = self.load_clients_data()
+ i = 0
+ client_name = "%s%s" % (prefix, i)
+ while client_name in metadata.clients:
+ i += 1
+ client_name = "%s%s" % (prefix, i)
+ return client_name
+
+ def test__init(self):
# test with watch_clients=False
core = Mock()
- metadata = get_metadata_object(core=core)
- self.check_metadata_object(metadata)
+ metadata = self.get_obj(core=core)
+ self.assertIsInstance(metadata, Bcfg2.Server.Plugin.Plugin)
+ self.assertIsInstance(metadata, Bcfg2.Server.Plugin.Metadata)
+ self.assertIsInstance(metadata, Bcfg2.Server.Plugin.Statistics)
+ self.assertIsInstance(metadata.clients_xml, XMLMetadataConfig)
+ self.assertIsInstance(metadata.groups_xml, XMLMetadataConfig)
+ self.assertIsInstance(metadata.query, MetadataQuery)
self.assertEqual(metadata.states, dict())
- def test__init_with_fam(self):
# test with watch_clients=True
- core = Mock()
core.fam = Mock()
- metadata = get_metadata_object(core=core, watch_clients=True)
+ metadata = self.get_obj(core=core, watch_clients=True)
self.assertEqual(len(metadata.states), 2)
core.fam.AddMonitor.assert_any_call(os.path.join(metadata.data,
"groups.xml"),
@@ -269,63 +464,43 @@ class TestMetadata(unittest.TestCase):
core.fam.reset_mock()
core.fam.AddMonitor = Mock(side_effect=IOError)
self.assertRaises(Bcfg2.Server.Plugin.PluginInitError,
- get_metadata_object,
- core=core, watch_clients=True)
-
- def check_metadata_object(self, metadata):
- self.assertIsInstance(metadata, Bcfg2.Server.Plugin.Plugin)
- self.assertIsInstance(metadata, Bcfg2.Server.Plugin.Metadata)
- self.assertIsInstance(metadata, Bcfg2.Server.Plugin.Statistics)
- self.assertIsInstance(metadata.clients_xml, XMLMetadataConfig)
- self.assertIsInstance(metadata.groups_xml, XMLMetadataConfig)
- self.assertIsInstance(metadata.query, MetadataQuery)
+ self.get_obj, core=core, watch_clients=True)
@patch('os.makedirs', Mock())
- @patch('__builtin__.open')
+ @patch('%s.open' % builtins)
def test_init_repo(self, mock_open):
- groups = "groups %s"
- os_selection = "os"
- clients = "clients %s"
- Metadata.init_repo(datastore, groups, os_selection, clients)
+ Metadata.init_repo(datastore,
+ groups_xml="groups", clients_xml="clients")
mock_open.assert_any_call(os.path.join(datastore, "Metadata",
"groups.xml"), "w")
mock_open.assert_any_call(os.path.join(datastore, "Metadata",
"clients.xml"), "w")
- @patch('lxml.etree.parse')
- def test_get_groups(self, mock_parse):
- metadata = get_metadata_object()
- mock_parse.return_value = groups_test_tree
- groups = metadata.get_groups()
- mock_parse.assert_called_with(os.path.join(datastore, "Metadata",
- "groups.xml"))
- self.assertIsInstance(groups, lxml.etree._Element)
-
- def test_search_xdata_name(self):
+ def test_search_xdata(self):
# test finding a node with the proper name
- metadata = get_metadata_object()
- tree = groups_test_tree
+ metadata = self.get_obj()
+ tree = get_groups_test_tree()
res = metadata._search_xdata("Group", "group1", tree)
self.assertIsInstance(res, lxml.etree._Element)
self.assertEqual(res.get("name"), "group1")
- def test_search_xdata_alias(self):
# test finding a node with the wrong name but correct alias
- metadata = get_metadata_object()
- tree = clients_test_tree
+ metadata = self.get_obj()
+ tree = get_clients_test_tree()
res = metadata._search_xdata("Client", "alias3", tree, alias=True)
self.assertIsInstance(res, lxml.etree._Element)
self.assertNotEqual(res.get("name"), "alias3")
- def test_search_xdata_not_found(self):
# test failure finding a node
- metadata = get_metadata_object()
- tree = clients_test_tree
- res = metadata._search_xdata("Client", "bogus_client", tree, alias=True)
+ metadata = self.get_obj()
+ tree = get_clients_test_tree()
+ res = metadata._search_xdata("Client",
+ self.get_nonexistent_client(metadata),
+ tree, alias=True)
self.assertIsNone(res)
def search_xdata(self, tag, name, tree, alias=False):
- metadata = get_metadata_object()
+ metadata = self.get_obj()
res = metadata._search_xdata(tag, name, tree, alias=alias)
self.assertIsInstance(res, lxml.etree._Element)
if not alias:
@@ -333,22 +508,22 @@ class TestMetadata(unittest.TestCase):
def test_search_group(self):
# test finding a group with the proper name
- tree = groups_test_tree
+ tree = get_groups_test_tree()
self.search_xdata("Group", "group1", tree)
def test_search_bundle(self):
# test finding a bundle with the proper name
- tree = groups_test_tree
+ tree = get_groups_test_tree()
self.search_xdata("Bundle", "bundle1", tree)
def test_search_client(self):
# test finding a client with the proper name
- tree = clients_test_tree
+ tree = get_clients_test_tree()
self.search_xdata("Client", "client1", tree, alias=True)
self.search_xdata("Client", "alias1", tree, alias=True)
def test_add_group(self):
- metadata = get_metadata_object()
+ metadata = self.get_obj()
metadata.groups_xml.write = Mock()
metadata.groups_xml.data = lxml.etree.XML('<Groups/>').getroottree()
metadata.groups_xml.basedata = copy.copy(metadata.groups_xml.data)
@@ -374,15 +549,15 @@ class TestMetadata(unittest.TestCase):
metadata.groups_xml.basedata = copy.copy(metadata.groups_xml.data)
metadata.groups_xml.write.reset_mock()
- self.assertRaises(MetadataConsistencyError,
+ self.assertRaises(Bcfg2.Server.Plugin.MetadataConsistencyError,
metadata.add_group,
"test1", dict())
self.assertFalse(metadata.groups_xml.write.called)
def test_update_group(self):
- metadata = get_metadata_object()
+ metadata = self.get_obj()
metadata.groups_xml.write_xml = Mock()
- metadata.groups_xml.data = copy.deepcopy(groups_test_tree)
+ metadata.groups_xml.data = copy.deepcopy(get_groups_test_tree())
metadata.groups_xml.basedata = copy.copy(metadata.groups_xml.data)
metadata.update_group("group1", dict(foo="bar"))
@@ -392,14 +567,14 @@ class TestMetadata(unittest.TestCase):
self.assertEqual(grp.get("foo"), "bar")
self.assertTrue(metadata.groups_xml.write_xml.called)
- self.assertRaises(MetadataConsistencyError,
+ self.assertRaises(Bcfg2.Server.Plugin.MetadataConsistencyError,
metadata.update_group,
"bogus_group", dict())
def test_remove_group(self):
- metadata = get_metadata_object()
+ metadata = self.get_obj()
metadata.groups_xml.write_xml = Mock()
- metadata.groups_xml.data = copy.deepcopy(groups_test_tree)
+ metadata.groups_xml.data = copy.deepcopy(get_groups_test_tree())
metadata.groups_xml.basedata = copy.copy(metadata.groups_xml.data)
metadata.remove_group("group5")
@@ -407,12 +582,12 @@ class TestMetadata(unittest.TestCase):
self.assertIsNone(grp)
self.assertTrue(metadata.groups_xml.write_xml.called)
- self.assertRaises(MetadataConsistencyError,
+ self.assertRaises(Bcfg2.Server.Plugin.MetadataConsistencyError,
metadata.remove_group,
"bogus_group")
def test_add_bundle(self):
- metadata = get_metadata_object()
+ metadata = self.get_obj()
metadata.groups_xml.write = Mock()
metadata.groups_xml.data = lxml.etree.XML('<Groups/>').getroottree()
metadata.groups_xml.basedata = copy.copy(metadata.groups_xml.data)
@@ -429,15 +604,15 @@ class TestMetadata(unittest.TestCase):
metadata.groups_xml.basedata = copy.copy(metadata.groups_xml.data)
metadata.groups_xml.write.reset_mock()
- self.assertRaises(MetadataConsistencyError,
+ self.assertRaises(Bcfg2.Server.Plugin.MetadataConsistencyError,
metadata.add_bundle,
"bundle1")
self.assertFalse(metadata.groups_xml.write.called)
def test_remove_bundle(self):
- metadata = get_metadata_object()
+ metadata = self.get_obj()
metadata.groups_xml.write_xml = Mock()
- metadata.groups_xml.data = copy.deepcopy(groups_test_tree)
+ metadata.groups_xml.data = copy.deepcopy(get_groups_test_tree())
metadata.groups_xml.basedata = copy.copy(metadata.groups_xml.data)
metadata.remove_bundle("bundle1")
@@ -445,46 +620,49 @@ class TestMetadata(unittest.TestCase):
self.assertIsNone(grp)
self.assertTrue(metadata.groups_xml.write_xml.called)
- self.assertRaises(MetadataConsistencyError,
+ self.assertRaises(Bcfg2.Server.Plugin.MetadataConsistencyError,
metadata.remove_bundle,
"bogus_bundle")
def test_add_client(self):
- metadata = get_metadata_object()
+ metadata = self.get_obj()
metadata.clients_xml.write = Mock()
metadata.clients_xml.data = lxml.etree.XML('<Clients/>').getroottree()
metadata.clients_xml.basedata = copy.copy(metadata.clients_xml.data)
- metadata.add_client("test1", dict())
+ new1 = self.get_nonexistent_client(metadata)
+ metadata.add_client(new1, dict())
metadata.clients_xml.write.assert_any_call()
- grp = metadata.search_client("test1", metadata.clients_xml.base_xdata)
+ grp = metadata.search_client(new1, metadata.clients_xml.base_xdata)
self.assertIsNotNone(grp)
- self.assertEqual(grp.attrib, dict(name='test1'))
+ self.assertEqual(grp.attrib, dict(name=new1))
# have to call this explicitly -- usually load_xml does this
# on FAM events
metadata.clients_xml.basedata = copy.copy(metadata.clients_xml.data)
+ metadata._handle_clients_xml_event(Mock())
- metadata.add_client("test2", dict(foo='bar'))
+ new2 = self.get_nonexistent_client(metadata)
+ metadata.add_client(new2, dict(foo='bar'))
metadata.clients_xml.write.assert_any_call()
- grp = metadata.search_client("test2", metadata.clients_xml.base_xdata)
+ grp = metadata.search_client(new2, metadata.clients_xml.base_xdata)
self.assertIsNotNone(grp)
- self.assertEqual(grp.attrib, dict(name='test2', foo='bar'))
+ self.assertEqual(grp.attrib, dict(name=new2, foo='bar'))
# have to call this explicitly -- usually load_xml does this
# on FAM events
metadata.clients_xml.basedata = copy.copy(metadata.clients_xml.data)
metadata.clients_xml.write.reset_mock()
- self.assertRaises(MetadataConsistencyError,
+ self.assertRaises(Bcfg2.Server.Plugin.MetadataConsistencyError,
metadata.add_client,
- "test1", dict())
+ new1, dict())
self.assertFalse(metadata.clients_xml.write.called)
def test_update_client(self):
- metadata = get_metadata_object()
+ metadata = self.get_obj()
metadata.clients_xml.write_xml = Mock()
- metadata.clients_xml.data = copy.deepcopy(clients_test_tree)
+ metadata.clients_xml.data = copy.deepcopy(get_clients_test_tree())
metadata.clients_xml.basedata = copy.copy(metadata.clients_xml.data)
metadata.update_client("client1", dict(foo="bar"))
@@ -494,14 +672,16 @@ class TestMetadata(unittest.TestCase):
self.assertEqual(grp.get("foo"), "bar")
self.assertTrue(metadata.clients_xml.write_xml.called)
- self.assertRaises(MetadataConsistencyError,
+ new = self.get_nonexistent_client(metadata)
+ self.assertRaises(Bcfg2.Server.Plugin.MetadataConsistencyError,
metadata.update_client,
- "bogus_client", dict())
+ new, dict())
def load_clients_data(self, metadata=None, xdata=None):
if metadata is None:
- metadata = get_metadata_object()
- metadata.clients_xml.data = xdata or copy.deepcopy(clients_test_tree)
+ metadata = self.get_obj()
+ metadata.clients_xml.data = \
+ xdata or copy.deepcopy(get_clients_test_tree())
metadata.clients_xml.basedata = copy.copy(metadata.clients_xml.data)
evt = Mock()
evt.filename = os.path.join(datastore, "Metadata", "clients.xml")
@@ -509,41 +689,43 @@ class TestMetadata(unittest.TestCase):
metadata.HandleEvent(evt)
return metadata
- @patch("Bcfg2.Server.Plugins.Metadata.XMLMetadataConfig.load_xml")
- def test_clients_xml_event(self, mock_load_xml):
- metadata = get_metadata_object()
+ def test_handle_clients_xml_event(self):
+ metadata = self.get_obj()
metadata.profiles = ["group1", "group2"]
- self.load_clients_data(metadata=metadata)
- mock_load_xml.assert_any_call()
- self.assertItemsEqual(metadata.clients,
- dict([(c.get("name"), c.get("profile"))
- for c in clients_test_tree.findall("//Client")]))
+
+ metadata.clients_xml = Mock()
+ metadata.clients_xml.xdata = copy.deepcopy(get_clients_test_tree())
+ metadata._handle_clients_xml_event(Mock())
+
+ if not self.use_db:
+ self.assertItemsEqual(metadata.clients,
+ dict([(c.get("name"), c.get("profile"))
+ for c in get_clients_test_tree().findall("//Client")]))
aliases = dict([(a.get("name"), a.getparent().get("name"))
- for a in clients_test_tree.findall("//Alias")])
+ for a in get_clients_test_tree().findall("//Alias")])
self.assertItemsEqual(metadata.aliases, aliases)
raliases = dict([(c.get("name"), set())
- for c in clients_test_tree.findall("//Client")])
- for alias in clients_test_tree.findall("//Alias"):
+ for c in get_clients_test_tree().findall("//Client")])
+ for alias in get_clients_test_tree().findall("//Alias"):
raliases[alias.getparent().get("name")].add(alias.get("name"))
self.assertItemsEqual(metadata.raliases, raliases)
- self.assertEqual(metadata.bad_clients, dict())
self.assertEqual(metadata.secure,
[c.get("name")
- for c in clients_test_tree.findall("//Client[@secure='true']")])
- self.assertEqual(metadata.floating, ["client1"])
+ for c in get_clients_test_tree().findall("//Client[@secure='true']")])
+ self.assertEqual(metadata.floating, ["client1", "client10"])
addresses = dict([(c.get("address"), [])
- for c in clients_test_tree.findall("//*[@address]")])
+ for c in get_clients_test_tree().findall("//*[@address]")])
raddresses = dict()
- for client in clients_test_tree.findall("//Client[@address]"):
+ for client in get_clients_test_tree().findall("//Client[@address]"):
addresses[client.get("address")].append(client.get("name"))
try:
raddresses[client.get("name")].append(client.get("address"))
except KeyError:
raddresses[client.get("name")] = [client.get("address")]
- for alias in clients_test_tree.findall("//Alias[@address]"):
+ for alias in get_clients_test_tree().findall("//Alias[@address]"):
addresses[alias.get("address")].append(alias.getparent().get("name"))
try:
raddresses[alias.getparent().get("name")].append(alias.get("address"))
@@ -554,25 +736,11 @@ class TestMetadata(unittest.TestCase):
self.assertItemsEqual(metadata.raddresses, raddresses)
self.assertTrue(metadata.states['clients.xml'])
- @patch("Bcfg2.Server.Plugins.Metadata.XMLMetadataConfig.load_xml", Mock())
- def test_clients_xml_event_bad_clients(self):
- metadata = get_metadata_object()
- metadata.profiles = ["group2"]
- self.load_clients_data(metadata=metadata)
- clients = dict()
- badclients = dict()
- for client in clients_test_tree.findall("//Client"):
- if client.get("profile") in metadata.profiles:
- clients[client.get("name")] = client.get("profile")
- else:
- badclients[client.get("name")] = client.get("profile")
- self.assertItemsEqual(metadata.clients, clients)
- self.assertItemsEqual(metadata.bad_clients, badclients)
-
def load_groups_data(self, metadata=None, xdata=None):
if metadata is None:
- metadata = get_metadata_object()
- metadata.groups_xml.data = xdata or copy.deepcopy(groups_test_tree)
+ metadata = self.get_obj()
+ metadata.groups_xml.data = \
+ xdata or copy.deepcopy(get_groups_test_tree())
metadata.groups_xml.basedata = copy.copy(metadata.groups_xml.data)
evt = Mock()
evt.filename = os.path.join(datastore, "Metadata", "groups.xml")
@@ -580,65 +748,109 @@ class TestMetadata(unittest.TestCase):
metadata.HandleEvent(evt)
return metadata
- @patch("Bcfg2.Server.Plugins.Metadata.XMLMetadataConfig.load_xml")
- def test_groups_xml_event(self, mock_load_xml):
- dup_data = copy.deepcopy(groups_test_tree)
- lxml.etree.SubElement(dup_data.getroot(),
- "Group", name="group1")
- metadata = self.load_groups_data(xdata=dup_data)
- mock_load_xml.assert_any_call()
- self.assertEqual(metadata.public, ["group1", "group2"])
- self.assertEqual(metadata.private, ["group3"])
- self.assertEqual(metadata.profiles, ["group1", "group2"])
+ def test_handle_groups_xml_event(self):
+ metadata = self.get_obj()
+ metadata.groups_xml = Mock()
+ metadata.groups_xml.xdata = get_groups_test_tree()
+ metadata._handle_groups_xml_event(Mock())
+
+ self.assertTrue(metadata.states['groups.xml'])
+ self.assertTrue(metadata.groups['group1'].is_public)
+ self.assertTrue(metadata.groups['group2'].is_public)
+ self.assertFalse(metadata.groups['group3'].is_public)
+ self.assertFalse(metadata.groups['group1'].is_private)
+ self.assertFalse(metadata.groups['group2'].is_private)
+ self.assertTrue(metadata.groups['group3'].is_private)
+ self.assertTrue(metadata.groups['group1'].is_profile)
+ self.assertTrue(metadata.groups['group2'].is_profile)
+ self.assertFalse(metadata.groups['group3'].is_profile)
self.assertItemsEqual(metadata.groups.keys(),
- [g.get("name")
- for g in groups_test_tree.findall("/Group")])
- self.assertEqual(metadata.categories,
- dict(group1="category1",
- group2="category1",
- group3="category2",
- group4="category1"))
+ set(g.get("name")
+ for g in get_groups_test_tree().findall("//Group")))
+ self.assertEqual(metadata.groups['group1'].category, 'category1')
+ self.assertEqual(metadata.groups['group2'].category, 'category1')
+ self.assertEqual(metadata.groups['group3'].category, 'category2')
+ self.assertEqual(metadata.groups['group4'].category, 'category1')
self.assertEqual(metadata.default, "group1")
- self.assertTrue(metadata.states['groups.xml'])
+ all_groups = []
+ negated_groups = []
+ for group in get_groups_test_tree().xpath("//Groups/Client//*") + \
+ get_groups_test_tree().xpath("//Groups/Group//*"):
+ if group.tag == 'Group' and not group.getchildren():
+ if group.get("negate", "false").lower() == 'true':
+ negated_groups.append(group.get("name"))
+ else:
+ all_groups.append(group.get("name"))
+ self.assertItemsEqual([g.name
+ for g in metadata.group_membership.values()],
+ all_groups)
+ self.assertItemsEqual([g.name
+ for g in metadata.negated_groups.values()],
+ negated_groups)
+
@patch("Bcfg2.Server.Plugins.Metadata.XMLMetadataConfig.load_xml", Mock())
- @patch("Bcfg2.Server.Plugins.Metadata.Metadata.add_client")
- @patch("Bcfg2.Server.Plugins.Metadata.Metadata.update_client")
- def test_set_profile(self, mock_update_client, mock_add_client):
- metadata = get_metadata_object()
- metadata.states['clients.xml'] = False
- self.assertRaises(MetadataRuntimeError,
- metadata.set_profile,
- None, None, None)
+ def test_set_profile(self):
+ metadata = self.get_obj()
+ if 'clients.xml' in metadata.states:
+ metadata.states['clients.xml'] = False
+ self.assertRaises(Bcfg2.Server.Plugin.MetadataRuntimeError,
+ metadata.set_profile,
+ None, None, None)
self.load_groups_data(metadata=metadata)
self.load_clients_data(metadata=metadata)
- self.assertRaises(MetadataConsistencyError,
+ self.assertRaises(Bcfg2.Server.Plugin.MetadataConsistencyError,
metadata.set_profile,
"client1", "group5", None)
- metadata.clients_xml.write = Mock()
- metadata.set_profile("client1", "group2", None)
- mock_update_client.assert_called_with("client1", dict(profile="group2"))
- metadata.clients_xml.write.assert_any_call()
- self.assertEqual(metadata.clients["client1"], "group2")
+ self.assertRaises(Bcfg2.Server.Plugin.MetadataConsistencyError,
+ metadata.set_profile,
+ "client1", "group3", None)
- metadata.clients_xml.write.reset_mock()
- metadata.set_profile("client_new", "group1", None)
- mock_add_client.assert_called_with("client_new", dict(profile="group1"))
- metadata.clients_xml.write.assert_any_call()
- self.assertEqual(metadata.clients["client_new"], "group1")
+ @patch("Bcfg2.Server.Plugins.Metadata.XMLMetadataConfig.load_xml", Mock())
+ def test_set_profile_db(self):
+ metadata = self.load_clients_data(metadata=self.load_groups_data())
+ if metadata._use_db:
+ profile = "group1"
+ client_name = self.get_nonexistent_client(metadata)
+ metadata.set_profile(client_name, profile, None)
+ self.assertIn(client_name, metadata.clients)
+ self.assertRaises(Bcfg2.Server.Plugin.PluginExecutionError,
+ metadata.set_profile,
+ client_name, profile, None)
- metadata.session_cache[('1.2.3.6', None)] = (None, 'client_new2')
- metadata.clients_xml.write.reset_mock()
- metadata.set_profile("uuid_new", "group1", ('1.2.3.6', None))
- mock_add_client.assert_called_with("client_new2",
- dict(uuid='uuid_new',
- profile="group1",
- address='1.2.3.6'))
- metadata.clients_xml.write.assert_any_call()
- self.assertEqual(metadata.clients["uuid_new"], "group1")
+ @patch("Bcfg2.Server.Plugins.Metadata.XMLMetadataConfig.load_xml", Mock())
+ @patch("Bcfg2.Server.Plugins.Metadata.Metadata.add_client")
+ @patch("Bcfg2.Server.Plugins.Metadata.Metadata.update_client")
+ def test_set_profile_xml(self, mock_update_client, mock_add_client):
+ metadata = self.load_clients_data(metadata=self.load_groups_data())
+ if not metadata._use_db:
+ metadata.clients_xml.write = Mock()
+ metadata.set_profile("client1", "group2", None)
+ mock_update_client.assert_called_with("client1",
+ dict(profile="group2"))
+ metadata.clients_xml.write.assert_any_call()
+ self.assertEqual(metadata.clientgroups["client1"], ["group2"])
+
+ metadata.clients_xml.write.reset_mock()
+ new1 = self.get_nonexistent_client(metadata)
+ metadata.set_profile(new1, "group1", None)
+ mock_add_client.assert_called_with(new1, dict(profile="group1"))
+ metadata.clients_xml.write.assert_any_call()
+ self.assertEqual(metadata.clientgroups[new1], ["group1"])
+
+ metadata.clients_xml.write.reset_mock()
+ new2 = self.get_nonexistent_client(metadata)
+ metadata.session_cache[('1.2.3.6', None)] = (None, new2)
+ metadata.set_profile("uuid_new", "group1", ('1.2.3.6', None))
+ mock_add_client.assert_called_with(new2,
+ dict(uuid='uuid_new',
+ profile="group1",
+ address='1.2.3.6'))
+ metadata.clients_xml.write.assert_any_call()
+ self.assertEqual(metadata.clientgroups["uuid_new"], ["group1"])
@patch("Bcfg2.Server.Plugins.Metadata.XMLMetadataConfig.load_xml", Mock())
@patch("socket.gethostbyaddr")
@@ -647,7 +859,7 @@ class TestMetadata(unittest.TestCase):
metadata.session_cache[('1.2.3.3', None)] = (time.time(), 'client3')
self.assertEqual(metadata.resolve_client(('1.2.3.3', None)), 'client3')
- self.assertRaises(MetadataConsistencyError,
+ self.assertRaises(Bcfg2.Server.Plugin.MetadataConsistencyError,
metadata.resolve_client,
('1.2.3.2', None))
self.assertEqual(metadata.resolve_client(('1.2.3.1', None)), 'client1')
@@ -671,7 +883,7 @@ class TestMetadata(unittest.TestCase):
mock_gethostbyaddr.reset_mock()
mock_gethostbyaddr.return_value = None
mock_gethostbyaddr.side_effect = socket.herror
- self.assertRaises(MetadataConsistencyError,
+ self.assertRaises(Bcfg2.Server.Plugin.MetadataConsistencyError,
metadata.resolve_client,
('1.2.3.8', None))
mock_gethostbyaddr.assert_called_with('1.2.3.8')
@@ -680,63 +892,115 @@ class TestMetadata(unittest.TestCase):
@patch("Bcfg2.Server.Plugins.Metadata.XMLMetadataConfig.write_xml", Mock())
@patch("Bcfg2.Server.Plugins.Metadata.ClientMetadata")
def test_get_initial_metadata(self, mock_clientmetadata):
- metadata = get_metadata_object()
- metadata.states['clients.xml'] = False
- self.assertRaises(MetadataRuntimeError,
- metadata.get_initial_metadata, None)
+ metadata = self.get_obj()
+ if 'clients.xml' in metadata.states:
+ metadata.states['clients.xml'] = False
+ self.assertRaises(Bcfg2.Server.Plugin.MetadataRuntimeError,
+ metadata.get_initial_metadata, None)
self.load_groups_data(metadata=metadata)
self.load_clients_data(metadata=metadata)
+ # test address, password
metadata.get_initial_metadata("client1")
- self.assertEqual(mock_clientmetadata.call_args[0][:9],
- ("client1", "group1", set(["group1"]), set(), set(),
- set(["1.2.3.1"]), dict(), None, 'password2'))
-
+ mock_clientmetadata.assert_called_with("client1", "group1",
+ set(["group1"]), set(), set(),
+ set(["1.2.3.1"]),
+ dict(category1='group1'), None,
+ 'password2', None,
+ metadata.query)
+
+ # test address, bundles, category suppression
metadata.get_initial_metadata("client2")
- self.assertEqual(mock_clientmetadata.call_args[0][:9],
- ("client2", "group2", set(["group1", "group2"]),
- set(["bundle1", "bundle2"]), set(),
- set(["1.2.3.2"]), dict(category1="group1"),
- None, None))
-
+ mock_clientmetadata.assert_called_with("client2", "group2",
+ set(["group2"]),
+ set(["bundle1", "bundle2"]),
+ set(), set(["1.2.3.2"]),
+ dict(category1="group2"),
+ None, None, None,
+ metadata.query)
+
+ # test aliases, address, uuid, password
imd = metadata.get_initial_metadata("alias1")
- self.assertEqual(mock_clientmetadata.call_args[0][:9],
- ("client3", "group1", set(["group1"]), set(),
- set(['alias1']), set(["1.2.3.3"]), dict(), 'uuid1',
- 'password2'))
+ mock_clientmetadata.assert_called_with("client3", "group1",
+ set(["group1"]), set(),
+ set(['alias1']),
+ set(["1.2.3.3"]),
+ dict(category1="group1"),
+ 'uuid1', 'password2', None,
+ metadata.query)
+
+ # test new client creation
+ new1 = self.get_nonexistent_client(metadata)
+ imd = metadata.get_initial_metadata(new1)
+ mock_clientmetadata.assert_called_with(new1, "group1", set(["group1"]),
+ set(), set(), set(),
+ dict(category1="group1"), None,
+ None, None, metadata.query)
+
+ # test nested groups, address, per-client groups
+ imd = metadata.get_initial_metadata("client8")
+ mock_clientmetadata.assert_called_with("client8", "group1",
+ set(["group1", "group8",
+ "group9", "group10"]),
+ set(),
+ set(), set(["1.2.3.5"]),
+ dict(category1="group1"),
+ None, None, None, metadata.query)
+
+ # test setting per-client groups, group negation, nested groups
+ imd = metadata.get_initial_metadata("client9")
+ mock_clientmetadata.assert_called_with("client9", "group2",
+ set(["group2", "group8",
+ "group11"]),
+ set(["bundle1", "bundle2"]),
+ set(), set(),
+ dict(category1="group2"), None,
+ "password3", None,
+ metadata.query)
+
+ # test new client with no default profile
+ metadata.default = None
+ new2 = self.get_nonexistent_client(metadata)
+ self.assertRaises(Bcfg2.Server.Plugin.MetadataConsistencyError,
+ metadata.get_initial_metadata, new2)
- imd = metadata.get_initial_metadata("client_new")
- self.assertEqual(mock_clientmetadata.call_args[0][:9],
- ("client_new", "group1", set(["group1"]), set(),
- set(), set(), dict(), None, None))
+ @patch("Bcfg2.Server.Plugins.Metadata.XMLMetadataConfig.load_xml", Mock())
+ def test_merge_groups(self):
+ metadata = self.get_obj()
+ self.load_groups_data(metadata=metadata)
+ self.load_clients_data(metadata=metadata)
- metadata.default = None
- self.assertRaises(MetadataConsistencyError,
- metadata.get_initial_metadata,
- "client_new2")
+ self.assertEqual(metadata._merge_groups("client1", set(["group1"]),
+ categories=dict(group1="category1")),
+ (set(["group1"]), dict(group1="category1")))
+ self.assertEqual(metadata._merge_groups("client8",
+ set(["group1", "group8", "group9"]),
+ categories=dict(group1="category1")),
+ (set(["group1", "group8", "group9", "group10"]),
+ dict(group1="category1")))
@patch("Bcfg2.Server.Plugins.Metadata.XMLMetadataConfig.load_xml", Mock())
def test_get_all_group_names(self):
metadata = self.load_groups_data()
self.assertItemsEqual(metadata.get_all_group_names(),
set([g.get("name")
- for g in groups_test_tree.findall("//Group")]))
+ for g in get_groups_test_tree().findall("//Group")]))
@patch("Bcfg2.Server.Plugins.Metadata.XMLMetadataConfig.load_xml", Mock())
def test_get_all_groups_in_category(self):
metadata = self.load_groups_data()
self.assertItemsEqual(metadata.get_all_groups_in_category("category1"),
set([g.get("name")
- for g in groups_test_tree.findall("//Group[@category='category1']")]))
+ for g in get_groups_test_tree().findall("//Group[@category='category1']")]))
@patch("Bcfg2.Server.Plugins.Metadata.XMLMetadataConfig.load_xml", Mock())
def test_get_client_names_by_profiles(self):
metadata = self.load_clients_data(metadata=self.load_groups_data())
- self.assertItemsEqual(metadata.get_client_names_by_profiles("group2"),
+ self.assertItemsEqual(metadata.get_client_names_by_profiles(["group2"]),
[c.get("name")
- for c in clients_test_tree.findall("//Client[@profile='group2']")])
+ for c in get_clients_test_tree().findall("//Client[@profile='group2']")])
@patch("Bcfg2.Server.Plugins.Metadata.XMLMetadataConfig.load_xml", Mock())
def test_get_client_names_by_groups(self):
@@ -751,7 +1015,7 @@ class TestMetadata(unittest.TestCase):
lambda c: metadata.get_initial_metadata(c)
self.assertItemsEqual(metadata.get_client_names_by_groups(["group2"]),
[c.get("name")
- for c in clients_test_tree.findall("//Client[@profile='group2']")])
+ for c in get_clients_test_tree().findall("//Client[@profile='group2']")])
@patch("Bcfg2.Server.Plugins.Metadata.XMLMetadataConfig.load_xml", Mock())
def test_merge_additional_groups(self):
@@ -782,6 +1046,14 @@ class TestMetadata(unittest.TestCase):
self.assertItemsEqual(imd.groups,
oldgroups.union(["group6", "group8", "group9"]))
+ # test adding a group that is not defined in groups.xml
+ imd = metadata.get_initial_metadata("client2")
+ oldgroups = imd.groups
+ metadata.merge_additional_groups(imd, ["group6", "newgroup"])
+ self.assertItemsEqual(imd.groups,
+ oldgroups.union(["group6", "newgroup"]))
+
+
@patch("Bcfg2.Server.Plugins.Metadata.XMLMetadataConfig.load_xml", Mock())
def test_merge_additional_data(self):
metadata = self.load_clients_data(metadata=self.load_groups_data())
@@ -856,7 +1128,8 @@ class TestMetadata(unittest.TestCase):
self.assertTrue(metadata.AuthenticateConnection(None, "root",
"password1", "1.2.3.8"))
- mock_resolve_client.side_effect = MetadataConsistencyError
+ mock_resolve_client.side_effect = \
+ Bcfg2.Server.Plugin.MetadataConsistencyError
self.assertFalse(metadata.AuthenticateConnection(None, "root",
"password1",
"1.2.3.8"))
@@ -903,3 +1176,295 @@ class TestMetadata(unittest.TestCase):
def test_viz(self):
pass
+
+
+class TestMetadataBase(TestMetadata):
+ """ base test object for testing Metadata with database enabled """
+ __test__ = False
+ use_db = True
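+ # __test__ = False keeps test runners from collecting this abstract
+ # base class directly; the concrete subclasses below flip it back to
+ # True when Django is available or the tests can be skipped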
+
+ @skipUnless(has_django, "Django not found")
+ def setUp(self):
+ syncdb(TestMetadataDB)
+
+ def load_clients_data(self, metadata=None, xdata=None):
+ if metadata is None:
+ metadata = self.get_obj()
+ for client in get_clients_test_tree().findall("Client"):
+ metadata.add_client(client.get("name"))
+ return metadata
+
+ def get_nonexistent_client(self, _, prefix="newclient"):
+ clients = [o.hostname for o in MetadataClientModel.objects.all()]
+ i = 0
+ client_name = "%s%s" % (prefix, i)
+ while client_name in clients:
+ i += 1
+ client_name = "%s%s" % (prefix, i)
+ return client_name
+
+ @patch('os.path.exists')
+ def test__init(self, mock_exists):
+ core = Mock()
+ core.fam = Mock()
+ mock_exists.return_value = False
+ metadata = self.get_obj(core=core, watch_clients=True)
+ self.assertIsInstance(metadata, Bcfg2.Server.Plugin.DatabaseBacked)
+ core.fam.AddMonitor.assert_called_once_with(os.path.join(metadata.data,
+ "groups.xml"),
+ metadata)
+
+ mock_exists.return_value = True
+ core.fam.reset_mock()
+ metadata = self.get_obj(core=core, watch_clients=True)
+ core.fam.AddMonitor.assert_any_call(os.path.join(metadata.data,
+ "groups.xml"),
+ metadata)
+ core.fam.AddMonitor.assert_any_call(os.path.join(metadata.data,
+ "clients.xml"),
+ metadata)
+
+ def test_add_group(self):
+ pass
+
+ def test_add_bundle(self):
+ pass
+
+ def test_add_client(self):
+ metadata = self.get_obj()
+ hostname = self.get_nonexistent_client(metadata)
+ client = metadata.add_client(hostname)
+ self.assertIsInstance(client, MetadataClientModel)
+ self.assertEqual(client.hostname, hostname)
+ self.assertIn(hostname, metadata.clients)
+ self.assertIn(hostname, metadata.list_clients())
+ self.assertItemsEqual(metadata.clients,
+ [c.hostname
+ for c in MetadataClientModel.objects.all()])
+
+ def test_update_group(self):
+ pass
+
+ def test_update_bundle(self):
+ pass
+
+ def test_update_client(self):
+ pass
+
+ def test_list_clients(self):
+ metadata = self.get_obj()
+ self.assertItemsEqual(metadata.list_clients(),
+ [c.hostname
+ for c in MetadataClientModel.objects.all()])
+
+ def test_remove_group(self):
+ pass
+
+ def test_remove_bundle(self):
+ pass
+
+ def test_remove_client(self):
+ metadata = self.get_obj()
+ client_name = self.get_nonexistent_client(metadata)
+
+ self.assertRaises(Bcfg2.Server.Plugin.MetadataConsistencyError,
+ metadata.remove_client,
+ client_name)
+
+ metadata.add_client(client_name)
+ metadata.remove_client(client_name)
+ self.assertNotIn(client_name, metadata.clients)
+ self.assertNotIn(client_name, metadata.list_clients())
+ self.assertItemsEqual(metadata.clients,
+ [c.hostname
+ for c in MetadataClientModel.objects.all()])
+
+ def test_process_statistics(self):
+ pass
+
+
+class TestMetadata_NoClientsXML(TestMetadataBase):
+ """ test Metadata without a clients.xml. we have to disable or
+ override tests that rely on client options """
+ # only run these tests if it's possible to skip tests or if we
+ # have django. otherwise they'll all get run because our fake
+ # skipping decorators for python < 2.7 won't work when they
+ # decorate setUp()
+ if can_skip or has_django:
+ __test__ = True
+
+ def load_groups_data(self, metadata=None, xdata=None):
+ if metadata is None:
+ metadata = self.get_obj()
+ if not xdata:
+ xdata = copy.deepcopy(get_groups_test_tree())
+ for client in get_clients_test_tree().findall("Client"):
+ newclient = \
+ lxml.etree.SubElement(xdata.getroot(),
+ "Client", name=client.get("name"))
+ lxml.etree.SubElement(newclient, "Group",
+ name=client.get("profile"))
+ metadata.groups_xml.data = xdata
+ metadata.groups_xml.basedata = copy.copy(metadata.groups_xml.data)
+ evt = Mock()
+ evt.filename = os.path.join(datastore, "Metadata", "groups.xml")
+ evt.code2str = Mock(return_value="changed")
+ metadata.HandleEvent(evt)
+ return metadata
+
+ @patch("Bcfg2.Server.Plugins.Metadata.XMLMetadataConfig.load_xml", Mock())
+ @patch("Bcfg2.Server.Plugins.Metadata.XMLMetadataConfig.write_xml", Mock())
+ @patch("Bcfg2.Server.Plugins.Metadata.ClientMetadata")
+ def test_get_initial_metadata(self, mock_clientmetadata):
+ metadata = self.get_obj()
+ if 'clients.xml' in metadata.states:
+ metadata.states['clients.xml'] = False
+ self.assertRaises(Bcfg2.Server.Plugin.MetadataRuntimeError,
+ metadata.get_initial_metadata, None)
+
+ self.load_groups_data(metadata=metadata)
+ self.load_clients_data(metadata=metadata)
+
+ # test basic client metadata
+ metadata.get_initial_metadata("client1")
+ mock_clientmetadata.assert_called_with("client1", "group1",
+ set(["group1"]), set(), set(),
+ set(), dict(category1='group1'),
+ None, None, None, metadata.query)
+
+ # test bundles, category suppression
+ metadata.get_initial_metadata("client2")
+ mock_clientmetadata.assert_called_with("client2", "group2",
+ set(["group2"]),
+ set(["bundle1", "bundle2"]),
+ set(), set(),
+ dict(category1="group2"), None,
+ None, None, metadata.query)
+
+ # test new client creation
+ new1 = self.get_nonexistent_client(metadata)
+ imd = metadata.get_initial_metadata(new1)
+ mock_clientmetadata.assert_called_with(new1, "group1", set(["group1"]),
+ set(), set(), set(),
+ dict(category1="group1"), None,
+ None, None, metadata.query)
+
+ # test nested groups, per-client groups
+ imd = metadata.get_initial_metadata("client8")
+ mock_clientmetadata.assert_called_with("client8", "group1",
+ set(["group1", "group8",
+ "group9", "group10"]),
+ set(), set(), set(),
+ dict(category1="group1"), None,
+ None, None, metadata.query)
+
+ # test per-client groups, group negation, nested groups
+ imd = metadata.get_initial_metadata("client9")
+ mock_clientmetadata.assert_called_with("client9", "group2",
+ set(["group2", "group8",
+ "group11"]),
+ set(["bundle1", "bundle2"]),
+ set(), set(),
+ dict(category1="group2"), None,
+ None, None, metadata.query)
+
+ # test exception on new client with no default profile
+ metadata.default = None
+ new2 = self.get_nonexistent_client(metadata)
+ self.assertRaises(Bcfg2.Server.Plugin.MetadataConsistencyError,
+ metadata.get_initial_metadata,
+ new2)
+
+ @patch("Bcfg2.Server.Plugins.Metadata.XMLMetadataConfig.load_xml", Mock())
+ @patch("Bcfg2.Server.Plugins.Metadata.Metadata.resolve_client")
+ def test_validate_client_address(self, mock_resolve_client):
+ metadata = self.load_clients_data(metadata=self.load_groups_data())
+ # this is upper case to ensure that case is folded properly in
+ # validate_client_address()
+ mock_resolve_client.return_value = "CLIENT4"
+ self.assertTrue(metadata.validate_client_address("client4",
+ ("1.2.3.7", None)))
+ mock_resolve_client.assert_called_with(("1.2.3.7", None))
+
+ mock_resolve_client.reset_mock()
+ self.assertFalse(metadata.validate_client_address("client5",
+ ("1.2.3.5", None)))
+
+ @patch("Bcfg2.Server.Plugins.Metadata.XMLMetadataConfig.load_xml", Mock())
+ @patch("Bcfg2.Server.Plugins.Metadata.Metadata.validate_client_address")
+ @patch("Bcfg2.Server.Plugins.Metadata.Metadata.resolve_client")
+ def test_AuthenticateConnection(self, mock_resolve_client,
+ mock_validate_client_address):
+ metadata = self.load_clients_data(metadata=self.load_groups_data())
+ metadata.password = "password1"
+
+ cert = dict(subject=[[("commonName", "client1")]])
+ mock_validate_client_address.return_value = False
+ self.assertFalse(metadata.AuthenticateConnection(cert, "root", None,
+ "1.2.3.1"))
+ mock_validate_client_address.return_value = True
+ self.assertTrue(metadata.AuthenticateConnection(cert, "root",
+ metadata.password,
+ "1.2.3.1"))
+
+ cert = dict(subject=[[("commonName", "client8")]])
+
+ mock_resolve_client.return_value = "client5"
+ self.assertTrue(metadata.AuthenticateConnection(None, "root",
+ "password1", "1.2.3.8"))
+
+ mock_resolve_client.side_effect = \
+ Bcfg2.Server.Plugin.MetadataConsistencyError
+ self.assertFalse(metadata.AuthenticateConnection(None, "root",
+ "password1",
+ "1.2.3.8"))
+
+ @patch("Bcfg2.Server.Plugins.Metadata.XMLMetadataConfig.load_xml", Mock())
+ @patch("socket.gethostbyaddr")
+ def test_resolve_client(self, mock_gethostbyaddr):
+ metadata = self.load_clients_data(metadata=self.load_groups_data())
+ metadata.session_cache[('1.2.3.3', None)] = (time.time(), 'client3')
+ self.assertEqual(metadata.resolve_client(('1.2.3.3', None)), 'client3')
+
+ metadata.session_cache[('1.2.3.3', None)] = (time.time() - 100,
+ 'client3')
+ mock_gethostbyaddr.return_value = ("client3", [], ['1.2.3.3'])
+ self.assertEqual(metadata.resolve_client(('1.2.3.3', None),
+ cleanup_cache=True), 'client3')
+ self.assertEqual(metadata.session_cache, dict())
+
+ mock_gethostbyaddr.return_value = ('client6', [], ['1.2.3.6'])
+ self.assertEqual(metadata.resolve_client(('1.2.3.6', None)), 'client6')
+ mock_gethostbyaddr.assert_called_with('1.2.3.6')
+
+ mock_gethostbyaddr.reset_mock()
+ mock_gethostbyaddr.return_value = None
+ mock_gethostbyaddr.side_effect = socket.herror
+ self.assertRaises(Bcfg2.Server.Plugin.MetadataConsistencyError,
+ metadata.resolve_client,
+ ('1.2.3.8', None))
+ mock_gethostbyaddr.assert_called_with('1.2.3.8')
+
+ def test_handle_clients_xml_event(self):
+ pass
+
+
+class TestMetadata_ClientsXML(TestMetadataBase):
+ """ test Metadata with a clients.xml. """
+ # only run these tests if it's possible to skip tests or if we
+ # have django. otherwise they'll all get run because our fake
+ # skipping decorators for python < 2.7 won't work when they
+ # decorate setUp()
+ if can_skip or has_django:
+ __test__ = True
+
+ def load_clients_data(self, metadata=None, xdata=None):
+ if metadata is None:
+ metadata = self.get_obj()
+ metadata.core.fam = Mock()
+ metadata._handle_file("clients.xml")
+ metadata = TestMetadata.load_clients_data(self, metadata=metadata,
+ xdata=xdata)
+ return TestMetadataBase.load_clients_data(self, metadata=metadata,
+ xdata=xdata)
+
diff --git a/testsuite/Testlib/TestServer/TestPlugins/TestProbes.py b/testsuite/Testlib/TestServer/TestPlugins/TestProbes.py
new file mode 100644
index 000000000..0a971c245
--- /dev/null
+++ b/testsuite/Testlib/TestServer/TestPlugins/TestProbes.py
@@ -0,0 +1,549 @@
+import os
+import sys
+import time
+import lxml.etree
+import Bcfg2.Server
+import Bcfg2.Server.Plugin
+from mock import Mock, MagicMock, patch
+
+# add all parent testsuite directories to sys.path to allow (most)
+# relative imports in python 2.4
+path = os.path.dirname(__file__)
+while path != "/":
+ if os.path.basename(path).lower().startswith("test"):
+ sys.path.append(path)
+ if os.path.basename(path) == "testsuite":
+ break
+ path = os.path.dirname(path)
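+# e.g. starting from .../testsuite/Testlib/TestServer/TestPlugins this adds
+# TestPlugins, TestServer, Testlib and finally testsuite itself, then stops
+# (path shown purely for illustration)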
+from common import XI_NAMESPACE, XI, inPy3k, call, builtins, u, can_skip, \
+ skip, skipIf, skipUnless, Bcfg2TestCase, DBModelTestCase, syncdb, \
+ patchIf, datastore
+from Bcfg2.Server.Plugins.Probes import *
+from TestPlugin import TestEntrySet, TestProbing, TestConnector, \
+ TestDatabaseBacked
+
+# test data for JSON and YAML tests
+test_data = dict(a=1, b=[1, 2, 3], c="test")
+
+class FakeList(list):
+ pass
+
+
+class TestProbesDB(DBModelTestCase):
+ if has_django:
+ models = [ProbesGroupsModel, ProbesDataModel]
+
+
+class TestClientProbeDataSet(Bcfg2TestCase):
+ def test__init(self):
+ ds = ClientProbeDataSet()
+ self.assertLessEqual(ds.timestamp, time.time())
+ self.assertIsInstance(ds, dict)
+ self.assertNotIn("timestamp", ds)
+
+ ds = ClientProbeDataSet(timestamp=123)
+ self.assertEqual(ds.timestamp, 123)
+ self.assertNotIn("timestamp", ds)
+
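+# The behavior exercised above is roughly that of a dict subclass along the
+# lines of (illustrative sketch only, not the real implementation):
+#
+#     class ExampleDataSet(dict):
+#         def __init__(self, *args, **kwargs):
+#             self.timestamp = kwargs.pop("timestamp", time.time())
+#             dict.__init__(self, *args, **kwargs)
+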
+class TestProbeData(Bcfg2TestCase):
+ def test_str(self):
+ # a value that is not valid XML, JSON, or YAML
+ val = "'test"
+
+ # test string behavior
+ data = ProbeData(val)
+ self.assertIsInstance(data, str)
+ self.assertEqual(data, val)
+ # test 1.2.0-1.2.2 broken behavior
+ self.assertEqual(data.data, val)
+ # test that formatted data accessors return None
+ self.assertIsNone(data.xdata)
+ self.assertIsNone(data.yaml)
+ self.assertIsNone(data.json)
+
+ def test_xdata(self):
+ xdata = lxml.etree.Element("test")
+ lxml.etree.SubElement(xdata, "test2")
+ data = ProbeData(lxml.etree.tostring(xdata,
+ xml_declaration=False).decode('UTF-8'))
+ self.assertIsNotNone(data.xdata)
+ self.assertIsNotNone(data.xdata.find("test2"))
+
+ @skipUnless(has_json, "JSON libraries not found, skipping JSON tests")
+ def test_json(self):
+ jdata = json.dumps(test_data)
+ data = ProbeData(jdata)
+ self.assertIsNotNone(data.json)
+ self.assertItemsEqual(test_data, data.json)
+
+ @skipUnless(has_yaml, "YAML libraries not found, skipping YAML tests")
+ def test_yaml(self):
+ jdata = yaml.dump(test_data)
+ data = ProbeData(jdata)
+ self.assertIsNotNone(data.yaml)
+ self.assertItemsEqual(test_data, data.yaml)
+
+
+class TestProbeSet(TestEntrySet):
+ test_obj = ProbeSet
+ basenames = ["test", "_test", "test-test"]
+ ignore = ["foo~", ".#foo", ".foo.swp", ".foo.swx", "probed.xml"]
+ bogus_names = ["test.py"]
+
+ def get_obj(self, path=datastore, fam=None, encoding=None,
+ plugin_name="Probes", basename=None):
+ # get_obj() accepts the basename argument used by the parent
+ # get_obj() method and just throws it away, since ProbeSet
+ # uses a regex for the "basename"
+ if fam is None:
+ fam = Mock()
+ rv = self.test_obj(path, fam, encoding, plugin_name)
+ rv.entry_type = MagicMock()
+ return rv
+
+ def test__init(self):
+ fam = Mock()
+ ps = self.get_obj(fam=fam)
+ self.assertEqual(ps.plugin_name, "Probes")
+ fam.AddMonitor.assert_called_with(datastore, ps)
+ TestEntrySet.test__init(self)
+
+ def test_HandleEvent(self):
+ ps = self.get_obj()
+ ps.handle_event = Mock()
+
+ # test that events on the data store itself are skipped
+ evt = Mock()
+ evt.filename = datastore
+ ps.HandleEvent(evt)
+ self.assertFalse(ps.handle_event.called)
+
+ # test that events on probed.xml are skipped
+ evt.reset_mock()
+ evt.filename = "probed.xml"
+ ps.HandleEvent(evt)
+ self.assertFalse(ps.handle_event.called)
+
+ # test that other events are processed appropriately
+ evt.reset_mock()
+ evt.filename = "fooprobe"
+ ps.HandleEvent(evt)
+ ps.handle_event.assert_called_with(evt)
+
+ @patch("%s.list" % builtins, FakeList)
+ def test_get_probe_data(self):
+ ps = self.get_obj()
+
+ # build some fairly complex test data for this. in the end,
+ # we want the probe data to include only the most specific
+ # version of a given probe, and by basename only, not full
+ # (specific) name. We don't fully test the specificity stuff,
+ # we just check to make sure sort() is called and trust that
+ # sort() does the right thing on Specificity objects. (I.e.,
+ # trust that Specificity is well-tested. Hah!) We also test
+ # to make sure the interpreter is determined correctly.
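+ # (the expectation, roughly: a probe whose source begins with "#!"
+ # reports the rest of that first line as its interpreter, otherwise a
+ # default shell is assumed -- a simplified sketch of the behavior the
+ # assertions below check, not the implementation itself)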
+ ps.get_matching = Mock()
+ matching = FakeList()
+ matching.sort = Mock()
+
+ p1 = Mock()
+ p1.specific = Bcfg2.Server.Plugin.Specificity(group=True, prio=10)
+ p1.name = "fooprobe.G10_foogroup"
+ p1.data = """#!/bin/bash
+group-specific"""
+ matching.append(p1)
+
+ p2 = Mock()
+ p2.specific = Bcfg2.Server.Plugin.Specificity(all=True)
+ p2.name = "fooprobe"
+ p2.data = "#!/bin/bash"
+ matching.append(p2)
+
+ p3 = Mock()
+ p3.specific = Bcfg2.Server.Plugin.Specificity(all=True)
+ p3.name = "barprobe"
+ p3.data = "#! /usr/bin/env python"
+ matching.append(p3)
+
+ p4 = Mock()
+ p4.specific = Bcfg2.Server.Plugin.Specificity(all=True)
+ p4.name = "bazprobe"
+ p4.data = ""
+ matching.append(p4)
+
+ ps.get_matching.return_value = matching
+
+ metadata = Mock()
+ pdata = ps.get_probe_data(metadata)
+ ps.get_matching.assert_called_with(metadata)
+ # we can't create a matching operator.attrgetter object, and I
+ # don't feel the need to mock that out -- this is a good
+ # enough check
+ self.assertTrue(matching.sort.called)
+
+ self.assertEqual(len(pdata), 3,
+ "Found: %s" % [p.get("name") for p in pdata])
+ for probe in pdata:
+ if probe.get("name") == "fooprobe":
+ self.assertIn("group-specific", probe.text)
+ self.assertEqual(probe.get("interpreter"), "/bin/bash")
+ elif probe.get("name") == "barprobe":
+ self.assertEqual(probe.get("interpreter"),
+ "/usr/bin/env python")
+ elif probe.get("name") == "bazprobe":
+ self.assertIsNotNone(probe.get("interpreter"))
+ else:
+ assert False, "Strange probe found in get_probe_data() return"
+
+
+class TestProbes(TestProbing, TestConnector, TestDatabaseBacked):
+ test_obj = Probes
+
+ def get_test_probedata(self):
+ test_xdata = lxml.etree.Element("test")
+ lxml.etree.SubElement(test_xdata, "test", foo="foo")
+ rv = dict()
+ rv["foo.example.com"] = ClientProbeDataSet(timestamp=time.time())
+ rv["foo.example.com"]["xml"] = \
+ ProbeData(lxml.etree.tostring(test_xdata,
+ xml_declaration=False).decode('UTF-8'))
+ rv["foo.example.com"]["text"] = ProbeData("freeform text")
+ rv["foo.example.com"]["multiline"] = ProbeData("""multiple
+lines
+of
+freeform
+text
+""")
+ rv["bar.example.com"] = ClientProbeDataSet(timestamp=time.time())
+ rv["bar.example.com"]["empty"] = ProbeData("")
+ if has_yaml:
+ rv["bar.example.com"]["yaml"] = ProbeData(yaml.dump(test_data))
+ if has_json:
+ rv["bar.example.com"]["json"] = ProbeData(json.dumps(test_data))
+ return rv
+
+ def get_test_cgroups(self):
+ return {"foo.example.com": ["group", "group with spaces",
+ "group-with-dashes"],
+ "bar.example.com": []}
+
+ def get_probes_object(self, use_db=False, load_data=None):
+ core = Mock()
+ core.setup.cfp.getboolean = Mock()
+ core.setup.cfp.getboolean.return_value = use_db
+ if load_data is None:
+ load_data = MagicMock()
+ # we have to patch load_data() in a funny way because
+ # different versions of Mock have different scopes for
+ # patching. in some versions, a patch applied to
+ # get_probes_object() would only apply to that function, while
+ # in others it would also apply to the calling function (e.g.,
+ # test__init(), which relies on being able to check the calls
+ # of load_data(), and thus on load_data() being consistently
+ # mocked)
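+ # applying the patch to the nested inner() and calling it right away
+ # keeps load_data() mocked for exactly the duration of the Probes()
+ # constructor call, whichever Mock scoping behavior is in effect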
+ @patch("Bcfg2.Server.Plugins.Probes.Probes.load_data", new=load_data)
+ def inner():
+ return Probes(core, datastore)
+
+ return inner()
+
+ def test__init(self):
+ mock_load_data = Mock()
+ probes = self.get_probes_object(load_data=mock_load_data)
+ probes.core.fam.AddMonitor.assert_called_with(os.path.join(datastore,
+ probes.name),
+ probes.probes)
+ mock_load_data.assert_any_call()
+ self.assertEqual(probes.probedata, ClientProbeDataSet())
+ self.assertEqual(probes.cgroups, dict())
+
+ @patch("Bcfg2.Server.Plugins.Probes.Probes.load_data", Mock())
+ def test__use_db(self):
+ probes = self.get_probes_object()
+ self.assertFalse(probes._use_db)
+ probes.core.setup.cfp.getboolean.assert_called_with("probes",
+ "use_database",
+ default=False)
+
+ @skipUnless(has_django, "Django not found, skipping")
+ @patch("Bcfg2.Server.Plugins.Probes.Probes._write_data_db", Mock())
+ @patch("Bcfg2.Server.Plugins.Probes.Probes._write_data_xml", Mock())
+ def test_write_data_xml(self):
+ probes = self.get_probes_object(use_db=False)
+ probes.write_data("test")
+ probes._write_data_xml.assert_called_with("test")
+ self.assertFalse(probes._write_data_db.called)
+
+ @skipUnless(has_django, "Django not found, skipping")
+ @patch("Bcfg2.Server.Plugins.Probes.Probes._write_data_db", Mock())
+ @patch("Bcfg2.Server.Plugins.Probes.Probes._write_data_xml", Mock())
+ def test_write_data_db(self):
+ probes = self.get_probes_object(use_db=True)
+ probes.write_data("test")
+ probes._write_data_db.assert_called_with("test")
+ self.assertFalse(probes._write_data_xml.called)
+
+ @patch("%s.open" % builtins)
+ def test__write_data_xml(self, mock_open):
+ probes = self.get_probes_object(use_db=False)
+ probes.probedata = self.get_test_probedata()
+ probes.cgroups = self.get_test_cgroups()
+ probes._write_data_xml(None)
+
+ mock_open.assert_called_with(os.path.join(datastore, probes.name,
+ "probed.xml"), "w")
+ data = lxml.etree.XML(mock_open.return_value.write.call_args[0][0])
+ self.assertEqual(len(data.xpath("//Client")), 2)
+
+ foodata = data.find("Client[@name='foo.example.com']")
+ self.assertIsNotNone(foodata)
+ self.assertIsNotNone(foodata.get("timestamp"))
+ self.assertEqual(len(foodata.findall("Probe")),
+ len(probes.probedata['foo.example.com']))
+ self.assertEqual(len(foodata.findall("Group")),
+ len(probes.cgroups['foo.example.com']))
+ xml = foodata.find("Probe[@name='xml']")
+ self.assertIsNotNone(xml)
+ self.assertIsNotNone(xml.get("value"))
+ xdata = lxml.etree.XML(xml.get("value"))
+ self.assertIsNotNone(xdata)
+ self.assertIsNotNone(xdata.find("test"))
+ self.assertEqual(xdata.find("test").get("foo"), "foo")
+ text = foodata.find("Probe[@name='text']")
+ self.assertIsNotNone(text)
+ self.assertIsNotNone(text.get("value"))
+ multiline = foodata.find("Probe[@name='multiline']")
+ self.assertIsNotNone(multiline)
+ self.assertIsNotNone(multiline.get("value"))
+ self.assertGreater(len(multiline.get("value").splitlines()), 1)
+
+ bardata = data.find("Client[@name='bar.example.com']")
+ self.assertIsNotNone(bardata)
+ self.assertIsNotNone(bardata.get("timestamp"))
+ self.assertEqual(len(bardata.findall("Probe")),
+ len(probes.probedata['bar.example.com']))
+ self.assertEqual(len(bardata.findall("Group")),
+ len(probes.cgroups['bar.example.com']))
+ empty = bardata.find("Probe[@name='empty']")
+ self.assertIsNotNone(empty)
+ self.assertIsNotNone(empty.get("value"))
+ self.assertEqual(empty.get("value"), "")
+ if has_yaml:
+ ydata = bardata.find("Probe[@name='yaml']")
+ self.assertIsNotNone(ydata)
+ self.assertIsNotNone(ydata.get("value"))
+ self.assertItemsEqual(test_data, yaml.load(ydata.get("value")))
+ if has_json:
+ jdata = bardata.find("Probe[@name='json']")
+ self.assertIsNotNone(jdata)
+ self.assertIsNotNone(jdata.get("value"))
+ self.assertItemsEqual(test_data, json.loads(jdata.get("value")))
+
+ @skipUnless(has_django, "Django not found, skipping")
+ def test__write_data_db(self):
+ syncdb(TestProbesDB)
+ probes = self.get_probes_object(use_db=True)
+ probes.probedata = self.get_test_probedata()
+ probes.cgroups = self.get_test_cgroups()
+
+ for cname in ["foo.example.com", "bar.example.com"]:
+ client = Mock()
+ client.hostname = cname
+ probes._write_data_db(client)
+
+ pdata = ProbesDataModel.objects.filter(hostname=cname).all()
+ self.assertEqual(len(pdata), len(probes.probedata[cname]))
+
+ for probe in pdata:
+ self.assertEqual(probe.hostname, client.hostname)
+ self.assertIsNotNone(probe.data)
+ if probe.probe == "xml":
+ xdata = lxml.etree.XML(probe.data)
+ self.assertIsNotNone(xdata)
+ self.assertIsNotNone(xdata.find("test"))
+ self.assertEqual(xdata.find("test").get("foo"), "foo")
+ elif probe.probe == "text":
+ pass
+ elif probe.probe == "multiline":
+ self.assertGreater(len(probe.data.splitlines()), 1)
+ elif probe.probe == "empty":
+ self.assertEqual(probe.data, "")
+ elif probe.probe == "yaml":
+ self.assertItemsEqual(test_data, yaml.load(probe.data))
+ elif probe.probe == "json":
+ self.assertItemsEqual(test_data, json.loads(probe.data))
+ else:
+ assert False, "Strange probe found in _write_data_db data"
+
+ pgroups = ProbesGroupsModel.objects.filter(hostname=cname).all()
+ self.assertEqual(len(pgroups), len(probes.cgroups[cname]))
+
+ # test that old probe data is removed properly
+ cname = 'foo.example.com'
+ del probes.probedata[cname]['text']
+ probes.cgroups[cname].pop()
+ client = Mock()
+ client.hostname = cname
+ probes._write_data_db(client)
+
+ pdata = ProbesDataModel.objects.filter(hostname=cname).all()
+ self.assertEqual(len(pdata), len(probes.probedata[cname]))
+ pgroups = ProbesGroupsModel.objects.filter(hostname=cname).all()
+ self.assertEqual(len(pgroups), len(probes.cgroups[cname]))
+
+ @skipUnless(has_django, "Django not found, skipping")
+ @patch("Bcfg2.Server.Plugins.Probes.Probes._load_data_db", Mock())
+ @patch("Bcfg2.Server.Plugins.Probes.Probes._load_data_xml", Mock())
+ def test_load_data_xml(self):
+ probes = self.get_probes_object(use_db=False)
+ probes.load_data()
+ probes._load_data_xml.assert_any_call()
+ self.assertFalse(probes._load_data_db.called)
+
+ @skipUnless(has_django, "Django not found, skipping")
+ @patch("Bcfg2.Server.Plugins.Probes.Probes._load_data_db", Mock())
+ @patch("Bcfg2.Server.Plugins.Probes.Probes._load_data_xml", Mock())
+ def test_load_data_db(self):
+ probes = self.get_probes_object(use_db=True)
+ probes.load_data()
+ probes._load_data_db.assert_any_call()
+ self.assertFalse(probes._load_data_xml.called)
+
+ @patch("%s.open" % builtins)
+ @patch("lxml.etree.parse")
+ def test__load_data_xml(self, mock_parse, mock_open):
+ probes = self.get_probes_object(use_db=False)
+ # to get the value for lxml.etree.parse to parse, we call
+ # _write_data_xml, mock the open() call, and grab the data
+ # that gets "written" to probed.xml
+ probes.probedata = self.get_test_probedata()
+ probes.cgroups = self.get_test_cgroups()
+ probes._write_data_xml(None)
+ xdata = \
+ lxml.etree.XML(str(mock_open.return_value.write.call_args[0][0]))
+ mock_parse.return_value = xdata.getroottree()
+ probes.probedata = dict()
+ probes.cgroups = dict()
+
+ probes._load_data_xml()
+ mock_parse.assert_called_with(os.path.join(datastore, probes.name,
+ 'probed.xml'),
+ parser=Bcfg2.Server.XMLParser)
+ self.assertItemsEqual(probes.probedata, self.get_test_probedata())
+ self.assertItemsEqual(probes.cgroups, self.get_test_cgroups())
+
+ @skipUnless(has_django, "Django not found, skipping")
+ def test__load_data_db(self):
+ syncdb(TestProbesDB)
+ probes = self.get_probes_object(use_db=True)
+ probes.probedata = self.get_test_probedata()
+ probes.cgroups = self.get_test_cgroups()
+ for cname in probes.probedata.keys():
+ client = Mock()
+ client.hostname = cname
+ probes._write_data_db(client)
+
+ probes.probedata = dict()
+ probes.cgroups = dict()
+ probes._load_data_db()
+ self.assertItemsEqual(probes.probedata, self.get_test_probedata())
+ # the db backend does not store groups at all if a client has
+ # no groups set, so we can't just use assertItemsEqual here,
+ # because loading saved data may _not_ result in the original
+ # data if some clients had no groups set.
+ test_cgroups = self.get_test_cgroups()
+ for cname, groups in test_cgroups.items():
+ if cname in probes.cgroups:
+ self.assertEqual(groups, probes.cgroups[cname])
+ else:
+ self.assertEqual(groups, [])
+
+ @patch("Bcfg2.Server.Plugins.Probes.ProbeSet.get_probe_data")
+ def test_GetProbes(self, mock_get_probe_data):
+ TestProbing.test_GetProbes(self)
+
+ probes = self.get_probes_object()
+ metadata = Mock()
+ probes.GetProbes(metadata)
+ mock_get_probe_data.assert_called_with(metadata)
+
+ @patch("Bcfg2.Server.Plugins.Probes.Probes.write_data")
+ @patch("Bcfg2.Server.Plugins.Probes.Probes.ReceiveDataItem")
+ def test_ReceiveData(self, mock_ReceiveDataItem, mock_write_data):
+ TestProbing.test_ReceiveData(self)
+
+ # we use a simple (read: bogus) datalist here to make this
+ # easy to test
+ datalist = ["a", "b", "c"]
+
+ probes = self.get_probes_object()
+ client = Mock()
+ client.hostname = "foo.example.com"
+ probes.ReceiveData(client, datalist)
+
+ self.assertItemsEqual(mock_ReceiveDataItem.call_args_list,
+ [call(client, "a"), call(client, "b"),
+ call(client, "c")])
+ mock_write_data.assert_called_with(client)
+
+ def test_ReceiveDataItem(self):
+ probes = self.get_probes_object()
+ for cname, cdata in self.get_test_probedata().items():
+ client = Mock()
+ client.hostname = cname
+ for pname, pdata in cdata.items():
+ dataitem = lxml.etree.Element("Probe", name=pname)
+ if pname == "text":
+ # add some groups to the plaintext test to test
+ # group parsing
+ data = [pdata]
+ for group in self.get_test_cgroups()[cname]:
+ data.append("group:%s" % group)
+ dataitem.text = "\n".join(data)
+ else:
+ dataitem.text = str(pdata)
+
+ probes.ReceiveDataItem(client, dataitem)
+
+ self.assertIn(client.hostname, probes.probedata)
+ self.assertIn(pname, probes.probedata[cname])
+ self.assertEqual(pdata, probes.probedata[cname][pname])
+ self.assertIn(client.hostname, probes.cgroups)
+ self.assertEqual(probes.cgroups[cname],
+ self.get_test_cgroups()[cname])
+
+ def test_get_additional_groups(self):
+ TestConnector.test_get_additional_groups(self)
+
+ probes = self.get_probes_object()
+ test_cgroups = self.get_test_cgroups()
+ probes.cgroups = self.get_test_cgroups()
+ for cname in test_cgroups.keys():
+ metadata = Mock()
+ metadata.hostname = cname
+ self.assertEqual(test_cgroups[cname],
+ probes.get_additional_groups(metadata))
+ # test a non-existent client
+ metadata = Mock()
+ metadata.hostname = "nonexistent"
+ self.assertEqual(probes.get_additional_groups(metadata),
+ list())
+
+ def test_get_additional_data(self):
+ TestConnector.test_get_additional_data(self)
+
+ probes = self.get_probes_object()
+ test_probedata = self.get_test_probedata()
+ probes.probedata = self.get_test_probedata()
+ for cname in test_probedata.keys():
+ metadata = Mock()
+ metadata.hostname = cname
+ self.assertEqual(test_probedata[cname],
+ probes.get_additional_data(metadata))
+ # test a non-existent client
+ metadata = Mock()
+ metadata.hostname = "nonexistent"
+ self.assertEqual(probes.get_additional_data(metadata),
+ ClientProbeDataSet())
+
+
diff --git a/testsuite/Testlib/TestServer/TestPlugins/TestSEModules.py b/testsuite/Testlib/TestServer/TestPlugins/TestSEModules.py
new file mode 100644
index 000000000..bacb80d3f
--- /dev/null
+++ b/testsuite/Testlib/TestServer/TestPlugins/TestSEModules.py
@@ -0,0 +1,109 @@
+import os
+import sys
+import lxml.etree
+import Bcfg2.Server.Plugin
+from Bcfg2.Bcfg2Py3k import b64encode
+from mock import Mock, MagicMock, patch
+from Bcfg2.Server.Plugins.SEModules import *
+
+# add all parent testsuite directories to sys.path to allow (most)
+# relative imports in python 2.4
+path = os.path.dirname(__file__)
+while path != "/":
+ if os.path.basename(path).lower().startswith("test"):
+ sys.path.append(path)
+ if os.path.basename(path) == "testsuite":
+ break
+ path = os.path.dirname(path)
+from common import XI_NAMESPACE, XI, inPy3k, call, builtins, u, can_skip, \
+ skip, skipIf, skipUnless, Bcfg2TestCase, DBModelTestCase, syncdb, \
+ patchIf, datastore
+from TestPlugin import TestSpecificData, TestGroupSpool
+
+class TestSEModuleData(TestSpecificData):
+ test_obj = SEModuleData
+ path = os.path.join(datastore, "SEModules", "test.pp", "test.pp")
+
+ def test_bind_entry(self):
+ data = self.get_obj()
+ data.data = "test"
+ entry = lxml.etree.Element("test", name=self.path)
+ data.bind_entry(entry, Mock())
+ self.assertEqual(entry.get("name"), self.path)
+ self.assertEqual(entry.get("encoding"), "base64")
+ self.assertEqual(entry.text, b64encode(data.data))
+
+
+class TestSEModules(TestGroupSpool):
+ test_obj = SEModules
+
+ def test__get_module_name(self):
+ modules = self.get_obj()
+ for mname in ["foo", "foo.pp"]:
+ entry = lxml.etree.Element("SELinux", type="module", name=mname)
+ self.assertEqual(modules._get_module_name(entry), "/foo.pp")
+
+ @patch("Bcfg2.Server.Plugins.SEModules.SEModules._get_module_name")
+ def test_HandlesEntry(self, mock_get_name):
+ modules = self.get_obj()
+ modules.Entries['SELinux']['/foo.pp'] = Mock()
+ modules.Entries['SELinux']['/bar.pp'] = Mock()
+ for el in [lxml.etree.Element("Path", name="/foo.pp"),
+ lxml.etree.Element("SELinux", type="fcontext",
+ name="/foo.pp"),
+ lxml.etree.Element("SELinux", type="module",
+ name="/baz.pp")]:
+ mock_get_name.return_value = el.get("name")
+ self.assertFalse(modules.HandlesEntry(el, Mock()))
+ mock_get_name.assert_called_with(el)
+
+ for el in [lxml.etree.Element("SELinux", type="module",
+ name="/foo.pp"),
+ lxml.etree.Element("SELinux", type="module",
+ name="/bar.pp")]:
+ mock_get_name.return_value = el.get("name")
+ self.assertTrue(modules.HandlesEntry(el, Mock()),
+ msg="SEModules fails to handle %s" % el.get("name"))
+ mock_get_name.assert_called_with(el)
+
+ TestGroupSpool.test_HandlesEntry(self)
+
+ @patch("Bcfg2.Server.Plugins.SEModules.SEModules._get_module_name")
+ def test_HandleEntry(self, mock_get_name):
+ modules = self.get_obj()
+ handler = Mock()
+ modules.Entries['SELinux']['/foo.pp'] = handler
+ mock_get_name.return_value = "/foo.pp"
+
+ entry = lxml.etree.Element("SELinux", type="module", name="foo")
+ metadata = Mock()
+ self.assertEqual(modules.HandleEntry(entry, metadata),
+ handler.return_value)
+ mock_get_name.assert_called_with(entry)
+ self.assertEqual(entry.get("name"), mock_get_name.return_value)
+ handler.assert_called_with(entry, metadata)
+
+ TestGroupSpool.test_HandlesEntry(self)
+
+ def test_add_entry(self):
+ @patch("%s.%s.event_path" %
+ (self.test_obj.__module__, self.test_obj.__name__))
+ @patch("%s.%s.add_entry" % (self.test_obj.__base__.__module__,
+ self.test_obj.__base__.__name__))
+ def inner(mock_add_entry, mock_event_path):
+ modules = self.get_obj()
+
+ evt = Mock()
+ evt.filename = "test.pp.G10_foo"
+
+ mock_event_path.return_value = os.path.join(datastore,
+ self.test_obj.__name__,
+ "test.pp",
+ "test.pp.G10_foo")
+ modules.add_entry(evt)
+ self.assertEqual(modules.filename_pattern, "test.pp")
+ mock_add_entry.assert_called_with(modules, evt)
+ mock_event_path.assert_called_with(evt)
+
+ inner()
+ TestGroupSpool.test_add_entry(self)
diff --git a/testsuite/Testlib/TestServer/TestPlugins/TestTemplateHelper.py b/testsuite/Testlib/TestServer/TestPlugins/TestTemplateHelper.py
new file mode 100644
index 000000000..556487288
--- /dev/null
+++ b/testsuite/Testlib/TestServer/TestPlugins/TestTemplateHelper.py
@@ -0,0 +1,120 @@
+import os
+import sys
+import Bcfg2.Server.Plugin
+from mock import Mock, MagicMock, patch
+from Bcfg2.Server.Plugins.TemplateHelper import *
+
+# add all parent testsuite directories to sys.path to allow (most)
+# relative imports in python 2.4
+path = os.path.dirname(__file__)
+while path != "/":
+ if os.path.basename(path).lower().startswith("test"):
+ sys.path.append(path)
+ if os.path.basename(path) == "testsuite":
+ break
+ path = os.path.dirname(path)
+from common import XI_NAMESPACE, XI, inPy3k, call, builtins, u, can_skip, \
+ skip, skipIf, skipUnless, Bcfg2TestCase, DBModelTestCase, syncdb, \
+ patchIf, datastore
+from TestPlugin import TestDirectoryBacked, TestConnector, TestPlugin, \
+ TestFileBacked
+
+
+class TestHelperModule(TestFileBacked):
+ test_obj = HelperModule
+ path = os.path.join(datastore, "test.py")
+
+ def test__init(self):
+ hm = self.get_obj()
+ self.assertEqual(hm._module_name, "test")
+ self.assertEqual(hm._attrs, [])
+
+ @patch("imp.load_source")
+ def test_Index(self, mock_load_source):
+ hm = self.get_obj()
+
+ mock_load_source.side_effect = ImportError
+ attrs = dir(hm)
+ hm.Index()
+ mock_load_source.assert_called_with(hm._module_name, hm.name)
+ self.assertEqual(attrs, dir(hm))
+ self.assertEqual(hm._attrs, [])
+
+ mock_load_source.reset()
+ mock_load_source.side_effect = None
+ # a regular Mock (not a MagicMock) won't automatically create
+ # __export__, so this triggers a failure condition in Index
+ mock_load_source.return_value = Mock()
+ attrs = dir(hm)
+ hm.Index()
+ mock_load_source.assert_called_with(hm._module_name, hm.name)
+ self.assertEqual(attrs, dir(hm))
+ self.assertEqual(hm._attrs, [])
+
+ # test reserved attributes
+ module = Mock()
+ module.__export__ = ["_attrs", "Index", "__init__"]
+ mock_load_source.reset()
+ mock_load_source.return_value = module
+ attrs = dir(hm)
+ hm.Index()
+ mock_load_source.assert_called_with(hm._module_name, hm.name)
+ self.assertEqual(attrs, dir(hm))
+ self.assertEqual(hm._attrs, [])
+
+ # test adding attributes
+ module = Mock()
+ module.__export__ = ["foo", "bar", "baz", "Index"]
+ mock_load_source.reset()
+ mock_load_source.return_value = module
+ hm.Index()
+ mock_load_source.assert_called_with(hm._module_name, hm.name)
+ self.assertTrue(hasattr(hm, "foo"))
+ self.assertTrue(hasattr(hm, "bar"))
+ self.assertTrue(hasattr(hm, "baz"))
+ self.assertEqual(hm._attrs, ["foo", "bar", "baz"])
+
+ # test removing attributes
+ module = Mock()
+ module.__export__ = ["foo", "bar", "quux", "Index"]
+ mock_load_source.reset()
+ mock_load_source.return_value = module
+ hm.Index()
+ mock_load_source.assert_called_with(hm._module_name, hm.name)
+ self.assertTrue(hasattr(hm, "foo"))
+ self.assertTrue(hasattr(hm, "bar"))
+ self.assertTrue(hasattr(hm, "quux"))
+ self.assertFalse(hasattr(hm, "baz"))
+ self.assertEqual(hm._attrs, ["foo", "bar", "quux"])
+
+
+class TestHelperSet(TestDirectoryBacked):
+ test_obj = HelperSet
+ testfiles = ['foo.py', 'foo_bar.py', 'foo.bar.py']
+ ignore = ['fooo.py~', 'fooo.pyc', 'fooo.pyo']
+ badevents = ['foo']
+
+
+class TestTemplateHelper(TestPlugin, TestConnector):
+ test_obj = TemplateHelper
+
+ def test__init(self):
+ TestPlugin.test__init(self)
+
+ th = self.get_obj()
+ self.assertIsInstance(th.helpers, HelperSet)
+
+ def test_get_additional_data(self):
+ TestConnector.test_get_additional_data(self)
+
+ th = self.get_obj()
+ modules = ['foo', 'bar']
+ rv = dict()
+ for mname in modules:
+ module = Mock()
+ module._module_name = mname
+ rv[mname] = module
+ th.helpers.entries['%s.py' % mname] = module
+ actual = th.get_additional_data(Mock())
+ self.assertItemsEqual(actual, rv)
diff --git a/testsuite/Testlib/TestServer/TestPlugins/__init__.py b/testsuite/Testlib/TestServer/TestPlugins/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/testsuite/Testlib/TestServer/TestPlugins/__init__.py
diff --git a/testsuite/Testlib/TestServer/__init__.py b/testsuite/Testlib/TestServer/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/testsuite/Testlib/TestServer/__init__.py
diff --git a/testsuite/Testlib/__init__.py b/testsuite/Testlib/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/testsuite/Testlib/__init__.py
diff --git a/testsuite/__init__.py b/testsuite/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/testsuite/__init__.py
diff --git a/testsuite/before_install.sh b/testsuite/before_install.sh
new file mode 100755
index 000000000..c972b6972
--- /dev/null
+++ b/testsuite/before_install.sh
@@ -0,0 +1,9 @@
+#!/bin/bash
+
+# before_install script for Travis-CI
+
+sudo apt-get update -qq
+sudo apt-get install -qq swig
+if [[ "$WITH_OPTIONAL_DEPS" == "yes" ]]; then
+ sudo apt-get install -qq python-selinux python-pylibacl
+fi
diff --git a/testsuite/common.py b/testsuite/common.py
new file mode 100644
index 000000000..42efe9fd6
--- /dev/null
+++ b/testsuite/common.py
@@ -0,0 +1,287 @@
+import os
+import sys
+import unittest
+from mock import patch, MagicMock, _patch, DEFAULT
+
+try:
+ from functools import wraps
+except ImportError:
+ def wraps(wrapped):
+ return lambda f: f
+
+datastore = "/"
+
+XI_NAMESPACE = "http://www.w3.org/2001/XInclude"
+XI = "{%s}" % XI_NAMESPACE
+
+if sys.hexversion >= 0x03000000:
+ inPy3k = True
+else:
+ inPy3k = False
+
+try:
+ from django.core.management import setup_environ
+ has_django = True
+
+ os.environ['DJANGO_SETTINGS_MODULE'] = "Bcfg2.settings"
+
+ import Bcfg2.settings
+ Bcfg2.settings.DATABASE_NAME = \
+ os.path.join(os.path.dirname(os.path.abspath(__file__)), "test.sqlite")
+ Bcfg2.settings.DATABASES['default']['NAME'] = Bcfg2.settings.DATABASE_NAME
+except ImportError:
+ has_django = False
+
+try:
+ from mock import call
+except ImportError:
+ def call(*args, **kwargs):
+ """ the Mock call object is a fairly recent addition, but it's
+ very very useful, so we create our own function to create Mock
+ calls """
+ return (args, kwargs)
+
+if inPy3k:
+ builtins = "builtins"
+
+ def u(x):
+ return x
+else:
+ builtins = "__builtin__"
+
+ import codecs
+ def u(x):
+ return codecs.unicode_escape_decode(x)[0]
+
+
+if hasattr(unittest, "skip"):
+ can_skip = True
+ skip = unittest.skip
+ skipIf = unittest.skipIf
+ skipUnless = unittest.skipUnless
+else:
+ # we can't actually skip tests, we just make them pass
+ can_skip = False
+
+ def skip(msg):
+ def decorator(func):
+ return lambda *args, **kwargs: None
+ return decorator
+
+ def skipIf(condition, msg):
+ if not condition:
+ return lambda f: f
+ else:
+ return skip(msg)
+
+ def skipUnless(condition, msg):
+ if condition:
+ return lambda f: f
+ else:
+ return skip(msg)
+
+
+needs_assertItemsEqual = False
+needs_others = False
+if not hasattr(unittest.TestCase, "assertItemsEqual"):
+ # TestCase in Py3k lacks assertItemsEqual, but has the other
+ # convenience methods. this code is (mostly) cribbed from the
+ # py2.7 unittest library
+ needs_assertItemsEqual = True
+
+ def _count_diff_all_purpose(actual, expected):
+ '''Returns list of (cnt_act, cnt_exp, elem) triples where the
+ counts differ'''
+ # elements need not be hashable
+ s, t = list(actual), list(expected)
+ m, n = len(s), len(t)
+ NULL = object()
+ result = []
+ for i, elem in enumerate(s):
+ if elem is NULL:
+ continue
+ cnt_s = cnt_t = 0
+ for j in range(i, m):
+ if s[j] == elem:
+ cnt_s += 1
+ s[j] = NULL
+ for j, other_elem in enumerate(t):
+ if other_elem == elem:
+ cnt_t += 1
+ t[j] = NULL
+ if cnt_s != cnt_t:
+ diff = (cnt_s, cnt_t, elem)
+ result.append(diff)
+
+ for i, elem in enumerate(t):
+ if elem is NULL:
+ continue
+ cnt_t = 0
+ for j in range(i, n):
+ if t[j] == elem:
+ cnt_t += 1
+ t[j] = NULL
+ diff = (0, cnt_t, elem)
+ result.append(diff)
+ return result
+
+
+if not hasattr(unittest.TestCase, "assertIn"):
+ # versions of TestCase before python 2.7 and python 3.1 lacked a
+ # lot of the really handy convenience methods, so we provide them
+ # -- at least the easy ones and the ones we use.
+ needs_others = True
+
+ def _assertion(predicate, default_msg=None):
+ @wraps(predicate)
+ def inner(self, *args, **kwargs):
+ if 'msg' in kwargs:
+ msg = kwargs['msg']
+ del kwargs['msg']
+ else:
+ msg = default_msg % args
+ assert predicate(*args, **kwargs), msg
+ return inner
+
+
+class Bcfg2TestCase(unittest.TestCase):
+ if needs_assertItemsEqual:
+ def assertItemsEqual(self, expected_seq, actual_seq, msg=None):
+ first_seq, second_seq = list(actual_seq), list(expected_seq)
+ differences = _count_diff_all_purpose(first_seq, second_seq)
+
+ if differences:
+ standardMsg = 'Element counts were not equal:\n'
+ lines = ['First has %d, Second has %d: %r' % diff
+ for diff in differences]
+ diffMsg = '\n'.join(lines)
+ standardMsg = self._truncateMessage(standardMsg, diffMsg)
+ msg = self._formatMessage(msg, standardMsg)
+ self.fail(msg)
+
+ if needs_others:
+ assertIs = _assertion(lambda a, b: a is b, "%s is not %s")
+ assertIsNot = _assertion(lambda a, b: a is not b, "%s is %s")
+ assertIsNone = _assertion(lambda x: x is None, "%s is not None")
+ assertIsNotNone = _assertion(lambda x: x is not None, "%s is None")
+ assertIn = _assertion(lambda a, b: a in b, "%s is not in %s")
+ assertNotIn = _assertion(lambda a, b: a not in b, "%s is in %s")
+ assertIsInstance = _assertion(isinstance, "%s is not instance of %s")
+ assertNotIsInstance = _assertion(lambda a, b: not isinstance(a, b),
+ "%s is instance of %s")
+ assertGreater = _assertion(lambda a, b: a > b,
+ "%s is not greater than %s")
+ assertGreaterEqual = _assertion(lambda a, b: a >= b,
+ "%s is not greater than or equal to %s")
+ assertLess = _assertion(lambda a, b: a < b, "%s is not less than %s")
+ assertLessEqual = _assertion(lambda a, b: a <= b,
+ "%s is not less than or equal to %s")
+
+
+ def assertXMLEqual(self, el1, el2, msg=None):
+ self.assertEqual(el1.tag, el2.tag, msg=msg)
+ self.assertEqual(el1.text, el2.text, msg=msg)
+ self.assertItemsEqual(el1.attrib, el2.attrib, msg=msg)
+ self.assertEqual(len(el1.getchildren()),
+ len(el2.getchildren()))
+ for child1 in el1.getchildren():
+ cname = child1.get("name")
+ self.assertIsNotNone(cname,
+ msg="Element %s has no 'name' attribute" %
+ child1.tag)
+ children2 = el2.xpath("*[@name='%s']" % cname)
+ self.assertEqual(len(children2), 1,
+ msg="More than one element named %s" % cname)
+ self.assertXMLEqual(child1, children2[0], msg=msg)
+
+
+class DBModelTestCase(Bcfg2TestCase):
+ models = []
+
+ @skipUnless(has_django, "Django not found, skipping")
+ def test_syncdb(self):
+ # create the test database
+ setup_environ(Bcfg2.settings)
+ from django.core.management.commands import syncdb
+ cmd = syncdb.Command()
+ cmd.handle_noargs(interactive=False)
+ self.assertTrue(os.path.exists(Bcfg2.settings.DATABASE_NAME))
+
+ @skipUnless(has_django, "Django not found, skipping")
+ def test_cleandb(self):
+ # ensure that we a) can connect to the database; b) start with
+ # a clean database
+ for model in self.models:
+ model.objects.all().delete()
+ self.assertItemsEqual(list(model.objects.all()), [])
+
+
+def syncdb(modeltest):
+ inst = modeltest(methodName='test_syncdb')
+ inst.test_syncdb()
+ inst.test_cleandb()
+
+
+# in order for patchIf() to decorate a function in the same way as
+# patch(), we override the default behavior of __enter__ and __exit__
+# on the _patch() object to basically be noops.
+class _noop_patch(_patch):
+ def __enter__(self):
+ return MagicMock(name=self.attribute)
+
+ def __exit__(self, *args):
+ pass
+
+
+class patchIf(object):
+ """ perform conditional patching. this is necessary because some
+ libraries might not be installed (e.g., selinux, pylibacl), and
+ patching will barf on that. Other workarounds are not available
+ to us; e.g., context managers aren't in python 2.4, and using
+ inner functions doesn't work because python 2.6 applies all
+ decorators at compile-time, not at run-time, so decorating inner
+ functions does not prevent the decorators from being run. """
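+
+ # A minimal usage sketch (the condition, module, and attribute shown
+ # here -- has_selinux, selinux.matchpathcon -- are illustrative only,
+ # not taken from any real test in this suite):
+ #
+ # @patchIf(has_selinux, "selinux.matchpathcon", Mock())
+ # def test_something(self):
+ # ... # the target is patched only when has_selinux is true
+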
+ def __init__(self, condition, target, new=DEFAULT, spec=None, create=False,
+ spec_set=None, autospec=None, new_callable=None, **kwargs):
+ self.condition = condition
+ self.target = target
+ self.patch_args = dict(new=new, spec=spec, create=create,
+ spec_set=spec_set)
+ self.extra_patch_args = dict(autospec=autospec,
+ new_callable=new_callable)
+ self.kwargs = kwargs
+
+ def __call__(self, func):
+ if self.condition:
+ try:
+ # in newer versions of mock, patch() takes arbitrary
+ # keyword arguments
+ args = dict(**self.patch_args)
+ args.update(self.extra_patch_args)
+ args.update(self.kwargs)
+ return patch(self.target, **args)(func)
+ except TypeError:
+ # in older versions of mock, patch() doesn't take
+ # autospec, new_callable or arbitrary keyword
+ # arguments
+ return patch(self.target, **self.patch_args)(func)
+ else:
+ try:
+ args = [lambda: True,
+ self.target.rsplit('.', 1)[-1],
+ self.patch_args['new'], self.patch_args['spec'],
+ self.patch_args['create'], None,
+ self.patch_args['spec_set']]
+ # in older versions of mock _patch() takes 8 args
+ return _noop_patch(*args)(func)
+ except TypeError:
+ # in new versions of mock _patch() takes 10 args
+ args = [lambda: True,
+ self.target.rsplit('.', 1)[-1],
+ self.patch_args['new'], self.patch_args['spec'],
+ self.patch_args['create'], self.patch_args['spec_set'],
+ self.extra_patch_args['autospec'],
+ self.extra_patch_args['new_callable'],
+ self.kwargs]
+ return _noop_patch(*args)(func)
+
diff --git a/testsuite/install.sh b/testsuite/install.sh
new file mode 100755
index 000000000..565e158df
--- /dev/null
+++ b/testsuite/install.sh
@@ -0,0 +1,21 @@
+#!/bin/bash
+
+# install script for Travis-CI
+
+pip install -r testsuite/requirements.txt --use-mirrors
+
+PYVER=$(python -c 'import sys;print ".".join(str(v) for v in sys.version_info[0:2])')
+
+if [[ "$WITH_OPTIONAL_DEPS" == "yes" ]]; then
+ if [[ $PYVER == "2.5" ]]; then
+ # markdown 2.2.0 is broken on py2.5, so until 2.2.1 is released use 2.1
+ pip install --use-mirrors 'markdown<2.2'
+ fi
+ pip install --use-mirrors genshi cheetah 'django<1.4' M2Crypto
+else
+ # python < 2.6 requires M2Crypto for SSL communication, not just
+ # for encryption support
+ if [[ $PYVER == "2.5" || $PYVER == "2.4" ]]; then
+ pip install --use-mirrors M2Crypto
+ fi
+fi
diff --git a/testsuite/requirements.txt b/testsuite/requirements.txt
new file mode 100644
index 000000000..39418e98b
--- /dev/null
+++ b/testsuite/requirements.txt
@@ -0,0 +1,4 @@
+lxml
+nose
+mock
+sphinx
diff --git a/tools/accounts2xml.py b/tools/accounts2xml.py
index 46d38aca9..749f3b68c 100755
--- a/tools/accounts2xml.py
+++ b/tools/accounts2xml.py
@@ -71,7 +71,7 @@ def main(args):
with file(filename, 'w') as modified: modified.write("name:pass:uid:gid:gecos:home:shell\n" + data); modified.close()
safe_filename = "Properties"
except IndexError:
- print "ERROR: Please provide a filename.csv as the first argument"
+ print("ERROR: Please provide a filename.csv as the first argument")
sys.exit()
node_user = "UnixUser"
@@ -108,7 +108,7 @@ def main(args):
output_file = "accounts.xml"
doc.writexml(open(output_file, 'w'), addindent=' ', newl='\n') # Write file
- print "Done: Created %s" % output_file
+ print("Done: Created %s" % output_file)
os.remove(filename)
def get_extra_group_str(group_str, p_group):
diff --git a/tools/bcfg2-profile-templates.py b/tools/bcfg2-profile-templates.py
index 1cecc0274..d60d7584a 100755
--- a/tools/bcfg2-profile-templates.py
+++ b/tools/bcfg2-profile-templates.py
@@ -120,9 +120,9 @@ def main():
continue
if avg > 0.01 or templates:
tmpltimes.append((tmpl, avg))
- print "%-50s %s" % ("Template", "Average Render Time")
+ print("%-50s %s" % ("Template", "Average Render Time"))
for tmpl, avg in reversed(sorted(tmpltimes, key=operator.itemgetter(1))):
- print "%-50s %.02f" % (tmpl, avg)
+ print("%-50s %.02f" % (tmpl, avg))
# TODO: complain about templates that on average were quick but
# for which some clients were slow
diff --git a/tools/bcfg2_svnlog.py b/tools/bcfg2_svnlog.py
index af0624788..5a03bf993 100755
--- a/tools/bcfg2_svnlog.py
+++ b/tools/bcfg2_svnlog.py
@@ -468,7 +468,7 @@ def main():
smtp.sendmail(msg['From'], [msg['To']], msg.as_string())
smtp.quit()
else:
- print "\n".join(body)
+ print("\n".join(body))
if __name__ == "__main__":
sys.exit(main())
diff --git a/tools/export.py b/tools/export.py
index e4f16330d..16229e209 100755
--- a/tools/export.py
+++ b/tools/export.py
@@ -39,7 +39,7 @@ def run(command):
def find_and_replace(f, iftest, rline, startswith=False, dryrun=False):
if dryrun:
inplace = 0
- print "*** dry-run: New '%s' will look like this:" % f
+ print("*** dry-run: New '%s' will look like this:" % f)
else:
inplace = 1
for line in fileinput.input(f, inplace):
@@ -52,7 +52,7 @@ def find_and_replace(f, iftest, rline, startswith=False, dryrun=False):
line = line.replace(line, rline)
sys.stdout.write(line)
if dryrun:
- print "*** End '%s'" % f
+ print("*** End '%s'" % f)
def main():
@@ -87,8 +87,8 @@ def main():
options = p.parse_args()[0]
if options.debug:
- print options
- print "What should debug mode do?"
+ print(options)
+ print("What should debug mode do?")
# py3k compatibility
try:
@@ -113,9 +113,9 @@ def main():
version_info['micro'])
if options.debug:
- print "version is %s" % version
- print "version_info is %s" % version_info
- print "version_release is %s" % version_release
+ print("version is %s" % version)
+ print("version_info is %s" % version_info)
+ print("version_release is %s" % version_release)
if not version_info["major"].isdigit() \
or not version_info["minor"].isdigit() \
@@ -126,11 +126,11 @@ def main():
'IFMinorVersion restrictions in '
'Mac OS X Packaging')
except:
- print """Version must be of the form Major.Minor.MicroBuild,
+ print("""Version must be of the form Major.Minor.MicroBuild,
where Major and Minor are integers and
Micro is a single digit optionally followed by Build (i.e. pre##)
E.G. 1.2.0pre1 is a valid version.
-"""
+""")
quit()
tarname = '/tmp/%s-%s.tar.gz' % (pkgname, version)
@@ -160,8 +160,8 @@ E.G. 1.2.0pre1 is a valid version.
f.write(newchangelog + old)
f.close()
except:
- print "Problem opening debian/changelog"
- print help_message
+ print("Problem opening debian/changelog")
+ print(help_message)
quit()
# Update redhat directory versions
@@ -266,13 +266,13 @@ E.G. 1.2.0pre1 is a valid version.
if options.dryrun:
for cmd in commando_orders:
- print "*** dry-run: %s" % commando[cmd]
+ print("*** dry-run: %s" % commando[cmd])
else:
for cmd in commando_orders:
output = run(commando[cmd])[0].strip()
if options.verbose:
- print output
- print "Ran '%s' with above output." % cmd
+ print(output)
+ print("Ran '%s' with above output." % cmd)
if __name__ == '__main__':
sys.exit(main())
diff --git a/tools/manpagegen/bcfg2-admin.8.ronn b/tools/manpagegen/bcfg2-admin.8.ronn
new file mode 100644
index 000000000..f90865d9c
--- /dev/null
+++ b/tools/manpagegen/bcfg2-admin.8.ronn
@@ -0,0 +1,220 @@
+bcfg2-admin(8) -- Perform repository administration tasks
+=========================================================
+
+## SYNOPSIS
+
+`bcfg2-admin` [-C <configfile>] <mode> [<mode args>] [<mode options>]
+
+## DESCRIPTION
+
+`bcfg2-admin` is used to perform Bcfg2 repository administration tasks.
+
+## OPTIONS
+
+ * `-C` <configfile>:
+ Specify alternate bcfg2.conf location.
+
+ * `-E` <encoding>:
+ Specify the encoding of Cfg files.
+
+ * `-Q` <repository path>:
+ Specify the path to the server repository.
+
+ * `-S` <https://server:port>:
+ Manually specify the server location (as opposed to using the value
+ in bcfg2.conf).
+
+ * `-d`:
+ Enable debugging output.
+
+ * `-h`:
+ Print Usage information.
+
+ * `-o` <logfile path>:
+ Writes a log to the specified path.
+
+ * `-v`:
+ Enable verbose output.
+
+ * `-x` <password>:
+ Use 'password' for client communication.
+
+ * `--ssl-key=`<ssl key>:
+ Specify the path to the SSL key.
+
+## MODES
+
+ * `backup`:
+ Create an archive of the entire Bcfg2 repository.
+
+ * `bundle` <action>:
+ Display details about the available bundles (See [`BUNDLE
+ OPTIONS`](###BUNDLE OPTIONS) below).
+
+ * `client` <action> <client> [attribute=value]:
+ Add, edit, or remove clients entries in metadata (See [`CLIENT
+ OPTIONS`](###CLIENT OPTIONS) below).
+
+ * `compare` <old> <new>:
+ Compare two client configurations. Can be used to verify consistent
+ behavior between releases. Determine differences between files or
+ directories (See [`COMPARE OPTIONS`](###COMPARE OPTIONS) below).
+
+ * `init`:
+ Initialize a new repository (interactive).
+
+ * `minestruct` <client> [-f xml-file] [-g groups]:
+ Build structure entries based on client statistics extra entries
+ (See [`MINESTRUCT OPTIONS`](###MINESTRUCT OPTIONS) below).
+
+ * `perf`:
+ Query server for performance data.
+
+ * `pull` <client> <entry-type> <entry-name>:
+ Install configuration information into repo based on client bad
+ entries (See [`PULL OPTIONS`](###PULL OPTIONS) below).
+
+ * `query` [g=group] [p=profile] [-f output-file] [-n] [-c]:
+ Search for clients based on group or profile (See [`QUERY
+ OPTIONS`](###QUERY OPTIONS) below).
+
+ * `reports` [init|load_stats|purge|scrub|update]:
+ Interact with the dynamic reporting system (See [`REPORTS
+ OPTIONS`](###REPORTS OPTIONS) below).
+
+ * `snapshots` [init|dump|query|reports]:
+ Interact with the Snapshots database (See [`SNAPSHOTS
+ OPTIONS`](###SNAPSHOTS OPTIONS) below).
+
+ * `syncdb`:
+ Sync the Django ORM with the configured database.
+
+ * `tidy`:
+ Remove unused files from repository.
+
+ * `viz` [-H] [-b] [-k] [-o png-file]:
+ Create a graphviz diagram of client, group and bundle information
+ (See [`VIZ OPTIONS`](###VIZ OPTIONS) below).
+
+ * `xcmd`:
+ Provides a XML-RPC Command Interface to the bcfg2-server.
+
+### BUNDLE OPTIONS
+
+ * `mode`:
+ List all available XML bundles ('list-xml') or all available
+ Genshi bundles ('list-genshi'). 'show' provides an interactive
+ dialog to get details about the available bundles.
+
+### CLIENT OPTIONS
+
+ * `mode`:
+ Add a client ('add'), delete a client ('del'), or 'list'
+ all client entries.
+
+ * `client`:
+ Specify the client’s name.
+
+ * `attribute=value`:
+ Set attribute values when adding a new client. Allowed attributes
+ are 'profile', 'uuid', 'password', 'location',
+ 'secure', and 'address'.
+
+### QUERY OPTIONS
+
+ * `b=bundle`:
+ Specify a bundle to search for within client metadata.
+
+ * `g=group`:
+ Specify a group to search within.
+
+ * `p=profile`:
+ Specify a profile to search within.
+
+ * `-f` <output file>:
+ Write the results of the query to a file.
+
+ * `-n`:
+ Print the results, one on each line.
+
+ * `-c`:
+ Print the results, separated by commas.
+
+### COMPARE OPTIONS
+
+ * `old`:
+ Specify the location of the old configuration file.
+
+ * `new`:
+ Specify the location of the new configuration file.
+
+### MINESTRUCT OPTIONS
+
+ * `client`:
+ Client whose metadata is to be searched for extra entries.
+
+ * `-g` <groups>:
+ Hierarchy of groups in which to place the extra entries.
+
+ * `-f` <xml output file>:
+ Specify the xml file in which to write the extra entries.
+
+### PULL OPTIONS
+
+ * `client`:
+ Specify the name of the client to search for.
+
+ * `entry type`:
+ Specify the type of the entry to pull.
+
+ * `entry name`:
+ Specify the name of the entry to pull.
+
+### REPORTS OPTIONS
+
+ * `init`:
+ Initialize the database.
+
+ * `load_stats` [-s] [-c] [-03]:
+ Load statistics data.
+
+ * `purge` [--client [n]] [--days [n]] [--expired]:
+ Purge historic and expired data.
+
+ * `scrub`:
+ Scrub the database for duplicate reasons and orphaned entries.
+
+ * `update`:
+ Apply any updates to the reporting database.
+
+### SNAPSHOTS OPTIONS
+
+ * `init`:
+ Initialize the snapshots database.
+
+ * `query`:
+ Query the snapshots database.
+
+ * `dump`:
+ Dump some of the contents of the snapshots database.
+
+ * `reports` [-a] [-b] [-e] [--date=<MM-DD-YYYY>]:
+ Generate reports for clients in the snapshots database.
+
+### VIZ OPTIONS
+
+ * `-H`:
+ Include hosts in diagram.
+
+ * `-b`:
+ Include bundles in diagram.
+
+ * `-o` <output file>:
+ Write to the specified output file instead of stdout.
+
+ * `-k`:
+ Add a shape/color key.
+
+## SEE ALSO
+
+bcfg2-info(8), bcfg2-server(8)
diff --git a/tools/manpagegen/bcfg2-build-reports.8.ronn b/tools/manpagegen/bcfg2-build-reports.8.ronn
new file mode 100644
index 000000000..43fca5755
--- /dev/null
+++ b/tools/manpagegen/bcfg2-build-reports.8.ronn
@@ -0,0 +1,34 @@
+bcfg2-build-reports(8) -- Generate state reports for Bcfg2 clients
+==================================================================
+
+## SYNOPSIS
+
+`bcfg2-build-reports` [<-A>] [<-c>] [<-s>] [<-N>]
+
+## DESCRIPTION
+
+`bcfg2-build-reports` is used to build all client state reports. See the
+Bcfg2 manual for report setup information.
+
+## OPTIONS
+
+ * `-A`:
+ Displays all data.
+
+ * `-c` <configuration file>:
+ Specify an alternate report configuration path. The default is
+ repo/etc/reports-configuration.xml.
+
+ * `-h`:
+ Produce a help message.
+
+ * `-s` <statistics path>:
+ Use an alternative path for the statistics file. The default is
+ repo/etc/statistics.xml.
+
+ * `-N`:
+ No pinging.
+
+## SEE ALSO
+
+bcfg2(1), bcfg2-server(8)
diff --git a/tools/manpagegen/bcfg2-crypt.8.ronn b/tools/manpagegen/bcfg2-crypt.8.ronn
new file mode 100644
index 000000000..a164d47f1
--- /dev/null
+++ b/tools/manpagegen/bcfg2-crypt.8.ronn
@@ -0,0 +1,108 @@
+bcfg2-crypt(8) -- Bcfg2 encryption and decryption utility
+=========================================================
+
+## SYNOPSIS
+
+`bcfg2-crypt` [<-C configfile>] [--decrypt|--encrypt] [--cfg|--properties] [--remove] [--xpath <xpath>] [-p <passphrase-or-name>] [-v] <filename> [<filename>...]
+
+## DESCRIPTION
+
+`bcfg2-crypt` performs encryption and decryption of Cfg and Properties
+files. It's often sufficient to run `bcfg2-crypt` with only the name
+of the file you wish to encrypt or decrypt; it can usually figure out
+what to do.
+
+## OPTIONS
+
+ * `-C` <configfile>:
+ Specify alternate bcfg2.conf location
+
+ * `--decrypt`, `--encrypt`:
+ Specify which operation you'd like to perform. `bcfg2-crypt` can
+ usually determine which is necessary based on the contents of each
+ file.
+
+ * `--cfg`:
+ Tell `bcfg2-crypt` that an XML file should be encrypted in its
+ entirety rather than element-by-element. This is only necessary
+ if the file is an XML file whose name ends with `.xml` and whose
+ top-level tag is `<Properties>`. See [MODES] below for details.
+
+ * `--properties`:
+ Tell `bcfg2-crypt` to process a file as an XML Properties file,
+ and encrypt the text of each element separately. This is
+ necessary if, for example, you've used a different top-level tag
+ than `<Properties>` in your Properties files. See [MODES] below
+ for details.
+
+ * `--remove`:
+ Remove the plaintext file after it has been encrypted. Only
+ meaningful for Cfg files.
+
+ * `--xpath <xpath>`:
+ Encrypt the character content of all elements that match the
+ specified XPath expression. The default is `*[@encrypted]`
+ or `*`; see [MODES] below for more details. Only meaningful for
+ Properties files.
+
+ * `-p <passphrase>`:
+ Specify the name of a passphrase specified in the `[encryption]`
+ section of `bcfg2.conf`. See [SELECTING PASSPHRASE] below for
+ more details.
+
+ * `-v`:
+ Be verbose.
+
+ * `-h`:
+ Display help and exit.
+
+## MODES
+
+`bcfg2-crypt` can encrypt Cfg files or Properties files; they are
+handled very differently.
+
+ * Cfg:
+ When `bcfg2-crypt` is used on a Cfg file, the entire file is
+ encrypted. This is the default behavior on files that are not
+ XML, or that are XML but whose top-level tag is not
+ `<Properties>`. This can be enforced by use of the `--cfg`
+ option.
+
+ * Properties:
+ When `bcfg2-crypt` is used on a Properties file, it encrypts the
+ character content of elements matching the XPath expression given
+ by `--xpath`. By default the expression is `*[@encrypted]`, which
+ matches all elements with an `encrypted` attribute. If you are
+ encrypting a file and that expression doesn't match any elements,
+ then the default is `*`, which matches everything. When
+ `bcfg2-crypt` encrypts the character content of an element, it
+ also adds the `encrypted` attribute, set to the name of the
+ passphrase used to encrypt that element. When it decrypts an
+ element it does not remove `encrypted`, though; this lets you
+ easily and efficiently run `bcfg2-crypt` against a single
+ Properties file to encrypt and decrypt it without needing to
+ specify a long list of options. See the online Bcfg2 docs on
+ Properties files for more information on how this works.
+
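+For example, after encrypting a Properties file you might end up with
+elements like the following (the tag name, passphrase name, and
+ciphertext shown here are purely illustrative):
+
+ <Properties>
+ <Password encrypted="myphrase">...base64 ciphertext...</Password>
+ </Properties>
+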
+## SELECTING PASSPHRASE
+
+The passphrase used to encrypt or decrypt a file is discovered in the
+following order:
+
+ * First, the passphrase given on the command line using `-p` is
+ used.
+
+ * Next, if exactly one passphrase is specified in `bcfg2.conf`, it
+ will be used.
+
+ * Next, if operating in Properties mode, `bcfg2-crypt` will attempt
+ to read the name of the passphrase from the encrypted elements.
+
+ * Next, if decrypting, all passphrases will be tried sequentially.
+
+ * If no passphrase has been determined at this point, an error is
+ produced and the file being encrypted or decrypted is skipped.
+
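+For example, a named passphrase could be defined in the `[encryption]`
+section of `bcfg2.conf` like this (the name and value here are
+illustrative) and then selected with `-p myphrase`:
+
+ [encryption]
+ myphrase = s3cr3t-passphrase
+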
+## SEE ALSO
+
+bcfg2-server(8)
diff --git a/tools/manpagegen/bcfg2-info.8.ronn b/tools/manpagegen/bcfg2-info.8.ronn
new file mode 100644
index 000000000..e19149ca8
--- /dev/null
+++ b/tools/manpagegen/bcfg2-info.8.ronn
@@ -0,0 +1,110 @@
+bcfg2-info(8) -- Creates a local version of the Bcfg2 server core for state observation
+=======================================================================================
+
+## SYNOPSIS
+
+`bcfg2-info` [<-C configfile>] [-E <encoding>] [-Q <repository path>]
+[-h] [-p] [-x <password>] [<mode>] [<mode args>] [<mode options>]
+
+## DESCRIPTION
+
+`bcfg2-info` instantiates an instance of the Bcfg2 core for data
+examination and debugging purposes.
+
+## OPTIONS
+
+ * `-C` <configfile>:
+ Specify alternate bcfg2.conf location
+
+ * `-E` <encoding>:
+ Specify the encoding of config files.
+
+ * `-Q` <repository path>:
+ Specify the server repository path.
+
+ * `-d`:
+ Run in debug mode.
+
+ * `-h`:
+ Give a bit of help about the command line arguments and options.
+ After this bcfg2-info exits.
+
+ * `-p`:
+ Specify a profile.
+
+ * `-x` <password>:
+ Set the communication password.
+
+## MODES
+
+ * `build` <hostname> <filename>:
+ Build config for hostname, writing to filename.
+
+ * `buildall` <directory>:
+ Build configs for all clients in directory.
+
+ * `buildallfile` <directory> <filename> [<hostnames>]:
+ Build config file for all clients in directory.
+
+ * `buildbundle` <filename> <hostname>:
+ Build bundle for hostname (not written to disk). If filename is a
+ bundle template, it is rendered.
+
+ * `builddir` <hostname> <dirname>:
+ Build config for hostname, writing separate files to dirname.
+
+ * `buildfile` [--altsrc=<altsrc>] <filename> <hostname>:
+ Build config file for hostname (not written to disk).
+
+ * `bundles`:
+ Print out group/bundle information.
+
+ * `clients`:
+ Print out client/profile information.
+
+ * `config`:
+ Print out the configuration of the Bcfg2 server.
+
+ * `debug`:
+ Shell out to native python interpreter.
+
+ * `event_debug`:
+ Display filesystem events as they are processed.
+
+ * `groups`:
+ List groups.
+
+ * `help`:
+ Print the list of available commands.
+
+ * `mappings` [<entry type>] [<entry name>]:
+ Print generator mappings for optional type and name.
+
+ * `packageresolve` <hostname> <package> [<package>...]:
+ Resolve the specified set of packages.
+
+ * `packagesources` <hostname>:
+ Show package sources.
+
+ * `profile` <command> <args>:
+ Profile a single bcfg2-info command.
+
+ * `quit`:
+ Exit bcfg2-info command line.
+
+ * `showentries` <hostname> <type>:
+ Show abstract configuration entries for a given host.
+
+ * `showclient` <client1> <client2>:
+ Show metadata for given hosts.
+
+ * `update`:
+ Process pending file events.
+
+ * `version`:
+ Print version of this tool.
+
+## SEE ALSO
+
+bcfg2(1), bcfg2-server(8)
diff --git a/tools/manpagegen/bcfg2-lint.8.ronn b/tools/manpagegen/bcfg2-lint.8.ronn
new file mode 100644
index 000000000..e089bf2e7
--- /dev/null
+++ b/tools/manpagegen/bcfg2-lint.8.ronn
@@ -0,0 +1,119 @@
+bcfg2-lint(8) -- Check Bcfg2 specification for validity, common mistakes, and style
+===================================================================================
+
+## SYNOPSIS
+
+`bcfg2-lint` [<options>] [<plugin> [<plugin>...]]
+
+## DESCRIPTION
+
+`bcfg2-lint` checks the Bcfg2 specification for schema validity, common
+mistakes, and other criteria. It can be quite helpful in finding typos
+or malformed data.
+
+`bcfg2-lint` exits with a return value of 2 if errors were found, and 3
+if warnings (but no errors) were found. Any other non-0 exit value
+denotes some failure in the script itself.
+
+`bcfg2-lint` is a rewrite of the older bcfg2-repo-validate tool.
+
+## OPTIONS
+
+ * `-C` <configfile>:
+ Specify alternate bcfg2.conf location.
+
+ * `-Q`:
+ Specify the server repository path.
+
+ * `-v`:
+ Be verbose.
+
+ * `--lint-config`:
+ Specify path to bcfg2-lint.conf (default `/etc/bcfg2-lint.conf`).
+
+ * `--stdin`:
+ Rather than operating on all files in the Bcfg2 specification, only
+ validate a list of files supplied on stdin. This mode is
+ particularly useful in pre-commit hooks.
+
+ This makes a few assumptions:
+
+ Metadata files will only be checked if a valid chain of XIncludes
+ can be followed all the way from clients.xml or groups.xml. Since
+ there are multiple formats of metadata stored in Metadata/ (i.e.,
+ clients and groups), there is no way to determine which sort of
+ data a file contains unless there is a valid chain of XIncludes.
+ It may be useful to always specify that all metadata files should
+ be checked, even if not all of them have changed.
+
+ Property files will only be validated if both the property file
+ itself and its matching schema are included on stdin. (A pipeline
+ sketch follows this option list.)
+
+ * `--require-schema`:
+ Require property files to have matching schema files.
+
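+For example, a pre-commit hook might feed the list of changed files to
+`bcfg2-lint` on standard input; the exact command that produces the
+list depends on your VCS, so this is only a sketch:
+
+ git diff --cached --name-only | bcfg2-lint --stdin
+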
+## PLUGINS
+
+See `bcfg2-lint.conf`(5) for more information on the configuration of
+the plugins listed below.
+
+ * `Bundles`:
+ Check the specification for several issues with Bundler: bundles
+ referenced in metadata but not found in `Bundler/`; bundles whose
+ *name* attribute does not match the filename; and Genshi template
+ bundles that use the *<Group>* tag (which is not processed in
+ templated bundles).
+
+ * `Comments`:
+ Check the specification for VCS keywords and any comments that are
+ required. By default, this only checks that the *$Id$* keyword is
+ included and expanded in all files. You may specify VCS keywords to
+ check and comments to be required in the config file. (For instance,
+ you might require that every file have a "Maintainer" comment.)
+
+ In XML files, only comments are checked for the keywords and
+ comments required.
+
+ * `Duplicates`:
+ Check for several types of duplicates in the Metadata: duplicate
+ groups; duplicate clients; and multiple default groups.
+
+ * `InfoXML`:
+ Check that certain attributes are specified in `info.xml` files. By
+ default, requires that *owner*, *group*, and *perms* are specified.
+ Can also require that an `info.xml` exists for all Cfg files, and
+ that paranoid mode be enabled for all files.
+
+ * `MergeFiles`:
+ Suggest that similar probes and config files be merged into single
+ probes or TGenshi templates.
+
+ * `Pkgmgr`:
+ Check for duplicate packages specified in Pkgmgr.
+
+ * `RequiredAttrs`:
+ Check that all *Path* and *BoundPath* tags have the attributes that
+ are required by their type (e.g., a path of type symlink must have
+ both *name* and *to* specified to be valid). This sort of validation
+ is beyond the scope of an XML schema.
+
+ * `Validate`:
+ Validate the Bcfg2 specification against the XML schemas.
+
+ Property files are freeform XML, but if a `.xsd` file with a
+ matching filename is provided, then schema validation will be
+ performed on property files individually as well. For instance, if
+ you have a property file named `ntp.xml` then by placing a schema
+ for that file in `ntp.xsd` schema validation will be performed on
+ `ntp.xml`.
+
+## BUGS
+
+`bcfg2-lint` may not handle some older plugins as well as it handles
+newer ones. For instance, there may be some places where it expects all
+of your configuration files to be handled by Cfg rather than by a mix of
+Cfg and TGenshi or TCheetah.
+
+## SEE ALSO
+
+bcfg2(1), bcfg2-server(8), bcfg2-lint.conf(5)
diff --git a/tools/manpagegen/bcfg2-lint.conf.5.ronn b/tools/manpagegen/bcfg2-lint.conf.5.ronn
new file mode 100644
index 000000000..657ea6e74
--- /dev/null
+++ b/tools/manpagegen/bcfg2-lint.conf.5.ronn
@@ -0,0 +1,114 @@
+bcfg2-lint.conf(5) -- configuration parameters for bcfg2-lint
+=============================================================
+
+## DESCRIPTION
+
+`bcfg2-lint.conf` includes configuration parameters for `bcfg2-lint`.
+
+## FILE FORMAT
+
+The file is INI-style and consists of sections and options. A section
+begins with the name of the sections in square brackets and continues
+until the next section begins.
+
+Options are specified in the form "name=value".
+
+The file is line-based: each newline-terminated line represents either
+a comment, a section name, or an option.
+
+Any line beginning with a hash (#) is ignored, as are lines containing
+only whitespace.
+
+The file consists of one `[lint]` section, up to one `[errors]` section,
+and then any number of plugin-specific sections, documented below.
+(Note that this makes it quite feasible to combine your
+`bcfg2-lint.conf` into your `bcfg2.conf`(5) file, if you so desire).
+
+## GLOBAL OPTIONS
+
+These options apply to `bcfg2-lint` generally, and must be in the
+`[lint]` section.
+
+ * `plugins`:
+ A comma-delimited list of plugins to run. By default, all plugins
+ are run. This can be overridden by listing plugins on the command
+ line. See `bcfg2-lint`(8) for a list of the available plugins.
+
+## ERROR HANDLING
+
+Error handling is configured in the `[errors]` section. Each option
+should be the name of an error and one of *error*, *warning*, or
+*silent*, which tells `bcfg2-lint`(8) how to handle the warning. Error
+names and their defaults can be displayed by running `bcfg2-lint`(8)
+with the `--list-errors` option.
+
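+For example, a small `bcfg2-lint.conf` might enable only a few plugins
+and downgrade one error to a warning (the error name below is a
+placeholder; use `--list-errors` to see the real names):
+
+ [lint]
+ plugins = Duplicates,InfoXML,Validate
+
+ [errors]
+ example-error-name = warning
+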
+## PLUGIN OPTIONS
+
+These options apply only to a single plugin. Each option should be in a
+section named for its plugin; for instance, options for the InfoXML
+plugin would be in a section called `[InfoXML]`.
+
+If a plugin is not listed below, then it has no configuration.
+
+In many cases, the behavior of a plugin can be configured by modifying
+how errors from it are handled. See [`ERROR HANDLING`](###ERROR HANDLING),
+above.
+
+### Comments
+
+The `Comments` plugin configuration specifies which VCS keywords and
+comments are required for which file types. The valid types of file are
+*global* (all file types), *bundler* (non-templated bundle files),
+*sgenshi* (templated bundle files), *properties* (property files), *cfg*
+(non-templated Cfg files), *tgenshi* (templated Cfg files), *infoxml*
+(info.xml files), and *probe* (probe files).
+
+The specific types (i.e., types other than "global") all supplement
+global; they do not override it. The exception is if you specify an
+empty option, e.g.:
+
+ cfg_keywords =
+
+By default, the *$Id$* keyword is checked for and nothing else.
+
+Multiple keywords or comments should be comma-delimited.
+
+· `<type>_keywords`
+
+Ensure that files of the specified type have the given VCS keyword. Do
+*not* include the dollar signs. I.e.:
+
+ infoxml_keywords = Revision
+
+*not*:
+
+ infoxml_keywords = $Revision$
+
+· `<type>_comments`
+
+Ensure that files of the specified type have a comment containing the
+given string. In XML files, only comments are checked. In plain text
+files, all lines are checked since comment characters may vary.
+
+### InfoXML
+
+ * `required_attrs`:
+ A comma-delimited list of attributes to require on `<Info>` tags.
+ Default is "owner,group,perms".
+
+### MergeFiles
+
+ * `threshold`:
+ The threshold at which MergeFiles will suggest merging config files
+ and probes. Default is 75% similar.
+
+### Validate
+
+ * `schema`:
+ The full path to the XML Schema files. Default is
+ `/usr/share/bcfg2/schema`. This can be overridden with the
+ *--schema* command-line option.
+
+## SEE ALSO
+
+bcfg2-lint(8)
diff --git a/tools/manpagegen/bcfg2-reports.8.ronn b/tools/manpagegen/bcfg2-reports.8.ronn
new file mode 100644
index 000000000..1cb999dc7
--- /dev/null
+++ b/tools/manpagegen/bcfg2-reports.8.ronn
@@ -0,0 +1,82 @@
+bcfg2-reports(8) -- Query reporting system for client status
+============================================================
+
+## SYNOPSIS
+
+`bcfg2-reports` [-a] [-b <NAME>] [-c] [-d] [-e <NAME>] [-h] [-m <NAME>]
+[-s <NAME>] [-x <NAME>] [--badentry=<KIND,NAME>]
+[--extraentry=<KIND,NAME>] [--fields=<ARG1,ARG2,...>]
+[--modifiedentry=<KIND,NAME>] [--sort=<ARG1,ARG2,...>] [--stale] [-v]
+
+## DESCRIPTION
+
+`bcfg2-reports` allows you to retrieve data from the database about
+clients, and the states of their current interactions. It also allows
+you to change the expired/unexpired states. The utility runs as a
+standalone application. It does, however, use the models from
+`/src/lib/Server/Reports/reports/models.py`.
+
+## OPTIONS
+
+ * `-a`:
+ Specify alternate bcfg2.conf location
+
+ * `-b` <hostname>:
+ Single host mode - shows bad entries from the current interaction of
+ *hostname*.
+
+ * `-c`:
+ Shows only clean hosts.
+
+ * `-d`:
+ Shows only dirty hosts.
+
+ * `-e` <hostname>:
+ Single host mode - shows extra entries from the current interaction
+ of *hostname*.
+
+ * `-h`:
+ Shows help and usage info about `bcfg2-reports`.
+
+ * `-m` <hostname>:
+ Single host mode - shows modified entries from the current
+ interaction of *hostname*.
+
+ * `-s` <hostname>:
+ Single host mode - shows bad, modified, and extra entries from the
+ current interaction of *hostname*.
+
+ * `-x` <hostname>:
+ Toggles expired/unexpired state of *hostname*.
+
+ * `--badentry=`<entry type, entry name>:
+ Shows only hosts whose current interaction has bad entries of type
+ *entry type* and name *entry name*. If a single argument ARG1 is
+ given, then *entry type*,*entry name* pairs will be read from a file
+   named ARG1.
+
+ * `--extraentry=`<entry type, entry name>:
+ Shows only hosts whose current interaction has extra entries of type
+ *entry type* and name *entry name*. If a single argument ARG1 is
+ given, then *entry type*,*entry name* pairs will be read from a file
+   named ARG1.
+
+ * `--fields=`<ARG1,ARG2,...>:
+ Only displays the fields *ARG1,ARG2,...* (name, time, state, total,
+ good, bad).
+
+ * `--modifiedentry=`<entry type, entry name>:
+ Shows only hosts whose current interaction has modified entries of
+ type *entry type* and name *entry name*. If a single argument ARG1
+ is given, then *entry type*,*entry name* pairs will be read from a
+   file named ARG1.
+
+ * `--sort=`<ARG1,ARG2,...>:
+ Sorts output on ARG1,ARG2,... (name, time, state, total, good, bad).
+
+ * `--stale`:
+ Shows hosts which haven’t run in the last 24 hours.
+
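+## EXAMPLES
+
+A few illustrative invocations (the hostname and entry name below are
+hypothetical):
+
+    bcfg2-reports -b foo.example.com
+    bcfg2-reports --badentry=Package,openssh-server
+    bcfg2-reports -d --fields=name,time,state --sort=state,name
+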
+## SEE ALSO
+
+bcfg2(1), bcfg2-server(8)
diff --git a/tools/manpagegen/bcfg2-server.8.ronn b/tools/manpagegen/bcfg2-server.8.ronn
new file mode 100644
index 000000000..c306fa6a4
--- /dev/null
+++ b/tools/manpagegen/bcfg2-server.8.ronn
@@ -0,0 +1,43 @@
+bcfg2-server(8) -- Server for client configuration specifications
+=================================================================
+
+## SYNOPSIS
+
+`bcfg2-server` [-d] [-v] [-C <configfile>] [-D <pidfile>] [-E
+<encoding>] [-Q <repo path>] [-S <server url>] [-o <logfile>] [-x
+<password>] [--ssl-key=<ssl key>]
+
+## DESCRIPTION
+
+`bcfg2-server` is the daemon component of Bcfg2 which serves
+configurations to clients based on the data in its repository.
+
+## OPTIONS
+
+ * `-C` <configfile>:
+ Specify alternate bcfg2.conf location.
+
+ * `-D` <pidfile>:
+ Daemonize, placing the program pid in the specified pidfile.
+
+ * `-E` <encoding>:
+ Specify alternate encoding (default is UTF-8).
+
+ * `-Q` <repo path>:
+ Set repository path.
+
+ * `-S` <server url>:
+ Set server address.
+
+ * `-d`:
+ Run `bcfg2-server` in debug mode.
+
+ * `-v`:
+ Run `bcfg2-server` in verbose mode.
+
+ * `--ssl-key=`<ssl key>:
+ Set path to SSL key.
+
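+## EXAMPLES
+
+A typical daemonized invocation might look like this (the paths are
+illustrative):
+
+    bcfg2-server -C /etc/bcfg2.conf -D /var/run/bcfg2-server.pid
+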
+## SEE ALSO
+
+bcfg2(1), bcfg2-lint(8)
diff --git a/tools/manpagegen/bcfg2.1.ronn b/tools/manpagegen/bcfg2.1.ronn
new file mode 100644
index 000000000..8b3ea1e60
--- /dev/null
+++ b/tools/manpagegen/bcfg2.1.ronn
@@ -0,0 +1,158 @@
+bcfg2(1) -- Bcfg2 client tool
+=============================
+
+## SYNOPSIS
+
+`bcfg2` [_options_] [_..._]
+
+## DESCRIPTION
+
+`bcfg2` runs the Bcfg2 configuration process on the current host. This
+process consists of the following steps.
+
+* Fetch and execute probes
+* Upload probe results
+* Fetch the client configuration
+* Check the current client state
+* Attempt to install the desired configuration
+* Upload statistics about the Bcfg2 execution and client state
+
+## OPTIONS
+
+ * `-B`:
+ Configure everything except the given bundle(s).
+
+ * `-C` <configfile>:
+ Specify alternate bcfg2.conf location.
+
+ * `-D` [<driver1>,<driver2>]:
+ Specify a set of Bcfg2 tool drivers.
+
+ *NOTE: only drivers listed will be loaded. (e.g., if you do not
+ include POSIX, you will be unable to verify/install Path entries).*
+
+ * `-E` <encoding>:
+ Specify the encoding of Cfg files.
+
+ * `-I`:
+ Run bcfg2 in interactive mode. The user will be prompted before
+ each change.
+
+ * `-O`:
+ Omit lock check.
+
+ * `-P`:
+ Run bcfg2 in paranoid mode. Diffs will be logged for configuration
+ files marked as paranoid by the Bcfg2 server.
+
+ * `-R` <retry count>:
+ Specify the number of times that the client will attempt to retry
+ network communication.
+
+ * `-S` <https://server:port>:
+ Manually specify the server location (as opposed to using the value
+ in bcfg2.conf).
+
+ * `-Z`:
+ Do not configure independent entries.
+
+ * `-b` [_bundle1:bundle2_]:
+ Run bcfg2 against one or multiple bundles in the configuration.
+
+ * `-c` <cachefile>:
+ Cache a copy of the configuration in cachefile.
+
+ * `--ca-cert=`<ca cert>:
+   Specify the path to the SSL CA certificate.
+
+ * `-d`:
+ Run bcfg2 in debug mode.
+
+ * `-e`:
+ When in verbose mode, display extra entry information (temporary
+ until verbosity rework).
+
+ * `-f` <specification path>:
+ Configure from a file rather than querying the server.
+
+ * `-h`:
+   Print usage information.
+
+ * `-k`:
+ Run in bulletproof mode. This currently only affects behavior in
+   the debian toolset; it calls `apt-get update`, `apt-get clean`, and
+   `dpkg --configure --pending`.
+
+ * `-l` <whitelist|blacklist|none>:
+ Run the client in the server decision list mode (unless "none"
+ is specified, which can be done in order to override the decision
+ list mode specified in bcfg2.conf). This approach is needed when
+ particular changes are deemed "high risk". It gives the ability to
+ centrally specify these changes, but only install them on clients
+ when administrator supervision is available. Because collaborative
+ configuration is one of the remaining hard issues in configuration
+ management, these issues typically crop up in environments with
+ several administrators and much configuration variety. (This setting
+ will be ignored if the -f option is also specified).
+
+ * `-n`:
+ Run bcfg2 in dry-run mode. No changes will be made to the system.
+
+ * `-o` <logfile path>:
+ Writes a log to the specified path.
+
+ * `-p` <profile>:
+ Assert a profile for the current client.
+
+ * `-q`:
+ Run bcfg2 in quick mode. Package checksum verification won’t be
+ performed. This mode relaxes the constraints of correctness, and
+ thus should only be used in safe conditions.
+
+ * `-Q`:
+ Run bcfg2 in "bundle quick" mode, where only entries in a bundle are
+ verified or installed. This runs much faster than -q, but doesn’t
+ provide statistics to the server at all. In order for this option to
+ work, the -b option must also be provided. This option is incompatible
+ with -r.
+
+ * `-r` <mode>:
+ Cause bcfg2 to remove extra configuration elements it detects. Mode is
+ one of all, Services, or Packages. All removes all entries. Likewise,
+ Services and Packages remove only the extra configuration elements
+ of the respective type.
+
+ * `-s` <service mode>:
+ Set bcfg2 interaction level for services. Default behavior is to
+   modify all services affected by reconfiguration. *build* mode
+   attempts to stop all services started. *disabled* suppresses all
+   attempts to modify services.
+
+ * `--ssl-cert=`<ssl cert>:
+   Specify the path to the SSL certificate.
+
+ * `--ssl-cns=`[_CN1:CN2_]:
+ List of acceptable SSL server Common Names.
+
+ * `--ssl-key=`<ssl key>:
+   Specify the path to the SSL key.
+
+ * `-u` <user>:
+   Attempt to authenticate as *user*.
+
+ * `-x` <password>:
+   Use *password* for client communication.
+
+ * `-t` <timeout>:
+ Set the timeout (in seconds) for client communication. Default is
+ 90 seconds.
+
+ * `-v`:
+ Run bcfg2 in verbose mode.
+
+ * `-z`:
+ Only configure independent entries, ignore bundles.
+
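+## EXAMPLES
+
+Some common invocations (the bundle name and server URL are
+illustrative):
+
+    bcfg2 -v -n            # verbose dry run; no changes are made
+    bcfg2 -Q -b ntp        # verify/install only the "ntp" bundle
+    bcfg2 -I -S https://bcfg2.example.com:6789
+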
+## SEE ALSO
+
+bcfg2-server(8), bcfg2-info(8)
diff --git a/tools/manpagegen/bcfg2.conf.5.ronn b/tools/manpagegen/bcfg2.conf.5.ronn
new file mode 100644
index 000000000..544c4ccc7
--- /dev/null
+++ b/tools/manpagegen/bcfg2.conf.5.ronn
@@ -0,0 +1,539 @@
+bcfg2.conf(5) -- configuration parameters for Bcfg2
+===================================================
+
+## DESCRIPTION
+
+`bcfg2.conf` includes configuration parameters for the Bcfg2 server and
+client.
+
+## FILE FORMAT
+
+The file is INI-style and consists of sections and options. A section
+begins with the name of the section in square brackets and continues
+until the next section begins.
+
+Options are specified in the form "name=value".
+
+The file is line-based: each newline-terminated line represents either
+a comment, a section name, or an option.
+
+Any line beginning with a hash (#) is ignored, as are lines containing
+only whitespace.
+
+## SERVER OPTIONS
+
+These options are only necessary on the Bcfg2 server. They are
+specified in the `[server]` section of the configuration file.
+
+ * `repository`:
+ Specifies the path to the Bcfg2 repository containing all of the
+ configuration specifications. The repository should be created
+ using the `bcfg2-admin init` command.
+
+ * `filemonitor`:
+ The file monitor used to watch for changes in the repository. The
+ default is the best available monitor. The following values are
+ valid:
+
+ `inotify`,
+ `gamin`,
+ `fam`,
+ `pseudo`
+
+ * `ignore_files`:
+ A comma-separated list of globs that should be ignored by the file
+ monitor. Default values are:
+
+ `*~`,
+ `*#`,
+ `.#*`,
+ `*.swp`,
+ `.*.swx`,
+ `SCCS`,
+ `.svn`,
+ `4913`,
+ `.gitignore`
+
+ * `listen_all`:
+ This setting tells the server to listen on all available
+ interfaces. The default is to only listen on those interfaces
+ specified by the bcfg2 setting in the components section of
+ `bcfg2.conf`.
+
+ * `plugins`:
+ A comma-delimited list of enabled server plugins. Currently
+ available plugins are:
+
+ `Account`,
+ `Actions`,
+ `Base`,
+ `Bundler`,
+ `Bzr`,
+ `Cfg`,
+ `Cvs`,
+ `Darcs`,
+ `DBStats`,
+ `Decisions`,
+ `Deps`,
+ `Editor`,
+ `Fossil`,
+ `Git`,
+ `GroupPatterns`,
+ `Hg`,
+ `Hostbase`,
+ `Metadata`,
+ `NagiosGen`,
+ `Ohai`,
+ `Packages`,
+ `Pkgmgr`,
+ `Probes`,
+ `Properties`,
+ `Rules`,
+ `Snapshots`,
+ `SSHbase`,
+ `Svn`,
+ `Svn2`,
+ `TCheetah`,
+ `TGenshi`,
+ `Trigger`
+
+ Descriptions of each plugin can be found in their respective
+ sections below.
+
+ * `prefix`:
+ Specifies a prefix if the Bcfg2 installation isn’t placed in the
+ default location (e.g. /usr/local).
+
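+A minimal `[server]` section might look like this (the repository path
+and plugin selection are illustrative):
+
+    [server]
+    repository = /var/lib/bcfg2
+    plugins = Bundler,Cfg,Metadata,Packages,Probes,Rules,SSHbase
+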
+### Account Plugin
+
+The account plugin manages authentication data, including the following.
+
+ * `/etc/passwd`
+ * `/etc/group`
+ * `/etc/security/limits.conf`
+ * `/etc/sudoers`
+ * `/root/.ssh/authorized_keys`
+
+### Base Plugin
+
+A structure plugin that provides the ability to add lists of unrelated
+entries into client configuration entry inventories. Base works much
+like Bundler in its file format. This structure plugin is good for the
+pile of independent configs needed for most actual systems.
+
+### Bundler Plugin
+
+Bundler is used to describe groups of inter-dependent configuration
+entries, such as the combination of packages, configuration files,
+and service activations that comprise typical Unix daemons. Bundles are
+used to add groups of configuration entries to the inventory of client
+configurations, as opposed to describing particular versions of those
+entries.
+
+### Bzr Plugin
+
+The Bzr plugin allows you to track changes to your Bcfg2 repository
+using a GNU Bazaar version control backend. Currently, it enables you to
+get revision information out of your repository for reporting purposes.
+
+### Cfg Plugin
+
+The Cfg plugin provides a repository to describe configuration file
+contents for clients. In its simplest form, the Cfg repository is just a
+directory tree modeled off of the directory tree on your client
+machines.
+
+### Cvs Plugin (experimental)
+
+The Cvs plugin allows you to track changes to your Bcfg2 repository
+using a Concurrent Versions System (CVS) backend. Currently, it enables
+you to get revision information out of your repository for reporting
+purposes.
+
+### Darcs Plugin (experimental)
+
+The Darcs plugin allows you to track changes to your Bcfg2 repository
+using a Darcs version control backend. Currently, it enables you to get
+revision information out of your repository for reporting purposes.
+
+### DBStats Plugin
+
+Direct to database statistics plugin.
+
+### Decisions Plugin
+
+The Decisions plugin has support for a centralized set of per-entry
+installation decisions. This approach is needed when particular changes
+are deemed "*high risk*"; this gives the ability to centrally specify
+these changes, but only install them on clients when administrator
+supervision is available.
+
+### Deps Plugin
+
+The Deps plugin allows you to make a series of assertions like "Package
+X requires Package Y (and optionally also Package Z etc.)".
+
+### Editor Plugin
+
+The Editor plugin attempts to allow you to partially manage
+configuration for a file. Its use is not recommended and not well
+documented.
+
+### Fossil Plugin
+
+The Fossil plugin allows you to track changes to your Bcfg2 repository
+using a Fossil SCM version control backend. Currently, it enables you to
+get revision information out of your repository for reporting purposes.
+
+### Git Plugin
+
+The Git plugin allows you to track changes to your Bcfg2 repository
+using a Git version control backend. Currently, it enables you to get
+revision information out of your repository for reporting purposes.
+
+### GroupPatterns Plugin
+
+The GroupPatterns plugin is a connector that can assign clients group
+membership based on patterns in client hostnames.
+
+### Hg Plugin (experimental)
+
+The Hg plugin allows you to track changes to your Bcfg2 repository using
+a Mercurial version control backend. Currently, it enables you to get
+revision information out of your repository for reporting purposes.
+
+### Hostbase Plugin
+
+The Hostbase plugin is an IP management system built on top of Bcfg2.
+
+### Metadata Plugin
+
+The Metadata plugin is the primary method of specifying Bcfg2 server
+metadata.
+
+### NagiosGen Plugin
+
+NagiosGen is a Bcfg2 plugin that dynamically generates Nagios
+configuration files based on Bcfg2 data.
+
+### Ohai Plugin (experimental)
+
+The Ohai plugin is used to detect information about the client operating
+system. The data is reported back to the server using JSON.
+
+### Packages Plugin
+
+The Packages plugin is an alternative to Pkgmgr for specifying package
+entries for clients. Where Pkgmgr explicitly specifies package entry
+information, Packages delegates control of package version information
+to the underlying package manager, installing the latest version
+available through those channels.
+
+### Pkgmgr Plugin
+
+The Pkgmgr plugin resolves the Abstract Configuration Entity "Package"
+to a package specification that the client can use to detect, verify and
+install the specified package.
+
+### Probes Plugin
+
+The Probes plugin gives you the ability to gather information from a
+client machine before you generate its configuration. This information
+can be used with the various templating systems to generate
+configuration based on the results.
+
+### Properties Plugin
+
+The Properties plugin is a connector plugin that adds information from
+properties files into client metadata instances.
+
+### Rules Plugin
+
+The Rules plugin provides literal configuration entries that resolve the
+abstract configuration entries normally found in the Bundler and Base
+plugins. The literal entries in Rules are suitable for consumption by
+the appropriate client drivers.
+
+### Snapshots Plugin
+
+The Snapshots plugin stores various aspects of a client’s state when the
+client checks in to the server.
+
+### SSHbase Plugin
+
+The SSHbase generator plugin manages ssh host keys (both v1 and v2) for
+hosts. It also manages the ssh_known_hosts file. It can integrate host
+keys from other management domains and similarly export its keys.
+
+### Svn Plugin
+
+The Svn plugin allows you to track changes to your Bcfg2 repository
+using a Subversion backend. Currently, it enables you to get revision
+information out of your repository for reporting purposes.
+
+### Svn2 Plugin
+
+The Svn2 plugin extends on the capabilities in the Svn plugin. It
+provides Update and Commit methods which provide hooks for modifying
+subversion-backed Bcfg2 repositories.
+
+### TCheetah Plugin
+
+The TCheetah plugin allows you to use the cheetah templating system to
+create files. It also allows you to include the results of probes
+executed on the client in the created files.
+
+### TGenshi Plugin
+
+The TGenshi plugin allows you to use the Genshi templating system to
+create files. It also allows you to include the results of probes
+executed on the client in the created files.
+
+### Trigger Plugin
+
+The Trigger plugin provides a method for calling external scripts when
+clients are configured.
+
+## CLIENT OPTIONS
+
+These options only affect client functionality. They are specified in
+the `[client]` section.
+
+ * `decision`:
+ Specify the server decision list mode (whitelist or blacklist).
+   (This setting will be ignored if the client is called with the -f
+ option.)
+
+ * `drivers`:
+   Specify the tool driver set to use. This option can be used to
+ explicitly specify the client tool drivers you want to use when the
+ client is run.
+
+ * `paranoid`:
+ Run the client in paranoid mode.
+
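+For example (the driver list is illustrative):
+
+    [client]
+    drivers = POSIX,APT,DebInit,Action
+    paranoid = true
+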
+## COMMUNICATION OPTIONS
+
+Specified in the `[communication]` section. These options define
+settings used for client-server communication.
+
+ * `ca`:
+ The path to a file containing the CA certificate. This file is
+ required on the server, and optional on clients. However, if the
+ cacert is not present on clients, the server cannot be verified.
+
+ * `certificate`:
+ The path to a file containing a PEM formatted certificate which
+ signs the key with the ca certificate. This setting is required on
+ the server in all cases, and required on clients if using client
+ certificates.
+
+ * `key`:
+ Specifies the path to a file containing the SSL Key. This is
+ required on the server in all cases, and required on clients if
+ using client certificates.
+
+ * `password`:
+ Required on both the server and clients. On the server, sets the
+ password clients need to use to communicate. On a client, sets the
+ password to use to connect to the server.
+
+ * `protocol`:
+ Communication protocol to use. Defaults to xmlrpc/ssl.
+
+ * `retries`:
+ A client-only option. Number of times to retry network
+ communication.
+
+ * `serverCommonNames`:
+ A client-only option. A colon-separated list of Common Names the
+ client will accept in the SSL certificate presented by the server.
+
+ * `user`:
+ A client-only option. The UUID of the client.
+
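+A typical client-side `[communication]` section might look like this
+(the password, CA path, and UUID are illustrative):
+
+    [communication]
+    protocol = xmlrpc/ssl
+    password = s3cr3t
+    ca = /etc/bcfg2.crt
+    user = 9c95a1d5-4e31-4bff-a5c3-72b1f4a9b6b5
+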
+## COMPONENT OPTIONS
+
+Specified in the `[components]` section.
+
+ * `bcfg2`:
+ URL of the server. On the server this specifies which interface and
+ port the server listens on. On the client, this specifies where the
+ client will attempt to contact the server.
+
+ e.g. `bcfg2 = https://10.3.1.6:6789`
+
+ * `encoding`:
+ Text encoding of configuration files. Defaults to UTF-8.
+
+## LOGGING OPTIONS
+
+Specified in the `[logging]` section. These options control the server
+logging functionality.
+
+ * `path`:
+ Server log file path.
+
+## MDATA OPTIONS
+
+These options affect the default metadata settings for Paths with
+type='file'.
+
+ * `owner`:
+ Global owner for Paths (defaults to root)
+
+ * `group`:
+ Global group for Paths (defaults to root)
+
+ * `perms`:
+ Global permissions for Paths (defaults to 644)
+
+ * `paranoid`:
+ Global paranoid settings for Paths (defaults to false)
+
+ * `sensitive`:
+ Global sensitive settings for Paths (defaults to false)
+
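+For example (in `bcfg2.conf` this section is typically named `[mdata]`;
+the values are illustrative):
+
+    [mdata]
+    owner = root
+    group = root
+    perms = 644
+    paranoid = true
+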
+## PACKAGES OPTIONS
+
+The following options are specified in the `[packages]` section of the
+configuration file.
+
+ * `resolver`:
+ Enable dependency resolution. Default is 1 (true).
+
+ * `metadata`:
+ Enable metadata processing. Default is 1 (true). If metadata is
+ disabled, it’s implied that resolver is also disabled.
+
+ * `yum_config`:
+ The path at which to generate Yum configs. No default.
+
+ * `apt_config`:
+ The path at which to generate APT configs. No default.
+
+ * `gpg_keypath`:
+ The path on the client where RPM GPG keys will be copied before they
+ are imported on the client. Default is `/etc/pki/rpm-gpg`.
+
+ * `version`:
+ Set the version attribute used when binding Packages. Default is
+ auto.
+
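+An illustrative `[packages]` section (the Yum config path is
+hypothetical):
+
+    [packages]
+    resolver = 1
+    metadata = 1
+    yum_config = /etc/yum.repos.d/bcfg2.repo
+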
+The following options are specified in the `[packages:yum]` section of
+the configuration file.
+
+ * `use_yum_libraries`:
+ By default, Bcfg2 uses an internal implementation of Yum’s
+ dependency resolution and other routines so that the Bcfg2 server
+ can be run on a host that does not support Yum itself. If you run
+ the Bcfg2 server on a machine that does have Yum libraries, however,
+ you can enable use of those native libraries in Bcfg2 by setting
+ this to 1.
+
+ * `helper`:
+ Path to bcfg2-yum-helper. By default, Bcfg2 looks first in $PATH and
+ then in `/usr/sbin/bcfg2-yum-helper` for the helper.
+
+ All other options in the `[packages:yum]` section will be passed along
+ verbatim to the Yum configuration if you are using the native Yum
+ library support.
+
+The following options are specified in the `[packages:pulp]` section of
+the configuration file.
+
+ * `username`:
+ The username of a Pulp user that will be used to register new
+ clients and bind them to repositories.
+
+ * `password`:
+ The password of a Pulp user that will be used to register new
+ clients and bind them to repositories.
+
+## PARANOID OPTIONS
+
+These options allow for finer-grained control of the paranoid mode on
+the Bcfg2 client. They are specified in the `[paranoid]` section of the
+configuration file.
+
+ * `path`:
+ Custom path for backups created in paranoid mode. The default is in
+ `/var/cache/bcfg2`.
+
+ * `max_copies`:
+ Specify a maximum number of copies for the server to keep when
+ running in paranoid mode. Only the most recent versions of these
+ copies will be kept.
+
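+For example (the copy count is illustrative):
+
+    [paranoid]
+    path = /var/cache/bcfg2
+    max_copies = 5
+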
+## SNAPSHOTS OPTIONS
+
+Specified in the `[snapshots]` section. These options control the server
+snapshots functionality.
+
+ * `driver`:
+    The database driver to use (e.g., `sqlite`).
+
+ * `database`:
+ The name of the database to use for statistics data.
+
+    e.g. `$REPOSITORY_DIR/etc/bcfg2.sqlite`
+
+## SSLCA OPTIONS
+
+These options are necessary to configure the SSLCA plugin and can be
+found in the `[sslca_default]` section of the configuration file.
+
+ * `config`:
+ Specifies the location of the openssl configuration file for your
+ CA.
+
+ * `passphrase`:
+ Specifies the passphrase for the CA’s private key (if necessary).
+ If no passphrase exists, it is assumed that the private key is
+ stored unencrypted.
+
+ * `chaincert`:
+ Specifies the location of your ssl chaining certificate. This is
+    used when pre-existing certificate hostfiles are found, so that they
+ can be validated and only regenerated if they no longer meet the
+    specification. If you’re using a self-signing CA, this would be the
+ CA cert that you generated.
+
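+An illustrative `[sslca_default]` section (the paths and passphrase are
+hypothetical):
+
+    [sslca_default]
+    config = /etc/pki/CA/openssl.cnf
+    passphrase = s3cr3t
+    chaincert = /etc/pki/CA/chain.crt
+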
+## STATISTICS OPTIONS
+
+Server-only, specified in the `[statistics]` section. These options
+control the statistics collection functionality of the server.
+
+ * `database_engine`:
+ The database engine used by the statistics module. One of the
+ following:
+
+ `postgresql`,
+ `mysql`,
+ `sqlite3`,
+ `ado_mssql`
+
+ * `database_name`:
+ The name of the database to use for statistics data. If
+    ‘database_engine’ is set to ‘sqlite3’, this is the path to the SQLite
+ file and defaults to `$REPOSITORY_DIR/etc/brpt.sqlite`.
+
+ * `database_user`:
+ User for database connections. Not used for sqlite3.
+
+ * `database_password`:
+ Password for database connections. Not used for sqlite3.
+
+ * `database_host`:
+ Host for database connections. Not used for sqlite3.
+
+ * `database_port`:
+ Port for database connections. Not used for sqlite3.
+
+ * `time_zone`:
+ Specify a time zone other than that used on the system. (Note that
+ this will cause the Bcfg2 server to log messages in this time zone
+ as well).
+
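+An illustrative `[statistics]` section using SQLite (the path is
+hypothetical):
+
+    [statistics]
+    database_engine = sqlite3
+    database_name = /var/lib/bcfg2/etc/brpt.sqlite
+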
+## SEE ALSO
+
+bcfg2(1), bcfg2-server(8)
diff --git a/tools/manpagegen/generate-manpages.bash b/tools/manpagegen/generate-manpages.bash
new file mode 100644
index 000000000..62006c953
--- /dev/null
+++ b/tools/manpagegen/generate-manpages.bash
@@ -0,0 +1,17 @@
+#!/bin/bash
+
+# This makes building our manpages easier and more consistent. More
+# information about the tool used to do this can be found at:
+#
+# https://github.com/rtomayko/ronn
+
+if [ ! -d man -o ! -d tools ]
+then
+ echo "Must be in the top-level bcfg2 source directory"
+ exit 1
+fi
+
+for f in $(ls man)
+do
+ ronn -r --pipe tools/manpagegen/${f}.ronn | grep -iv ronn > man/${f}
+done
diff --git a/tools/selinux_baseline.py b/tools/selinux_baseline.py
new file mode 100755
index 000000000..b6997bb29
--- /dev/null
+++ b/tools/selinux_baseline.py
@@ -0,0 +1,51 @@
+#!/usr/bin/env python
+
+import sys
+import logging
+import lxml.etree
+
+import Bcfg2.Logger
+import Bcfg2.Options
+from Bcfg2.Client.Tools.SELinux import *
+
+LOGGER = None
+
+def get_setup():
+ global LOGGER
+ optinfo = Bcfg2.Options.CLIENT_COMMON_OPTIONS
+ setup = Bcfg2.Options.OptionParser(optinfo)
+ setup.parse(sys.argv[1:])
+
+ if setup['args']:
+ print("selinux_baseline.py takes no arguments, only options")
+ print(setup.buildHelpMessage())
+ raise SystemExit(1)
+ level = 30
+ if setup['verbose']:
+ level = 20
+ if setup['debug']:
+ level = 0
+ Bcfg2.Logger.setup_logging('selinux_base',
+ to_syslog=False,
+ level=level,
+ to_file=setup['logging'])
+ LOGGER = logging.getLogger('bcfg2')
+ return setup
+
+def main():
+ setup = get_setup()
+ config = lxml.etree.Element("Configuration")
+ selinux = SELinux(LOGGER, setup, config)
+
+ baseline = lxml.etree.Element("Bundle", name="selinux_baseline")
+ for etype, handler in selinux.handlers.items():
+ baseline.append(lxml.etree.Comment("%s entries" % etype))
+ extra = handler.FindExtra()
+ for entry in extra:
+ entry.tag = "BoundSELinux"
+ baseline.extend(extra)
+
+ print(lxml.etree.tostring(baseline, pretty_print=True))
+
+if __name__ == "__main__":
+ sys.exit(main())
diff --git a/tools/upgrade/1.3/migrate_configs.py b/tools/upgrade/1.3/migrate_configs.py
index c6e6cd2c3..dd8e5708b 100755
--- a/tools/upgrade/1.3/migrate_configs.py
+++ b/tools/upgrade/1.3/migrate_configs.py
@@ -35,17 +35,47 @@ def main():
setup = Bcfg2.Options.OptionParser(opts)
setup.parse(sys.argv[1:])
- copy_section(os.path.join(setup['repo'], 'Rules', 'rules.conf'), setup.cfp,
- "rules")
+ # files that you should remove manually
+ remove = []
+
+ # move rules config out of rules.conf and into bcfg2.conf
+ rules_conf = os.path.join(setup['repo'], 'Rules', 'rules.conf')
+ if os.path.exists(rules_conf):
+ remove.append(rules_conf)
+ copy_section(rules_conf, setup.cfp, "rules")
+
+ # move packages config out of packages.conf and into bcfg2.conf
pkgs_conf = os.path.join(setup['repo'], 'Packages', 'packages.conf')
- copy_section(pkgs_conf, setup.cfp, "global", newsection="packages")
- for section in ["apt", "yum", "pulp"]:
- copy_section(pkgs_conf, setup.cfp, section,
- newsection="packages:" + section)
+ if os.path.exists(pkgs_conf):
+ remove.append(pkgs_conf)
+ copy_section(pkgs_conf, setup.cfp, "global", newsection="packages")
+ for section in ["apt", "yum", "pulp"]:
+ copy_section(pkgs_conf, setup.cfp, section,
+ newsection="packages:" + section)
+
+ # move reports database config into [database] section
+ if setup.cfp.has_section("statistics"):
+ if not setup.cfp.has_section("database"):
+ setup.cfp.add_section("database")
+ for opt in setup.cfp.options("statistics"):
+ if opt.startswith("database_"):
+ newopt = opt[9:]
+ if setup.cfp.has_option("database", newopt):
+ print("%s in [database] already populated, skipping" %
+ newopt)
+ else:
+ setup.cfp.set("database", newopt,
+ setup.cfp.get("statistics", opt))
+ setup.cfp.remove_option("statistics", opt)
print("Writing %s" % setup['configfile'])
try:
setup.cfp.write(open(setup['configfile'], "w"))
+ if len(remove):
+ print("Settings were migrated, but you must remove these files "
+ "manually:")
+ for path in remove:
+ print(" %s" % path)
except IOError:
err = sys.exc_info()[1]
print("Could not write %s: %s" % (setup['configfile'], err))
diff --git a/tools/upgrade/1.3/migrate_info.py b/tools/upgrade/1.3/migrate_info.py
new file mode 100755
index 000000000..f6c095df6
--- /dev/null
+++ b/tools/upgrade/1.3/migrate_info.py
@@ -0,0 +1,45 @@
+#!/usr/bin/env python
+
+import os
+import sys
+import lxml.etree
+import Bcfg2.Options
+from Bcfg2.Server.Plugin import info_regex
+
+def convert(info_file):
+ info_xml = os.path.join(os.path.dirname(info_file), "info.xml")
+ if os.path.exists(info_xml):
+ print("%s already exists, not converting %s" % (info_xml, info_file))
+ return
+ print("Converting %s to %s" % (info_file, info_xml))
+ fileinfo = lxml.etree.Element("FileInfo")
+ info = lxml.etree.SubElement(fileinfo, "Info")
+ for line in open(info_file).readlines():
+ match = info_regex.match(line)
+ if match:
+ mgd = match.groupdict()
+ for key, value in list(mgd.items()):
+ if value:
+ info.set(key, value)
+
+ open(info_xml, "w").write(lxml.etree.tostring(fileinfo, pretty_print=True))
+ os.unlink(info_file)
+
+def main():
+ opts = dict(repo=Bcfg2.Options.SERVER_REPOSITORY,
+ configfile=Bcfg2.Options.CFILE,
+ plugins=Bcfg2.Options.SERVER_PLUGINS)
+ setup = Bcfg2.Options.OptionParser(opts)
+ setup.parse(sys.argv[1:])
+
+ for plugin in setup['plugins']:
+ if plugin not in ['SSLCA', 'Cfg', 'TGenshi', 'TCheetah', 'SSHbase']:
+ continue
+ for root, dirs, files in os.walk(os.path.join(setup['repo'], plugin)):
+ for fname in files:
+ if fname in [":info", "info"]:
+ convert(os.path.join(root, fname))
+
+
+if __name__ == '__main__':
+ sys.exit(main())
diff --git a/tools/upgrade/1.3/service_modes.py b/tools/upgrade/1.3/service_modes.py
new file mode 100755
index 000000000..0c458c3a9
--- /dev/null
+++ b/tools/upgrade/1.3/service_modes.py
@@ -0,0 +1,50 @@
+#!/usr/bin/env python
+
+import os
+import sys
+import glob
+import lxml.etree
+import Bcfg2.Options
+
+def main():
+ opts = dict(repo=Bcfg2.Options.SERVER_REPOSITORY)
+ setup = Bcfg2.Options.OptionParser(opts)
+ setup.parse(sys.argv[1:])
+
+ files = []
+ for plugin in ['Bundler', 'Rules', 'Default']:
+ files.extend(glob.glob(os.path.join(setup['repo'], plugin, "*")))
+
+ for bfile in files:
+ bdata = lxml.etree.parse(bfile)
+ changed = False
+ for svc in bdata.xpath("//Service|//BoundService"):
+ if "mode" not in svc.attrib:
+ continue
+ mode = svc.get("mode")
+ del svc.attrib["mode"]
+ if mode not in ["default", "supervised", "interactive_only",
+ "manual"]:
+ print("Unrecognized mode on Service:%s: %s. Assuming default" %
+ (svc.get("name"), mode))
+ mode = "default"
+ if mode == "default" or mode == "supervised":
+ svc.set("restart", "true")
+ svc.set("install", "true")
+ elif mode == "interactive_only":
+ svc.set("restart", "interactive")
+ svc.set("install", "true")
+ elif mode == "manual":
+ svc.set("restart", "false")
+ svc.set("install", "false")
+ changed = True
+ if changed:
+ print("Writing %s" % bfile)
+ try:
+ open(bfile, "w").write(lxml.etree.tostring(bdata))
+ except IOError:
+ err = sys.exc_info()[1]
+ print("Could not write %s: %s" % (bfile, err))
+
+if __name__ == '__main__':
+ sys.exit(main())
diff --git a/tools/yum-listpkgs-xml.py b/tools/yum-listpkgs-xml.py
index 2df5abbcd..a052e75af 100644..100755
--- a/tools/yum-listpkgs-xml.py
+++ b/tools/yum-listpkgs-xml.py
@@ -19,7 +19,6 @@ def myListPkgs(self, lst, description, outputType):
thingslisted = 0
if len(lst) > 0:
thingslisted = 1
- #print '%s' % description
from yum.misc import sortPkgObj
lst.sort(sortPkgObj)
for pkg in lst: