Diffstat (limited to 'src')
-rw-r--r-- src/lib/Bcfg2/Client/Client.py | 33
-rw-r--r-- src/lib/Bcfg2/Client/Frame.py | 71
-rw-r--r-- src/lib/Bcfg2/Client/Proxy.py (renamed from src/lib/Bcfg2/Proxy.py) | 4
-rw-r--r-- src/lib/Bcfg2/Client/Tools/APK.py | 5
-rw-r--r-- src/lib/Bcfg2/Client/Tools/APT.py | 14
-rw-r--r-- src/lib/Bcfg2/Client/Tools/Action.py | 31
-rw-r--r-- src/lib/Bcfg2/Client/Tools/Blast.py | 4
-rw-r--r-- src/lib/Bcfg2/Client/Tools/FreeBSDInit.py | 4
-rw-r--r-- src/lib/Bcfg2/Client/Tools/IPS.py | 5
-rw-r--r-- src/lib/Bcfg2/Client/Tools/MacPorts.py | 5
-rw-r--r-- src/lib/Bcfg2/Client/Tools/OpenCSW.py | 4
-rw-r--r-- src/lib/Bcfg2/Client/Tools/POSIX/__init__.py | 10
-rw-r--r-- src/lib/Bcfg2/Client/Tools/POSIXUsers.py | 14
-rw-r--r-- src/lib/Bcfg2/Client/Tools/Pacman.py | 7
-rw-r--r-- src/lib/Bcfg2/Client/Tools/Portage.py | 5
-rw-r--r-- src/lib/Bcfg2/Client/Tools/RPM.py | 1109
-rw-r--r-- src/lib/Bcfg2/Client/Tools/RPMng.py | 9
-rw-r--r-- src/lib/Bcfg2/Client/Tools/SELinux.py | 38
-rw-r--r-- src/lib/Bcfg2/Client/Tools/SYSV.py | 4
-rw-r--r-- src/lib/Bcfg2/Client/Tools/YUM.py | 44
-rw-r--r-- src/lib/Bcfg2/Client/Tools/YUM24.py | 399
-rw-r--r-- src/lib/Bcfg2/Client/Tools/YUMng.py | 9
-rw-r--r-- src/lib/Bcfg2/Client/Tools/__init__.py | 106
-rw-r--r-- src/lib/Bcfg2/Client/Tools/launchd.py | 10
-rwxr-xr-x src/lib/Bcfg2/Client/Tools/rpmtools.py | 1091
-rw-r--r-- src/lib/Bcfg2/Options.py | 197
-rw-r--r-- src/lib/Bcfg2/Server/Admin/Compare.py | 13
-rw-r--r-- src/lib/Bcfg2/Server/Admin/Init.py | 62
-rw-r--r-- src/lib/Bcfg2/Server/Admin/Perf.py | 37
-rw-r--r-- src/lib/Bcfg2/Server/Admin/Pull.py | 4
-rw-r--r-- src/lib/Bcfg2/Server/Admin/Reports.py | 4
-rw-r--r-- src/lib/Bcfg2/Server/Admin/Snapshots.py | 163
-rw-r--r-- src/lib/Bcfg2/Server/Admin/Syncdb.py | 10
-rw-r--r-- src/lib/Bcfg2/Server/Admin/Viz.py | 60
-rw-r--r-- src/lib/Bcfg2/Server/Admin/Xcmd.py | 48
-rw-r--r-- src/lib/Bcfg2/Server/Admin/__init__.py | 24
-rw-r--r-- src/lib/Bcfg2/Server/BuiltinCore.py | 17
-rw-r--r-- src/lib/Bcfg2/Server/Cache.py (renamed from src/lib/Bcfg2/Cache.py) | 0
-rw-r--r-- src/lib/Bcfg2/Server/CherryPyCore.py | 21
-rw-r--r-- src/lib/Bcfg2/Server/Core.py | 198
-rwxr-xr-x src/lib/Bcfg2/Server/Encryption.py (renamed from src/lib/Bcfg2/Encryption.py) | 68
-rw-r--r-- src/lib/Bcfg2/Server/FileMonitor/Fam.py | 105
-rw-r--r-- src/lib/Bcfg2/Server/FileMonitor/__init__.py | 45
-rw-r--r-- src/lib/Bcfg2/Server/Hostbase/.gitignore | 3
-rw-r--r-- src/lib/Bcfg2/Server/Hostbase/__init__.py | 0
-rw-r--r-- src/lib/Bcfg2/Server/Hostbase/backends.py | 63
-rw-r--r-- src/lib/Bcfg2/Server/Hostbase/hostbase/__init__.py | 0
-rw-r--r-- src/lib/Bcfg2/Server/Hostbase/hostbase/admin.py | 15
-rw-r--r-- src/lib/Bcfg2/Server/Hostbase/hostbase/models.py | 210
-rw-r--r-- src/lib/Bcfg2/Server/Hostbase/hostbase/sql/zone.sql | 2
-rw-r--r-- src/lib/Bcfg2/Server/Hostbase/hostbase/urls.py | 68
-rw-r--r-- src/lib/Bcfg2/Server/Hostbase/hostbase/views.py | 970
-rw-r--r-- src/lib/Bcfg2/Server/Hostbase/hostbase/webtemplates/base.html | 34
-rw-r--r-- src/lib/Bcfg2/Server/Hostbase/hostbase/webtemplates/confirm.html | 117
-rw-r--r-- src/lib/Bcfg2/Server/Hostbase/hostbase/webtemplates/copy.html | 122
-rw-r--r-- src/lib/Bcfg2/Server/Hostbase/hostbase/webtemplates/dns.html | 40
-rw-r--r-- src/lib/Bcfg2/Server/Hostbase/hostbase/webtemplates/dnsedit.html | 98
-rw-r--r-- src/lib/Bcfg2/Server/Hostbase/hostbase/webtemplates/edit.html | 191
-rw-r--r-- src/lib/Bcfg2/Server/Hostbase/hostbase/webtemplates/errors.html | 31
-rw-r--r-- src/lib/Bcfg2/Server/Hostbase/hostbase/webtemplates/host.html | 80
-rw-r--r-- src/lib/Bcfg2/Server/Hostbase/hostbase/webtemplates/hostbase/host_confirm_delete.html | 89
-rw-r--r-- src/lib/Bcfg2/Server/Hostbase/hostbase/webtemplates/hostbase/log_detail.html | 23
-rw-r--r-- src/lib/Bcfg2/Server/Hostbase/hostbase/webtemplates/index.html | 16
-rw-r--r-- src/lib/Bcfg2/Server/Hostbase/hostbase/webtemplates/login.html | 37
-rw-r--r-- src/lib/Bcfg2/Server/Hostbase/hostbase/webtemplates/logout.html | 13
-rw-r--r-- src/lib/Bcfg2/Server/Hostbase/hostbase/webtemplates/logout.tmpl | 6
-rw-r--r-- src/lib/Bcfg2/Server/Hostbase/hostbase/webtemplates/logviewer.html | 27
-rw-r--r-- src/lib/Bcfg2/Server/Hostbase/hostbase/webtemplates/navbar.tmpl | 5
-rw-r--r-- src/lib/Bcfg2/Server/Hostbase/hostbase/webtemplates/new.html | 102
-rw-r--r-- src/lib/Bcfg2/Server/Hostbase/hostbase/webtemplates/remove.html | 89
-rw-r--r-- src/lib/Bcfg2/Server/Hostbase/hostbase/webtemplates/results.html | 45
-rw-r--r-- src/lib/Bcfg2/Server/Hostbase/hostbase/webtemplates/search.html | 57
-rw-r--r-- src/lib/Bcfg2/Server/Hostbase/hostbase/webtemplates/zoneedit.html | 81
-rw-r--r-- src/lib/Bcfg2/Server/Hostbase/hostbase/webtemplates/zonenew.html | 43
-rw-r--r-- src/lib/Bcfg2/Server/Hostbase/hostbase/webtemplates/zones.html | 37
-rw-r--r-- src/lib/Bcfg2/Server/Hostbase/hostbase/webtemplates/zoneview.html | 71
-rw-r--r-- src/lib/Bcfg2/Server/Hostbase/ldapauth.py | 179
-rwxr-xr-x src/lib/Bcfg2/Server/Hostbase/manage.py | 11
-rw-r--r-- src/lib/Bcfg2/Server/Hostbase/media/base.css | 5
-rw-r--r-- src/lib/Bcfg2/Server/Hostbase/media/boxypastel.css | 179
-rw-r--r-- src/lib/Bcfg2/Server/Hostbase/media/global.css | 8
-rw-r--r-- src/lib/Bcfg2/Server/Hostbase/media/layout.css | 62
-rw-r--r-- src/lib/Bcfg2/Server/Hostbase/nisauth.py | 40
-rw-r--r-- src/lib/Bcfg2/Server/Hostbase/regex.py | 6
-rw-r--r-- src/lib/Bcfg2/Server/Hostbase/settings.py | 143
-rw-r--r-- src/lib/Bcfg2/Server/Hostbase/templates/batchadd.tmpl | 29
-rw-r--r-- src/lib/Bcfg2/Server/Hostbase/templates/dhcpd.conf.head | 5
-rw-r--r-- src/lib/Bcfg2/Server/Hostbase/templates/dhcpd.tmpl | 17
-rw-r--r-- src/lib/Bcfg2/Server/Hostbase/templates/hosts.tmpl | 26
-rw-r--r-- src/lib/Bcfg2/Server/Hostbase/templates/hostsappend.tmpl | 5
-rw-r--r-- src/lib/Bcfg2/Server/Hostbase/templates/named.tmpl | 69
-rw-r--r-- src/lib/Bcfg2/Server/Hostbase/templates/namedviews.tmpl | 92
-rw-r--r-- src/lib/Bcfg2/Server/Hostbase/templates/reverseappend.tmpl | 4
-rw-r--r-- src/lib/Bcfg2/Server/Hostbase/templates/reversesoa.tmpl | 13
-rw-r--r-- src/lib/Bcfg2/Server/Hostbase/templates/zone.tmpl | 18
-rw-r--r-- src/lib/Bcfg2/Server/Hostbase/urls.py | 27
-rwxr-xr-x src/lib/Bcfg2/Server/Lint/Genshi.py | 19
-rw-r--r-- src/lib/Bcfg2/Server/Lint/GroupNames.py | 9
-rw-r--r-- src/lib/Bcfg2/Server/Lint/InfoXML.py | 11
-rw-r--r-- src/lib/Bcfg2/Server/Lint/RequiredAttrs.py | 20
-rw-r--r-- src/lib/Bcfg2/Server/Lint/Validate.py | 22
-rw-r--r-- src/lib/Bcfg2/Server/Plugin/helpers.py | 649
-rw-r--r-- src/lib/Bcfg2/Server/Plugin/interfaces.py | 32
-rw-r--r-- src/lib/Bcfg2/Server/Plugins/ACL.py | 145
-rw-r--r-- src/lib/Bcfg2/Server/Plugins/Account.py | 102
-rw-r--r-- src/lib/Bcfg2/Server/Plugins/Base.py | 33
-rw-r--r-- src/lib/Bcfg2/Server/Plugins/Bundler.py | 237
-rw-r--r-- src/lib/Bcfg2/Server/Plugins/Cfg/CfgAuthorizedKeysGenerator.py | 12
-rw-r--r-- src/lib/Bcfg2/Server/Plugins/Cfg/CfgCatFilter.py | 28
-rw-r--r-- src/lib/Bcfg2/Server/Plugins/Cfg/CfgCheetahGenerator.py | 4
-rw-r--r-- src/lib/Bcfg2/Server/Plugins/Cfg/CfgDiffFilter.py | 35
-rw-r--r-- src/lib/Bcfg2/Server/Plugins/Cfg/CfgEncryptedGenerator.py | 8
-rw-r--r-- src/lib/Bcfg2/Server/Plugins/Cfg/CfgEncryptedGenshiGenerator.py | 16
-rw-r--r-- src/lib/Bcfg2/Server/Plugins/Cfg/CfgExternalCommandVerifier.py | 22
-rw-r--r-- src/lib/Bcfg2/Server/Plugins/Cfg/CfgGenshiGenerator.py | 104
-rw-r--r-- src/lib/Bcfg2/Server/Plugins/Cfg/CfgInfoXML.py | 16
-rw-r--r-- src/lib/Bcfg2/Server/Plugins/Cfg/CfgLegacyInfo.py | 46
-rw-r--r-- src/lib/Bcfg2/Server/Plugins/Cfg/CfgPrivateKeyCreator.py | 91
-rw-r--r-- src/lib/Bcfg2/Server/Plugins/Cfg/CfgPublicKeyCreator.py | 3
-rw-r--r-- src/lib/Bcfg2/Server/Plugins/Cfg/__init__.py | 95
-rw-r--r-- src/lib/Bcfg2/Server/Plugins/Cvs.py | 17
-rw-r--r-- src/lib/Bcfg2/Server/Plugins/Darcs.py | 20
-rw-r--r-- src/lib/Bcfg2/Server/Plugins/Decisions.py | 50
-rw-r--r-- src/lib/Bcfg2/Server/Plugins/Deps.py | 86
-rw-r--r-- src/lib/Bcfg2/Server/Plugins/Editor.py | 80
-rw-r--r-- src/lib/Bcfg2/Server/Plugins/FileProbes.py | 4
-rw-r--r-- src/lib/Bcfg2/Server/Plugins/Fossil.py | 24
-rw-r--r-- src/lib/Bcfg2/Server/Plugins/Git.py | 13
-rw-r--r-- src/lib/Bcfg2/Server/Plugins/GroupLogic.py | 21
-rw-r--r-- src/lib/Bcfg2/Server/Plugins/GroupPatterns.py | 6
-rw-r--r-- src/lib/Bcfg2/Server/Plugins/Guppy.py | 3
-rw-r--r-- src/lib/Bcfg2/Server/Plugins/Hostbase.py | 599
-rw-r--r-- src/lib/Bcfg2/Server/Plugins/Ldap.py | 6
-rw-r--r-- src/lib/Bcfg2/Server/Plugins/Metadata.py | 8
-rw-r--r-- src/lib/Bcfg2/Server/Plugins/NagiosGen.py | 23
-rw-r--r-- src/lib/Bcfg2/Server/Plugins/Ohai.py | 2
-rw-r--r-- src/lib/Bcfg2/Server/Plugins/Packages/Apt.py | 9
-rw-r--r-- src/lib/Bcfg2/Server/Plugins/Packages/Collection.py | 47
-rw-r--r-- src/lib/Bcfg2/Server/Plugins/Packages/Pac.py | 9
-rw-r--r-- src/lib/Bcfg2/Server/Plugins/Packages/PackagesSources.py | 19
-rw-r--r-- src/lib/Bcfg2/Server/Plugins/Packages/Source.py | 52
-rw-r--r-- src/lib/Bcfg2/Server/Plugins/Packages/Yum.py | 99
-rw-r--r-- src/lib/Bcfg2/Server/Plugins/Packages/__init__.py | 35
-rw-r--r-- src/lib/Bcfg2/Server/Plugins/Pkgmgr.py | 223
-rw-r--r-- src/lib/Bcfg2/Server/Plugins/Probes.py | 14
-rw-r--r-- src/lib/Bcfg2/Server/Plugins/Properties.py | 86
-rw-r--r-- src/lib/Bcfg2/Server/Plugins/PuppetENC.py | 32
-rw-r--r-- src/lib/Bcfg2/Server/Plugins/Rules.py | 29
-rw-r--r-- src/lib/Bcfg2/Server/Plugins/SEModules.py | 3
-rw-r--r-- src/lib/Bcfg2/Server/Plugins/SSHbase.py | 44
-rw-r--r-- src/lib/Bcfg2/Server/Plugins/SSLCA.py | 65
-rw-r--r-- src/lib/Bcfg2/Server/Plugins/Snapshots.py | 129
-rw-r--r-- src/lib/Bcfg2/Server/Plugins/Statistics.py | 160
-rw-r--r-- src/lib/Bcfg2/Server/Plugins/Svn.py | 26
-rw-r--r-- src/lib/Bcfg2/Server/Plugins/TCheetah.py | 79
-rw-r--r-- src/lib/Bcfg2/Server/Plugins/TGenshi.py | 139
-rw-r--r-- src/lib/Bcfg2/Server/Plugins/TemplateHelper.py | 5
-rw-r--r-- src/lib/Bcfg2/Server/Plugins/Trigger.py | 24
-rw-r--r-- src/lib/Bcfg2/Server/Plugins/__init__.py | 4
-rw-r--r-- src/lib/Bcfg2/Server/SSLServer.py (renamed from src/lib/Bcfg2/SSLServer.py) | 23
-rw-r--r-- src/lib/Bcfg2/Server/Snapshots/__init__.py | 31
-rw-r--r-- src/lib/Bcfg2/Server/Snapshots/model.py | 323
-rw-r--r-- src/lib/Bcfg2/Server/Statistics.py (renamed from src/lib/Bcfg2/Statistics.py) | 37
-rw-r--r-- src/lib/Bcfg2/Server/__init__.py | 3
-rw-r--r-- src/lib/Bcfg2/Server/models.py | 24
-rw-r--r-- src/lib/Bcfg2/Utils.py | 24
-rw-r--r-- src/lib/Bcfg2/settings.py | 74
-rwxr-xr-x src/sbin/bcfg2 | 7
-rwxr-xr-x src/sbin/bcfg2-admin | 4
-rwxr-xr-x src/sbin/bcfg2-build-reports | 306
-rwxr-xr-x src/sbin/bcfg2-crypt | 56
-rwxr-xr-x src/sbin/bcfg2-info | 13
-rwxr-xr-x src/sbin/bcfg2-lint | 40
-rwxr-xr-x src/sbin/bcfg2-report-collector | 4
-rwxr-xr-x src/sbin/bcfg2-server | 2
-rwxr-xr-x src/sbin/bcfg2-test | 2
176 files changed, 3120 insertions, 9740 deletions
diff --git a/src/lib/Bcfg2/Client/Client.py b/src/lib/Bcfg2/Client/Client.py
index 1676ee717..994ce7c84 100644
--- a/src/lib/Bcfg2/Client/Client.py
+++ b/src/lib/Bcfg2/Client/Client.py
@@ -8,10 +8,10 @@ import fcntl
import socket
import logging
import tempfile
-import Bcfg2.Proxy
import Bcfg2.Logger
import Bcfg2.Options
import Bcfg2.Client.XML
+import Bcfg2.Client.Proxy
import Bcfg2.Client.Frame
import Bcfg2.Client.Tools
from Bcfg2.Utils import locked, Executor
@@ -22,12 +22,12 @@ from Bcfg2.version import __version__
class Client(object):
""" The main Bcfg2 client class """
- def __init__(self, setup):
+ def __init__(self):
self.toolset = None
self.tools = None
self.config = None
self._proxy = None
- self.setup = setup
+ self.setup = Bcfg2.Options.get_option_parser()
if self.setup['debug']:
level = logging.DEBUG
@@ -53,7 +53,7 @@ class Client(object):
raise SystemExit(1)
if 'drivers' in self.setup and self.setup['drivers'] == 'help':
self.logger.info("The following drivers are available:")
- self.logger.info(Bcfg2.Client.Tools.drivers)
+ self.logger.info(Bcfg2.Client.Tools.__all__)
raise SystemExit(0)
if self.setup['remove'] and 'services' in self.setup['remove'].lower():
self.logger.error("Service removal is nonsensical; "
@@ -129,7 +129,7 @@ class Client(object):
def proxy(self):
""" get an XML-RPC proxy to the server """
if self._proxy is None:
- self._proxy = Bcfg2.Proxy.ComponentProxy(
+ self._proxy = Bcfg2.Client.Proxy.ComponentProxy(
self.setup['server'],
self.setup['user'],
self.setup['password'],
@@ -149,8 +149,8 @@ class Client(object):
try:
probes = Bcfg2.Client.XML.XML(str(self.proxy.GetProbes()))
- except (Bcfg2.Proxy.ProxyError,
- Bcfg2.Proxy.CertificateError,
+ except (Bcfg2.Client.Proxy.ProxyError,
+ Bcfg2.Client.Proxy.CertificateError,
socket.gaierror,
socket.error):
err = sys.exc_info()[1]
@@ -174,7 +174,7 @@ class Client(object):
Bcfg2.Client.XML.tostring(
probedata,
xml_declaration=False).decode('utf-8'))
- except Bcfg2.Proxy.ProxyError:
+ except Bcfg2.Client.Proxy.ProxyError:
err = sys.exc_info()[1]
self.fatal_error("Failed to upload probe data: %s" % err)
@@ -200,7 +200,7 @@ class Client(object):
if self.setup['profile']:
try:
self.proxy.AssertProfile(self.setup['profile'])
- except Bcfg2.Proxy.ProxyError:
+ except Bcfg2.Client.Proxy.ProxyError:
err = sys.exc_info()[1]
self.fatal_error("Failed to set client profile: %s" % err)
@@ -215,8 +215,8 @@ class Client(object):
"client version")
else:
self.logger.error("Failed to declare version: %s" % err)
- except (Bcfg2.Proxy.ProxyError,
- Bcfg2.Proxy.CertificateError,
+ except (Bcfg2.Client.Proxy.ProxyError,
+ Bcfg2.Client.Proxy.CertificateError,
socket.gaierror,
socket.error):
err = sys.exc_info()[1]
@@ -230,13 +230,13 @@ class Client(object):
self.proxy.GetDecisionList(self.setup['decision'])
self.logger.info("Got decision list from server:")
self.logger.info(self.setup['decision_list'])
- except Bcfg2.Proxy.ProxyError:
+ except Bcfg2.Client.Proxy.ProxyError:
err = sys.exc_info()[1]
self.fatal_error("Failed to get decision list: %s" % err)
try:
rawconfig = self.proxy.GetConfig().encode('utf-8')
- except Bcfg2.Proxy.ProxyError:
+ except Bcfg2.Client.Proxy.ProxyError:
err = sys.exc_info()[1]
self.fatal_error("Failed to download configuration from "
"Bcfg2: %s" % err)
@@ -288,10 +288,7 @@ class Client(object):
newconfig.append(bundle)
self.config = newconfig
- self.tools = Bcfg2.Client.Frame.Frame(self.config,
- self.setup,
- times, self.setup['drivers'],
- self.setup['dryrun'])
+ self.tools = Bcfg2.Client.Frame.Frame(self.config, times)
if not self.setup['omit_lock_check']:
#check lock here
@@ -331,7 +328,7 @@ class Client(object):
Bcfg2.Client.XML.tostring(
feedback,
xml_declaration=False).decode('utf-8'))
- except Bcfg2.Proxy.ProxyError:
+ except Bcfg2.Client.Proxy.ProxyError:
err = sys.exc_info()[1]
self.logger.error("Failed to upload configuration statistics: "
"%s" % err)
diff --git a/src/lib/Bcfg2/Client/Frame.py b/src/lib/Bcfg2/Client/Frame.py
index d30708e83..b24b46dbc 100644
--- a/src/lib/Bcfg2/Client/Frame.py
+++ b/src/lib/Bcfg2/Client/Frame.py
@@ -6,7 +6,16 @@ import fnmatch
import logging
import Bcfg2.Client.Tools
from Bcfg2.Client import prompt
-from Bcfg2.Compat import any, all # pylint: disable=W0622
+from Bcfg2.Options import get_option_parser
+from Bcfg2.Compat import any, all, cmp # pylint: disable=W0622
+
+
+def cmpent(ent1, ent2):
+ """Sort entries."""
+ if ent1.tag != ent2.tag:
+ return cmp(ent1.tag, ent2.tag)
+ else:
+ return cmp(ent1.get('name'), ent2.get('name'))
def matches_entry(entryspec, entry):
@@ -40,20 +49,25 @@ def passes_black_list(entry, blacklist):
class Frame(object):
"""Frame is the container for all Tool objects and state information."""
- def __init__(self, config, setup, times, drivers, dryrun):
+ def __init__(self, config, times):
+ self.setup = get_option_parser()
self.config = config
self.times = times
- self.dryrun = dryrun
+ self.dryrun = self.setup['dryrun']
self.times['initialization'] = time.time()
- self.setup = setup
self.tools = []
+
+ #: A dict of the state of each entry. Keys are the entries.
+ #: Values are boolean: True means that the entry is good,
+ #: False means that the entry is bad.
self.states = {}
self.whitelist = []
self.blacklist = []
self.removal = []
self.logger = logging.getLogger(__name__)
+ drivers = self.setup['drivers']
for driver in drivers[:]:
- if (driver not in Bcfg2.Client.Tools.drivers and
+ if (driver not in Bcfg2.Client.Tools.__all__ and
isinstance(driver, str)):
self.logger.error("Tool driver %s is not available" % driver)
drivers.remove(driver)
@@ -75,7 +89,7 @@ class Frame(object):
for tool in list(tclass.values()):
try:
- self.tools.append(tool(self.logger, setup, config))
+ self.tools.append(tool(config))
except Bcfg2.Client.Tools.ToolInstantiationError:
continue
except:
@@ -253,7 +267,7 @@ class Frame(object):
self.states[entry] = False
for tool in self.tools:
try:
- tool.Inventory(self.states)
+ self.states.update(tool.Inventory())
except:
self.logger.error("%s.Inventory() call failed:" % tool.name,
exc_info=1)
@@ -323,28 +337,31 @@ class Frame(object):
# first process prereq actions
for bundle in bundles[:]:
- if bundle.tag != 'Bundle':
- continue
- bmodified = len([item for item in bundle
- if item in self.whitelist])
+ if bundle.tag == 'Bundle':
+ bmodified = any(item in self.whitelist for item in bundle)
+ else:
+ bmodified = False
actions = [a for a in bundle.findall('./Action')
- if (a.get('timing') != 'post' and
+ if (a.get('timing') in ['pre', 'both'] and
(bmodified or a.get('when') == 'always'))]
# now we process all "always actions"
if self.setup['interactive']:
self.promptFilter(iprompt, actions)
self.DispatchInstallCalls(actions)
+ if bundle.tag != 'Bundle':
+ continue
+
# need to test to fail entries in whitelist
- if False in [self.states[a] for a in actions]:
+ if not all(self.states[a] for a in actions):
# then display bundles forced off with entries
- self.logger.info("Bundle %s failed prerequisite action" %
- (bundle.get('name')))
+ self.logger.info("%s %s failed prerequisite action" %
+ (bundle.tag, bundle.get('name')))
bundles.remove(bundle)
b_to_remv = [ent for ent in self.whitelist if ent in bundle]
if b_to_remv:
- self.logger.info("Not installing entries from Bundle %s" %
- (bundle.get('name')))
+ self.logger.info("Not installing entries from %s %s" %
+ (bundle.tag, bundle.get('name')))
self.logger.info(["%s:%s" % (e.tag, e.get('name'))
for e in b_to_remv])
for ent in b_to_remv:
@@ -369,7 +386,7 @@ class Frame(object):
if not handled:
continue
try:
- tool.Install(handled, self.states)
+ self.states.update(tool.Install(handled))
except:
self.logger.error("%s.Install() call failed:" % tool.name,
exc_info=1)
@@ -389,7 +406,7 @@ class Frame(object):
tbm = [(t, b) for t in self.tools for b in mbundles]
for tool, bundle in tbm:
try:
- tool.Inventory(self.states, [bundle])
+ self.states.update(tool.Inventory(structures=[bundle]))
except:
self.logger.error("%s.Inventory() call failed:" %
tool.name,
@@ -419,10 +436,20 @@ class Frame(object):
func = "BundleNotUpdated"
for tool in self.tools:
try:
- getattr(tool, func)(bundle, self.states)
+ self.states.update(getattr(tool, func)(bundle))
+ except:
+ self.logger.error("%s.%s(%s:%s) call failed:" %
+ (tool.name, func, bundle.tag,
+ bundle.get("name")), exc_info=1)
+
+ for indep in self.config.findall('.//Independent'):
+ for tool in self.tools:
+ try:
+ self.states.update(tool.BundleNotUpdated(indep))
except:
- self.logger.error("%s.%s() call failed:" %
- (tool.name, func), exc_info=1)
+ self.logger.error("%s.BundleNotUpdated(%s:%s) call failed:"
+ % (tool.name, indep.tag,
+ indep.get("name")), exc_info=1)
def Remove(self):
"""Remove extra entries."""
diff --git a/src/lib/Bcfg2/Proxy.py b/src/lib/Bcfg2/Client/Proxy.py
index f6db66a93..fbf114de6 100644
--- a/src/lib/Bcfg2/Proxy.py
+++ b/src/lib/Bcfg2/Client/Proxy.py
@@ -123,8 +123,6 @@ class SSLHTTPConnection(httplib.HTTPConnection):
implements SSL and related behaviors.
"""
- logger = logging.getLogger('Bcfg2.Proxy.SSLHTTPConnection')
-
def __init__(self, host, port=None, strict=None, timeout=90, key=None,
cert=None, ca=None, scns=None, protocol='xmlrpc/ssl'):
"""Initializes the `httplib.HTTPConnection` object and stores security
@@ -180,6 +178,8 @@ class SSLHTTPConnection(httplib.HTTPConnection):
# the strict parameter is deprecated.
# HTTP 0.9-style "Simple Responses" are not supported anymore.
httplib.HTTPConnection.__init__(self, host, port, timeout=timeout)
+ self.logger = logging.getLogger("%s.%s" % (self.__class__.__module__,
+ self.__class__.__name__))
self.key = key
self.cert = cert
self.ca = ca
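
The renamed Proxy module keeps its behaviour; the Client.py hunks above just repoint imports and exception handling at Bcfg2.Client.Proxy. The one functional change here is the logger: the hard-coded 'Bcfg2.Proxy.SSLHTTPConnection' name is replaced by one derived from the instance's class, so log records follow the module to its new location. The same idiom in isolation:

    import logging

    class Example(object):
        def __init__(self):
            # Yields e.g. "Bcfg2.Client.Proxy.SSLHTTPConnection" after the
            # rename, without hard-coding the module path.
            self.logger = logging.getLogger(
                "%s.%s" % (self.__class__.__module__, self.__class__.__name__))
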
diff --git a/src/lib/Bcfg2/Client/Tools/APK.py b/src/lib/Bcfg2/Client/Tools/APK.py
index 58641ed37..46f46bb1c 100644
--- a/src/lib/Bcfg2/Client/Tools/APK.py
+++ b/src/lib/Bcfg2/Client/Tools/APK.py
@@ -12,11 +12,6 @@ class APK(Bcfg2.Client.Tools.PkgTool):
pkgtype = 'apk'
pkgtool = ("/sbin/apk add %s", ("%s", ["name"]))
- def __init__(self, logger, setup, config):
- Bcfg2.Client.Tools.PkgTool.__init__(self, logger, setup, config)
- self.installed = {}
- self.RefreshPackages()
-
def RefreshPackages(self):
"""Refresh memory hashes of packages."""
names = self.cmd.run("/sbin/apk info").stdout.splitlines()
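
APK, MacPorts, and Pacman all drop the same boilerplate constructor here; the installed-package cache and the initial RefreshPackages() call are evidently taken over by the PkgTool base class. A PkgTool-style driver under the new one-argument signature then reduces to roughly this sketch (the package type, install command, and the bodies of RefreshPackages() and VerifyPackage() are invented for illustration):

    import Bcfg2.Client.Tools

    class ExamplePkg(Bcfg2.Client.Tools.PkgTool):
        """Illustrative package driver: no __init__ override needed."""
        name = 'ExamplePkg'
        __handles__ = [('Package', 'example')]
        __req__ = {'Package': ['name', 'version']}
        pkgtype = 'example'
        pkgtool = ('/usr/bin/example-install %s', ('%s', ['name']))

        def RefreshPackages(self):
            # Fill self.installed as {name: version}; the base class is
            # assumed to call this from its constructor after this refactor.
            self.installed = {'examplepkg': '1.0'}

        def VerifyPackage(self, entry, modlist):
            # Trivially report a package as correct for the sketch.
            return entry.get('name') in self.installed
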
diff --git a/src/lib/Bcfg2/Client/Tools/APT.py b/src/lib/Bcfg2/Client/Tools/APT.py
index 39816403a..f449557aa 100644
--- a/src/lib/Bcfg2/Client/Tools/APT.py
+++ b/src/lib/Bcfg2/Client/Tools/APT.py
@@ -18,12 +18,12 @@ class APT(Bcfg2.Client.Tools.Tool):
__handles__ = [('Package', 'deb'), ('Path', 'ignore')]
__req__ = {'Package': ['name', 'version'], 'Path': ['type']}
- def __init__(self, logger, setup, config):
- Bcfg2.Client.Tools.Tool.__init__(self, logger, setup, config)
+ def __init__(self, config):
+ Bcfg2.Client.Tools.Tool.__init__(self, config)
- self.install_path = setup.get('apt_install_path', '/usr')
- self.var_path = setup.get('apt_var_path', '/var')
- self.etc_path = setup.get('apt_etc_path', '/etc')
+ self.install_path = self.setup.get('apt_install_path', '/usr')
+ self.var_path = self.setup.get('apt_var_path', '/var')
+ self.etc_path = self.setup.get('apt_etc_path', '/etc')
self.debsums = '%s/bin/debsums' % self.install_path
self.aptget = '%s/bin/apt-get' % self.install_path
self.dpkg = '%s/bin/dpkg' % self.install_path
@@ -217,7 +217,7 @@ class APT(Bcfg2.Client.Tools.Tool):
self.modified += packages
self.extra = self.FindExtra()
- def Install(self, packages, states):
+ def Install(self, packages):
# it looks like you can't install arbitrary versions of software
# out of the pkg cache, we will still need to call apt-get
ipkgs = []
@@ -262,10 +262,12 @@ class APT(Bcfg2.Client.Tools.Tool):
self.logger.error("APT command failed")
self.pkg_cache = apt.cache.Cache()
self.extra = self.FindExtra()
+ states = dict()
for package in packages:
states[package] = self.VerifyPackage(package, [], checksums=False)
if states[package]:
self.modified.append(package)
+ return states
def VerifyPath(self, entry, _):
"""Do nothing here since we only verify Path type=ignore."""
diff --git a/src/lib/Bcfg2/Client/Tools/Action.py b/src/lib/Bcfg2/Client/Tools/Action.py
index da4412b1d..fd2c467d7 100644
--- a/src/lib/Bcfg2/Client/Tools/Action.py
+++ b/src/lib/Bcfg2/Client/Tools/Action.py
@@ -11,9 +11,8 @@ from Bcfg2.Compat import input # pylint: disable=W0622
class Action(Bcfg2.Client.Tools.Tool):
"""Implement Actions"""
name = 'Action'
- __handles__ = [('PostInstall', None), ('Action', None)]
- __req__ = {'PostInstall': ['name'],
- 'Action': ['name', 'timing', 'when', 'command', 'status']}
+ __handles__ = [('Action', None)]
+ __req__ = {'Action': ['name', 'timing', 'when', 'command', 'status']}
def _action_allowed(self, action):
""" Return true if the given action is allowed to be run by
@@ -63,39 +62,29 @@ class Action(Bcfg2.Client.Tools.Tool):
"""Actions always verify true."""
return True
- def VerifyPostInstall(self, dummy, _):
- """Actions always verify true."""
- return True
-
def InstallAction(self, entry):
"""Run actions as pre-checks for bundle installation."""
if entry.get('timing') != 'post':
return self.RunAction(entry)
return True
- def InstallPostInstall(self, entry):
- """ Install a deprecated PostInstall entry """
- self.logger.warning("Installing deprecated PostInstall entry %s" %
- entry.get("name"))
- return self.InstallAction(entry)
-
- def BundleUpdated(self, bundle, states):
+ def BundleUpdated(self, bundle):
"""Run postinstalls when bundles have been updated."""
- for postinst in bundle.findall("PostInstall"):
- if not self._action_allowed(postinst):
- continue
- self.cmd.run(postinst.get('name'))
+ states = dict()
for action in bundle.findall("Action"):
if action.get('timing') in ['post', 'both']:
if not self._action_allowed(action):
continue
states[action] = self.RunAction(action)
+ return states
- def BundleNotUpdated(self, bundle, states):
+ def BundleNotUpdated(self, bundle):
"""Run Actions when bundles have not been updated."""
+ states = dict()
for action in bundle.findall("Action"):
- if action.get('timing') in ['post', 'both'] and \
- action.get('when') != 'modified':
+ if (action.get('timing') in ['post', 'both'] and
+ action.get('when') != 'modified'):
if not self._action_allowed(action):
continue
states[action] = self.RunAction(action)
+ return states
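
As in the Frame.py and APT.py hunks above, the driver API now returns per-entry results instead of filling in a shared states dict: Frame merges whatever Inventory(), Install(), BundleUpdated(), and BundleNotUpdated() return via self.states.update(). A sketch of a driver written against that convention; only the one-argument constructor and the return-a-dict shape come from this commit, while the Path handling is invented for illustration:

    import Bcfg2.Client.Tools

    class ExampleTool(Bcfg2.Client.Tools.Tool):
        """Illustrative driver for the post-refactor tool API."""
        name = 'ExampleTool'
        __handles__ = [('Path', 'example')]   # hypothetical entry binding
        __req__ = {'Path': ['name']}

        def VerifyPath(self, entry, modlist):
            # Stub verification so the sketch is complete.
            return True

        def InstallPath(self, entry):
            # Hypothetical per-entry worker; a real driver acts here.
            return True

        def Install(self, entries):
            # New convention: build and return {entry: bool} rather than
            # mutating a states dict passed in by Frame.
            states = dict()
            for entry in entries:
                states[entry] = self.InstallPath(entry)
            return states
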
diff --git a/src/lib/Bcfg2/Client/Tools/Blast.py b/src/lib/Bcfg2/Client/Tools/Blast.py
index 2627c42fe..fd594b4f4 100644
--- a/src/lib/Bcfg2/Client/Tools/Blast.py
+++ b/src/lib/Bcfg2/Client/Tools/Blast.py
@@ -13,9 +13,9 @@ class Blast(Bcfg2.Client.Tools.SYSV.SYSV):
__handles__ = [('Package', 'blast')]
__req__ = {'Package': ['name', 'version', 'bname']}
- def __init__(self, logger, setup, config):
+ def __init__(self, config):
# dont use the sysv constructor
- Bcfg2.Client.Tools.PkgTool.__init__(self, logger, setup, config)
+ Bcfg2.Client.Tools.PkgTool.__init__(self, config)
noaskfile = tempfile.NamedTemporaryFile()
self.noaskname = noaskfile.name
try:
diff --git a/src/lib/Bcfg2/Client/Tools/FreeBSDInit.py b/src/lib/Bcfg2/Client/Tools/FreeBSDInit.py
index 8ff26d8f3..2ab64f86d 100644
--- a/src/lib/Bcfg2/Client/Tools/FreeBSDInit.py
+++ b/src/lib/Bcfg2/Client/Tools/FreeBSDInit.py
@@ -15,8 +15,8 @@ class FreeBSDInit(Bcfg2.Client.Tools.SvcTool):
__handles__ = [('Service', 'freebsd')]
__req__ = {'Service': ['name', 'status']}
- def __init__(self, logger, cfg, setup):
- Bcfg2.Client.Tools.Tool.__init__(self, logger, cfg, setup)
+ def __init__(self, config):
+ Bcfg2.Client.Tools.SvcTool.__init__(self, config)
if os.uname()[0] != 'FreeBSD':
raise Bcfg2.Client.Tools.ToolInstantiationError
diff --git a/src/lib/Bcfg2/Client/Tools/IPS.py b/src/lib/Bcfg2/Client/Tools/IPS.py
index aff276c3a..c998ff083 100644
--- a/src/lib/Bcfg2/Client/Tools/IPS.py
+++ b/src/lib/Bcfg2/Client/Tools/IPS.py
@@ -15,14 +15,13 @@ class IPS(Bcfg2.Client.Tools.PkgTool):
__req__ = {'Package': ['name', 'version']}
pkgtool = ('pkg install --no-refresh %s', ('%s', ['name']))
- def __init__(self, logger, setup, cfg):
+ def __init__(self, config):
self.installed = {}
self.pending_upgrades = set()
self.image = image.Image()
self.image.find_root('/', False)
self.image.load_config()
- Bcfg2.Client.Tools.PkgTool.__init__(self, logger, setup, cfg)
- self.cfg = cfg
+ Bcfg2.Client.Tools.PkgTool.__init__(self, config)
def RefreshPackages(self):
self.installed = dict()
diff --git a/src/lib/Bcfg2/Client/Tools/MacPorts.py b/src/lib/Bcfg2/Client/Tools/MacPorts.py
index bd9d24df3..dcf58cfec 100644
--- a/src/lib/Bcfg2/Client/Tools/MacPorts.py
+++ b/src/lib/Bcfg2/Client/Tools/MacPorts.py
@@ -12,11 +12,6 @@ class MacPorts(Bcfg2.Client.Tools.PkgTool):
pkgtype = 'macport'
pkgtool = ('/opt/local/bin/port install %s', ('%s', ['name']))
- def __init__(self, logger, setup, config):
- Bcfg2.Client.Tools.PkgTool.__init__(self, logger, setup, config)
- self.installed = {}
- self.RefreshPackages()
-
def RefreshPackages(self):
"""Refresh memory hashes of packages."""
pkgcache = self.cmd.run(["/opt/local/bin/port",
diff --git a/src/lib/Bcfg2/Client/Tools/OpenCSW.py b/src/lib/Bcfg2/Client/Tools/OpenCSW.py
index 60e362e64..3ea9d835e 100644
--- a/src/lib/Bcfg2/Client/Tools/OpenCSW.py
+++ b/src/lib/Bcfg2/Client/Tools/OpenCSW.py
@@ -14,9 +14,9 @@ class OpenCSW(Bcfg2.Client.Tools.SYSV.SYSV):
__handles__ = [('Package', 'opencsw')]
__req__ = {'Package': ['name', 'version', 'bname']}
- def __init__(self, logger, setup, config):
+ def __init__(self, config):
# dont use the sysv constructor
- Bcfg2.Client.Tools.PkgTool.__init__(self, logger, setup, config)
+ Bcfg2.Client.Tools.PkgTool.__init__(self, config)
noaskfile = tempfile.NamedTemporaryFile()
self.noaskname = noaskfile.name
try:
diff --git a/src/lib/Bcfg2/Client/Tools/POSIX/__init__.py b/src/lib/Bcfg2/Client/Tools/POSIX/__init__.py
index 7708c4f72..4f1f8e5aa 100644
--- a/src/lib/Bcfg2/Client/Tools/POSIX/__init__.py
+++ b/src/lib/Bcfg2/Client/Tools/POSIX/__init__.py
@@ -14,10 +14,10 @@ class POSIX(Bcfg2.Client.Tools.Tool):
"""POSIX File support code."""
name = 'POSIX'
- def __init__(self, logger, setup, config):
- Bcfg2.Client.Tools.Tool.__init__(self, logger, setup, config)
- self.ppath = setup['ppath']
- self.max_copies = setup['max_copies']
+ def __init__(self, config):
+ Bcfg2.Client.Tools.Tool.__init__(self, config)
+ self.ppath = self.setup['ppath']
+ self.max_copies = self.setup['max_copies']
self._handlers = self._load_handlers()
self.logger.debug("POSIX: Handlers loaded: %s" %
(", ".join(self._handlers.keys())))
@@ -53,7 +53,7 @@ class POSIX(Bcfg2.Client.Tools.Tool):
if POSIXTool in hdlr.__mro__:
# figure out what entry type this handler handles
etype = hdlr.__name__[5:].lower()
- rv[etype] = hdlr(self.logger, self.setup, self.config)
+ rv[etype] = hdlr(self.config)
return rv
def canVerify(self, entry):
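
The POSIX handlers follow the same constructor slimming: they are built with only the config document and read everything else (ppath, max_copies, and so on) from the shared option parser, just as Client, Frame, and APT do above. A small sketch of that access pattern, assuming only what the hunks show, namely dict-style lookups, .get() with a default, and a Client() that takes no setup argument:

    import Bcfg2.Options
    import Bcfg2.Client.Client

    # One process-wide parser; the client and its drivers read it directly.
    setup = Bcfg2.Options.get_option_parser()
    ppath = setup['ppath']                              # required key, as POSIX reads it
    apt_prefix = setup.get('apt_install_path', '/usr')  # optional key with default, as APT reads it
    client = Bcfg2.Client.Client.Client()               # no setup argument any more
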
diff --git a/src/lib/Bcfg2/Client/Tools/POSIXUsers.py b/src/lib/Bcfg2/Client/Tools/POSIXUsers.py
index 8226392f9..8f6bc5f37 100644
--- a/src/lib/Bcfg2/Client/Tools/POSIXUsers.py
+++ b/src/lib/Bcfg2/Client/Tools/POSIXUsers.py
@@ -18,7 +18,6 @@ class POSIXUsers(Bcfg2.Client.Tools.Tool):
('POSIXGroup', None)]
__req__ = dict(POSIXUser=['name'],
POSIXGroup=['name'])
- experimental = True
#: A mapping of XML entry attributes to the indexes of
#: corresponding values in the get{pw|gr}all data structures
@@ -30,8 +29,8 @@ class POSIXUsers(Bcfg2.Client.Tools.Tool):
#: user or group
id_mapping = dict(POSIXUser="uid", POSIXGroup="gid")
- def __init__(self, logger, setup, config):
- Bcfg2.Client.Tools.Tool.__init__(self, logger, setup, config)
+ def __init__(self, config):
+ Bcfg2.Client.Tools.Tool.__init__(self, config)
self.set_defaults = dict(POSIXUser=self.populate_user_entry,
POSIXGroup=lambda g: g)
self._existing = None
@@ -87,7 +86,7 @@ class POSIXUsers(Bcfg2.Client.Tools.Tool):
return False
return True
- def Inventory(self, states, structures=None):
+ def Inventory(self, structures=None):
if not structures:
structures = self.config.getchildren()
# we calculate a list of all POSIXUser and POSIXGroup entries,
@@ -107,7 +106,8 @@ class POSIXUsers(Bcfg2.Client.Tools.Tool):
(group, entry.get("name")))
struct.append(Bcfg2.Client.XML.Element("POSIXGroup",
name=group))
- return Bcfg2.Client.Tools.Tool.Inventory(self, states, structures)
+ return Bcfg2.Client.Tools.Tool.Inventory(self, structures)
+ Inventory.__doc__ = Bcfg2.Client.Tools.Tool.Inventory.__doc__
def FindExtra(self):
extra = []
@@ -208,7 +208,8 @@ class POSIXUsers(Bcfg2.Client.Tools.Tool):
entry.set('qtext', "\n".join([entry.get('qtext', '')] + errors))
return len(errors) == 0
- def Install(self, entries, states):
+ def Install(self, entries):
+ states = dict()
for entry in entries:
# install groups first, so that all groups exist for
# users that might need them
@@ -218,6 +219,7 @@ class POSIXUsers(Bcfg2.Client.Tools.Tool):
if entry.tag == 'POSIXUser':
states[entry] = self._install(entry)
self._existing = None
+ return states
def _install(self, entry):
""" add or modify a user or group using the appropriate command """
diff --git a/src/lib/Bcfg2/Client/Tools/Pacman.py b/src/lib/Bcfg2/Client/Tools/Pacman.py
index a4cfd3315..d7d60a66d 100644
--- a/src/lib/Bcfg2/Client/Tools/Pacman.py
+++ b/src/lib/Bcfg2/Client/Tools/Pacman.py
@@ -13,11 +13,6 @@ class Pacman(Bcfg2.Client.Tools.PkgTool):
pkgtype = 'pacman'
pkgtool = ("/usr/bin/pacman --needed --noconfirm --noprogressbar")
- def __init__(self, logger, setup, config):
- Bcfg2.Client.Tools.PkgTool.__init__(self, logger, setup, config)
- self.installed = {}
- self.RefreshPackages()
-
def RefreshPackages(self):
'''Refresh memory hashes of packages'''
self.installed = {}
@@ -66,7 +61,7 @@ class Pacman(Bcfg2.Client.Tools.PkgTool):
self.RefreshPackages()
self.extra = self.FindExtra()
- def Install(self, packages, states):
+ def Install(self, packages):
'''
Pacman Install
'''
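
Here is the Inventory() override pattern from POSIXUsers above in isolation, with the structure manipulation reduced to a comment and stub handlers added so the class is complete (the POSIXUser binding is taken from the hunks; the rest is illustrative):

    import Bcfg2.Client.Tools

    class ExampleInventoryTool(Bcfg2.Client.Tools.Tool):
        """Illustrative Inventory() override for the new API."""
        name = 'ExampleInventoryTool'
        __handles__ = [('POSIXUser', None)]
        __req__ = {'POSIXUser': ['name']}

        def VerifyPOSIXUser(self, entry, modlist):
            return True                       # stub for the sketch

        def InstallPOSIXUser(self, entry):
            return True                       # stub for the sketch

        def Inventory(self, structures=None):
            if not structures:
                structures = self.config.getchildren()
            # ...add or rewrite entries in the structures list here, as
            # POSIXUsers does for implicit groups...
            return Bcfg2.Client.Tools.Tool.Inventory(self, structures)
        Inventory.__doc__ = Bcfg2.Client.Tools.Tool.Inventory.__doc__
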
diff --git a/src/lib/Bcfg2/Client/Tools/Portage.py b/src/lib/Bcfg2/Client/Tools/Portage.py
index 2d8b66ce5..e52da081b 100644
--- a/src/lib/Bcfg2/Client/Tools/Portage.py
+++ b/src/lib/Bcfg2/Client/Tools/Portage.py
@@ -17,14 +17,13 @@ class Portage(Bcfg2.Client.Tools.PkgTool):
'version']))
pkgtool = ('emerge %s', ('=%s-%s', ['name', 'version']))
- def __init__(self, logger, cfg, setup):
+ def __init__(self, config):
self._initialised = False
- Bcfg2.Client.Tools.PkgTool.__init__(self, logger, cfg, setup)
+ Bcfg2.Client.Tools.PkgTool.__init__(self, config)
self._initialised = True
self.__important__ = self.__important__ + ['/etc/make.conf']
self._pkg_pattern = re.compile(r'(.*)-(\d.*)')
self._ebuild_pattern = re.compile('(ebuild|binary)')
- self.cfg = cfg
self.installed = {}
self._binpkgonly = self.setup.get('portage_binpkgonly', False)
if self._binpkgonly:
diff --git a/src/lib/Bcfg2/Client/Tools/RPM.py b/src/lib/Bcfg2/Client/Tools/RPM.py
index a4dd2b730..be5ad01e2 100644
--- a/src/lib/Bcfg2/Client/Tools/RPM.py
+++ b/src/lib/Bcfg2/Client/Tools/RPM.py
@@ -1,9 +1,1077 @@
"""Bcfg2 Support for RPMS"""
-import os.path
+import os
import rpm
-import rpmtools
import Bcfg2.Client.Tools
+import grp
+import optparse
+import pwd
+import stat
+import sys
+try:
+ import hashlib
+ py24compat = False
+except ImportError:
+ # FIXME: Remove when client python dep is 2.5 or greater
+ py24compat = True
+ import md5
+
+# Determine what prelink tools we have available.
+# The isprelink module is a python extension that examines the ELF headers
+# to see if the file has been prelinked. If it is not present a lot of files
+# are unnecessarily run through the prelink command.
+try:
+ from isprelink import *
+ isprelink_imported = True
+except ImportError:
+ isprelink_imported = False
+
+# If the prelink command is installed on the system then we need to do
+# prelink -y on files.
+if os.access('/usr/sbin/prelink', os.X_OK):
+ prelink_exists = True
+else:
+ prelink_exists = False
+
+# If we don't have isprelink then we will use the prelink configuration file to
+# filter what we have to put through prelink -y.
+import re
+blacklist = []
+whitelist = []
+try:
+ f = open('/etc/prelink.conf', mode='r')
+ for line in f:
+ if line.startswith('#'):
+ continue
+ option, pattern = line.split()
+ if pattern.startswith('*.'):
+ pattern = pattern.replace('*.', '\.')
+ pattern += '$'
+ elif pattern.startswith('/'):
+ pattern = '^' + pattern
+ if option == '-b':
+ blacklist.append(pattern)
+ elif option == '-l':
+ whitelist.append(pattern)
+ f.close()
+except IOError:
+ pass
+
+blacklist_re = re.compile('|'.join(blacklist))
+whitelist_re = re.compile('|'.join(whitelist))
+
+# Flags that are not defined in rpm-python.
+# They are defined in lib/rpmcli.h
+# Bit(s) for verifyFile() attributes.
+#
+RPMVERIFY_NONE = 0 # /*!< */
+RPMVERIFY_MD5 = 1 # 1 << 0 # /*!< from %verify(md5) */
+RPMVERIFY_FILESIZE = 2 # 1 << 1 # /*!< from %verify(size) */
+RPMVERIFY_LINKTO = 4 # 1 << 2 # /*!< from %verify(link) */
+RPMVERIFY_USER = 8 # 1 << 3 # /*!< from %verify(user) */
+RPMVERIFY_GROUP = 16 # 1 << 4 # /*!< from %verify(group) */
+RPMVERIFY_MTIME = 32 # 1 << 5 # /*!< from %verify(mtime) */
+RPMVERIFY_MODE = 64 # 1 << 6 # /*!< from %verify(mode) */
+RPMVERIFY_RDEV = 128 # 1 << 7 # /*!< from %verify(rdev) */
+RPMVERIFY_CONTEXTS = 32768 # (1 << 15) # /*!< from --nocontexts */
+RPMVERIFY_READLINKFAIL = 268435456 # (1 << 28) # /*!< readlink failed */
+RPMVERIFY_READFAIL = 536870912 # (1 << 29) # /*!< file read failed */
+RPMVERIFY_LSTATFAIL = 1073741824 # (1 << 30) # /*!< lstat failed */
+RPMVERIFY_LGETFILECONFAIL = 2147483648 # (1 << 31) # /*!< lgetfilecon failed */
+
+RPMVERIFY_FAILURES = \
+ (RPMVERIFY_LSTATFAIL|RPMVERIFY_READFAIL|RPMVERIFY_READLINKFAIL| \
+ RPMVERIFY_LGETFILECONFAIL)
+
+# Bit(s) to control rpm_verify() operation.
+#
+VERIFY_DEFAULT = 0, # /*!< */
+VERIFY_MD5 = 1 << 0 # /*!< from --nomd5 */
+VERIFY_SIZE = 1 << 1 # /*!< from --nosize */
+VERIFY_LINKTO = 1 << 2 # /*!< from --nolinkto */
+VERIFY_USER = 1 << 3 # /*!< from --nouser */
+VERIFY_GROUP = 1 << 4 # /*!< from --nogroup */
+VERIFY_MTIME = 1 << 5 # /*!< from --nomtime */
+VERIFY_MODE = 1 << 6 # /*!< from --nomode */
+VERIFY_RDEV = 1 << 7 # /*!< from --nodev */
+# /* bits 8-14 unused, reserved for rpmVerifyAttrs */
+VERIFY_CONTEXTS = 1 << 15 # /*!< verify: from --nocontexts */
+VERIFY_FILES = 1 << 16 # /*!< verify: from --nofiles */
+VERIFY_DEPS = 1 << 17 # /*!< verify: from --nodeps */
+VERIFY_SCRIPT = 1 << 18 # /*!< verify: from --noscripts */
+VERIFY_DIGEST = 1 << 19 # /*!< verify: from --nodigest */
+VERIFY_SIGNATURE = 1 << 20 # /*!< verify: from --nosignature */
+VERIFY_PATCHES = 1 << 21 # /*!< verify: from --nopatches */
+VERIFY_HDRCHK = 1 << 22 # /*!< verify: from --nohdrchk */
+VERIFY_FOR_LIST = 1 << 23 # /*!< query: from --list */
+VERIFY_FOR_STATE = 1 << 24 # /*!< query: from --state */
+VERIFY_FOR_DOCS = 1 << 25 # /*!< query: from --docfiles */
+VERIFY_FOR_CONFIG = 1 << 26 # /*!< query: from --configfiles */
+VERIFY_FOR_DUMPFILES = 1 << 27 # /*!< query: from --dump */
+# /* bits 28-31 used in rpmVerifyAttrs */
+
+# Comes from C source. lib/rpmcli.h
+VERIFY_ATTRS = \
+ (VERIFY_MD5 | VERIFY_SIZE | VERIFY_LINKTO | VERIFY_USER | VERIFY_GROUP | \
+ VERIFY_MTIME | VERIFY_MODE | VERIFY_RDEV | VERIFY_CONTEXTS)
+
+VERIFY_ALL = \
+ (VERIFY_ATTRS | VERIFY_FILES | VERIFY_DEPS | VERIFY_SCRIPT | VERIFY_DIGEST |\
+ VERIFY_SIGNATURE | VERIFY_HDRCHK)
+
+
+# Some masks for what checks to NOT do on these file types.
+# The C code actually resets these up for every file.
+DIR_FLAGS = ~(RPMVERIFY_MD5 | RPMVERIFY_FILESIZE | RPMVERIFY_MTIME | \
+ RPMVERIFY_LINKTO)
+
+# These file types all have the same mask, but hopefully this will make the
+# code more readable.
+FIFO_FLAGS = CHR_FLAGS = BLK_FLAGS = GHOST_FLAGS = DIR_FLAGS
+
+LINK_FLAGS = ~(RPMVERIFY_MD5 | RPMVERIFY_FILESIZE | RPMVERIFY_MTIME | \
+ RPMVERIFY_MODE | RPMVERIFY_USER | RPMVERIFY_GROUP)
+
+REG_FLAGS = ~(RPMVERIFY_LINKTO)
+
+
+def s_isdev(mode):
+ """
+ Check to see if a file is a device.
+
+ """
+ return stat.S_ISBLK(mode) | stat.S_ISCHR(mode)
+
+def rpmpackagelist(rts):
+ """
+ Equivalent of rpm -qa. Intended for RefreshPackages() in the RPM Driver.
+ Requires rpmtransactionset() to be run first to get a ts.
+ Returns a list of pkgspec dicts.
+
+ e.g. [ {'name':'foo', 'epoch':'20', 'version':'1.2', 'release':'5', 'arch':'x86_64' },
+ {'name':'bar', 'epoch':'10', 'version':'5.2', 'release':'2', 'arch':'x86_64' } ]
+
+ """
+ return [{'name':header[rpm.RPMTAG_NAME],
+ 'epoch':header[rpm.RPMTAG_EPOCH],
+ 'version':header[rpm.RPMTAG_VERSION],
+ 'release':header[rpm.RPMTAG_RELEASE],
+ 'arch':header[rpm.RPMTAG_ARCH],
+ 'gpgkeyid':header.sprintf("%|SIGGPG?{%{SIGGPG:pgpsig}}:{None}|").split()[-1]}
+ for header in rts.dbMatch()]
+
+def getindexbykeyword(index_ts, **kwargs):
+ """
+ Return list of indexes from the rpmdb matching keywords
+ ex: getHeadersByKeyword(name='foo', version='1', release='1')
+
+ Can be passed any structure that can be indexed by the pkgspec
+ keywords as other keys are filtered out.
+
+ """
+ lst = []
+ name = kwargs.get('name')
+ if name:
+ index_mi = index_ts.dbMatch(rpm.RPMTAG_NAME, name)
+ else:
+ index_mi = index_ts.dbMatch()
+
+ if 'epoch' in kwargs:
+ if kwargs['epoch'] != None and kwargs['epoch'] != 'None':
+ kwargs['epoch'] = int(kwargs['epoch'])
+ else:
+ del(kwargs['epoch'])
+
+ keywords = [key for key in list(kwargs.keys()) \
+ if key in ('name', 'epoch', 'version', 'release', 'arch')]
+ keywords_len = len(keywords)
+ for hdr in index_mi:
+ match = 0
+ for keyword in keywords:
+ if hdr[keyword] == kwargs[keyword]:
+ match += 1
+ if match == keywords_len:
+ lst.append(index_mi.instance())
+ del index_mi
+ return lst
+
+def getheadersbykeyword(header_ts, **kwargs):
+ """
+ Borrowed parts of this from Yum. Need to fix it though.
+ Epoch is not handled right.
+
+ Return list of headers from the rpmdb matching keywords
+ ex: getHeadersByKeyword(name='foo', version='1', release='1')
+
+ Can be passed any structure that can be indexed by the pkgspec
+ keywords as other keys are filtered out.
+
+ """
+ lst = []
+ name = kwargs.get('name')
+ if name:
+ header_mi = header_ts.dbMatch(rpm.RPMTAG_NAME, name)
+ else:
+ header_mi = header_ts.dbMatch()
+
+ if 'epoch' in kwargs:
+ if kwargs['epoch'] != None and kwargs['epoch'] != 'None':
+ kwargs['epoch'] = int(kwargs['epoch'])
+ else:
+ del(kwargs['epoch'])
+
+ keywords = [key for key in list(kwargs.keys()) \
+ if key in ('name', 'epoch', 'version', 'release', 'arch')]
+ keywords_len = len(keywords)
+ for hdr in header_mi:
+ match = 0
+ for keyword in keywords:
+ if hdr[keyword] == kwargs[keyword]:
+ match += 1
+ if match == keywords_len:
+ lst.append(hdr)
+ del header_mi
+ return lst
+
+def prelink_md5_check(filename):
+ """
+ Checks if a file is prelinked. If it is run it through prelink -y
+ to get the unprelinked md5 and file size.
+
+ Return 0 if the file was not prelinked, otherwise return the file size.
+ Always return the md5.
+
+ """
+ prelink = False
+ try:
+ plf = open(filename, "rb")
+ except IOError:
+ return False, 0
+
+ if prelink_exists:
+ if isprelink_imported:
+ plfd = plf.fileno()
+ if isprelink(plfd):
+ plf.close()
+ cmd = '/usr/sbin/prelink -y %s 2> /dev/null' \
+ % (re.escape(filename))
+ plf = os.popen(cmd, 'rb')
+ prelink = True
+ elif whitelist_re.search(filename) and not blacklist_re.search(filename):
+ plf.close()
+ cmd = '/usr/sbin/prelink -y %s 2> /dev/null' \
+ % (re.escape(filename))
+ plf = os.popen(cmd, 'rb')
+ prelink = True
+
+ fsize = 0
+ if py24compat:
+ chksum = md5.new()
+ else:
+ chksum = hashlib.md5()
+ while 1:
+ data = plf.read()
+ if not data:
+ break
+ fsize += len(data)
+ chksum.update(data)
+ plf.close()
+ file_md5 = chksum.hexdigest()
+ if prelink:
+ return file_md5, fsize
+ else:
+ return file_md5, 0
+
+def prelink_size_check(filename):
+ """
+ This check is only done if the prelink_md5_check() is not done first.
+
+ Checks if a file is prelinked. If it is run it through prelink -y
+ to get the unprelinked file size.
+
+ Return 0 if the file was not prelinked, otherwise return the file size.
+
+ """
+ fsize = 0
+ try:
+ plf = open(filename, "rb")
+ except IOError:
+ return False
+
+ if prelink_exists:
+ if isprelink_imported:
+ plfd = plf.fileno()
+ if isprelink(plfd):
+ plf.close()
+ cmd = '/usr/sbin/prelink -y %s 2> /dev/null' \
+ % (re.escape(filename))
+ plf = os.popen(cmd, 'rb')
+
+ while 1:
+ data = plf.read()
+ if not data:
+ break
+ fsize += len(data)
+
+ elif whitelist_re.search(filename) and not blacklist_re.search(filename):
+ plf.close()
+ cmd = '/usr/sbin/prelink -y %s 2> /dev/null' \
+ % (re.escape(filename))
+ plf = os.popen(cmd, 'rb')
+
+ while 1:
+ data = plf.read()
+ if not data:
+ break
+ fsize += len(data)
+
+ plf.close()
+
+ return fsize
+
+def debug_verify_flags(vflags):
+ """
+ Decodes the verify flags bits.
+ """
+ if vflags & RPMVERIFY_MD5:
+ print('RPMVERIFY_MD5')
+ if vflags & RPMVERIFY_FILESIZE:
+ print('RPMVERIFY_FILESIZE')
+ if vflags & RPMVERIFY_LINKTO:
+ print('RPMVERIFY_LINKTO')
+ if vflags & RPMVERIFY_USER:
+ print('RPMVERIFY_USER')
+ if vflags & RPMVERIFY_GROUP:
+ print('RPMVERIFY_GROUP')
+ if vflags & RPMVERIFY_MTIME:
+ print('RPMVERIFY_MTIME')
+ if vflags & RPMVERIFY_MODE:
+ print('RPMVERIFY_MODE')
+ if vflags & RPMVERIFY_RDEV:
+ print('RPMVERIFY_RDEV')
+ if vflags & RPMVERIFY_CONTEXTS:
+ print('RPMVERIFY_CONTEXTS')
+ if vflags & RPMVERIFY_READLINKFAIL:
+ print('RPMVERIFY_READLINKFAIL')
+ if vflags & RPMVERIFY_READFAIL:
+ print('RPMVERIFY_READFAIL')
+ if vflags & RPMVERIFY_LSTATFAIL:
+ print('RPMVERIFY_LSTATFAIL')
+ if vflags & RPMVERIFY_LGETFILECONFAIL:
+ print('RPMVERIFY_LGETFILECONFAIL')
+
+def debug_file_flags(fflags):
+ """
+ Decodes the file flags bits.
+ """
+ if fflags & rpm.RPMFILE_CONFIG:
+ print('rpm.RPMFILE_CONFIG')
+
+ if fflags & rpm.RPMFILE_DOC:
+ print('rpm.RPMFILE_DOC')
+
+ if fflags & rpm.RPMFILE_ICON:
+ print('rpm.RPMFILE_ICON')
+
+ if fflags & rpm.RPMFILE_MISSINGOK:
+ print('rpm.RPMFILE_MISSINGOK')
+
+ if fflags & rpm.RPMFILE_NOREPLACE:
+ print('rpm.RPMFILE_NOREPLACE')
+
+ if fflags & rpm.RPMFILE_GHOST:
+ print('rpm.RPMFILE_GHOST')
+
+ if fflags & rpm.RPMFILE_LICENSE:
+ print('rpm.RPMFILE_LICENSE')
+
+ if fflags & rpm.RPMFILE_README:
+ print('rpm.RPMFILE_README')
+
+ if fflags & rpm.RPMFILE_EXCLUDE:
+ print('rpm.RPMFILE_EXLUDE')
+
+ if fflags & rpm.RPMFILE_UNPATCHED:
+ print('rpm.RPMFILE_UNPATCHED')
+
+ if fflags & rpm.RPMFILE_PUBKEY:
+ print('rpm.RPMFILE_PUBKEY')
+
+def rpm_verify_file(fileinfo, rpmlinktos, omitmask):
+ """
+ Verify all the files in a package.
+
+ Returns a list of error flags, the file type and file name. The list
+ entries are strings that are the same as the labels for the bitwise
+ flags used in the C code.
+
+ """
+ (fname, fsize, fmode, fmtime, fflags, frdev, finode, fnlink, fstate, \
+ vflags, fuser, fgroup, fmd5) = fileinfo
+
+ # 1. rpmtsRootDir stuff. What does it do and where do I get it from?
+
+ file_results = []
+ flags = vflags
+
+ # Check to see if the file was installed - if not pretend all is ok.
+ # This is what the rpm C code does!
+ if fstate != rpm.RPMFILE_STATE_NORMAL:
+ return file_results
+
+ # Get the installed files stats
+ try:
+ lstat = os.lstat(fname)
+ except OSError:
+ if not (fflags & (rpm.RPMFILE_MISSINGOK|rpm.RPMFILE_GHOST)):
+ file_results.append('RPMVERIFY_LSTATFAIL')
+ #file_results.append(fname)
+ return file_results
+
+ # 5. Contexts? SELinux stuff?
+
+ # Setup what checks to do. This is straight out of the C code.
+ if stat.S_ISDIR(lstat.st_mode):
+ flags &= DIR_FLAGS
+ elif stat.S_ISLNK(lstat.st_mode):
+ flags &= LINK_FLAGS
+ elif stat.S_ISFIFO(lstat.st_mode):
+ flags &= FIFO_FLAGS
+ elif stat.S_ISCHR(lstat.st_mode):
+ flags &= CHR_FLAGS
+ elif stat.S_ISBLK(lstat.st_mode):
+ flags &= BLK_FLAGS
+ else:
+ flags &= REG_FLAGS
+
+ if (fflags & rpm.RPMFILE_GHOST):
+ flags &= GHOST_FLAGS
+
+ flags &= ~(omitmask | RPMVERIFY_FAILURES)
+
+ # 8. SELinux stuff.
+
+ prelink_size = 0
+ if flags & RPMVERIFY_MD5:
+ prelink_md5, prelink_size = prelink_md5_check(fname)
+ if prelink_md5 == False:
+ file_results.append('RPMVERIFY_MD5')
+ file_results.append('RPMVERIFY_READFAIL')
+ elif prelink_md5 != fmd5:
+ file_results.append('RPMVERIFY_MD5')
+
+ if flags & RPMVERIFY_LINKTO:
+ linkto = os.readlink(fname)
+ if not linkto:
+ file_results.append('RPMVERIFY_READLINKFAIL')
+ file_results.append('RPMVERIFY_LINKTO')
+ else:
+ if len(rpmlinktos) == 0 or linkto != rpmlinktos:
+ file_results.append('RPMVERIFY_LINKTO')
+
+ if flags & RPMVERIFY_FILESIZE:
+ if not (flags & RPMVERIFY_MD5): # prelink check hasn't been done.
+ prelink_size = prelink_size_check(fname)
+ if (prelink_size != 0): # This is a prelinked file.
+ if (prelink_size != fsize):
+ file_results.append('RPMVERIFY_FILESIZE')
+ elif lstat.st_size != fsize: # It wasn't a prelinked file.
+ file_results.append('RPMVERIFY_FILESIZE')
+
+ if flags & RPMVERIFY_MODE:
+ metamode = fmode
+ filemode = lstat.st_mode
+
+ # Comparing the type of %ghost files is meaningless, but perms are ok.
+ if fflags & rpm.RPMFILE_GHOST:
+ metamode &= ~0xf000
+ filemode &= ~0xf000
+
+ if (stat.S_IFMT(metamode) != stat.S_IFMT(filemode)) or \
+ (stat.S_IMODE(metamode) != stat.S_IMODE(filemode)):
+ file_results.append('RPMVERIFY_MODE')
+
+ if flags & RPMVERIFY_RDEV:
+ if (stat.S_ISCHR(fmode) != stat.S_ISCHR(lstat.st_mode) or
+ stat.S_ISBLK(fmode) != stat.S_ISBLK(lstat.st_mode)):
+ file_results.append('RPMVERIFY_RDEV')
+ elif (s_isdev(fmode) & s_isdev(lstat.st_mode)):
+ st_rdev = lstat.st_rdev
+ if frdev != st_rdev:
+ file_results.append('RPMVERIFY_RDEV')
+
+ if flags & RPMVERIFY_MTIME:
+ if lstat.st_mtime != fmtime:
+ file_results.append('RPMVERIFY_MTIME')
+
+ if flags & RPMVERIFY_USER:
+ try:
+ user = pwd.getpwuid(lstat.st_uid)[0]
+ except KeyError:
+ user = None
+ if not user or not fuser or (user != fuser):
+ file_results.append('RPMVERIFY_USER')
+
+ if flags & RPMVERIFY_GROUP:
+ try:
+ group = grp.getgrgid(lstat.st_gid)[0]
+ except KeyError:
+ group = None
+ if not group or not fgroup or (group != fgroup):
+ file_results.append('RPMVERIFY_GROUP')
+
+ return file_results
+
+def rpm_verify_dependencies(header):
+ """
+ Check package dependencies. Header is an rpm.hdr.
+
+ Don't like opening another ts to do this, but
+ it was the only way I could find of clearing the ts
+ out.
+
+ Have asked on the rpm-maint list on how to do
+ this the right way (28 Feb 2007).
+
+ ts.check() returns:
+
+ ((name, version, release), (reqname, reqversion), \
+ flags, suggest, sense)
+
+ """
+ _ts1 = rpmtransactionset()
+ _ts1.addInstall(header, 'Dep Check', 'i')
+ dep_errors = _ts1.check()
+ _ts1.closeDB()
+ return dep_errors
+
+def rpm_verify_package(vp_ts, header, verify_options):
+ """
+ Verify a single package specified by header. Header is an rpm.hdr.
+
+ If errors are found it returns a dictionary of errors.
+
+ """
+ # Set some transaction level flags.
+ vsflags = 0
+ if 'nodigest' in verify_options:
+ vsflags |= rpm._RPMVSF_NODIGESTS
+ if 'nosignature' in verify_options:
+ vsflags |= rpm._RPMVSF_NOSIGNATURES
+ ovsflags = vp_ts.setVSFlags(vsflags)
+
+ # Map from the Python options to the rpm bitwise flags.
+ omitmask = 0
+
+ if 'nolinkto' in verify_options:
+ omitmask |= VERIFY_LINKTO
+ if 'nomd5' in verify_options:
+ omitmask |= VERIFY_MD5
+ if 'nosize' in verify_options:
+ omitmask |= VERIFY_SIZE
+ if 'nouser' in verify_options:
+ omitmask |= VERIFY_USER
+ if 'nogroup' in verify_options:
+ omitmask |= VERIFY_GROUP
+ if 'nomtime' in verify_options:
+ omitmask |= VERIFY_MTIME
+ if 'nomode' in verify_options:
+ omitmask |= VERIFY_MODE
+ if 'nordev' in verify_options:
+ omitmask |= VERIFY_RDEV
+
+ omitmask = ((~omitmask & VERIFY_ATTRS) ^ VERIFY_ATTRS)
+
+ package_results = {}
+
+ # Check Signatures and Digests.
+ # No idea what this might return. Need to break something to see.
+ # Setting the vsflags above determines what gets checked in the header.
+ hdr_stat = vp_ts.hdrCheck(header.unload())
+ if hdr_stat:
+ package_results['hdr'] = hdr_stat
+
+ # Check Package Dependencies.
+ if 'nodeps' not in verify_options:
+ dep_stat = rpm_verify_dependencies(header)
+ if dep_stat:
+ package_results['deps'] = dep_stat
+
+ # Check all the package files.
+ if 'nofiles' not in verify_options:
+ vp_fi = header.fiFromHeader()
+ for fileinfo in vp_fi:
+ # Do not bother doing anything with ghost files.
+ # This is what RPM does.
+ if fileinfo[4] & rpm.RPMFILE_GHOST:
+ continue
+
+ # This is only needed because of an inconsistency in the
+ # rpm.fi interface.
+ linktos = vp_fi.FLink()
+
+ file_stat = rpm_verify_file(fileinfo, linktos, omitmask)
+
+ #if len(file_stat) > 0 or options.verbose:
+ if len(file_stat) > 0:
+ fflags = fileinfo[4]
+ if fflags & rpm.RPMFILE_CONFIG:
+ file_stat.append('c')
+ elif fflags & rpm.RPMFILE_DOC:
+ file_stat.append('d')
+ elif fflags & rpm.RPMFILE_GHOST:
+ file_stat.append('g')
+ elif fflags & rpm.RPMFILE_LICENSE:
+ file_stat.append('l')
+ elif fflags & rpm.RPMFILE_PUBKEY:
+ file_stat.append('P')
+ elif fflags & rpm.RPMFILE_README:
+ file_stat.append('r')
+ else:
+ file_stat.append(' ')
+
+ file_stat.append(fileinfo[0]) # The filename.
+ package_results.setdefault('files', []).append(file_stat)
+
+ # Run the verify script if there is one.
+ # Do we want this?
+ #if 'noscripts' not in verify_options:
+ # script_stat = rpmVerifyscript()
+ # if script_stat:
+ # package_results['script'] = script_stat
+
+ # If there have been any errors, add the package nevra to the result.
+ if len(package_results) > 0:
+ package_results.setdefault('nevra', (header[rpm.RPMTAG_NAME], \
+ header[rpm.RPMTAG_EPOCH], \
+ header[rpm.RPMTAG_VERSION], \
+ header[rpm.RPMTAG_RELEASE], \
+ header[rpm.RPMTAG_ARCH]))
+ else:
+ package_results = None
+
+ # Put things back the way we found them.
+ vsflags = vp_ts.setVSFlags(ovsflags)
+
+ return package_results
+
+def rpm_verify(verify_ts, verify_pkgspec, verify_options=[]):
+ """
+ Requires rpmtransactionset() to be run first to get a ts.
+
+ pkgspec is a dict specifying the package
+ e.g.:
+ For a single package
+ { name='foo', epoch='20', version='1', release='1', arch='x86_64'}
+
+ For all packages
+ {}
+
+ Or any combination of keywords to select one or more packages to verify.
+
+ options is a list of 'rpm --verify' options. Default is to check everything.
+ e.g.:
+ [ 'nodeps', 'nodigest', 'nofiles', 'noscripts', 'nosignature',
+ 'nolinkto' 'nomd5', 'nosize', 'nouser', 'nogroup', 'nomtime',
+ 'nomode', 'nordev' ]
+
+ Returns a list. One list entry per package. Each list entry is a
+ dictionary. Dict keys are 'files', 'deps', 'nevra' and 'hdr'.
+ Entries only get added for the failures. If nothing failed, None is
+ returned.
+
+ Its all a bit messy and probably needs reviewing.
+
+ [ { 'hdr': [???],
+ 'deps: [((name, version, release), (reqname, reqversion),
+ flags, suggest, sense), .... ]
+ 'files': [ ['filename1', 'RPMVERIFY_GROUP', 'RPMVERIFY_USER' ],
+ ['filename2', 'RPMVERFIY_LSTATFAIL']]
+ 'nevra': ['name1', 'epoch1', 'version1', 'release1', 'arch1'] }
+ { 'hdr': [???],
+ 'deps: [((name, version, release), (reqname, reqversion),
+ flags, suggest, sense), .... ]
+ 'files': [ ['filename', 'RPMVERIFY_GROUP', 'RPMVERIFY_USER" ],
+ ['filename2', 'RPMVERFIY_LSTATFAIL']]
+ 'nevra': ['name2', 'epoch2', 'version2', 'release2', 'arch2'] } ]
+
+ """
+ verify_results = []
+ headers = getheadersbykeyword(verify_ts, **verify_pkgspec)
+ for header in headers:
+ result = rpm_verify_package(verify_ts, header, verify_options)
+ if result:
+ verify_results.append(result)
+
+ return verify_results
+
+def rpmtransactionset():
+ """
+ A simple wrapper for rpm.TransactionSet() to keep everything together.
+ Might use it to set some ts level flags later.
+
+ """
+ ts = rpm.TransactionSet()
+ return ts
+
+class Rpmtscallback(object):
+ """
+ Callback for ts.run(). Used for adding, upgrading and removing packages.
+ Starting with all possible reasons codes, but bcfg2 will probably only
+ make use of a few of them.
+
+ Mostly just printing stuff at the moment to understand how the callback
+ is used.
+
+ """
+ def __init__(self):
+ self.fdnos = {}
+
+ def callback(self, reason, amount, total, key, client_data):
+ """
+ Generic rpmts call back.
+ """
+ if reason == rpm.RPMCALLBACK_INST_OPEN_FILE:
+ pass
+ elif reason == rpm.RPMCALLBACK_INST_CLOSE_FILE:
+ pass
+ elif reason == rpm.RPMCALLBACK_INST_START:
+ pass
+ elif reason == rpm.RPMCALLBACK_TRANS_PROGRESS or \
+ reason == rpm.RPMCALLBACK_INST_PROGRESS:
+ pass
+ # rpm.RPMCALLBACK_INST_PROGRESS'
+ elif reason == rpm.RPMCALLBACK_TRANS_START:
+ pass
+ elif reason == rpm.RPMCALLBACK_TRANS_STOP:
+ pass
+ elif reason == rpm.RPMCALLBACK_REPACKAGE_START:
+ pass
+ elif reason == rpm.RPMCALLBACK_REPACKAGE_PROGRESS:
+ pass
+ elif reason == rpm.RPMCALLBACK_REPACKAGE_STOP:
+ pass
+ elif reason == rpm.RPMCALLBACK_UNINST_PROGRESS:
+ pass
+ elif reason == rpm.RPMCALLBACK_UNINST_START:
+ pass
+ elif reason == rpm.RPMCALLBACK_UNINST_STOP:
+ pass
+ # How do we get at this?
+ # RPM.modified += key
+ elif reason == rpm.RPMCALLBACK_UNPACK_ERROR:
+ pass
+ elif reason == rpm.RPMCALLBACK_CPIO_ERROR:
+ pass
+ elif reason == rpm.RPMCALLBACK_UNKNOWN:
+ pass
+ else:
+ print('ERROR - Fell through callBack')
+
+
+def rpm_erase(erase_pkgspecs, erase_flags):
+ """
+ pkgspecs is a list of pkgspec dicts specifying packages
+ e.g.:
+ For a single package
+ { name='foo', epoch='20', version='1', release='1', arch='x86_64'}
+
+ """
+ erase_ts_flags = 0
+ if 'noscripts' in erase_flags:
+ erase_ts_flags |= rpm.RPMTRANS_FLAG_NOSCRIPTS
+ if 'notriggers' in erase_flags:
+ erase_ts_flags |= rpm.RPMTRANS_FLAG_NOTRIGGERS
+ if 'repackage' in erase_flags:
+ erase_ts_flags |= rpm.RPMTRANS_FLAG_REPACKAGE
+
+ erase_ts = rpmtransactionset()
+ erase_ts.setFlags(erase_ts_flags)
+
+ for pkgspec in erase_pkgspecs:
+ idx_list = getindexbykeyword(erase_ts, **pkgspec)
+ if len(idx_list) > 1 and not 'allmatches' in erase_flags:
+ #pass
+ print('ERROR - Multiple package match for erase', pkgspec)
+ else:
+ for idx in idx_list:
+ erase_ts.addErase(idx)
+
+ #for te in erase_ts:
+
+ erase_problems = []
+ if 'nodeps' not in erase_flags:
+ erase_problems = erase_ts.check()
+
+ if erase_problems == []:
+ erase_ts.order()
+ erase_callback = Rpmtscallback()
+ erase_ts.run(erase_callback.callback, 'Erase')
+ #else:
+
+ erase_ts.closeDB()
+ del erase_ts
+ return erase_problems
+
+def display_verify_file(file_results):
+ '''
+ Display file results similar to rpm --verify.
+ '''
+ filename = file_results[-1]
+ filetype = file_results[-2]
+
+ result_string = ''
+
+ if 'RPMVERIFY_LSTATFAIL' in file_results:
+ result_string = 'missing '
+ else:
+ if 'RPMVERIFY_FILESIZE' in file_results:
+ result_string = result_string + 'S'
+ else:
+ result_string = result_string + '.'
+
+ if 'RPMVERIFY_MODE' in file_results:
+ result_string = result_string + 'M'
+ else:
+ result_string = result_string + '.'
+
+ if 'RPMVERIFY_MD5' in file_results:
+ if 'RPMVERIFY_READFAIL' in file_results:
+ result_string = result_string + '?'
+ else:
+ result_string = result_string + '5'
+ else:
+ result_string = result_string + '.'
+
+ if 'RPMVERIFY_RDEV' in file_results:
+ result_string = result_string + 'D'
+ else:
+ result_string = result_string + '.'
+
+ if 'RPMVERIFY_LINKTO' in file_results:
+ if 'RPMVERIFY_READLINKFAIL' in file_results:
+ result_string = result_string + '?'
+ else:
+ result_string = result_string + 'L'
+ else:
+ result_string = result_string + '.'
+
+ if 'RPMVERIFY_USER' in file_results:
+ result_string = result_string + 'U'
+ else:
+ result_string = result_string + '.'
+
+ if 'RPMVERIFY_GROUP' in file_results:
+ result_string = result_string + 'G'
+ else:
+ result_string = result_string + '.'
+
+ if 'RPMVERIFY_MTIME' in file_results:
+ result_string = result_string + 'T'
+ else:
+ result_string = result_string + '.'
+
+ print(result_string + ' ' + filetype + ' ' + filename)
+ sys.stdout.flush()
+
+#===============================================================================
+# Some options and output to assist with development and testing.
+# These are not intended for normal use.
+if __name__ == "__main__":
+
+ p = optparse.OptionParser()
+
+ p.add_option('--name', action='store', \
+ default=None, \
+ help='''Package name to verify.
+
+ ******************************************
+ NOT SPECIFYING A NAME MEANS 'ALL' PACKAGES.
+ ******************************************
+
+ The specified operation will be carried out on all
+ instances of packages that match the package specification
+ (name, epoch, version, release, arch).''')
+
+ p.add_option('--epoch', action='store', \
+ default=None, \
+ help='''Package epoch.''')
+
+ p.add_option('--version', action='store', \
+ default=None, \
+ help='''Package version.''')
+
+ p.add_option('--release', action='store', \
+ default=None, \
+ help='''Package release.''')
+
+ p.add_option('--arch', action='store', \
+ default=None, \
+ help='''Package arch.''')
+
+ p.add_option('--erase', '-e', action='store_true', \
+ default=None, \
+ help='''****************************************************
+ REMOVE PACKAGES. THERE ARE NO WARNINGS. MULTIPLE
+ PACKAGES WILL BE REMOVED IF A FULL PACKAGE SPEC IS NOT
+ GIVEN. E.G. IF JUST A NAME IS GIVEN ALL INSTALLED
+ INSTANCES OF THAT PACKAGE WILL BE REMOVED PROVIDED
+ DEPENDENCY CHECKS PASS. IF JUST AN EPOCH IS GIVEN
+ ALL PACKAGE INSTANCES WITH THAT EPOCH WILL BE REMOVED.
+ ****************************************************''')
+
+ p.add_option('--list', '-l', action='store_true', \
+ help='''List package identity info; roughly equivalent to
+ rpm -qa. Intended for use in RefreshPackages().''')
+
+ p.add_option('--verify', action='store_true', \
+ help='''Verify package(s). Output is only produced after all
+ packages have been verified. Be patient.''')
+
+ p.add_option('--verbose', '-v', action='store_true', \
+ help='''Verbose output for --verify option. Output is the
+ same as rpm -v --verify.''')
+
+ p.add_option('--nodeps', action='store_true', \
+ default=False, \
+ help='Do not do dependency testing.')
+
+ p.add_option('--nodigest', action='store_true', \
+ help='Do not check package digests.')
+
+ p.add_option('--nofiles', action='store_true', \
+ help='Do not do file checks.')
+
+ p.add_option('--noscripts', action='store_true', \
+ help='Do not run verification scripts.')
+
+ p.add_option('--nosignature', action='store_true', \
+ help='Do not do package signature verification.')
+
+ p.add_option('--nolinkto', action='store_true', \
+ help='Do not do symlink tests.')
+
+ p.add_option('--nomd5', action='store_true', \
+ help='''Do not do MD5 checksums on files. Note that this does
+ not work for prelink files yet.''')
+
+ p.add_option('--nosize', action='store_true', \
+ help='''Do not do file size tests. Note that this does not work
+ for prelink files yet.''')
+
+ p.add_option('--nouser', action='store_true', \
+ help='Do not check file user ownership.')
+
+ p.add_option('--nogroup', action='store_true', \
+ help='Do not check file group ownership.')
+
+ p.add_option('--nomtime', action='store_true', \
+ help='Do not check file modification times.')
+
+ p.add_option('--nomode', action='store_true', \
+ help='Do not check file modes (permissions).')
+
+ p.add_option('--nordev', action='store_true', \
+ help='Do not check device nodes.')
+
+ p.add_option('--notriggers', action='store_true', \
+ help='Do not generate triggers on erase.')
+
+ p.add_option('--repackage', action='store_true', \
+ help='''Repackage on erase. Packages are put
+ in /var/spool/repackage.''')
+
+ p.add_option('--allmatches', action='store_true', \
+ help='''Remove all package instances that match the
+ pkgspec.
+
+ ***************************************************
+ NO WARNINGS ARE GIVEN. IF THERE IS NO PACKAGE SPEC
+ THAT MEANS ALL PACKAGES!!!!
+ ***************************************************''')
+
+ options, arguments = p.parse_args()
+
+ pkgspec = {}
+ rpm_options = []
+
+ if options.nodeps:
+ rpm_options.append('nodeps')
+
+ if options.nodigest:
+ rpm_options.append('nodigest')
+
+ if options.nofiles:
+ rpm_options.append('nofiles')
+
+ if options.noscripts:
+ rpm_options.append('noscripts')
+
+ if options.nosignature:
+ rpm_options.append('nosignature')
+
+ if options.nolinkto:
+ rpm_options.append('nolinkto')
+
+ if options.nomd5:
+ rpm_options.append('nomd5')
+
+ if options.nosize:
+ rpm_options.append('nosize')
+
+ if options.nouser:
+ rpm_options.append('nouser')
+
+ if options.nogroup:
+ rpm_options.append('nogroup')
+
+ if options.nomtime:
+ rpm_options.append('nomtime')
+
+ if options.nomode:
+ rpm_options.append('nomode')
+
+ if options.nordev:
+ rpm_options.append('nordev')
+
+ if options.repackage:
+ rpm_options.append('repackage')
+
+ if options.allmatches:
+ rpm_options.append('allmatches')
+
+ main_ts = rpmtransactionset()
+
+ cmdline_pkgspec = {}
+ if options.name != 'all':
+ if options.name:
+ cmdline_pkgspec['name'] = str(options.name)
+ if options.epoch:
+ cmdline_pkgspec['epoch'] = str(options.epoch)
+ if options.version:
+ cmdline_pkgspec['version'] = str(options.version)
+ if options.release:
+ cmdline_pkgspec['release'] = str(options.release)
+ if options.arch:
+ cmdline_pkgspec['arch'] = str(options.arch)
+
+ if options.verify:
+ results = rpm_verify(main_ts, cmdline_pkgspec, rpm_options)
+ for r in results:
+ files = r.get('files', [])
+ for f in files:
+ display_verify_file(f)
+
+ elif options.list:
+ for p in rpmpackagelist(main_ts):
+ print(p)
+
+ elif options.erase:
+ if options.name:
+ rpm_erase([cmdline_pkgspec], rpm_options)
+ else:
+ print('You must specify the "--name" option')
+
class RPM(Bcfg2.Client.Tools.PkgTool):
"""Support for RPM packages."""
@@ -26,13 +1094,11 @@ class RPM(Bcfg2.Client.Tools.PkgTool):
__new_gpg_ireq__ = {'Package': ['name'],
'Instance': ['version', 'release']}
- conflicts = ['RPMng']
-
pkgtype = 'rpm'
pkgtool = ("rpm --oldpackage --replacepkgs --quiet -U %s", ("%s", ["url"]))
- def __init__(self, logger, setup, config):
- Bcfg2.Client.Tools.PkgTool.__init__(self, logger, setup, config)
+ def __init__(self, config):
+ Bcfg2.Client.Tools.PkgTool.__init__(self, config)
# create a global ignore list used when ignoring particular
# files during package verification
@@ -104,11 +1170,11 @@ class RPM(Bcfg2.Client.Tools.PkgTool):
'arch':'x86_64'} ]
"""
self.installed = {}
- refresh_ts = rpmtools.rpmtransactionset()
+ refresh_ts = rpmtransactionset()
# Don't bother with signature checks at this stage. The GPG keys might
# not be installed.
refresh_ts.setVSFlags(rpm._RPMVSF_NODIGESTS|rpm._RPMVSF_NOSIGNATURES)
- for nevra in rpmtools.rpmpackagelist(refresh_ts):
+ for nevra in rpmpackagelist(refresh_ts):
self.installed.setdefault(nevra['name'], []).append(nevra)
if self.setup['debug']:
print("The following package instances are installed:")
@@ -215,7 +1281,7 @@ class RPM(Bcfg2.Client.Tools.PkgTool):
self.logger.debug(' Disabling signature check.')
if self.setup.get('quick', False):
- if rpmtools.prelink_exists:
+ if prelink_exists:
flags += ['nomd5', 'nosize']
else:
flags += ['nomd5']
@@ -224,9 +1290,9 @@ class RPM(Bcfg2.Client.Tools.PkgTool):
if inst.get('verify', 'true') == 'false':
self.instance_status[inst]['verify'] = None
else:
- vp_ts = rpmtools.rpmtransactionset()
+ vp_ts = rpmtransactionset()
self.instance_status[inst]['verify'] = \
- rpmtools.rpm_verify( vp_ts, pkg, flags)
+ rpm_verify( vp_ts, pkg, flags)
vp_ts.closeDB()
del vp_ts
@@ -274,7 +1340,7 @@ class RPM(Bcfg2.Client.Tools.PkgTool):
self.logger.info(' Disabling signature check.')
if self.setup.get('quick', False):
- if rpmtools.prelink_exists:
+ if prelink_exists:
flags += ['nomd5', 'nosize']
else:
flags += ['nomd5']
@@ -283,9 +1349,9 @@ class RPM(Bcfg2.Client.Tools.PkgTool):
if inst.get('verify', 'true') == 'false':
self.instance_status[inst]['verify'] = None
else:
- vp_ts = rpmtools.rpmtransactionset()
+ vp_ts = rpmtransactionset()
self.instance_status[inst]['verify'] = \
- rpmtools.rpm_verify( vp_ts, pkg, flags )
+ rpm_verify( vp_ts, pkg, flags )
vp_ts.closeDB()
del vp_ts
@@ -436,7 +1502,7 @@ class RPM(Bcfg2.Client.Tools.PkgTool):
self.logger.info(" This package will be deleted in a future version of the RPM driver.")
#pkgspec_list.append(pkg_spec)
- erase_results = rpmtools.rpm_erase(pkgspec_list, self.erase_flags)
+ erase_results = rpm_erase(pkgspec_list, self.erase_flags)
if erase_results == []:
self.modified += packages
for pkg in pkgspec_list:
@@ -464,7 +1530,7 @@ class RPM(Bcfg2.Client.Tools.PkgTool):
% (pkgspec.get('name'), self.str_evra(pkgspec)))
self.logger.info(" This package will be deleted in a future version of the RPM driver.")
continue # Don't delete the gpg-pubkey packages for now.
- erase_results = rpmtools.rpm_erase([pkgspec], self.erase_flags)
+ erase_results = rpm_erase([pkgspec], self.erase_flags)
if erase_results == []:
pkg_modified = True
self.logger.info("Deleted %s %s" % \
@@ -538,7 +1604,7 @@ class RPM(Bcfg2.Client.Tools.PkgTool):
return fix
- def Install(self, packages, states):
+ def Install(self, packages):
"""
Try and fix everything that RPM.VerifyPackages() found wrong for
each Package Entry. This can result in individual RPMs being
@@ -559,6 +1625,7 @@ class RPM(Bcfg2.Client.Tools.PkgTool):
"""
self.logger.info('Running RPM.Install()')
+ states = dict()
install_only_pkgs = []
gpg_keys = []
upgrade_pkgs = []
@@ -683,8 +1750,8 @@ class RPM(Bcfg2.Client.Tools.PkgTool):
states[pkg_entry] = self.VerifyPackage(pkg_entry, \
self.modlists.get(pkg_entry, []))
- for entry in [ent for ent in packages if states[ent]]:
- self.modified.append(entry)
+ self.modified.extend(ent for ent in packages if states[ent])
+ return states
def canInstall(self, entry):
"""Test if entry has enough information to be installed."""
@@ -966,9 +2033,9 @@ class RPM(Bcfg2.Client.Tools.PkgTool):
(big-endian) of the key ID which is good enough for our purposes.
"""
- init_ts = rpmtools.rpmtransactionset()
+ init_ts = rpmtransactionset()
init_ts.setVSFlags(rpm._RPMVSF_NODIGESTS|rpm._RPMVSF_NOSIGNATURES)
- gpg_hdrs = rpmtools.getheadersbykeyword(init_ts, **{'name':'gpg-pubkey'})
+ gpg_hdrs = getheadersbykeyword(init_ts, **{'name':'gpg-pubkey'})
keyids = [ header[rpm.RPMTAG_VERSION] for header in gpg_hdrs]
keyids.append('None')
init_ts.closeDB()
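
For orientation, the rpmtools helpers inlined into RPM.py above compose roughly as
follows. This is a minimal sketch rather than code from the tree; it assumes an
RPM-based host with the rpm Python bindings available, and 'coreutils' is only an
example package name.

    ts = rpmtransactionset()
    # Skip signature/digest checks, mirroring RefreshPackages() above.
    ts.setVSFlags(rpm._RPMVSF_NODIGESTS | rpm._RPMVSF_NOSIGNATURES)
    # Verify a single package spec; the flag strings match the command-line
    # options handled in the __main__ block ('nomd5', 'nosize', 'nodeps', ...).
    for result in rpm_verify(ts, {'name': 'coreutils'}, ['nodeps']):
        for file_result in result.get('files', []):
            display_verify_file(file_result)
    ts.closeDB()
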
diff --git a/src/lib/Bcfg2/Client/Tools/RPMng.py b/src/lib/Bcfg2/Client/Tools/RPMng.py
deleted file mode 100644
index 0f0e4c700..000000000
--- a/src/lib/Bcfg2/Client/Tools/RPMng.py
+++ /dev/null
@@ -1,9 +0,0 @@
-""" RPM driver called 'RPMng' for backwards compat """
-
-from Bcfg2.Client.Tools.RPM import RPM
-
-
-class RPMng(RPM):
- """ RPM driver called 'RPMng' for backwards compat """
- deprecated = True
- name = "RPM"
diff --git a/src/lib/Bcfg2/Client/Tools/SELinux.py b/src/lib/Bcfg2/Client/Tools/SELinux.py
index 0b4aba60d..92572ef1d 100644
--- a/src/lib/Bcfg2/Client/Tools/SELinux.py
+++ b/src/lib/Bcfg2/Client/Tools/SELinux.py
@@ -7,6 +7,7 @@ import copy
import glob
import struct
import socket
+import logging
import selinux
import seobject
import Bcfg2.Client.XML
@@ -77,14 +78,13 @@ class SELinux(Bcfg2.Client.Tools.Tool):
SEPort=['name', 'selinuxtype'],
SEUser=['name', 'roles', 'prefix'])
- def __init__(self, logger, setup, config):
- Bcfg2.Client.Tools.Tool.__init__(self, logger, setup, config)
+ def __init__(self, config):
+ Bcfg2.Client.Tools.Tool.__init__(self, config)
self.handlers = {}
for handler in self.__handles__:
etype = handler[0]
self.handlers[etype] = \
- globals()["SELinux%sHandler" % etype.title()](self, logger,
- setup, config)
+ globals()["SELinux%sHandler" % etype.title()](self, config)
self.txn = False
self.post_txn_queue = []
@@ -100,10 +100,6 @@ class SELinux(Bcfg2.Client.Tools.Tool):
# http://docs.python.org/2/reference/datamodel.html#object.__getattr__
# for details
- def BundleUpdated(self, _, states):
- for handler in self.handlers.values():
- handler.BundleUpdated(states)
-
def FindExtra(self):
extra = []
for handler in self.handlers.values():
@@ -119,7 +115,7 @@ class SELinux(Bcfg2.Client.Tools.Tool):
in the specification """
return self.handlers[entry.tag].primarykey(entry)
- def Install(self, entries, states):
+ def Install(self, entries):
# start a transaction
semanage = seobject.semanageRecords("")
if hasattr(semanage, "start"):
@@ -129,13 +125,14 @@ class SELinux(Bcfg2.Client.Tools.Tool):
else:
self.logger.debug("SELinux transactions not supported; this may "
"slow things down considerably")
- Bcfg2.Client.Tools.Tool.Install(self, entries, states)
+ states = Bcfg2.Client.Tools.Tool.Install(self, entries)
if hasattr(semanage, "finish"):
self.logger.debug("Committing SELinux transaction")
semanage.finish()
self.txn = False
for func, arg, kwargs in self.post_txn_queue:
states[arg] = func(*arg, **kwargs)
+ return states
def GenericSEInstall(self, entry):
"""Dispatch install to the proper method according to entry tag"""
@@ -174,10 +171,10 @@ class SELinuxEntryHandler(object):
custom_re = re.compile(r' (?P<name>\S+)$')
custom_format = None
- def __init__(self, tool, logger, setup, config):
+ def __init__(self, tool, config):
self.tool = tool
- self.logger = logger
- self.setup = setup
+ self.logger = logging.getLogger(self.__class__.__name__)
+ self.setup = tool.setup
self.config = config
self._records = None
self._all = None
@@ -379,11 +376,6 @@ class SELinuxEntryHandler(object):
for key in records.keys()
if key not in specified]
- def BundleUpdated(self, states):
- """ perform any additional magic tasks that need to be run
- when a bundle is updated """
- pass
-
class SELinuxSebooleanHandler(SELinuxEntryHandler):
""" handle SELinux boolean entries """
@@ -631,8 +623,8 @@ class SELinuxSeuserHandler(SELinuxEntryHandler):
etype = "user"
value_format = ("prefix", None, None, "roles")
- def __init__(self, tool, logger, setup, config):
- SELinuxEntryHandler.__init__(self, tool, logger, setup, config)
+ def __init__(self, tool, config):
+ SELinuxEntryHandler.__init__(self, tool, config)
self.needs_prefix = False
@property
@@ -725,9 +717,9 @@ class SELinuxSemoduleHandler(SELinuxEntryHandler):
etype = "module"
value_format = (None, "disabled")
- def __init__(self, tool, logger, setup, config):
- SELinuxEntryHandler.__init__(self, tool, logger, setup, config)
- self.filetool = POSIXFile(logger, setup, config)
+ def __init__(self, tool, config):
+ SELinuxEntryHandler.__init__(self, tool, config)
+ self.filetool = POSIXFile(config)
try:
self.setype = selinux.selinux_getpolicytype()[1]
except IndexError:
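
The handler changes above all follow one pattern: a handler now receives only the
parent tool and the config, derives its logger from its own class name, and borrows
setup from the tool. Below is a minimal sketch against the revised
SELinuxEntryHandler; the handler name and its etype are hypothetical, purely for
illustration.

    class SELinuxSefooHandler(SELinuxEntryHandler):
        """Hypothetical handler showing the new (tool, config) constructor."""
        etype = "foo"  # made-up entry type, for illustration only

        def __init__(self, tool, config):
            SELinuxEntryHandler.__init__(self, tool, config)
            # The base class now provides:
            #   self.logger -> logging.getLogger("SELinuxSefooHandler")
            #   self.setup  -> tool.setup (shared with the parent SELinux tool)
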
diff --git a/src/lib/Bcfg2/Client/Tools/SYSV.py b/src/lib/Bcfg2/Client/Tools/SYSV.py
index aca7d593c..7be7b6fa3 100644
--- a/src/lib/Bcfg2/Client/Tools/SYSV.py
+++ b/src/lib/Bcfg2/Client/Tools/SYSV.py
@@ -32,8 +32,8 @@ class SYSV(Bcfg2.Client.Tools.PkgTool):
pkgtype = 'sysv'
pkgtool = ("/usr/sbin/pkgadd %s -n -d %%s", (('%s %s', ['url', 'name'])))
- def __init__(self, logger, setup, config):
- Bcfg2.Client.Tools.PkgTool.__init__(self, logger, setup, config)
+ def __init__(self, config):
+ Bcfg2.Client.Tools.PkgTool.__init__(self, config)
# noaskfile needs to live beyond __init__ otherwise file is removed
self.noaskfile = tempfile.NamedTemporaryFile()
self.noaskname = self.noaskfile.name
diff --git a/src/lib/Bcfg2/Client/Tools/YUM.py b/src/lib/Bcfg2/Client/Tools/YUM.py
index c30c0a13a..147615f47 100644
--- a/src/lib/Bcfg2/Client/Tools/YUM.py
+++ b/src/lib/Bcfg2/Client/Tools/YUM.py
@@ -3,6 +3,7 @@
import copy
import os.path
import sys
+import logging
import yum
import yum.packages
import yum.rpmtrans
@@ -12,6 +13,7 @@ import yum.misc
import rpmUtils.arch
import Bcfg2.Client.XML
import Bcfg2.Client.Tools
+import Bcfg2.Options
def build_yname(pkgname, inst):
@@ -65,13 +67,13 @@ class RPMDisplay(yum.rpmtrans.RPMBaseCallback):
"""We subclass the default RPM transaction callback so that we
can control Yum's verbosity and pipe it through the right logger."""
- def __init__(self, logger):
+ def __init__(self):
yum.rpmtrans.RPMBaseCallback.__init__(self)
# we want to log events to *both* the Bcfg2 logger (which goes
# to stderr or syslog or wherever the user wants it to go)
# *and* the yum file logger, which will go to yum.log (ticket
# #1103)
- self.bcfg2_logger = logger
+ self.bcfg2_logger = logging.getLogger(self.__class__.__name__)
self.state = None
self.package = None
@@ -110,9 +112,9 @@ class YumDisplay(yum.callbacks.ProcessTransBaseCallback):
"""Class to handle display of what step we are in the Yum transaction
such as downloading packages, etc."""
- def __init__(self, logger):
+ def __init__(self):
yum.callbacks.ProcessTransBaseCallback.__init__(self)
- self.logger = logger
+ self.logger = logging.getLogger(self.__class__.__name__)
class YUM(Bcfg2.Client.Tools.PkgTool):
@@ -126,11 +128,11 @@ class YUM(Bcfg2.Client.Tools.PkgTool):
__req__ = {'Package': ['type'],
'Path': ['type']}
- conflicts = ['YUM24', 'RPM', 'RPMng', 'YUMng']
+ conflicts = ['RPM']
- def __init__(self, logger, setup, config):
- self.yumbase = self._loadYumBase(setup=setup, logger=logger)
- Bcfg2.Client.Tools.PkgTool.__init__(self, logger, setup, config)
+ def __init__(self, config):
+ self.yumbase = self._loadYumBase()
+ Bcfg2.Client.Tools.PkgTool.__init__(self, config)
self.ignores = []
for struct in config:
self.ignores.extend([entry.get('name')
@@ -192,22 +194,23 @@ class YUM(Bcfg2.Client.Tools.PkgTool):
self.logger.debug("Yum: installonlypkgs: %s" % self.installonlypkgs)
self.logger.debug("Yum: verify_flags: %s" % self.verify_flags)
- def _loadYumBase(self, setup=None, logger=None):
+ def _loadYumBase(self):
''' this may be called before PkgTool.__init__() is called on
this object (when the YUM object is first instantiated;
PkgTool.__init__() calls RefreshPackages(), which requires a
YumBase object already exist), or after __init__() has
completed, when we reload the yum config before installing
- packages. Consequently, we support both methods by allowing
- setup and logger, the only object properties we use in this
- function, to be passed as keyword arguments or to be omitted
- and drawn from the object itself.'''
+ packages. '''
rv = yum.YumBase() # pylint: disable=C0103
- if setup is None:
+ if hasattr(self, "setup"):
setup = self.setup
- if logger is None:
+ else:
+ setup = Bcfg2.Options.get_option_parser()
+ if hasattr(self, "logger"):
logger = self.logger
+ else:
+ logger = logging.getLogger(self.name)
if setup['debug']:
debuglevel = 3
@@ -816,8 +819,8 @@ class YUM(Bcfg2.Client.Tools.PkgTool):
self.yumbase.closeRpmDB()
self.RefreshPackages()
- rpm_display = RPMDisplay(self.logger)
- yum_display = YumDisplay(self.logger)
+ rpm_display = RPMDisplay()
+ yum_display = YumDisplay()
# Run the Yum Transaction
try:
rescode, restring = self.yumbase.buildTransaction()
@@ -866,7 +869,7 @@ class YUM(Bcfg2.Client.Tools.PkgTool):
cleanup()
- def Install(self, packages, states): # pylint: disable=R0912,R0914
+ def Install(self, packages): # pylint: disable=R0912,R0914
""" Try and fix everything that Yum.VerifyPackages() found
wrong for each Package Entry. This can result in individual
RPMs being installed (for the first time), deleted, downgraded
@@ -884,6 +887,7 @@ class YUM(Bcfg2.Client.Tools.PkgTool):
entry is set to True. """
self.logger.debug('Running Yum.Install()')
+ states = dict()
install_pkgs = []
gpg_keys = []
upgrade_pkgs = []
@@ -1014,8 +1018,8 @@ class YUM(Bcfg2.Client.Tools.PkgTool):
self.VerifyPackage(pkg_entry,
self.modlists.get(pkg_entry, []))
- for entry in [ent for ent in packages if states[ent]]:
- self.modified.append(entry)
+ self.modified.extend(ent for ent in packages if states[ent])
+ return states
def Remove(self, packages):
"""
diff --git a/src/lib/Bcfg2/Client/Tools/YUM24.py b/src/lib/Bcfg2/Client/Tools/YUM24.py
deleted file mode 100644
index d78127ddd..000000000
--- a/src/lib/Bcfg2/Client/Tools/YUM24.py
+++ /dev/null
@@ -1,399 +0,0 @@
-"""This provides bcfg2 support for yum."""
-
-import copy
-import os.path
-import sys
-import yum
-import Bcfg2.Client.XML
-from Bcfg2.Client.Tools.RPM import RPM
-
-
-def build_yname(pkgname, inst):
- """Build yum appropriate package name."""
- ypname = pkgname
- if inst.get('version') != 'any':
- ypname += '-'
- if inst.get('epoch', False):
- ypname += "%s:" % inst.get('epoch')
- if inst.get('version', False) and inst.get('version') != 'any':
- ypname += "%s" % (inst.get('version'))
- if inst.get('release', False) and inst.get('release') != 'any':
- ypname += "-%s" % (inst.get('release'))
- if inst.get('arch', False) and inst.get('arch') != 'any':
- ypname += ".%s" % (inst.get('arch'))
- return ypname
-
-
-class YUM24(RPM):
- """Support for Yum packages."""
- pkgtype = 'yum'
- deprecated = True
- __execs__ = ['/usr/bin/yum', '/var/lib/rpm']
- __handles__ = [('Package', 'yum'),
- ('Package', 'rpm'),
- ('Path', 'ignore')]
-
- __req__ = {'Package': ['name', 'version']}
- __ireq__ = {'Package': ['name']}
- #__ireq__ = {'Package': ['name', 'version']}
-
- __new_req__ = {'Package': ['name'],
- 'Instance': ['version', 'release', 'arch']}
- __new_ireq__ = {'Package': ['name'], \
- 'Instance': []}
- #__new_ireq__ = {'Package': ['name', 'uri'], \
- # 'Instance': ['simplefile', 'version', 'release', 'arch']}
-
- __gpg_req__ = {'Package': ['name', 'version']}
- __gpg_ireq__ = {'Package': ['name', 'version']}
-
- __new_gpg_req__ = {'Package': ['name'],
- 'Instance': ['version', 'release']}
- __new_gpg_ireq__ = {'Package': ['name'],
- 'Instance': ['version', 'release']}
-
- def __init__(self, logger, setup, config):
- RPM.__init__(self, logger, setup, config)
- self.__important__ = self.__important__ + \
- [entry.get('name') for struct in config \
- for entry in struct \
- if entry.tag in ['Path', 'ConfigFile'] and \
- (entry.get('name').startswith('/etc/yum.d') \
- or entry.get('name').startswith('/etc/yum.repos.d')) \
- or entry.get('name') == '/etc/yum.conf']
- self.autodep = setup.get("yum24_autodep")
- self.yum_avail = dict()
- self.yum_installed = dict()
- self.yb = yum.YumBase()
- self.yb.doConfigSetup()
- self.yb.doTsSetup()
- self.yb.doRpmDBSetup()
- yup = self.yb.doPackageLists(pkgnarrow='updates')
- if hasattr(self.yb.rpmdb, 'pkglist'):
- yinst = self.yb.rpmdb.pkglist
- else:
- yinst = self.yb.rpmdb.getPkgList()
- for dest, source in [(self.yum_avail, yup.updates),
- (self.yum_installed, yinst)]:
- for pkg in source:
- if dest is self.yum_avail:
- pname = pkg.name
- data = {pkg.arch: (pkg.epoch, pkg.version, pkg.release)}
- else:
- pname = pkg[0]
- if pkg[1] is None:
- a = 'noarch'
- else:
- a = pkg[1]
- if pkg[2] is None:
- e = '0'
- else:
- e = pkg[2]
- data = {a: (e, pkg[3], pkg[4])}
- if pname in dest:
- dest[pname].update(data)
- else:
- dest[pname] = dict(data)
-
- def VerifyPackage(self, entry, modlist):
- pinned_version = None
- if entry.get('version', False) == 'auto':
- # old style entry; synthesize Instances from current installed
- if entry.get('name') not in self.yum_installed and \
- entry.get('name') not in self.yum_avail:
- # new entry; fall back to default
- entry.set('version', 'any')
- else:
- data = copy.copy(self.yum_installed[entry.get('name')])
- if entry.get('name') in self.yum_avail:
- # installed but out of date
- data.update(self.yum_avail[entry.get('name')])
- for (arch, (epoch, vers, rel)) in list(data.items()):
- x = Bcfg2.Client.XML.SubElement(entry, "Instance",
- name=entry.get('name'),
- version=vers, arch=arch,
- release=rel, epoch=epoch)
- if 'verify_flags' in entry.attrib:
- x.set('verify_flags', entry.get('verify_flags'))
- if 'verify' in entry.attrib:
- x.set('verify', entry.get('verify'))
-
- if entry.get('type', False) == 'yum':
- # Check for virtual provides or packages. If we don't have
- # this package use Yum to resolve it to a real package name
- knownPkgs = list(self.yum_installed.keys()) + list(self.yum_avail.keys())
- if entry.get('name') not in knownPkgs:
- # If the package name matches something installed
- # or available the that's the correct package.
- try:
- pkgDict = dict([(i.name, i) for i in \
- self.yb.returnPackagesByDep(entry.get('name'))])
- except yum.Errors.YumBaseError:
- e = sys.exc_info()[1]
- self.logger.error('Yum Error Depsolving for %s: %s' % \
- (entry.get('name'), str(e)))
- pkgDict = {}
-
- if len(pkgDict) > 1:
- # What do we do with multiple packages?
- s = "YUM24: returnPackagesByDep(%s) returned many packages"
- self.logger.info(s % entry.get('name'))
- s = "YUM24: matching packages: %s"
- self.logger.info(s % str(list(pkgDict.keys())))
- pkgs = set(pkgDict.keys()) & set(self.yum_installed.keys())
- if len(pkgs) > 0:
- # Virtual packages matches an installed real package
- pkg = pkgDict[pkgs.pop()]
- s = "YUM24: chosing: %s" % pkg.name
- self.logger.info(s)
- else:
- # What's the right package? This will fail verify
- # and Yum should Do The Right Thing on package install
- pkg = None
- elif len(pkgDict) == 1:
- pkg = list(pkgDict.values())[0]
- else: # len(pkgDict) == 0
- s = "YUM24: returnPackagesByDep(%s) returned no results"
- self.logger.info(s % entry.get('name'))
- pkg = None
-
- if pkg is not None:
- s = "YUM24: remapping virtual package %s to %s"
- self.logger.info(s % (entry.get('name'), pkg.name))
- entry.set('name', pkg.name)
-
- return RPM.VerifyPackage(self, entry, modlist)
-
- def Install(self, packages, states):
- """
- Try and fix everything that YUM24.VerifyPackages() found wrong for
- each Package Entry. This can result in individual RPMs being
- installed (for the first time), deleted, downgraded
- or upgraded.
-
- NOTE: YUM can not reinstall a package that it thinks is already
- installed.
-
- packages is a list of Package Elements that has
- states[<Package Element>] == False
-
- The following effects occur:
- - states{} is conditionally updated for each package.
- - self.installed{} is rebuilt, possibly multiple times.
- - self.instance_status{} is conditionally updated for each instance
- of a package.
- - Each package will be added to self.modified[] if its states{}
- entry is set to True.
-
- """
- self.logger.info('Running YUM24.Install()')
-
- install_pkgs = []
- gpg_keys = []
- upgrade_pkgs = []
-
- # Remove extra instances.
- # Can not reverify because we don't have a package entry.
- if len(self.extra_instances) > 0:
- if (self.setup.get('remove') == 'all' or \
- self.setup.get('remove') == 'packages'):
- self.Remove(self.extra_instances)
- else:
- self.logger.info("The following extra package instances will be removed by the '-r' option:")
- for pkg in self.extra_instances:
- for inst in pkg:
- self.logger.info(" %s %s" % \
- ((pkg.get('name'), self.str_evra(inst))))
-
- # Figure out which instances of the packages actually need something
- # doing to them and place in the appropriate work 'queue'.
- for pkg in packages:
- insts = [pinst for pinst in pkg \
- if pinst.tag in ['Instance', 'Package']]
- if insts:
- for inst in insts:
- if self.FixInstance(inst, self.instance_status[inst]):
- if self.instance_status[inst].get('installed', False) \
- == False:
- if pkg.get('name') == 'gpg-pubkey':
- gpg_keys.append(inst)
- else:
- install_pkgs.append(inst)
- elif self.instance_status[inst].get('version_fail', \
- False) == True:
- upgrade_pkgs.append(inst)
- else:
- install_pkgs.append(pkg)
-
- # Install GPG keys.
- # Alternatively specify the required keys using 'gpgkey' in the
- # repository definition in yum.conf. YUM will install the keys
- # automatically.
- if len(gpg_keys) > 0:
- for inst in gpg_keys:
- self.logger.info("Installing GPG keys.")
- if inst.get('simplefile') is None:
- self.logger.error("GPG key has no simplefile attribute")
- continue
- key_arg = os.path.join(self.instance_status[inst].get('pkg').get('uri'), \
- inst.get('simplefile'))
- if self.cmd.run("rpm --import %s" % key_arg).success:
- self.logger.debug("Unable to install %s-%s" % \
- (self.instance_status[inst].get('pkg').get('name'), \
- self.str_evra(inst)))
- else:
- self.logger.debug("Installed %s-%s-%s" % \
- (self.instance_status[inst].get('pkg').get('name'), \
- inst.get('version'), inst.get('release')))
- self.RefreshPackages()
- self.gpg_keyids = self.getinstalledgpg()
- pkg = self.instance_status[gpg_keys[0]].get('pkg')
- states[pkg] = self.VerifyPackage(pkg, [])
-
- # Install packages.
- if len(install_pkgs) > 0:
- self.logger.info("Attempting to install packages")
-
- if self.autodep:
- pkgtool = "/usr/bin/yum -d0 -y install %s"
- else:
- pkgtool = "/usr/bin/yum -d0 install %s"
-
- install_args = []
- for inst in install_pkgs:
- pkg_arg = self.instance_status[inst].get('pkg').get('name')
- install_args.append(build_yname(pkg_arg, inst))
-
- if self.cmd.run(pkgtool % " ".join(install_args)).success:
- # The yum command succeeded. All packages installed.
- self.logger.info("Single Pass for Install Succeeded")
- self.RefreshPackages()
- else:
- # The yum command failed. No packages installed.
- # Try installing instances individually.
- self.logger.error("Single Pass Install of Packages Failed")
- installed_instances = []
- for inst in install_pkgs:
- pkg_arg = build_yname(self.instance_status[inst].get('pkg').get('name'), inst)
-
- if self.cmd.run(pkgtool % pkg_arg).success:
- installed_instances.append(inst)
- else:
- self.logger.debug("%s %s would not install." %
- (self.instance_status[inst].get('pkg').get('name'),
- self.str_evra(inst)))
- self.RefreshPackages()
-
- # Fix upgradeable packages.
- if len(upgrade_pkgs) > 0:
- self.logger.info("Attempting to upgrade packages")
-
- if self.autodep:
- pkgtool = "/usr/bin/yum -d0 -y update %s"
- else:
- pkgtool = "/usr/bin/yum -d0 update %s"
-
- upgrade_args = []
- for inst in upgrade_pkgs:
- pkg_arg = build_yname(self.instance_status[inst].get('pkg').get('name'), inst)
- upgrade_args.append(pkg_arg)
-
- if self.cmd.run(pkgtool % " ".join(upgrade_args)).success:
- # The yum command succeeded. All packages installed.
- self.logger.info("Single Pass for Install Succeeded")
- self.RefreshPackages()
- else:
- # The yum command failed. No packages installed.
- # Try installing instances individually.
- self.logger.error("Single Pass Install of Packages Failed")
- installed_instances = []
- for inst in upgrade_pkgs:
- pkg_arg = build_yname(self.instance_status[inst].get('pkg').get('name'), inst)
- if self.cmd.run(pkgtool % pkg_arg).success:
- installed_instances.append(inst)
- else:
- self.logger.debug("%s %s would not install." % \
- (self.instance_status[inst].get('pkg').get('name'), \
- self.str_evra(inst)))
-
- self.RefreshPackages()
-
- if not self.setup['kevlar']:
- for pkg_entry in [p for p in packages if self.canVerify(p)]:
- self.logger.debug("Reverifying Failed Package %s" % (pkg_entry.get('name')))
- states[pkg_entry] = self.VerifyPackage(pkg_entry, \
- self.modlists.get(pkg_entry, []))
-
- for entry in [ent for ent in packages if states[ent]]:
- self.modified.append(entry)
-
- def Remove(self, packages):
- """
- Remove specified entries.
-
- packages is a list of Package Entries with Instances generated
- by FindExtra().
- """
- self.logger.debug('Running YUM24.Remove()')
-
- if self.autodep:
- pkgtool = "/usr/bin/yum -d0 -y erase %s"
- else:
- pkgtool = "/usr/bin/yum -d0 erase %s"
-
- erase_args = []
- for pkg in packages:
- for inst in pkg:
- if pkg.get('name') != 'gpg-pubkey':
- pkg_arg = pkg.get('name') + '-'
- if inst.get('epoch', False):
- pkg_arg = pkg_arg + inst.get('epoch') + ':'
- pkg_arg = pkg_arg + inst.get('version') + '-' + inst.get('release')
- if inst.get('arch', False):
- pkg_arg = pkg_arg + '.' + inst.get('arch')
- erase_args.append(pkg_arg)
- else:
- pkgspec = {'name': pkg.get('name'),
- 'version': inst.get('version'),
- 'release': inst.get('release')}
- self.logger.info("WARNING: gpg-pubkey package not in configuration %s %s"\
- % (pkgspec.get('name'), self.str_evra(pkgspec)))
- self.logger.info(" This package will be deleted in a future version of the YUM24 driver.")
-
- rv = self.cmd.run(pkgtool % " ".join(erase_args))
- if rv.success:
- self.modified += packages
- for pkg in erase_args:
- self.logger.info("Deleted %s" % (pkg))
- else:
- self.logger.info("Bulk erase failed with errors:")
- self.logger.debug("Erase results: %s" % rv.error)
- self.logger.info("Attempting individual erase for each package.")
- for pkg in packages:
- pkg_modified = False
- for inst in pkg:
- if pkg.get('name') != 'gpg-pubkey':
- pkg_arg = pkg.get('name') + '-'
- if 'epoch' in inst.attrib:
- pkg_arg = pkg_arg + inst.get('epoch') + ':'
- pkg_arg = pkg_arg + inst.get('version') + '-' + inst.get('release')
- if 'arch' in inst.attrib:
- pkg_arg = pkg_arg + '.' + inst.get('arch')
- else:
- self.logger.info("WARNING: gpg-pubkey package not in configuration %s %s"\
- % (pkg.get('name'), self.str_evra(pkg)))
- self.logger.info(" This package will be deleted in a future version of the YUM24 driver.")
- continue
-
- rv = self.cmd.run(self.pkgtool % pkg_arg)
- if rv.success:
- pkg_modified = True
- self.logger.info("Deleted %s" % pkg_arg)
- else:
- self.logger.error("Unable to delete %s" % pkg_arg)
- self.logger.debug("Failure: %s" % rv.error)
- if pkg_modified == True:
- self.modified.append(pkg)
-
- self.RefreshPackages()
- self.extra = self.FindExtra()
diff --git a/src/lib/Bcfg2/Client/Tools/YUMng.py b/src/lib/Bcfg2/Client/Tools/YUMng.py
deleted file mode 100644
index 22fbba537..000000000
--- a/src/lib/Bcfg2/Client/Tools/YUMng.py
+++ /dev/null
@@ -1,9 +0,0 @@
-""" YUM driver called 'YUMng' for backwards compat """
-
-from Bcfg2.Client.Tools.YUM import YUM
-
-
-class YUMng(YUM):
- """ YUM driver called 'YUMng' for backwards compat """
- deprecated = True
- conflicts = ['YUM24', 'RPM', 'RPMng']
diff --git a/src/lib/Bcfg2/Client/Tools/__init__.py b/src/lib/Bcfg2/Client/Tools/__init__.py
index 11fe55bd6..885e22761 100644
--- a/src/lib/Bcfg2/Client/Tools/__init__.py
+++ b/src/lib/Bcfg2/Client/Tools/__init__.py
@@ -3,22 +3,15 @@
import os
import sys
import stat
+import logging
import Bcfg2.Client
import Bcfg2.Client.XML
from Bcfg2.Utils import Executor, ClassName
from Bcfg2.Compat import walk_packages # pylint: disable=W0622
+import Bcfg2.Options
__all__ = [m[1] for m in walk_packages(path=__path__)]
-# pylint: disable=C0103
-#: All available tools
-drivers = [item for item in __all__ if item not in ['rpmtools']]
-
-#: The default set of tools that will be used if "drivers" is not set
-#: in bcfg2.conf
-default = drivers[:]
-# pylint: enable=C0103
-
class ToolInstantiationError(Exception):
""" This error is raised if the toolset cannot be instantiated. """
@@ -78,23 +71,19 @@ class Tool(object):
#: runtime with a warning.
conflicts = []
- def __init__(self, logger, setup, config):
+ def __init__(self, config):
"""
- :param logger: Logger that will be used for logging by this tool
- :type logger: logging.Logger
- :param setup: The option set Bcfg2 was invoked with
- :type setup: Bcfg2.Options.OptionParser
:param config: The XML configuration for this client
:type config: lxml.etree._Element
:raises: :exc:`Bcfg2.Client.Tools.ToolInstantiationError`
"""
#: A :class:`Bcfg2.Options.OptionParser` object describing the
#: option set Bcfg2 was invoked with
- self.setup = setup
+ self.setup = Bcfg2.Options.get_option_parser()
#: A :class:`logging.Logger` object that will be used by this
#: tool for logging
- self.logger = logger
+ self.logger = logging.getLogger(self.name)
#: The XML configuration for this client
self.config = config
@@ -141,27 +130,27 @@ class Tool(object):
raise ToolInstantiationError("%s: %s not executable" %
(self.name, filename))
- def BundleUpdated(self, bundle, states): # pylint: disable=W0613
+ def BundleUpdated(self, bundle): # pylint: disable=W0613
""" Callback that is invoked when a bundle has been updated.
:param bundle: The bundle that has been updated
:type bundle: lxml.etree._Element
- :param states: The :attr:`Bcfg2.Client.Frame.Frame.states` dict
- :type states: dict
- :returns: None """
- return
+ :returns: dict - A dict of the state of entries suitable for
+ updating :attr:`Bcfg2.Client.Frame.Frame.states`
+ """
+ return dict()
- def BundleNotUpdated(self, bundle, states): # pylint: disable=W0613
+ def BundleNotUpdated(self, bundle): # pylint: disable=W0613
""" Callback that is invoked when a bundle has been updated.
:param bundle: The bundle that has been updated
:type bundle: lxml.etree._Element
- :param states: The :attr:`Bcfg2.Client.Frame.Frame.states` dict
- :type states: dict
- :returns: None """
- return
+ :returns: dict - A dict of the state of entries suitable for
+ updating :attr:`Bcfg2.Client.Frame.Frame.states`
+ """
+ return dict()
- def Inventory(self, states, structures=None):
+ def Inventory(self, structures=None):
""" Take an inventory of the system as it exists. This
involves two steps:
@@ -176,18 +165,19 @@ class Tool(object):
is the entry tag. E.g., a Path entry would be verified by
calling :func:`VerifyPath`.
- :param states: The :attr:`Bcfg2.Client.Frame.Frame.states` dict
- :type states: dict
:param structures: The list of structures (i.e., bundles) to
get entries from. If this is not given,
all children of
:attr:`Bcfg2.Client.Tools.Tool.config` will
be used.
:type structures: list of lxml.etree._Element
- :returns: None """
+ :returns: dict - A dict of the state of entries suitable for
+ updating :attr:`Bcfg2.Client.Frame.Frame.states`
+ """
if not structures:
structures = self.config.getchildren()
mods = self.buildModlist()
+ states = dict()
for struct in structures:
for entry in struct.getchildren():
if self.canVerify(entry):
@@ -205,8 +195,9 @@ class Tool(object):
self.primarykey(entry)),
exc_info=1)
self.extra = self.FindExtra()
+ return states
- def Install(self, entries, states):
+ def Install(self, entries):
""" Install entries. 'Install' in this sense means either
initially install, or update as necessary to match the
specification.
@@ -218,9 +209,10 @@ class Tool(object):
:param entries: The entries to install
:type entries: list of lxml.etree._Element
- :param states: The :attr:`Bcfg2.Client.Frame.Frame.states` dict
- :type states: dict
- :returns: None """
+ :returns: dict - A dict of the state of entries suitable for
+ updating :attr:`Bcfg2.Client.Frame.Frame.states`
+ """
+ states = dict()
for entry in entries:
try:
func = getattr(self, "Install%s" % entry.tag)
@@ -236,6 +228,7 @@ class Tool(object):
self.logger.error("%s: Unexpected failure installing %s" %
(self.name, self.primarykey(entry)),
exc_info=1)
+ return states
def Remove(self, entries):
""" Remove specified extra entries.
@@ -396,8 +389,8 @@ class PkgTool(Tool):
#: The ``type`` attribute of Packages handled by this tool.
pkgtype = 'echo'
- def __init__(self, logger, setup, config):
- Tool.__init__(self, logger, setup, config)
+ def __init__(self, config):
+ Tool.__init__(self, config)
#: A dict of installed packages; the keys should be package
#: names and the values should be simple strings giving the
@@ -434,32 +427,27 @@ class PkgTool(Tool):
for pkg in packages)
return self.pkgtool[0] % pkgargs
- def Install(self, packages, states):
+ def Install(self, packages):
""" Run a one-pass install where all required packages are
installed with a single command, followed by single package
installs in case of failure.
:param entries: The entries to install
:type entries: list of lxml.etree._Element
- :param states: The :attr:`Bcfg2.Client.Frame.Frame.states` dict
- :type states: dict
- :returns: None """
+ :returns: dict - A dict of the state of entries suitable for
+ updating :attr:`Bcfg2.Client.Frame.Frame.states`
+ """
self.logger.info("Trying single pass package install for pkgtype %s" %
self.pkgtype)
- pkgcmd = self._get_package_command(packages)
- self.logger.debug("Running command: %s" % pkgcmd)
- if self.cmd.run(pkgcmd):
+ states = dict()
+ if self.cmd.run(self._get_package_command(packages)):
self.logger.info("Single Pass Succeded")
# set all package states to true and flush workqueues
- pkgnames = [pkg.get('name') for pkg in packages]
- for entry in list(states.keys()):
- if (entry.tag == 'Package'
- and entry.get('type') == self.pkgtype
- and entry.get('name') in pkgnames):
- self.logger.debug('Setting state to true for pkg %s' %
- entry.get('name'))
- states[entry] = True
+ for entry in packages:
+ self.logger.debug('Setting state to true for %s' %
+ self.primarykey(entry))
+ states[entry] = True
self.RefreshPackages()
else:
self.logger.error("Single Pass Failed")
@@ -477,10 +465,13 @@ class PkgTool(Tool):
if self.cmd.run(self._get_package_command([pkg])):
states[pkg] = True
else:
+ states[pkg] = False
self.logger.error("Failed to install package %s" %
pkg.get('name'))
self.RefreshPackages()
- self.modified.extend(entry for entry in packages if states[entry])
+ self.modified.extend(entry for entry in packages
+ if entry in states and states[entry])
+ return states
def RefreshPackages(self):
""" Refresh the internal representation of the package
@@ -502,8 +493,8 @@ class PkgTool(Tool):
class SvcTool(Tool):
""" Base class for tools that handle Service entries """
- def __init__(self, logger, setup, config):
- Tool.__init__(self, logger, setup, config)
+ def __init__(self, config):
+ Tool.__init__(self, config)
#: List of services that have been restarted
self.restarted = []
__init__.__doc__ = Tool.__init__.__doc__
@@ -586,7 +577,7 @@ class SvcTool(Tool):
self.InstallService(entry)
Remove.__doc__ = Tool.Remove.__doc__
- def BundleUpdated(self, bundle, states):
+ def BundleUpdated(self, bundle):
if self.setup['servicemode'] == 'disabled':
return
@@ -616,9 +607,10 @@ class SvcTool(Tool):
if not success:
self.logger.error("Failed to manipulate service %s" %
(entry.get('name')))
+ return dict()
BundleUpdated.__doc__ = Tool.BundleUpdated.__doc__
- def Install(self, entries, states):
+ def Install(self, entries):
install_entries = []
for entry in entries:
if entry.get('install', 'true').lower() == 'false':
@@ -626,7 +618,7 @@ class SvcTool(Tool):
(entry.tag, entry.get('name')))
else:
install_entries.append(entry)
- return Tool.Install(self, install_entries, states)
+ return Tool.Install(self, install_entries)
Install.__doc__ = Tool.Install.__doc__
def InstallService(self, entry):
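
Taken together, the __init__.py changes define the new driver contract: constructors
take only config (setup comes from Bcfg2.Options.get_option_parser(), the logger from
logging.getLogger), and Inventory()/Install()/BundleUpdated() return a dict of entry
states instead of mutating one passed in by the caller. Below is a minimal sketch of
a driver written against that contract; the Foo tool, its 'foo' Path type, and the
(entry, modlist) verify signature are assumptions for illustration only.

    import Bcfg2.Client.Tools

    class Foo(Bcfg2.Client.Tools.Tool):
        """Hypothetical driver illustrating the post-change Tool API."""
        __handles__ = [('Path', 'foo')]
        __req__ = {'Path': ['name', 'type']}

        def __init__(self, config):
            # No logger/setup arguments any more; the base class fetches them.
            Bcfg2.Client.Tools.Tool.__init__(self, config)

        def VerifyPath(self, entry, modlist):
            # Called from Inventory(); True means the entry matches the spec.
            return False

        def InstallPath(self, entry):
            # Called from Install(); the result lands in the states dict
            # that Install() returns to the caller.
            return True

The caller presumably merges the returned dicts into its own mapping, e.g.
states.update(tool.Inventory()) followed by states.update(tool.Install(bad_entries)).
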
diff --git a/src/lib/Bcfg2/Client/Tools/launchd.py b/src/lib/Bcfg2/Client/Tools/launchd.py
index b0661b26b..a4aeab6c7 100644
--- a/src/lib/Bcfg2/Client/Tools/launchd.py
+++ b/src/lib/Bcfg2/Client/Tools/launchd.py
@@ -12,8 +12,8 @@ class launchd(Bcfg2.Client.Tools.Tool): # pylint: disable=C0103
__execs__ = ['/bin/launchctl', '/usr/bin/defaults']
__req__ = {'Service': ['name', 'status']}
- def __init__(self, logger, setup, config):
- Bcfg2.Client.Tools.Tool.__init__(self, logger, setup, config)
+ def __init__(self, config):
+ Bcfg2.Client.Tools.Tool.__init__(self, config)
# Locate plist file that provides given reverse-fqdn name:
#
@@ -117,9 +117,11 @@ class launchd(Bcfg2.Client.Tools.Tool): # pylint: disable=C0103
status='on')
for name in allsrv]
- def BundleUpdated(self, bundle, states):
+ def BundleUpdated(self, bundle):
"""Reload launchd plist."""
- for entry in [entry for entry in bundle if self.handlesEntry(entry)]:
+ for entry in bundle:
+ if not self.handlesEntry(entry):
+ continue
if not self.canInstall(entry):
self.logger.error("Insufficient information to restart "
"service %s" % entry.get('name'))
diff --git a/src/lib/Bcfg2/Client/Tools/rpmtools.py b/src/lib/Bcfg2/Client/Tools/rpmtools.py
deleted file mode 100755
index 32a04262d..000000000
--- a/src/lib/Bcfg2/Client/Tools/rpmtools.py
+++ /dev/null
@@ -1,1091 +0,0 @@
-#!/usr/bin/env python
-"""
- Module that uses rpm-python to implement the following rpm
- functionality for the bcfg2 RPM and YUM client drivers:
-
- rpm -qa
- rpm --verify
- rpm --erase
-
- The code closely follows the rpm C code.
-
- The code was written to be used in the bcfg2 RPM/YUM drivers.
-
- Some command line options have been provided to assist with
- testing and development, but the output isn't pretty and looks
- nothing like rpm output.
-
- Run 'rpmtools' -h for the options.
-
-"""
-
-import grp
-import optparse
-import os
-import pwd
-import rpm
-import stat
-import sys
-if sys.version_info >= (2, 5):
- import hashlib
- py24compat = False
-else:
- # FIXME: Remove when client python dep is 2.5 or greater
- py24compat = True
- import md5
-
-# Determine what prelink tools we have available.
-# The isprelink module is a python extension that examines the ELF headers
-# to see if the file has been prelinked. If it is not present a lot of files
-# are unnecessarily run through the prelink command.
-try:
- from isprelink import *
- isprelink_imported = True
-except ImportError:
- isprelink_imported = False
-
-# If the prelink command is installed on the system then we need to do
-# prelink -y on files.
-if os.access('/usr/sbin/prelink', os.X_OK):
- prelink_exists = True
-else:
- prelink_exists = False
-
-# If we don't have isprelink then we will use the prelink configuration file to
-# filter what we have to put through prelink -y.
-import re
-blacklist = []
-whitelist = []
-try:
- f = open('/etc/prelink.conf', mode='r')
- for line in f:
- if line.startswith('#'):
- continue
- option, pattern = line.split()
- if pattern.startswith('*.'):
- pattern = pattern.replace('*.', '\.')
- pattern += '$'
- elif pattern.startswith('/'):
- pattern = '^' + pattern
- if option == '-b':
- blacklist.append(pattern)
- elif option == '-l':
- whitelist.append(pattern)
- f.close()
-except IOError:
- pass
-
-blacklist_re = re.compile('|'.join(blacklist))
-whitelist_re = re.compile('|'.join(whitelist))
-
-# Flags that are not defined in rpm-python.
-# They are defined in lib/rpmcli.h
-# Bit(s) for verifyFile() attributes.
-#
-RPMVERIFY_NONE = 0 # /*!< */
-RPMVERIFY_MD5 = 1 # 1 << 0 # /*!< from %verify(md5) */
-RPMVERIFY_FILESIZE = 2 # 1 << 1 # /*!< from %verify(size) */
-RPMVERIFY_LINKTO = 4 # 1 << 2 # /*!< from %verify(link) */
-RPMVERIFY_USER = 8 # 1 << 3 # /*!< from %verify(user) */
-RPMVERIFY_GROUP = 16 # 1 << 4 # /*!< from %verify(group) */
-RPMVERIFY_MTIME = 32 # 1 << 5 # /*!< from %verify(mtime) */
-RPMVERIFY_MODE = 64 # 1 << 6 # /*!< from %verify(mode) */
-RPMVERIFY_RDEV = 128 # 1 << 7 # /*!< from %verify(rdev) */
-RPMVERIFY_CONTEXTS = 32768 # (1 << 15) # /*!< from --nocontexts */
-RPMVERIFY_READLINKFAIL = 268435456 # (1 << 28) # /*!< readlink failed */
-RPMVERIFY_READFAIL = 536870912 # (1 << 29) # /*!< file read failed */
-RPMVERIFY_LSTATFAIL = 1073741824 # (1 << 30) # /*!< lstat failed */
-RPMVERIFY_LGETFILECONFAIL = 2147483648 # (1 << 31) # /*!< lgetfilecon failed */
-
-RPMVERIFY_FAILURES = \
- (RPMVERIFY_LSTATFAIL|RPMVERIFY_READFAIL|RPMVERIFY_READLINKFAIL| \
- RPMVERIFY_LGETFILECONFAIL)
-
-# Bit(s) to control rpm_verify() operation.
-#
-VERIFY_DEFAULT = 0, # /*!< */
-VERIFY_MD5 = 1 << 0 # /*!< from --nomd5 */
-VERIFY_SIZE = 1 << 1 # /*!< from --nosize */
-VERIFY_LINKTO = 1 << 2 # /*!< from --nolinkto */
-VERIFY_USER = 1 << 3 # /*!< from --nouser */
-VERIFY_GROUP = 1 << 4 # /*!< from --nogroup */
-VERIFY_MTIME = 1 << 5 # /*!< from --nomtime */
-VERIFY_MODE = 1 << 6 # /*!< from --nomode */
-VERIFY_RDEV = 1 << 7 # /*!< from --nodev */
-# /* bits 8-14 unused, reserved for rpmVerifyAttrs */
-VERIFY_CONTEXTS = 1 << 15 # /*!< verify: from --nocontexts */
-VERIFY_FILES = 1 << 16 # /*!< verify: from --nofiles */
-VERIFY_DEPS = 1 << 17 # /*!< verify: from --nodeps */
-VERIFY_SCRIPT = 1 << 18 # /*!< verify: from --noscripts */
-VERIFY_DIGEST = 1 << 19 # /*!< verify: from --nodigest */
-VERIFY_SIGNATURE = 1 << 20 # /*!< verify: from --nosignature */
-VERIFY_PATCHES = 1 << 21 # /*!< verify: from --nopatches */
-VERIFY_HDRCHK = 1 << 22 # /*!< verify: from --nohdrchk */
-VERIFY_FOR_LIST = 1 << 23 # /*!< query: from --list */
-VERIFY_FOR_STATE = 1 << 24 # /*!< query: from --state */
-VERIFY_FOR_DOCS = 1 << 25 # /*!< query: from --docfiles */
-VERIFY_FOR_CONFIG = 1 << 26 # /*!< query: from --configfiles */
-VERIFY_FOR_DUMPFILES = 1 << 27 # /*!< query: from --dump */
-# /* bits 28-31 used in rpmVerifyAttrs */
-
-# Comes from C cource. lib/rpmcli.h
-VERIFY_ATTRS = \
- (VERIFY_MD5 | VERIFY_SIZE | VERIFY_LINKTO | VERIFY_USER | VERIFY_GROUP | \
- VERIFY_MTIME | VERIFY_MODE | VERIFY_RDEV | VERIFY_CONTEXTS)
-
-VERIFY_ALL = \
- (VERIFY_ATTRS | VERIFY_FILES | VERIFY_DEPS | VERIFY_SCRIPT | VERIFY_DIGEST |\
- VERIFY_SIGNATURE | VERIFY_HDRCHK)
-
-
-# Some masks for what checks to NOT do on these file types.
-# The C code actiually resets these up for every file.
-DIR_FLAGS = ~(RPMVERIFY_MD5 | RPMVERIFY_FILESIZE | RPMVERIFY_MTIME | \
- RPMVERIFY_LINKTO)
-
-# These file types all have the same mask, but hopefully this will make the
-# code more readable.
-FIFO_FLAGS = CHR_FLAGS = BLK_FLAGS = GHOST_FLAGS = DIR_FLAGS
-
-LINK_FLAGS = ~(RPMVERIFY_MD5 | RPMVERIFY_FILESIZE | RPMVERIFY_MTIME | \
- RPMVERIFY_MODE | RPMVERIFY_USER | RPMVERIFY_GROUP)
-
-REG_FLAGS = ~(RPMVERIFY_LINKTO)
-
-
-def s_isdev(mode):
- """
- Check to see if a file is a device.
-
- """
- return stat.S_ISBLK(mode) | stat.S_ISCHR(mode)
-
-def rpmpackagelist(rts):
- """
- Equivalent of rpm -qa. Intended for RefreshPackages() in the RPM Driver.
- Requires rpmtransactionset() to be run first to get a ts.
- Returns a list of pkgspec dicts.
-
- e.g. [ {'name':'foo', 'epoch':'20', 'version':'1.2', 'release':'5', 'arch':'x86_64' },
- {'name':'bar', 'epoch':'10', 'version':'5.2', 'release':'2', 'arch':'x86_64' } ]
-
- """
- return [{'name':header[rpm.RPMTAG_NAME],
- 'epoch':header[rpm.RPMTAG_EPOCH],
- 'version':header[rpm.RPMTAG_VERSION],
- 'release':header[rpm.RPMTAG_RELEASE],
- 'arch':header[rpm.RPMTAG_ARCH],
- 'gpgkeyid':header.sprintf("%|SIGGPG?{%{SIGGPG:pgpsig}}:{None}|").split()[-1]}
- for header in rts.dbMatch()]
-
-def getindexbykeyword(index_ts, **kwargs):
- """
- Return list of indexs from the rpmdb matching keywords
- ex: getHeadersByKeyword(name='foo', version='1', release='1')
-
- Can be passed any structure that can be indexed by the pkgspec
- keyswords as other keys are filtered out.
-
- """
- lst = []
- name = kwargs.get('name')
- if name:
- index_mi = index_ts.dbMatch(rpm.RPMTAG_NAME, name)
- else:
- index_mi = index_ts.dbMatch()
-
- if 'epoch' in kwargs:
- if kwargs['epoch'] != None and kwargs['epoch'] != 'None':
- kwargs['epoch'] = int(kwargs['epoch'])
- else:
- del(kwargs['epoch'])
-
- keywords = [key for key in list(kwargs.keys()) \
- if key in ('name', 'epoch', 'version', 'release', 'arch')]
- keywords_len = len(keywords)
- for hdr in index_mi:
- match = 0
- for keyword in keywords:
- if hdr[keyword] == kwargs[keyword]:
- match += 1
- if match == keywords_len:
- lst.append(index_mi.instance())
- del index_mi
- return lst
-
-def getheadersbykeyword(header_ts, **kwargs):
- """
- Borrowed parts of this from from Yum. Need to fix it though.
- Epoch is not handled right.
-
- Return list of headers from the rpmdb matching keywords
- ex: getHeadersByKeyword(name='foo', version='1', release='1')
-
- Can be passed any structure that can be indexed by the pkgspec
- keyswords as other keys are filtered out.
-
- """
- lst = []
- name = kwargs.get('name')
- if name:
- header_mi = header_ts.dbMatch(rpm.RPMTAG_NAME, name)
- else:
- header_mi = header_ts.dbMatch()
-
- if 'epoch' in kwargs:
- if kwargs['epoch'] != None and kwargs['epoch'] != 'None':
- kwargs['epoch'] = int(kwargs['epoch'])
- else:
- del(kwargs['epoch'])
-
- keywords = [key for key in list(kwargs.keys()) \
- if key in ('name', 'epoch', 'version', 'release', 'arch')]
- keywords_len = len(keywords)
- for hdr in header_mi:
- match = 0
- for keyword in keywords:
- if hdr[keyword] == kwargs[keyword]:
- match += 1
- if match == keywords_len:
- lst.append(hdr)
- del header_mi
- return lst
-
-def prelink_md5_check(filename):
- """
- Checks if a file is prelinked. If it is run it through prelink -y
- to get the unprelinked md5 and file size.
-
- Return 0 if the file was not prelinked, otherwise return the file size.
- Always return the md5.
-
- """
- prelink = False
- try:
- plf = open(filename, "rb")
- except IOError:
- return False, 0
-
- if prelink_exists:
- if isprelink_imported:
- plfd = plf.fileno()
- if isprelink(plfd):
- plf.close()
- cmd = '/usr/sbin/prelink -y %s 2> /dev/null' \
- % (re.escape(filename))
- plf = os.popen(cmd, 'rb')
- prelink = True
- elif whitelist_re.search(filename) and not blacklist_re.search(filename):
- plf.close()
- cmd = '/usr/sbin/prelink -y %s 2> /dev/null' \
- % (re.escape(filename))
- plf = os.popen(cmd, 'rb')
- prelink = True
-
- fsize = 0
- if py24compat:
- chksum = md5.new()
- else:
- chksum = hashlib.md5()
- while 1:
- data = plf.read()
- if not data:
- break
- fsize += len(data)
- chksum.update(data)
- plf.close()
- file_md5 = chksum.hexdigest()
- if prelink:
- return file_md5, fsize
- else:
- return file_md5, 0
-
-def prelink_size_check(filename):
- """
- This check is only done if the prelink_md5_check() is not done first.
-
- Checks if a file is prelinked. If it is run it through prelink -y
- to get the unprelinked file size.
-
- Return 0 if the file was not prelinked, otherwise return the file size.
-
- """
- fsize = 0
- try:
- plf = open(filename, "rb")
- except IOError:
- return False
-
- if prelink_exists:
- if isprelink_imported:
- plfd = plf.fileno()
- if isprelink(plfd):
- plf.close()
- cmd = '/usr/sbin/prelink -y %s 2> /dev/null' \
- % (re.escape(filename))
- plf = os.popen(cmd, 'rb')
-
- while 1:
- data = plf.read()
- if not data:
- break
- fsize += len(data)
-
- elif whitelist_re.search(filename) and not blacklist_re.search(filename):
- plf.close()
- cmd = '/usr/sbin/prelink -y %s 2> /dev/null' \
- % (re.escape(filename))
- plf = os.popen(cmd, 'rb')
-
- while 1:
- data = plf.read()
- if not data:
- break
- fsize += len(data)
-
- plf.close()
-
- return fsize
-
-def debug_verify_flags(vflags):
- """
- Decodes the verify flags bits.
- """
- if vflags & RPMVERIFY_MD5:
- print('RPMVERIFY_MD5')
- if vflags & RPMVERIFY_FILESIZE:
- print('RPMVERIFY_FILESIZE')
- if vflags & RPMVERIFY_LINKTO:
- print('RPMVERIFY_LINKTO')
- if vflags & RPMVERIFY_USER:
- print('RPMVERIFY_USER')
- if vflags & RPMVERIFY_GROUP:
- print('RPMVERIFY_GROUP')
- if vflags & RPMVERIFY_MTIME:
- print('RPMVERIFY_MTIME')
- if vflags & RPMVERIFY_MODE:
- print('RPMVERIFY_MODE')
- if vflags & RPMVERIFY_RDEV:
- print('RPMVERIFY_RDEV')
- if vflags & RPMVERIFY_CONTEXTS:
- print('RPMVERIFY_CONTEXTS')
- if vflags & RPMVERIFY_READLINKFAIL:
- print('RPMVERIFY_READLINKFAIL')
- if vflags & RPMVERIFY_READFAIL:
- print('RPMVERIFY_READFAIL')
- if vflags & RPMVERIFY_LSTATFAIL:
- print('RPMVERIFY_LSTATFAIL')
- if vflags & RPMVERIFY_LGETFILECONFAIL:
- print('RPMVERIFY_LGETFILECONFAIL')
-
-def debug_file_flags(fflags):
- """
- Decodes the file flags bits.
- """
- if fflags & rpm.RPMFILE_CONFIG:
- print('rpm.RPMFILE_CONFIG')
-
- if fflags & rpm.RPMFILE_DOC:
- print('rpm.RPMFILE_DOC')
-
- if fflags & rpm.RPMFILE_ICON:
- print('rpm.RPMFILE_ICON')
-
- if fflags & rpm.RPMFILE_MISSINGOK:
- print('rpm.RPMFILE_MISSINGOK')
-
- if fflags & rpm.RPMFILE_NOREPLACE:
- print('rpm.RPMFILE_NOREPLACE')
-
- if fflags & rpm.RPMFILE_GHOST:
- print('rpm.RPMFILE_GHOST')
-
- if fflags & rpm.RPMFILE_LICENSE:
- print('rpm.RPMFILE_LICENSE')
-
- if fflags & rpm.RPMFILE_README:
- print('rpm.RPMFILE_README')
-
- if fflags & rpm.RPMFILE_EXCLUDE:
- print('rpm.RPMFILE_EXLUDE')
-
- if fflags & rpm.RPMFILE_UNPATCHED:
- print('rpm.RPMFILE_UNPATCHED')
-
- if fflags & rpm.RPMFILE_PUBKEY:
- print('rpm.RPMFILE_PUBKEY')
-
-def rpm_verify_file(fileinfo, rpmlinktos, omitmask):
- """
- Verify all the files in a package.
-
- Returns a list of error flags, the file type and file name. The list
- entries are strings that are the same as the labels for the bitwise
- flags used in the C code.
-
- """
- (fname, fsize, fmode, fmtime, fflags, frdev, finode, fnlink, fstate, \
- vflags, fuser, fgroup, fmd5) = fileinfo
-
- # 1. rpmtsRootDir stuff. What does it do and where to I get it from?
-
- file_results = []
- flags = vflags
-
- # Check to see if the file was installed - if not pretend all is ok.
- # This is what the rpm C code does!
- if fstate != rpm.RPMFILE_STATE_NORMAL:
- return file_results
-
- # Get the installed files stats
- try:
- lstat = os.lstat(fname)
- except OSError:
- if not (fflags & (rpm.RPMFILE_MISSINGOK|rpm.RPMFILE_GHOST)):
- file_results.append('RPMVERIFY_LSTATFAIL')
- #file_results.append(fname)
- return file_results
-
- # 5. Contexts? SELinux stuff?
-
- # Setup what checks to do. This is straight out of the C code.
- if stat.S_ISDIR(lstat.st_mode):
- flags &= DIR_FLAGS
- elif stat.S_ISLNK(lstat.st_mode):
- flags &= LINK_FLAGS
- elif stat.S_ISFIFO(lstat.st_mode):
- flags &= FIFO_FLAGS
- elif stat.S_ISCHR(lstat.st_mode):
- flags &= CHR_FLAGS
- elif stat.S_ISBLK(lstat.st_mode):
- flags &= BLK_FLAGS
- else:
- flags &= REG_FLAGS
-
- if (fflags & rpm.RPMFILE_GHOST):
- flags &= GHOST_FLAGS
-
- flags &= ~(omitmask | RPMVERIFY_FAILURES)
-
- # 8. SELinux stuff.
-
- prelink_size = 0
- if flags & RPMVERIFY_MD5:
- prelink_md5, prelink_size = prelink_md5_check(fname)
- if prelink_md5 == False:
- file_results.append('RPMVERIFY_MD5')
- file_results.append('RPMVERIFY_READFAIL')
- elif prelink_md5 != fmd5:
- file_results.append('RPMVERIFY_MD5')
-
- if flags & RPMVERIFY_LINKTO:
- linkto = os.readlink(fname)
- if not linkto:
- file_results.append('RPMVERIFY_READLINKFAIL')
- file_results.append('RPMVERIFY_LINKTO')
- else:
- if len(rpmlinktos) == 0 or linkto != rpmlinktos:
- file_results.append('RPMVERIFY_LINKTO')
-
- if flags & RPMVERIFY_FILESIZE:
- if not (flags & RPMVERIFY_MD5): # prelink check hasn't been done.
- prelink_size = prelink_size_check(fname)
- if (prelink_size != 0): # This is a prelinked file.
- if (prelink_size != fsize):
- file_results.append('RPMVERIFY_FILESIZE')
- elif lstat.st_size != fsize: # It wasn't a prelinked file.
- file_results.append('RPMVERIFY_FILESIZE')
-
- if flags & RPMVERIFY_MODE:
- metamode = fmode
- filemode = lstat.st_mode
-
- # Comparing the type of %ghost files is meaningless, but perms are ok.
- if fflags & rpm.RPMFILE_GHOST:
- metamode &= ~0xf000
- filemode &= ~0xf000
-
- if (stat.S_IFMT(metamode) != stat.S_IFMT(filemode)) or \
- (stat.S_IMODE(metamode) != stat.S_IMODE(filemode)):
- file_results.append('RPMVERIFY_MODE')
-
- if flags & RPMVERIFY_RDEV:
- if (stat.S_ISCHR(fmode) != stat.S_ISCHR(lstat.st_mode) or
- stat.S_ISBLK(fmode) != stat.S_ISBLK(lstat.st_mode)):
- file_results.append('RPMVERIFY_RDEV')
- elif (s_isdev(fmode) & s_isdev(lstat.st_mode)):
- st_rdev = lstat.st_rdev
- if frdev != st_rdev:
- file_results.append('RPMVERIFY_RDEV')
-
- if flags & RPMVERIFY_MTIME:
- if lstat.st_mtime != fmtime:
- file_results.append('RPMVERIFY_MTIME')
-
- if flags & RPMVERIFY_USER:
- try:
- user = pwd.getpwuid(lstat.st_uid)[0]
- except KeyError:
- user = None
- if not user or not fuser or (user != fuser):
- file_results.append('RPMVERIFY_USER')
-
- if flags & RPMVERIFY_GROUP:
- try:
- group = grp.getgrgid(lstat.st_gid)[0]
- except KeyError:
- group = None
- if not group or not fgroup or (group != fgroup):
- file_results.append('RPMVERIFY_GROUP')
-
- return file_results
-
-def rpm_verify_dependencies(header):
- """
- Check package dependencies. Header is an rpm.hdr.
-
- Don't like opening another ts to do this, but
- it was the only way I could find of clearing the ts
- out.
-
- Have asked on the rpm-maint list on how to do
- this the right way (28 Feb 2007).
-
- ts.check() returns:
-
- ((name, version, release), (reqname, reqversion), \
- flags, suggest, sense)
-
- """
- _ts1 = rpmtransactionset()
- _ts1.addInstall(header, 'Dep Check', 'i')
- dep_errors = _ts1.check()
- _ts1.closeDB()
- return dep_errors
-
-def rpm_verify_package(vp_ts, header, verify_options):
- """
- Verify a single package specified by header. Header is an rpm.hdr.
-
- If errors are found it returns a dictionary of errors.
-
- """
- # Set some transaction level flags.
- vsflags = 0
- if 'nodigest' in verify_options:
- vsflags |= rpm._RPMVSF_NODIGESTS
- if 'nosignature' in verify_options:
- vsflags |= rpm._RPMVSF_NOSIGNATURES
- ovsflags = vp_ts.setVSFlags(vsflags)
-
- # Map from the Python options to the rpm bitwise flags.
- omitmask = 0
-
- if 'nolinkto' in verify_options:
- omitmask |= VERIFY_LINKTO
- if 'nomd5' in verify_options:
- omitmask |= VERIFY_MD5
- if 'nosize' in verify_options:
- omitmask |= VERIFY_SIZE
- if 'nouser' in verify_options:
- omitmask |= VERIFY_USER
- if 'nogroup' in verify_options:
- omitmask |= VERIFY_GROUP
- if 'nomtime' in verify_options:
- omitmask |= VERIFY_MTIME
- if 'nomode' in verify_options:
- omitmask |= VERIFY_MODE
- if 'nordev' in verify_options:
- omitmask |= VERIFY_RDEV
-
- omitmask = ((~omitmask & VERIFY_ATTRS) ^ VERIFY_ATTRS)
-
- package_results = {}
-
- # Check Signatures and Digests.
- # No idea what this might return. Need to break something to see.
- # Setting the vsflags above determines what gets checked in the header.
- hdr_stat = vp_ts.hdrCheck(header.unload())
- if hdr_stat:
- package_results['hdr'] = hdr_stat
-
- # Check Package Dependencies.
- if 'nodeps' not in verify_options:
- dep_stat = rpm_verify_dependencies(header)
- if dep_stat:
- package_results['deps'] = dep_stat
-
- # Check all the package files.
- if 'nofiles' not in verify_options:
- vp_fi = header.fiFromHeader()
- for fileinfo in vp_fi:
- # Do not bother doing anything with ghost files.
- # This is what RPM does.
- if fileinfo[4] & rpm.RPMFILE_GHOST:
- continue
-
- # This is only needed because of an inconsistency in the
- # rpm.fi interface.
- linktos = vp_fi.FLink()
-
- file_stat = rpm_verify_file(fileinfo, linktos, omitmask)
-
- #if len(file_stat) > 0 or options.verbose:
- if len(file_stat) > 0:
- fflags = fileinfo[4]
- if fflags & rpm.RPMFILE_CONFIG:
- file_stat.append('c')
- elif fflags & rpm.RPMFILE_DOC:
- file_stat.append('d')
- elif fflags & rpm.RPMFILE_GHOST:
- file_stat.append('g')
- elif fflags & rpm.RPMFILE_LICENSE:
- file_stat.append('l')
- elif fflags & rpm.RPMFILE_PUBKEY:
- file_stat.append('P')
- elif fflags & rpm.RPMFILE_README:
- file_stat.append('r')
- else:
- file_stat.append(' ')
-
- file_stat.append(fileinfo[0]) # The filename.
- package_results.setdefault('files', []).append(file_stat)
-
- # Run the verify script if there is one.
- # Do we want this?
- #if 'noscripts' not in verify_options:
- # script_stat = rpmVerifyscript()
- # if script_stat:
- # package_results['script'] = script_stat
-
- # If there have been any errors, add the package nevra to the result.
- if len(package_results) > 0:
- package_results.setdefault('nevra', (header[rpm.RPMTAG_NAME], \
- header[rpm.RPMTAG_EPOCH], \
- header[rpm.RPMTAG_VERSION], \
- header[rpm.RPMTAG_RELEASE], \
- header[rpm.RPMTAG_ARCH]))
- else:
- package_results = None
-
- # Put things back the way we found them.
- vsflags = vp_ts.setVSFlags(ovsflags)
-
- return package_results
-
-def rpm_verify(verify_ts, verify_pkgspec, verify_options=[]):
- """
- Requires rpmtransactionset() to be run first to get a ts.
-
- pkgspec is a dict specifying the package
- e.g.:
- For a single package
- { 'name': 'foo', 'epoch': '20', 'version': '1', 'release': '1', 'arch': 'x86_64' }
-
- For all packages
- {}
-
- Or any combination of keywords to select one or more packages to verify.
-
- options is a list of 'rpm --verify' options. Default is to check everything.
- e.g.:
- [ 'nodeps', 'nodigest', 'nofiles', 'noscripts', 'nosignature',
- 'nolinkto', 'nomd5', 'nosize', 'nouser', 'nogroup', 'nomtime',
- 'nomode', 'nordev' ]
-
- Returns a list. One list entry per package. Each list entry is a
- dictionary. Dict keys are 'files', 'deps', 'nevra' and 'hdr'.
- Entries only get added for failures. If nothing failed, an empty
- list is returned.
-
- It's all a bit messy and probably needs reviewing.
-
- [ { 'hdr': [???],
- 'deps': [((name, version, release), (reqname, reqversion),
- flags, suggest, sense), .... ]
- 'files': [ ['filename1', 'RPMVERIFY_GROUP', 'RPMVERIFY_USER' ],
- ['filename2', 'RPMVERIFY_LSTATFAIL']]
- 'nevra': ['name1', 'epoch1', 'version1', 'release1', 'arch1'] }
- { 'hdr': [???],
- 'deps': [((name, version, release), (reqname, reqversion),
- flags, suggest, sense), .... ]
- 'files': [ ['filename', 'RPMVERIFY_GROUP', 'RPMVERIFY_USER' ],
- ['filename2', 'RPMVERIFY_LSTATFAIL']]
- 'nevra': ['name2', 'epoch2', 'version2', 'release2', 'arch2'] } ]
-
- """
- verify_results = []
- headers = getheadersbykeyword(verify_ts, **verify_pkgspec)
- for header in headers:
- result = rpm_verify_package(verify_ts, header, verify_options)
- if result:
- verify_results.append(result)
-
- return verify_results
-
-def rpmtransactionset():
- """
- A simple wrapper for rpm.TransactionSet() to keep everything together.
- Might use it to set some ts level flags later.
-
- """
- ts = rpm.TransactionSet()
- return ts
-
-class Rpmtscallback(object):
- """
- Callback for ts.run(). Used for adding, upgrading and removing packages.
- Starting with all possible reason codes, but bcfg2 will probably only
- make use of a few of them.
-
- Mostly just printing stuff at the moment to understand how the callback
- is used.
-
- """
- def __init__(self):
- self.fdnos = {}
-
- def callback(self, reason, amount, total, key, client_data):
- """
- Generic rpmts call back.
- """
- if reason == rpm.RPMCALLBACK_INST_OPEN_FILE:
- pass
- elif reason == rpm.RPMCALLBACK_INST_CLOSE_FILE:
- pass
- elif reason == rpm.RPMCALLBACK_INST_START:
- pass
- elif reason == rpm.RPMCALLBACK_TRANS_PROGRESS or \
- reason == rpm.RPMCALLBACK_INST_PROGRESS:
- pass
- # rpm.RPMCALLBACK_INST_PROGRESS
- elif reason == rpm.RPMCALLBACK_TRANS_START:
- pass
- elif reason == rpm.RPMCALLBACK_TRANS_STOP:
- pass
- elif reason == rpm.RPMCALLBACK_REPACKAGE_START:
- pass
- elif reason == rpm.RPMCALLBACK_REPACKAGE_PROGRESS:
- pass
- elif reason == rpm.RPMCALLBACK_REPACKAGE_STOP:
- pass
- elif reason == rpm.RPMCALLBACK_UNINST_PROGRESS:
- pass
- elif reason == rpm.RPMCALLBACK_UNINST_START:
- pass
- elif reason == rpm.RPMCALLBACK_UNINST_STOP:
- pass
- # How do we get at this?
- # RPM.modified += key
- elif reason == rpm.RPMCALLBACK_UNPACK_ERROR:
- pass
- elif reason == rpm.RPMCALLBACK_CPIO_ERROR:
- pass
- elif reason == rpm.RPMCALLBACK_UNKNOWN:
- pass
- else:
- print('ERROR - Fell through callBack')
-
-
-def rpm_erase(erase_pkgspecs, erase_flags):
- """
- pkgspecs is a list of pkgspec dicts specifying packages
- e.g.:
- For a single package
- { 'name': 'foo', 'epoch': '20', 'version': '1', 'release': '1', 'arch': 'x86_64' }
-
- """
- erase_ts_flags = 0
- if 'noscripts' in erase_flags:
- erase_ts_flags |= rpm.RPMTRANS_FLAG_NOSCRIPTS
- if 'notriggers' in erase_flags:
- erase_ts_flags |= rpm.RPMTRANS_FLAG_NOTRIGGERS
- if 'repackage' in erase_flags:
- erase_ts_flags |= rpm.RPMTRANS_FLAG_REPACKAGE
-
- erase_ts = rpmtransactionset()
- erase_ts.setFlags(erase_ts_flags)
-
- for pkgspec in erase_pkgspecs:
- idx_list = getindexbykeyword(erase_ts, **pkgspec)
- if len(idx_list) > 1 and not 'allmatches' in erase_flags:
- #pass
- print('ERROR - Multiple package match for erase', pkgspec)
- else:
- for idx in idx_list:
- erase_ts.addErase(idx)
-
- #for te in erase_ts:
-
- erase_problems = []
- if 'nodeps' not in erase_flags:
- erase_problems = erase_ts.check()
-
- if erase_problems == []:
- erase_ts.order()
- erase_callback = Rpmtscallback()
- erase_ts.run(erase_callback.callback, 'Erase')
- #else:
-
- erase_ts.closeDB()
- del erase_ts
- return erase_problems
-
-def display_verify_file(file_results):
- '''
- Display file results similar to rpm --verify.
- '''
- filename = file_results[-1]
- filetype = file_results[-2]
-
- result_string = ''
-
- if 'RPMVERIFY_LSTATFAIL' in file_results:
- result_string = 'missing '
- else:
- if 'RPMVERIFY_FILESIZE' in file_results:
- result_string = result_string + 'S'
- else:
- result_string = result_string + '.'
-
- if 'RPMVERIFY_MODE' in file_results:
- result_string = result_string + 'M'
- else:
- result_string = result_string + '.'
-
- if 'RPMVERIFY_MD5' in file_results:
- if 'RPMVERIFY_READFAIL' in file_results:
- result_string = result_string + '?'
- else:
- result_string = result_string + '5'
- else:
- result_string = result_string + '.'
-
- if 'RPMVERIFY_RDEV' in file_results:
- result_string = result_string + 'D'
- else:
- result_string = result_string + '.'
-
- if 'RPMVERIFY_LINKTO' in file_results:
- if 'RPMVERIFY_READLINKFAIL' in file_results:
- result_string = result_string + '?'
- else:
- result_string = result_string + 'L'
- else:
- result_string = result_string + '.'
-
- if 'RPMVERIFY_USER' in file_results:
- result_string = result_string + 'U'
- else:
- result_string = result_string + '.'
-
- if 'RPMVERIFY_GROUP' in file_results:
- result_string = result_string + 'G'
- else:
- result_string = result_string + '.'
-
- if 'RPMVERIFY_MTIME' in file_results:
- result_string = result_string + 'T'
- else:
- result_string = result_string + '.'
-
- print(result_string + ' ' + filetype + ' ' + filename)
- sys.stdout.flush()
-
-#===============================================================================
-# Some options and output to assist with development and testing.
-# These are not intended for normal use.
-if __name__ == "__main__":
-
- p = optparse.OptionParser()
-
- p.add_option('--name', action='store', \
- default=None, \
- help='''Package name to verify.
-
- ******************************************
- NOT SPECIFYING A NAME MEANS 'ALL' PACKAGES.
- ******************************************
-
- The specified operation will be carried out on all
- instances of packages that match the package specification
- (name, epoch, version, release, arch).''')
-
- p.add_option('--epoch', action='store', \
- default=None, \
- help='''Package epoch.''')
-
- p.add_option('--version', action='store', \
- default=None, \
- help='''Package version.''')
-
- p.add_option('--release', action='store', \
- default=None, \
- help='''Package release.''')
-
- p.add_option('--arch', action='store', \
- default=None, \
- help='''Package arch.''')
-
- p.add_option('--erase', '-e', action='store_true', \
- default=None, \
- help='''****************************************************
- REMOVE PACKAGES. THERE ARE NO WARNINGS. MULTIPLE
- PACKAGES WILL BE REMOVED IF A FULL PACKAGE SPEC IS NOT
- GIVEN. E.G. IF JUST A NAME IS GIVEN ALL INSTALLED
- INSTANCES OF THAT PACKAGE WILL BE REMOVED PROVIDED
- DEPENDENCY CHECKS PASS. IF JUST AN EPOCH IS GIVEN
- ALL PACKAGE INSTANCES WITH THAT EPOCH WILL BE REMOVED.
- ****************************************************''')
-
- p.add_option('--list', '-l', action='store_true', \
- help='''List package identity info. Roughly equivalent to rpm -qa;
- intended for use in RefreshPackages().''')
-
- p.add_option('--verify', action='store_true', \
- help='''Verify Package(s). Output is only produced after all
- packages have been verified. Be patient.''')
-
- p.add_option('--verbose', '-v', action='store_true', \
- help='''Verbose output for --verify option. Output is the
- same as rpm -v --verify.''')
-
- p.add_option('--nodeps', action='store_true', \
- default=False, \
- help='Do not do dependency testing.')
-
- p.add_option('--nodigest', action='store_true', \
- help='Do not check package digests.')
-
- p.add_option('--nofiles', action='store_true', \
- help='Do not do file checks.')
-
- p.add_option('--noscripts', action='store_true', \
- help='Do not run verification scripts.')
-
- p.add_option('--nosignature', action='store_true', \
- help='Do not do package signature verification.')
-
- p.add_option('--nolinkto', action='store_true', \
- help='Do not do symlink tests.')
-
- p.add_option('--nomd5', action='store_true', \
- help='''Do not do MD5 checksums on files. Note that this does
- not work for prelink files yet.''')
-
- p.add_option('--nosize', action='store_true', \
- help='''Do not do file size tests. Note that this does not work
- for prelink files yet.''')
-
- p.add_option('--nouser', action='store_true', \
- help='Do not check file user ownership.')
-
- p.add_option('--nogroup', action='store_true', \
- help='Do not check file group ownership.')
-
- p.add_option('--nomtime', action='store_true', \
- help='Do not check file modification times.')
-
- p.add_option('--nomode', action='store_true', \
- help='Do not check file modes (permissions).')
-
- p.add_option('--nordev', action='store_true', \
- help='Do not check device node.')
-
- p.add_option('--notriggers', action='store_true', \
- help='Do not generate triggers on erase.')
-
- p.add_option('--repackage', action='store_true', \
- help='''Do repackage on erase. Packages are put
- in /var/spool/repackage.''')
-
- p.add_option('--allmatches', action='store_true', \
- help='''Remove all package instances that match the
- pkgspec.
-
- ***************************************************
- NO WARNINGS ARE GIVEN. IF THERE IS NO PACKAGE SPEC
- THAT MEANS ALL PACKAGES!!!!
- ***************************************************''')
-
- options, arguments = p.parse_args()
-
- pkgspec = {}
- rpm_options = []
-
- if options.nodeps:
- rpm_options.append('nodeps')
-
- if options.nodigest:
- rpm_options.append('nodigest')
-
- if options.nofiles:
- rpm_options.append('nofiles')
-
- if options.noscripts:
- rpm_options.append('noscripts')
-
- if options.nosignature:
- rpm_options.append('nosignature')
-
- if options.nolinkto:
- rpm_options.append('nolinkto')
-
- if options.nomd5:
- rpm_options.append('nomd5')
-
- if options.nosize:
- rpm_options.append('nosize')
-
- if options.nouser:
- rpm_options.append('nouser')
-
- if options.nogroup:
- rpm_options.append('nogroup')
-
- if options.nomtime:
- rpm_options.append('nomtime')
-
- if options.nomode:
- rpm_options.append('nomode')
-
- if options.nordev:
- rpm_options.append('nordev')
-
- if options.repackage:
- rpm_options.append('repackage')
-
- if options.allmatches:
- rpm_options.append('allmatches')
-
- main_ts = rpmtransactionset()
-
- cmdline_pkgspec = {}
- if options.name != 'all':
- if options.name:
- cmdline_pkgspec['name'] = str(options.name)
- if options.epoch:
- cmdline_pkgspec['epoch'] = str(options.epoch)
- if options.version:
- cmdline_pkgspec['version'] = str(options.version)
- if options.release:
- cmdline_pkgspec['release'] = str(options.release)
- if options.arch:
- cmdline_pkgspec['arch'] = str(options.arch)
-
- if options.verify:
- results = rpm_verify(main_ts, cmdline_pkgspec, rpm_options)
- for r in results:
- files = r.get('files', '')
- for f in files:
- display_verify_file(f)
-
- elif options.list:
- for p in rpmpackagelist(main_ts):
- print(p)
-
- elif options.erase:
- if options.name:
- rpm_erase([cmdline_pkgspec], rpm_options)
- else:
- print('You must specify the "--name" option')
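A minimal, self-contained sketch of consuming the per-file result lists that rpm_verify() documents above, rendered roughly the way rpm --verify and display_verify_file() do. The sample data is made up for illustration and does not come from a real package.

    # Illustrative only: sample data in the shape rpm_verify() documents above.
    SAMPLE_RESULT = {
        'nevra': ('foo', '0', '1.0', '1', 'x86_64'),
        'files': [['RPMVERIFY_MD5', 'RPMVERIFY_MTIME', 'c', '/etc/foo.conf'],
                  ['RPMVERIFY_LSTATFAIL', ' ', '/usr/share/doc/foo/README']],
    }

    # Flag-to-column mapping, in the column order used by display_verify_file().
    FLAG_CODES = [('RPMVERIFY_FILESIZE', 'S'), ('RPMVERIFY_MODE', 'M'),
                  ('RPMVERIFY_MD5', '5'), ('RPMVERIFY_RDEV', 'D'),
                  ('RPMVERIFY_LINKTO', 'L'), ('RPMVERIFY_USER', 'U'),
                  ('RPMVERIFY_GROUP', 'G'), ('RPMVERIFY_MTIME', 'T')]

    def format_file_result(file_result):
        """Render one per-file result list (flags, type char, filename)."""
        filename, filetype = file_result[-1], file_result[-2]
        if 'RPMVERIFY_LSTATFAIL' in file_result:
            return 'missing   %s %s' % (filetype, filename)
        codes = ''.join(code if flag in file_result else '.'
                        for flag, code in FLAG_CODES)
        return '%s %s %s' % (codes, filetype, filename)

    if __name__ == '__main__':
        name, epoch, version, release, arch = SAMPLE_RESULT['nevra']
        print('%s-%s-%s.%s:' % (name, version, release, arch))
        for entry in SAMPLE_RESULT['files']:
            print('  ' + format_file_result(entry))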
diff --git a/src/lib/Bcfg2/Options.py b/src/lib/Bcfg2/Options.py
index 243c4ed2a..a6511f88e 100644
--- a/src/lib/Bcfg2/Options.py
+++ b/src/lib/Bcfg2/Options.py
@@ -9,8 +9,8 @@ import shlex
import sys
import grp
import pwd
-import Bcfg2.Client.Tools
-from Bcfg2.Compat import ConfigParser
+from Bcfg2.Client.Tools import __path__ as toolpath
+from Bcfg2.Compat import ConfigParser, walk_packages
from Bcfg2.version import __version__
@@ -334,12 +334,6 @@ def get_bool(val):
raise ValueError("Not a boolean value", val)
-def get_int(val):
- """ given a string value of an integer configuration option,
- return an actual int """
- return int(val)
-
-
def get_timeout(val):
""" convert the timeout value into a float or None """
if val is None:
@@ -624,33 +618,27 @@ SERVER_CHILDREN = \
DB_ENGINE = \
Option('Database engine',
default='sqlite3',
- cf=('database', 'engine'),
- deprecated_cf=('statistics', 'database_engine'))
+ cf=('database', 'engine'))
DB_NAME = \
Option('Database name',
default=os.path.join(SERVER_REPOSITORY.default, "etc/bcfg2.sqlite"),
- cf=('database', 'name'),
- deprecated_cf=('statistics', 'database_name'))
+ cf=('database', 'name'))
DB_USER = \
Option('Database username',
default=None,
- cf=('database', 'user'),
- deprecated_cf=('statistics', 'database_user'))
+ cf=('database', 'user'))
DB_PASSWORD = \
Option('Database password',
default=None,
- cf=('database', 'password'),
- deprecated_cf=('statistics', 'database_password'))
+ cf=('database', 'password'))
DB_HOST = \
Option('Database host',
default='localhost',
- cf=('database', 'host'),
- deprecated_cf=('statistics', 'database_host'))
+ cf=('database', 'host'))
DB_PORT = \
Option('Database port',
default='',
- cf=('database', 'port'),
- deprecated_cf=('statistics', 'database_port'))
+ cf=('database', 'port'))
# Django options
WEB_CFILE = \
@@ -674,8 +662,7 @@ DJANGO_DEBUG = \
DJANGO_WEB_PREFIX = \
Option('Web prefix',
default=None,
- cf=('reporting', 'web_prefix'),
- deprecated_cf=('statistics', 'web_prefix'),)
+ cf=('reporting', 'web_prefix'))
# Reporting options
REPORTING_FILE_LIMIT = \
@@ -754,7 +741,7 @@ CLIENT_PARANOID = \
cook=get_bool)
CLIENT_DRIVERS = \
Option('Specify tool driver set',
- default=Bcfg2.Client.Tools.default,
+ default=[m[1] for m in walk_packages(path=toolpath)],
cmd='-D',
odesc='<driver1,driver2>',
cf=('client', 'drivers'),
@@ -876,7 +863,7 @@ TEST_CHILDREN = \
cmd='--children',
odesc='<children>',
cf=('bcfg2_test', 'children'),
- cook=get_int,
+ cook=int,
long_arg=True)
TEST_XUNIT = \
Option('Output an XUnit result file with --children',
@@ -933,125 +920,65 @@ CLIENT_RPM_INSTALLONLY = \
'kernel-default', 'kernel-largesmp-devel',
'kernel-largesmp', 'kernel-xen', 'gpg-pubkey'],
cf=('RPM', 'installonlypackages'),
- deprecated_cf=('RPMng', 'installonlypackages'),
cook=list_split)
CLIENT_RPM_PKG_CHECKS = \
Option("Perform RPM package checks",
default=True,
cf=('RPM', 'pkg_checks'),
- deprecated_cf=('RPMng', 'pkg_checks'),
cook=get_bool)
CLIENT_RPM_PKG_VERIFY = \
Option("Perform RPM package verify",
default=True,
cf=('RPM', 'pkg_verify'),
- deprecated_cf=('RPMng', 'pkg_verify'),
cook=get_bool)
CLIENT_RPM_INSTALLED_ACTION = \
Option("RPM installed action",
default="install",
- cf=('RPM', 'installed_action'),
- deprecated_cf=('RPMng', 'installed_action'))
+ cf=('RPM', 'installed_action'))
CLIENT_RPM_ERASE_FLAGS = \
Option("RPM erase flags",
default=["allmatches"],
cf=('RPM', 'erase_flags'),
- deprecated_cf=('RPMng', 'erase_flags'),
cook=list_split)
CLIENT_RPM_VERSION_FAIL_ACTION = \
Option("RPM version fail action",
default="upgrade",
- cf=('RPM', 'version_fail_action'),
- deprecated_cf=('RPMng', 'version_fail_action'))
+ cf=('RPM', 'version_fail_action'))
CLIENT_RPM_VERIFY_FAIL_ACTION = \
Option("RPM verify fail action",
default="reinstall",
- cf=('RPM', 'verify_fail_action'),
- deprecated_cf=('RPMng', 'verify_fail_action'))
+ cf=('RPM', 'verify_fail_action'))
CLIENT_RPM_VERIFY_FLAGS = \
Option("RPM verify flags",
default=[],
cf=('RPM', 'verify_flags'),
- deprecated_cf=('RPMng', 'verify_flags'),
cook=list_split)
-CLIENT_YUM24_INSTALLONLY = \
- Option('YUM24 install-only packages',
- default=['kernel', 'kernel-bigmem', 'kernel-enterprise',
- 'kernel-smp', 'kernel-modules', 'kernel-debug',
- 'kernel-unsupported', 'kernel-devel', 'kernel-source',
- 'kernel-default', 'kernel-largesmp-devel',
- 'kernel-largesmp', 'kernel-xen', 'gpg-pubkey'],
- cf=('YUM24', 'installonlypackages'),
- cook=list_split)
-CLIENT_YUM24_PKG_CHECKS = \
- Option("Perform YUM24 package checks",
- default=True,
- cf=('YUM24', 'pkg_checks'),
- cook=get_bool)
-CLIENT_YUM24_PKG_VERIFY = \
- Option("Perform YUM24 package verify",
- default=True,
- cf=('YUM24', 'pkg_verify'),
- cook=get_bool)
-CLIENT_YUM24_INSTALLED_ACTION = \
- Option("YUM24 installed action",
- default="install",
- cf=('YUM24', 'installed_action'))
-CLIENT_YUM24_ERASE_FLAGS = \
- Option("YUM24 erase flags",
- default=["allmatches"],
- cf=('YUM24', 'erase_flags'),
- cook=list_split)
-CLIENT_YUM24_VERSION_FAIL_ACTION = \
- Option("YUM24 version fail action",
- cf=('YUM24', 'version_fail_action'),
- default="upgrade")
-CLIENT_YUM24_VERIFY_FAIL_ACTION = \
- Option("YUM24 verify fail action",
- default="reinstall",
- cf=('YUM24', 'verify_fail_action'))
-CLIENT_YUM24_VERIFY_FLAGS = \
- Option("YUM24 verify flags",
- default=[],
- cf=('YUM24', 'verify_flags'),
- cook=list_split)
-CLIENT_YUM24_AUTODEP = \
- Option("YUM24 autodependency processing",
- default=True,
- cf=('YUM24', 'autodep'),
- cook=get_bool)
CLIENT_YUM_PKG_CHECKS = \
Option("Perform YUM package checks",
default=True,
cf=('YUM', 'pkg_checks'),
- deprecated_cf=('YUMng', 'pkg_checks'),
cook=get_bool)
CLIENT_YUM_PKG_VERIFY = \
Option("Perform YUM package verify",
default=True,
cf=('YUM', 'pkg_verify'),
- deprecated_cf=('YUMng', 'pkg_verify'),
cook=get_bool)
CLIENT_YUM_INSTALLED_ACTION = \
Option("YUM installed action",
default="install",
- cf=('YUM', 'installed_action'),
- deprecated_cf=('YUMng', 'installed_action'))
+ cf=('YUM', 'installed_action'))
CLIENT_YUM_VERSION_FAIL_ACTION = \
Option("YUM version fail action",
default="upgrade",
- cf=('YUM', 'version_fail_action'),
- deprecated_cf=('YUMng', 'version_fail_action'))
+ cf=('YUM', 'version_fail_action'))
CLIENT_YUM_VERIFY_FAIL_ACTION = \
Option("YUM verify fail action",
default="reinstall",
- cf=('YUM', 'verify_fail_action'),
- deprecated_cf=('YUMng', 'verify_fail_action'))
+ cf=('YUM', 'verify_fail_action'))
CLIENT_YUM_VERIFY_FLAGS = \
Option("YUM verify flags",
default=[],
cf=('YUM', 'verify_flags'),
- deprecated_cf=('YUMng', 'verify_flags'),
cook=list_split)
CLIENT_POSIX_UID_WHITELIST = \
Option("UID ranges the POSIXUsers tool will manage",
@@ -1204,6 +1131,14 @@ CRYPT_OPTIONS = dict(encrypt=ENCRYPT,
cfg=CRYPT_CFG,
remove=CRYPT_REMOVE)
+PATH_METADATA_OPTIONS = dict(owner=MDATA_OWNER,
+ group=MDATA_GROUP,
+ mode=MDATA_MODE,
+ secontext=MDATA_SECONTEXT,
+ important=MDATA_IMPORTANT,
+ paranoid=MDATA_PARANOID,
+ sensitive=MDATA_SENSITIVE)
+
DRIVER_OPTIONS = \
dict(apt_install_path=CLIENT_APT_TOOLS_INSTALL_PATH,
apt_var_path=CLIENT_APT_TOOLS_VAR_PATH,
@@ -1217,15 +1152,6 @@ DRIVER_OPTIONS = \
rpm_version_fail_action=CLIENT_RPM_VERSION_FAIL_ACTION,
rpm_verify_fail_action=CLIENT_RPM_VERIFY_FAIL_ACTION,
rpm_verify_flags=CLIENT_RPM_VERIFY_FLAGS,
- yum24_installonly=CLIENT_YUM24_INSTALLONLY,
- yum24_pkg_checks=CLIENT_YUM24_PKG_CHECKS,
- yum24_pkg_verify=CLIENT_YUM24_PKG_VERIFY,
- yum24_installed_action=CLIENT_YUM24_INSTALLED_ACTION,
- yum24_erase_flags=CLIENT_YUM24_ERASE_FLAGS,
- yum24_version_fail_action=CLIENT_YUM24_VERSION_FAIL_ACTION,
- yum24_verify_fail_action=CLIENT_YUM24_VERIFY_FAIL_ACTION,
- yum24_verify_flags=CLIENT_YUM24_VERIFY_FLAGS,
- yum24_autodep=CLIENT_YUM24_AUTODEP,
yum_pkg_checks=CLIENT_YUM_PKG_CHECKS,
yum_pkg_verify=CLIENT_YUM_PKG_VERIFY,
yum_installed_action=CLIENT_YUM_INSTALLED_ACTION,
@@ -1305,10 +1231,11 @@ INFO_COMMON_OPTIONS.update(SERVER_COMMON_OPTIONS)
class OptionParser(OptionSet):
- """
- OptionParser bootstraps option parsing,
- getting the value of the config file
- """
+ """ OptionParser bootstraps option parsing, getting the value of
+ the config file. This should only be instantiated by
+ :func:`get_option_parser`, below, not by individual plugins or
+ scripts. """
+
def __init__(self, args, argv=None, quiet=False):
if argv is None:
argv = sys.argv[1:]
@@ -1323,25 +1250,73 @@ class OptionParser(OptionSet):
self.argv = []
self.do_getopt = True
- def reparse(self):
+ def reparse(self, argv=None, do_getopt=None):
""" parse the options again, taking any changes (e.g., to the
config file) into account """
+ self.parse(argv=argv, do_getopt=do_getopt)
+
+ def parse(self, argv=None, do_getopt=None):
for key, opt in self.optinfo.items():
self[key] = opt
- if "args" not in self.optinfo:
+ if "args" not in self.optinfo and "args" in self:
del self['args']
- self.parse(self.argv, self.do_getopt)
-
- def parse(self, argv, do_getopt=True):
- self.argv = argv
- self.do_getopt = do_getopt
- OptionSet.parse(self, self.argv, do_getopt=self.do_getopt)
+ self.argv = argv or sys.argv[1:]
+ if self.do_getopt is None:
+ if do_getopt:
+ self.do_getopt = do_getopt
+ else:
+ self.do_getopt = True
+ if do_getopt is None:
+ do_getopt = self.do_getopt
+ OptionSet.parse(self, self.argv, do_getopt=do_getopt)
def add_option(self, name, opt):
""" Add an option to the parser """
self[name] = opt
self.optinfo[name] = opt
+ def add_options(self, options):
+ """ Add a set of options to the parser """
+ self.update(options)
+ self.optinfo.update(options)
+
def update(self, optdict):
dict.update(self, optdict)
self.optinfo.update(optdict)
+
+
+#: A module-level OptionParser object that all plugins, etc., can use.
+#: This should not be used directly, but retrieved via
+#: :func:`get_option_parser`.
+_PARSER = None
+
+
+def load_option_parser(args, argv=None, quiet=False):
+ """ Load an :class:`Bcfg2.Options.OptionParser` object, caching it
+ in :attr:`_PARSER` for later retrieval via
+ :func:`get_option_parser`.
+
+ :param args: The argument set to parse.
+ :type args: dict of :class:`Bcfg2.Options.Option` objects
+ :param argv: The command-line argument list. If this is not
+ provided, :attr:`sys.argv` will be used.
+ :type argv: list of strings
+ :param quiet: Be quiet when bootstrapping the argument parser.
+ :type quiet: bool
+ :returns: :class:`Bcfg2.Options.OptionParser`
+ """
+ global _PARSER # pylint: disable=W0603
+ _PARSER = OptionParser(args, argv=argv, quiet=quiet)
+ return _PARSER
+
+
+def get_option_parser():
+ """ Get an already-created :class:`Bcfg2.Options.OptionParser` object. If
+ :attr:`_PARSER` has not been populated, then a new OptionParser
+ will be created with basic arguments.
+
+ :returns: :class:`Bcfg2.Options.OptionParser`
+ """
+ if _PARSER is None:
+ return load_option_parser(CLI_COMMON_OPTIONS)
+ return _PARSER
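A short usage sketch of the module-level parser API added above (load_option_parser, get_option_parser, add_options, reparse). The particular option set chosen here is only an example.

    import sys
    import Bcfg2.Options

    # A script bootstraps the shared parser once, early on.
    Bcfg2.Options.load_option_parser(Bcfg2.Options.CLI_COMMON_OPTIONS)

    # Later, any plugin or admin mode retrieves the same parser, registers
    # the extra options it needs, and re-parses the command line.
    setup = Bcfg2.Options.get_option_parser()
    setup.add_options(dict(repo=Bcfg2.Options.SERVER_REPOSITORY))
    setup.reparse(argv=sys.argv[1:])
    print(setup['repo'])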
diff --git a/src/lib/Bcfg2/Server/Admin/Compare.py b/src/lib/Bcfg2/Server/Admin/Compare.py
index c56dd0a8f..d0831362c 100644
--- a/src/lib/Bcfg2/Server/Admin/Compare.py
+++ b/src/lib/Bcfg2/Server/Admin/Compare.py
@@ -9,8 +9,8 @@ class Compare(Bcfg2.Server.Admin.Mode):
__usage__ = ("<old> <new>\n\n"
" -r\trecursive")
- def __init__(self, setup):
- Bcfg2.Server.Admin.Mode.__init__(self, setup)
+ def __init__(self):
+ Bcfg2.Server.Admin.Mode.__init__(self)
self.important = {'Path': ['name', 'type', 'owner', 'group', 'mode',
'important', 'paranoid', 'sensitive',
'dev_type', 'major', 'minor', 'prune',
@@ -22,15 +22,14 @@ class Compare(Bcfg2.Server.Admin.Mode):
'Service': ['name', 'type', 'status', 'mode',
'target', 'sequence', 'parameters'],
'Action': ['name', 'timing', 'when', 'status',
- 'command'],
- 'PostInstall': ['name']
+ 'command']
}
def compareStructures(self, new, old):
- if new.tag == 'Independent':
- bundle = 'Base'
- else:
+ if new.get("name"):
bundle = new.get('name')
+ else:
+ bundle = 'Independent'
identical = True
diff --git a/src/lib/Bcfg2/Server/Admin/Init.py b/src/lib/Bcfg2/Server/Admin/Init.py
index 6175d8ed0..870a31480 100644
--- a/src/lib/Bcfg2/Server/Admin/Init.py
+++ b/src/lib/Bcfg2/Server/Admin/Init.py
@@ -8,8 +8,7 @@ import random
import socket
import string
import getpass
-import subprocess
-
+from Bcfg2.Utils import Executor
import Bcfg2.Server.Admin
import Bcfg2.Server.Plugin
import Bcfg2.Options
@@ -104,23 +103,26 @@ def gen_password(length):
def create_key(hostname, keypath, certpath, country, state, location):
"""Creates a bcfg2.key at the directory specifed by keypath."""
- kcstr = ("openssl req -batch -x509 -nodes -subj '/C=%s/ST=%s/L=%s/CN=%s' "
- "-days 1000 -newkey rsa:2048 -keyout %s -noout" % (country,
- state,
- location,
- hostname,
- keypath))
- subprocess.call((kcstr), shell=True)
- ccstr = ("openssl req -batch -new -subj '/C=%s/ST=%s/L=%s/CN=%s' -key %s "
- "| openssl x509 -req -days 1000 -signkey %s -out %s" % (country,
- state,
- location,
- hostname,
- keypath,
- keypath,
- certpath))
- subprocess.call((ccstr), shell=True)
+ cmd = Executor(timeout=120)
+ subject = "/C=%s/ST=%s/L=%s/CN=%s" % (country, state, location, hostname)
+ key = cmd.run(["openssl", "req", "-batch", "-x509", "-nodes",
+ "-subj", subject, "-days", "1000", "-newkey", "rsa:2048",
+ "-keyout", keypath, "-noout"])
+ if not key.success:
+ print("Error generating key: %s" % key.error)
+ return
os.chmod(keypath, stat.S_IRUSR | stat.S_IWUSR) # 0600
+ csr = cmd.run(["openssl", "req", "-batch", "-new", "-subj", subject,
+ "-key", keypath])
+ if not csr.success:
+ print("Error generating certificate signing request: %s" % csr.error)
+ return
+ cert = cmd.run(["openssl", "x509", "-req", "-days", "1000",
+ "-signkey", keypath, "-out", certpath],
+ inputdata=csr.stdout)
+ if not cert.success:
+ print("Error signing certificate: %s" % cert.error)
+ return
def create_conf(confpath, confdata):
@@ -144,14 +146,9 @@ def create_conf(confpath, confdata):
class Init(Bcfg2.Server.Admin.Mode):
"""Interactively initialize a new repository."""
- options = {'configfile': Bcfg2.Options.CFILE,
- 'plugins': Bcfg2.Options.SERVER_PLUGINS,
- 'proto': Bcfg2.Options.SERVER_PROTOCOL,
- 'repo': Bcfg2.Options.SERVER_REPOSITORY,
- 'sendmail': Bcfg2.Options.SENDMAIL_PATH}
-
- def __init__(self, setup):
- Bcfg2.Server.Admin.Mode.__init__(self, setup)
+
+ def __init__(self):
+ Bcfg2.Server.Admin.Mode.__init__(self)
self.data = dict()
self.plugins = Bcfg2.Options.SERVER_PLUGINS.default
@@ -176,9 +173,16 @@ class Init(Bcfg2.Server.Admin.Mode):
def __call__(self, args):
# Parse options
- opts = Bcfg2.Options.OptionParser(self.options)
- opts.parse(args)
- self._set_defaults(opts)
+ setup = Bcfg2.Options.get_option_parser()
+ setup.add_options(dict(configfile=Bcfg2.Options.CFILE,
+ plugins=Bcfg2.Options.SERVER_PLUGINS,
+ proto=Bcfg2.Options.SERVER_PROTOCOL,
+ repo=Bcfg2.Options.SERVER_REPOSITORY,
+ sendmail=Bcfg2.Options.SENDMAIL_PATH))
+ opts = sys.argv[1:]
+ opts.remove(self.__class__.__name__.lower())
+ setup.reparse(argv=opts)
+ self._set_defaults(setup)
# Prompt the user for input
self._prompt_config()
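The Executor helper used above is not defined in this patch; judging from the calls in Init.py and Viz.py, run() takes a command as an argument list (optionally with inputdata fed to stdin) and returns a result object exposing success, stdout, error and retval. A hedged sketch of that assumed interface:

    from Bcfg2.Utils import Executor

    # Assumed interface, inferred from the calls in this patch.
    cmd = Executor(timeout=120)
    result = cmd.run(["openssl", "version"])
    if result.success:
        print(result.stdout)
    else:
        print("openssl failed: %s" % result.error)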
diff --git a/src/lib/Bcfg2/Server/Admin/Perf.py b/src/lib/Bcfg2/Server/Admin/Perf.py
index f6bc22959..1a772e6fc 100644
--- a/src/lib/Bcfg2/Server/Admin/Perf.py
+++ b/src/lib/Bcfg2/Server/Admin/Perf.py
@@ -2,7 +2,7 @@
import sys
import Bcfg2.Options
-import Bcfg2.Proxy
+import Bcfg2.Client.Proxy
import Bcfg2.Server.Admin
@@ -11,23 +11,24 @@ class Perf(Bcfg2.Server.Admin.Mode):
def __call__(self, args):
output = [('Name', 'Min', 'Max', 'Mean', 'Count')]
- optinfo = {
- 'ca': Bcfg2.Options.CLIENT_CA,
- 'certificate': Bcfg2.Options.CLIENT_CERT,
- 'key': Bcfg2.Options.SERVER_KEY,
- 'password': Bcfg2.Options.SERVER_PASSWORD,
- 'server': Bcfg2.Options.SERVER_LOCATION,
- 'user': Bcfg2.Options.CLIENT_USER,
- 'timeout': Bcfg2.Options.CLIENT_TIMEOUT}
- setup = Bcfg2.Options.OptionParser(optinfo)
- setup.parse(sys.argv[1:])
- proxy = Bcfg2.Proxy.ComponentProxy(setup['server'],
- setup['user'],
- setup['password'],
- key=setup['key'],
- cert=setup['certificate'],
- ca=setup['ca'],
- timeout=setup['timeout'])
+ setup = Bcfg2.Options.get_option_parser()
+ setup.add_options(dict(ca=Bcfg2.Options.CLIENT_CA,
+ certificate=Bcfg2.Options.CLIENT_CERT,
+ key=Bcfg2.Options.SERVER_KEY,
+ password=Bcfg2.Options.SERVER_PASSWORD,
+ server=Bcfg2.Options.SERVER_LOCATION,
+ user=Bcfg2.Options.CLIENT_USER,
+ timeout=Bcfg2.Options.CLIENT_TIMEOUT))
+ opts = sys.argv[1:]
+ opts.remove(self.__class__.__name__.lower())
+ setup.reparse(argv=opts)
+ proxy = Bcfg2.Client.Proxy.ComponentProxy(setup['server'],
+ setup['user'],
+ setup['password'],
+ key=setup['key'],
+ cert=setup['certificate'],
+ ca=setup['ca'],
+ timeout=setup['timeout'])
data = proxy.get_statistics()
for key in sorted(data.keys()):
output.append(
diff --git a/src/lib/Bcfg2/Server/Admin/Pull.py b/src/lib/Bcfg2/Server/Admin/Pull.py
index 8001425df..e883c432f 100644
--- a/src/lib/Bcfg2/Server/Admin/Pull.py
+++ b/src/lib/Bcfg2/Server/Admin/Pull.py
@@ -23,8 +23,8 @@ class Pull(Bcfg2.Server.Admin.MetadataCore):
"-I", "interactive",
"-s", "stdin"))
- def __init__(self, setup):
- Bcfg2.Server.Admin.MetadataCore.__init__(self, setup)
+ def __init__(self):
+ Bcfg2.Server.Admin.MetadataCore.__init__(self)
self.log = False
self.mode = 'interactive'
diff --git a/src/lib/Bcfg2/Server/Admin/Reports.py b/src/lib/Bcfg2/Server/Admin/Reports.py
index 6e313e84b..bb5ee352b 100644
--- a/src/lib/Bcfg2/Server/Admin/Reports.py
+++ b/src/lib/Bcfg2/Server/Admin/Reports.py
@@ -69,8 +69,8 @@ class Reports(Bcfg2.Server.Admin.Mode):
" Django commands:\n " \
+ "\n ".join(django_commands))
- def __init__(self, setup):
- Bcfg2.Server.Admin.Mode.__init__(self, setup)
+ def __init__(self):
+ Bcfg2.Server.Admin.Mode.__init__(self)
try:
import south
except ImportError:
diff --git a/src/lib/Bcfg2/Server/Admin/Snapshots.py b/src/lib/Bcfg2/Server/Admin/Snapshots.py
deleted file mode 100644
index c2d279391..000000000
--- a/src/lib/Bcfg2/Server/Admin/Snapshots.py
+++ /dev/null
@@ -1,163 +0,0 @@
-from datetime import date
-import sys
-
-# Prereq issues can be signaled with ImportError, so no try needed
-import sqlalchemy, sqlalchemy.orm
-import Bcfg2.Server.Admin
-import Bcfg2.Server.Snapshots
-import Bcfg2.Server.Snapshots.model
-from Bcfg2.Server.Snapshots.model import Snapshot, Client, Metadata, Base, \
- File, Group, Package, Service
-# Compatibility import
-from Bcfg2.Compat import u_str
-
-class Snapshots(Bcfg2.Server.Admin.Mode):
- """ Interact with the Snapshots system """
- __usage__ = "[init|query qtype]"
-
- q_dispatch = {'client': Client,
- 'group': Group,
- 'metadata': Metadata,
- 'package': Package,
- 'snapshot': Snapshot}
-
- def __init__(self, setup):
- Bcfg2.Server.Admin.Mode.__init__(self, setup)
- self.session = Bcfg2.Server.Snapshots.setup_session(self.configfile)
- self.cfile = self.configfile
-
- def __call__(self, args):
- Bcfg2.Server.Admin.Mode.__call__(self, args)
- if len(args) == 0 or args[0] == '-h':
- print(self.__usage__)
- raise SystemExit(0)
-
- if args[0] == 'query':
- if args[1] in self.q_dispatch:
- q_obj = self.q_dispatch[args[1]]
- if q_obj == Client:
- rows = []
- labels = ('Client', 'Active')
- for host in \
- self.session.query(q_obj).filter(q_obj.active == False):
- rows.append([host.name, 'No'])
- for host in \
- self.session.query(q_obj).filter(q_obj.active == True):
- rows.append([host.name, 'Yes'])
- self.print_table([labels]+rows,
- justify='left',
- hdr=True,
- vdelim=" ",
- padding=1)
- elif q_obj == Group:
- print("Groups:")
- for group in self.session.query(q_obj).all():
- print(" %s" % group.name)
- else:
- results = self.session.query(q_obj).all()
- else:
- print('error')
- raise SystemExit(1)
- elif args[0] == 'init':
- # Initialize the Snapshots database
- dbpath = Bcfg2.Server.Snapshots.db_from_config(self.cfile)
- engine = sqlalchemy.create_engine(dbpath, echo=True)
- metadata = Base.metadata
- metadata.create_all(engine)
- Session = sqlalchemy.orm.sessionmaker()
- Session.configure(bind=engine)
- session = Session()
- session.commit()
- elif args[0] == 'dump':
- client = args[1]
- snap = Snapshot.get_current(self.session, u_str(client))
- if not snap:
- print("Current snapshot for %s not found" % client)
- sys.exit(1)
- print("Client %s last run at %s" % (client, snap.timestamp))
- for pkg in snap.packages:
- print("C:", pkg.correct, 'M:', pkg.modified)
- print("start", pkg.start.name, pkg.start.version)
- print("end", pkg.end.name, pkg.end.version)
- elif args[0] == 'reports':
- # bcfg2-admin reporting interface for Snapshots
- if '-a' in args[1:]:
- # Query all hosts for Name, Status, Revision, Timestamp
- q = self.session.query(Client.name,
- Snapshot.correct,
- Snapshot.revision,
- Snapshot.timestamp)\
- .filter(Client.id==Snapshot.client_id)\
- .group_by(Client.id)
- rows = []
- labels = ('Client', 'Correct', 'Revision', 'Time')
- for item in q.all():
- cli, cor, time, rev = item
- rows.append([cli, cor, time, rev])
- self.print_table([labels]+rows,
- justify='left',
- hdr=True, vdelim=" ",
- padding=1)
- elif '-b' in args[1:]:
- # Query a single host for bad entries
- if len(args) < 3:
- print("Usage: bcfg2-admin snapshots -b <client>")
- return
- client = args[2]
- snap = Snapshot.get_current(self.session, u_str(client))
- if not snap:
- print("Current snapshot for %s not found" % client)
- sys.exit(1)
- print("Bad entries:")
- bad_pkgs = [self.session.query(Package)
- .filter(Package.id==p.start_id).one().name \
- for p in snap.packages if p.correct == False]
- for p in bad_pkgs:
- print(" Package:%s" % p)
- bad_files = [self.session.query(File)
- .filter(File.id==f.start_id).one().name \
- for f in snap.files if f.correct == False]
- for filename in bad_files:
- print(" File:%s" % filename)
- bad_svcs = [self.session.query(Service)
- .filter(Service.id==s.start_id).one().name \
- for s in snap.services if s.correct == False]
- for svc in bad_svcs:
- print(" Service:%s" % svc)
- elif '-e' in args[1:]:
- # Query a single host for extra entries
- client = args[2]
- snap = Snapshot.get_current(self.session, u_str(client))
- if not snap:
- print("Current snapshot for %s not found" % client)
- sys.exit(1)
- print("Extra entries:")
- for pkg in snap.extra_packages:
- print(" Package:%s" % pkg.name)
- # FIXME: Do we know about extra files yet?
- for f in snap.extra_files:
- print(" File:%s" % f.name)
- for svc in snap.extra_services:
- print(" Service:%s" % svc.name)
- elif '--date' in args[1:]:
- year, month, day = args[2:]
- timestamp = date(int(year), int(month), int(day))
- snaps = []
- for client in self.session.query(Client).filter(Client.active == True):
- snaps.append(Snapshot.get_by_date(self.session,
- client.name,
- timestamp))
- rows = []
- labels = ('Client', 'Correct', 'Revision', 'Time')
- for snap in snaps:
- rows.append([snap.client.name,
- snap.correct,
- snap.revision,
- snap.timestamp])
- self.print_table([labels]+rows,
- justify='left',
- hdr=True,
- vdelim=" ",
- padding=1)
- else:
- print("Unknown options: ", args[1:])
diff --git a/src/lib/Bcfg2/Server/Admin/Syncdb.py b/src/lib/Bcfg2/Server/Admin/Syncdb.py
index 4ba840b86..84ad93ae0 100644
--- a/src/lib/Bcfg2/Server/Admin/Syncdb.py
+++ b/src/lib/Bcfg2/Server/Admin/Syncdb.py
@@ -8,15 +8,17 @@ from django.core.management import setup_environ, call_command
class Syncdb(Bcfg2.Server.Admin.Mode):
""" Sync the Django ORM with the configured database """
- options = {'configfile': Bcfg2.Options.WEB_CFILE}
def __call__(self, args):
# Parse options
- opts = Bcfg2.Options.OptionParser(self.options)
- opts.parse(args)
+ setup = Bcfg2.Options.get_option_parser()
+ setup.add_option("web_configfile", Bcfg2.Options.WEB_CFILE)
+ opts = sys.argv[1:]
+ opts.remove(self.__class__.__name__.lower())
+ setup.reparse(argv=opts)
setup_environ(Bcfg2.settings)
- Bcfg2.Server.models.load_models(cfile=opts['configfile'])
+ Bcfg2.Server.models.load_models(cfile=setup['web_configfile'])
try:
call_command("syncdb", interactive=False, verbosity=0)
diff --git a/src/lib/Bcfg2/Server/Admin/Viz.py b/src/lib/Bcfg2/Server/Admin/Viz.py
index 1d9d25f16..a29fdaceb 100644
--- a/src/lib/Bcfg2/Server/Admin/Viz.py
+++ b/src/lib/Bcfg2/Server/Admin/Viz.py
@@ -1,9 +1,8 @@
""" Produce graphviz diagrams of metadata structures """
import getopt
-from subprocess import Popen, PIPE
-import pipes
import Bcfg2.Server.Admin
+from Bcfg2.Utils import Executor
class Viz(Bcfg2.Server.Admin.MetadataCore):
@@ -29,10 +28,9 @@ class Viz(Bcfg2.Server.Admin.MetadataCore):
'indianred1', 'limegreen', 'orange1', 'lightblue2',
'green1', 'blue1', 'yellow1', 'darkturquoise', 'gray66']
- __plugin_blacklist__ = ['DBStats', 'Snapshots', 'Cfg', 'Pkgmgr',
- 'Packages', 'Rules', 'Account', 'Decisions',
- 'Deps', 'Git', 'Svn', 'Fossil', 'Bzr', 'Bundler',
- 'TGenshi', 'Base']
+ __plugin_blacklist__ = ['DBStats', 'Cfg', 'Pkgmgr',
+ 'Packages', 'Rules', 'Decisions',
+ 'Deps', 'Git', 'Svn', 'Fossil', 'Bzr', 'Bundler']
def __call__(self, args):
# First get options to the 'viz' subcommand
@@ -73,40 +71,34 @@ class Viz(Bcfg2.Server.Admin.MetadataCore):
else:
fmt = 'png'
+ exc = Executor()
cmd = ["dot", "-T", fmt]
if output:
cmd.extend(["-o", output])
+ idata = ["digraph groups {",
+ '\trankdir="LR";',
+ self.metadata.viz(hosts, bundles,
+ key, only_client, self.colors)]
+ if key:
+ idata.extend(
+ ["\tsubgraph cluster_key {",
+ '\tstyle="filled";',
+ '\tcolor="lightblue";',
+ '\tBundle [ shape="septagon" ];',
+ '\tGroup [shape="ellipse"];',
+ '\tProfile [style="bold", shape="ellipse"];',
+ '\tHblock [label="Host1|Host2|Host3",shape="record"];',
+ '\tlabel="Key";',
+ "\t}"])
+ idata.append("}")
try:
- dotpipe = Popen(cmd, stdin=PIPE, stdout=PIPE, close_fds=True)
+ result = exc.run(cmd, inputdata=idata)
except OSError:
# on some systems (RHEL 6), you cannot run dot with
# shell=True. on others (Gentoo with Python 2.7), you
# must. In yet others (RHEL 5), either way works. I have
# no idea what the difference is, but it's kind of a PITA.
- cmd = ["dot", "-T", pipes.quote(fmt)]
- if output:
- cmd.extend(["-o", pipes.quote(output)])
- dotpipe = Popen(cmd, shell=True,
- stdin=PIPE, stdout=PIPE, close_fds=True)
- try:
- dotpipe.stdin.write("digraph groups {\n")
- except:
- print("write to dot process failed. Is graphviz installed?")
- raise SystemExit(1)
- dotpipe.stdin.write('\trankdir="LR";\n')
- dotpipe.stdin.write(self.metadata.viz(hosts, bundles,
- key, only_client, self.colors))
- if key:
- dotpipe.stdin.write("\tsubgraph cluster_key {\n")
- dotpipe.stdin.write('\tstyle="filled";\n')
- dotpipe.stdin.write('\tcolor="lightblue";\n')
- dotpipe.stdin.write('\tBundle [ shape="septagon" ];\n')
- dotpipe.stdin.write('\tGroup [shape="ellipse"];\n')
- dotpipe.stdin.write('\tProfile [style="bold", shape="ellipse"];\n')
- dotpipe.stdin.write('\tHblock [label="Host1|Host2|Host3", '
- 'shape="record"];\n')
- dotpipe.stdin.write('\tlabel="Key";\n')
- dotpipe.stdin.write("\t}\n")
- dotpipe.stdin.write("}\n")
- dotpipe.stdin.close()
- return dotpipe.stdout.read()
+ result = exc.run(cmd, shell=True, inputdata=idata)
+ if not result.success:
+ print("Error running %s: %s" % (cmd, result.error))
+ raise SystemExit(result.retval)
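For clarity, the same dot invocation outside of Viz, showing graphviz source fed in via inputdata. The real code passes the idata list directly and the graph comes from metadata.viz(); how Executor serializes a list for stdin is not visible in this patch, so this sketch joins the lines itself and uses a hypothetical minimal graph.

    from Bcfg2.Utils import Executor

    idata = ["digraph groups {",
             '\trankdir="LR";',
             '\t"basic" [shape="ellipse"];',
             "}"]
    result = Executor().run(["dot", "-T", "png", "-o", "groups.png"],
                            inputdata="\n".join(idata))
    if not result.success:
        print("Error running dot: %s" % result.error)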
diff --git a/src/lib/Bcfg2/Server/Admin/Xcmd.py b/src/lib/Bcfg2/Server/Admin/Xcmd.py
index be556bed4..ba4777c93 100644
--- a/src/lib/Bcfg2/Server/Admin/Xcmd.py
+++ b/src/lib/Bcfg2/Server/Admin/Xcmd.py
@@ -2,7 +2,7 @@
import sys
import Bcfg2.Options
-import Bcfg2.Proxy
+import Bcfg2.Client.Proxy
import Bcfg2.Server.Admin
from Bcfg2.Compat import xmlrpclib
@@ -12,33 +12,31 @@ class Xcmd(Bcfg2.Server.Admin.Mode):
__usage__ = "<command>"
def __call__(self, args):
- optinfo = {
- 'server': Bcfg2.Options.SERVER_LOCATION,
- 'user': Bcfg2.Options.CLIENT_USER,
- 'password': Bcfg2.Options.SERVER_PASSWORD,
- 'key': Bcfg2.Options.SERVER_KEY,
- 'certificate': Bcfg2.Options.CLIENT_CERT,
- 'ca': Bcfg2.Options.CLIENT_CA,
- 'timeout': Bcfg2.Options.CLIENT_TIMEOUT}
- setup = Bcfg2.Options.OptionParser(optinfo)
- setup.parse(args)
- Bcfg2.Proxy.RetryMethod.max_retries = 1
- proxy = Bcfg2.Proxy.ComponentProxy(setup['server'],
- setup['user'],
- setup['password'],
- key=setup['key'],
- cert=setup['certificate'],
- ca=setup['ca'],
- timeout=setup['timeout'])
+ setup = Bcfg2.Options.get_option_parser()
+ setup.add_options(dict(ca=Bcfg2.Options.CLIENT_CA,
+ certificate=Bcfg2.Options.CLIENT_CERT,
+ key=Bcfg2.Options.SERVER_KEY,
+ password=Bcfg2.Options.SERVER_PASSWORD,
+ server=Bcfg2.Options.SERVER_LOCATION,
+ user=Bcfg2.Options.CLIENT_USER,
+ timeout=Bcfg2.Options.CLIENT_TIMEOUT))
+ opts = sys.argv[1:]
+ opts.remove(self.__class__.__name__.lower())
+ setup.reparse(argv=opts)
+ Bcfg2.Client.Proxy.RetryMethod.max_retries = 1
+ proxy = Bcfg2.Client.Proxy.ComponentProxy(setup['server'],
+ setup['user'],
+ setup['password'],
+ key=setup['key'],
+ cert=setup['certificate'],
+ ca=setup['ca'],
+ timeout=setup['timeout'])
if len(setup['args']) == 0:
print("Usage: xcmd <xmlrpc method> <optional arguments>")
return
- cmd = setup['args'][0]
- args = ()
- if len(setup['args']) > 1:
- args = tuple(setup['args'][1:])
+ cmd = args[0]
try:
- data = getattr(proxy, cmd)(*args)
+ data = getattr(proxy, cmd)(*args[1:])
except xmlrpclib.Fault:
flt = sys.exc_info()[1]
if flt.faultCode == 7:
@@ -48,7 +46,7 @@ class Xcmd(Bcfg2.Server.Admin.Mode):
return
else:
raise
- except Bcfg2.Proxy.ProxyError:
+ except Bcfg2.Client.Proxy.ProxyError:
err = sys.exc_info()[1]
print("Proxy Error: %s" % err)
return
diff --git a/src/lib/Bcfg2/Server/Admin/__init__.py b/src/lib/Bcfg2/Server/Admin/__init__.py
index 8f12a940e..06a419354 100644
--- a/src/lib/Bcfg2/Server/Admin/__init__.py
+++ b/src/lib/Bcfg2/Server/Admin/__init__.py
@@ -18,15 +18,15 @@ class Mode(object):
__usage__ = None
__args__ = []
- def __init__(self, setup):
- self.setup = setup
- self.configfile = setup['configfile']
+ def __init__(self):
+ self.setup = Bcfg2.Options.get_option_parser()
+ self.configfile = self.setup['configfile']
self.__cfp = False
self.log = logging.getLogger('Bcfg2.Server.Admin.Mode')
usage = "bcfg2-admin %s" % self.__class__.__name__.lower()
if self.__usage__ is not None:
usage += " " + self.__usage__
- setup.hm = usage
+ self.setup.hm = usage
def getCFP(self):
""" get a config parser for the Bcfg2 config file """
@@ -112,19 +112,19 @@ class MetadataCore(Mode):
__plugin_whitelist__ = None
__plugin_blacklist__ = None
- def __init__(self, setup):
- Mode.__init__(self, setup)
+ def __init__(self):
+ Mode.__init__(self)
if self.__plugin_whitelist__ is not None:
- setup['plugins'] = [p for p in setup['plugins']
- if p in self.__plugin_whitelist__]
+ self.setup['plugins'] = [p for p in self.setup['plugins']
+ if p in self.__plugin_whitelist__]
elif self.__plugin_blacklist__ is not None:
- setup['plugins'] = [p for p in setup['plugins']
- if p not in self.__plugin_blacklist__]
+ self.setup['plugins'] = [p for p in self.setup['plugins']
+ if p not in self.__plugin_blacklist__]
# admin modes don't need to watch for changes. one shot is fine here.
- setup['filemonitor'] = 'pseudo'
+ self.setup['filemonitor'] = 'pseudo'
try:
- self.bcore = Bcfg2.Server.Core.BaseCore(setup)
+ self.bcore = Bcfg2.Server.Core.BaseCore()
except Bcfg2.Server.Core.CoreInitError:
msg = sys.exc_info()[1]
self.errExit("Core load failed: %s" % msg)
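Under the new convention above, admin modes no longer receive a setup dict; Mode.__init__ pulls the shared parser itself via Bcfg2.Options.get_option_parser(). A hypothetical minimal mode (not part of Bcfg2, and assuming the shared parser was already loaded with the common bcfg2-admin options) would now look roughly like this:

    import Bcfg2.Server.Admin

    class Hello(Bcfg2.Server.Admin.Mode):
        """ Example-only mode illustrating the new constructor signature. """
        __usage__ = ""

        def __call__(self, args):
            # Mode.__init__ populated self.setup from the shared parser and
            # resolved the config file path for us.
            print("using config file %s" % self.configfile)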
diff --git a/src/lib/Bcfg2/Server/BuiltinCore.py b/src/lib/Bcfg2/Server/BuiltinCore.py
index e69a92b64..b05ad9d41 100644
--- a/src/lib/Bcfg2/Server/BuiltinCore.py
+++ b/src/lib/Bcfg2/Server/BuiltinCore.py
@@ -4,10 +4,10 @@ import sys
import time
import socket
import daemon
-import Bcfg2.Statistics
+import Bcfg2.Server.Statistics
from Bcfg2.Server.Core import BaseCore, NoExposedMethod
from Bcfg2.Compat import xmlrpclib, urlparse
-from Bcfg2.SSLServer import XMLRPCServer
+from Bcfg2.Server.SSLServer import XMLRPCServer
from lockfile import LockFailed, LockTimeout
# pylint: disable=E0611
@@ -22,11 +22,11 @@ class Core(BaseCore):
""" The built-in server core """
name = 'bcfg2-server'
- def __init__(self, setup):
- BaseCore.__init__(self, setup)
+ def __init__(self):
+ BaseCore.__init__(self)
- #: The :class:`Bcfg2.SSLServer.XMLRPCServer` instance powering
- #: this server core
+ #: The :class:`Bcfg2.Server.SSLServer.XMLRPCServer` instance
+ #: powering this server core
self.server = None
daemon_args = dict(uid=self.setup['daemon_uid'],
@@ -69,8 +69,9 @@ class Core(BaseCore):
try:
return method_func(*args)
finally:
- Bcfg2.Statistics.stats.add_value(method,
- time.time() - method_start)
+ Bcfg2.Server.Statistics.stats.add_value(
+ method,
+ time.time() - method_start)
except xmlrpclib.Fault:
raise
except Exception:
diff --git a/src/lib/Bcfg2/Cache.py b/src/lib/Bcfg2/Server/Cache.py
index 842098eda..842098eda 100644
--- a/src/lib/Bcfg2/Cache.py
+++ b/src/lib/Bcfg2/Server/Cache.py
diff --git a/src/lib/Bcfg2/Server/CherryPyCore.py b/src/lib/Bcfg2/Server/CherryPyCore.py
index d097fd08f..bf3be72f9 100644
--- a/src/lib/Bcfg2/Server/CherryPyCore.py
+++ b/src/lib/Bcfg2/Server/CherryPyCore.py
@@ -3,7 +3,7 @@ server. """
import sys
import time
-import Bcfg2.Statistics
+import Bcfg2.Server.Statistics
from Bcfg2.Compat import urlparse, xmlrpclib, b64decode
from Bcfg2.Server.Core import BaseCore
import cherrypy
@@ -36,8 +36,8 @@ class Core(BaseCore):
_cp_config = {'tools.xmlrpc_error.on': True,
'tools.bcfg2_authn.on': True}
- def __init__(self, setup):
- BaseCore.__init__(self, setup)
+ def __init__(self):
+ BaseCore.__init__(self)
cherrypy.tools.bcfg2_authn = cherrypy.Tool('on_start_resource',
self.do_authn)
@@ -65,8 +65,15 @@ class Core(BaseCore):
# FIXME: Get client cert
cert = None
- address = (cherrypy.request.remote.ip, cherrypy.request.remote.name)
- return self.authenticate(cert, username, password, address)
+ address = (cherrypy.request.remote.ip, cherrypy.request.remote.port)
+
+ rpcmethod = xmlrpcutil.process_body()[1]
+ if rpcmethod == 'ERRORMETHOD':
+ raise Exception("Unknown error processing XML-RPC request body")
+
+ if (not self.check_acls(address[0], rpcmethod) or
+ not self.authenticate(cert, username, password, address)):
+ raise cherrypy.HTTPError(401)
@cherrypy.expose
def default(self, *args, **params): # pylint: disable=W0613
@@ -96,8 +103,8 @@ class Core(BaseCore):
try:
body = handler(*rpcparams, **params)
finally:
- Bcfg2.Statistics.stats.add_value(rpcmethod,
- time.time() - method_start)
+ Bcfg2.Server.Statistics.stats.add_value(rpcmethod,
+ time.time() - method_start)
xmlrpcutil.respond(body, 'utf-8', True)
return cherrypy.serving.response.body
diff --git a/src/lib/Bcfg2/Server/Core.py b/src/lib/Bcfg2/Server/Core.py
index c246860c1..8ef9e3e96 100644
--- a/src/lib/Bcfg2/Server/Core.py
+++ b/src/lib/Bcfg2/Server/Core.py
@@ -11,13 +11,14 @@ import logging
import inspect
import threading
import lxml.etree
-import Bcfg2.settings
import Bcfg2.Server
import Bcfg2.Logger
+import Bcfg2.settings
+import Bcfg2.Server.Statistics
import Bcfg2.Server.FileMonitor
-from Bcfg2.Cache import Cache
-import Bcfg2.Statistics
from itertools import chain
+from Bcfg2.Server.Cache import Cache
+from Bcfg2.Options import get_option_parser, SERVER_FAM_IGNORE
from Bcfg2.Compat import xmlrpclib # pylint: disable=W0622
from Bcfg2.Server.Plugin.exceptions import * # pylint: disable=W0401,W0614
from Bcfg2.Server.Plugin.interfaces import * # pylint: disable=W0401,W0614
@@ -87,11 +88,8 @@ class BaseCore(object):
and modules. All core implementations must inherit from
``BaseCore``. """
- def __init__(self, setup): # pylint: disable=R0912,R0915
+ def __init__(self): # pylint: disable=R0912,R0915
"""
- :param setup: A Bcfg2 options dict
- :type setup: Bcfg2.Options.OptionParser
-
.. automethod:: _daemonize
.. automethod:: _run
.. automethod:: _block
@@ -99,10 +97,15 @@ class BaseCore(object):
.. automethod:: _file_monitor_thread
.. automethod:: _perflog_thread
"""
+ #: The Bcfg2 options dict
+ self.setup = get_option_parser()
+
#: The Bcfg2 repository directory
- self.datastore = setup['repo']
+ self.datastore = self.setup['repo']
- if setup['verbose']:
+ if self.setup['debug']:
+ level = logging.DEBUG
+ elif self.setup['verbose']:
level = logging.INFO
else:
level = logging.WARNING
@@ -113,8 +116,8 @@ class BaseCore(object):
# setup_logging and the console will get DEBUG output.
Bcfg2.Logger.setup_logging('bcfg2-server',
to_console=logging.INFO,
- to_syslog=setup['syslog'],
- to_file=setup['logging'],
+ to_syslog=self.setup['syslog'],
+ to_file=self.setup['logging'],
level=level)
#: A :class:`logging.Logger` object for use by the core
@@ -136,34 +139,34 @@ class BaseCore(object):
# enable debugging on the core now. debugging is enabled on
# everything else later
- if setup['debug']:
- self.set_core_debug(None, setup['debug'])
+ if self.setup['debug']:
+ self.set_core_debug(None, self.setup['debug'])
- try:
- filemonitor = \
- Bcfg2.Server.FileMonitor.available[setup['filemonitor']]
- except KeyError:
+ if 'ignore' not in self.setup:
+ self.setup.add_option('ignore', SERVER_FAM_IGNORE)
+ self.setup.reparse()
+
+ famargs = dict(filemonitor=self.setup['filemonitor'],
+ debug=self.setup['debug'],
+ ignore=self.setup['ignore'])
+ if self.setup['filemonitor'] not in Bcfg2.Server.FileMonitor.available:
self.logger.error("File monitor driver %s not available; "
- "forcing to default" % setup['filemonitor'])
- filemonitor = Bcfg2.Server.FileMonitor.available['default']
- famargs = dict(ignore=[], debug=False)
- if 'ignore' in setup:
- famargs['ignore'] = setup['ignore']
- if 'debug' in setup:
- famargs['debug'] = setup['debug']
+ "forcing to default" % self.setup['filemonitor'])
+ famargs['filemonitor'] = 'default'
try:
#: The :class:`Bcfg2.Server.FileMonitor.FileMonitor`
#: object used by the core to monitor for Bcfg2 data
#: changes.
- self.fam = filemonitor(**famargs)
+ self.fam = Bcfg2.Server.FileMonitor.load_fam(**famargs)
except IOError:
- msg = "Failed to instantiate fam driver %s" % setup['filemonitor']
+ msg = "Failed to instantiate fam driver %s" % \
+ self.setup['filemonitor']
self.logger.error(msg, exc_info=1)
raise CoreInitError(msg)
#: Path to bcfg2.conf
- self.cfile = setup['configfile']
+ self.cfile = self.setup['configfile']
#: Dict of plugins that are enabled. Keys are the plugin
#: names (just the plugin name, in the correct case; e.g.,
@@ -184,9 +187,6 @@ class BaseCore(object):
#: :class:`Bcfg2.Server.Plugin.interfaces.Version` plugin.
self.revision = '-1'
- #: The Bcfg2 options dict
- self.setup = setup
-
atexit.register(self.shutdown)
#: Threading event to signal worker threads (e.g.,
@@ -241,7 +241,7 @@ class BaseCore(object):
"at %s: %s" % (db_settings['NAME'], err))
#: The CA that signed the server cert
- self.ca = setup['ca']
+ self.ca = self.setup['ca']
def hdlr(sig, frame): # pylint: disable=W0613
""" Handle SIGINT/Ctrl-C by shutting down the core and exiting
@@ -254,7 +254,7 @@ class BaseCore(object):
#: The FAM :class:`threading.Thread`,
#: :func:`_file_monitor_thread`
self.fam_thread = \
- threading.Thread(name="%sFAMThread" % setup['filemonitor'],
+ threading.Thread(name="%sFAMThread" % self.setup['filemonitor'],
target=self._file_monitor_thread)
self.perflog_thread = None
@@ -267,7 +267,7 @@ class BaseCore(object):
#: :func:`Bcfg2.Server.FileMonitor.FileMonitor.handle_event_set`
self.lock = threading.Lock()
- #: A :class:`Bcfg2.Cache.Cache` object for caching client
+ #: A :class:`Bcfg2.Server.Cache.Cache` object for caching client
#: metadata
self.metadata_cache = Cache()
@@ -324,7 +324,7 @@ class BaseCore(object):
self._update_vcs_revision()
self.logger.debug("File monitor thread terminated")
- @track_statistics()
+ @Bcfg2.Server.Statistics.track_statistics()
def _update_vcs_revision(self):
""" Update the revision of the current configuration on-disk
from the VCS plugin """
@@ -361,17 +361,18 @@ class BaseCore(object):
for plug in blacklist:
del self.plugins[plug]
- # Log experimental plugins
- expl = [plug for plug in list(self.plugins.values())
- if plug.experimental]
+ # Log deprecated and experimental plugins
+ expl = []
+ depr = []
+ for plug in list(self.plugins.values()):
+ if plug.experimental:
+ expl.append(plug)
+ if plug.deprecated:
+ depr.append(plug)
if expl:
self.logger.info("Loading experimental plugin(s): %s" %
(" ".join([x.name for x in expl])))
self.logger.info("NOTE: Interfaces subject to change")
-
- # Log deprecated plugins
- depr = [plug for plug in list(self.plugins.values())
- if plug.deprecated]
if depr:
self.logger.info("Loading deprecated plugin(s): %s" %
(" ".join([x.name for x in depr])))
@@ -452,8 +453,10 @@ class BaseCore(object):
""" Get the client :attr:`metadata_cache` mode. Options are
off, initial, cautious, aggressive, on (synonym for
cautious). See :ref:`server-caching` for more details. """
+ # pylint: disable=E1103
mode = self.setup.cfp.get("caching", "client_metadata",
default="off").lower()
+ # pylint: enable=E1103
if mode == "on":
return "cautious"
else:
@@ -490,11 +493,12 @@ class BaseCore(object):
self.logger.error("%s: Error invoking hook %s: %s" %
(plugin, hook, err))
finally:
- Bcfg2.Statistics.stats.add_value("%s:client_run_hook:%s" %
- (self.__class__.__name__, hook),
- time.time() - start)
+ Bcfg2.Server.Statistics.stats.add_value(
+ "%s:client_run_hook:%s" %
+ (self.__class__.__name__, hook),
+ time.time() - start)
- @track_statistics()
+ @Bcfg2.Server.Statistics.track_statistics()
def validate_structures(self, metadata, data):
""" Checks the data structures by calling the
:func:`Bcfg2.Server.Plugin.interfaces.StructureValidator.validate_structures`
@@ -521,7 +525,7 @@ class BaseCore(object):
self.logger.error("Plugin %s: unexpected structure validation "
"failure" % plugin.name, exc_info=1)
- @track_statistics()
+ @Bcfg2.Server.Statistics.track_statistics()
def validate_goals(self, metadata, data):
""" Checks that the config matches the goals enforced by
:class:`Bcfg2.Server.Plugin.interfaces.GoalValidator` plugins
@@ -547,7 +551,7 @@ class BaseCore(object):
self.logger.error("Plugin %s: unexpected goal validation "
"failure" % plugin.name, exc_info=1)
- @track_statistics()
+ @Bcfg2.Server.Statistics.track_statistics()
def GetStructures(self, metadata):
""" Get all structures (i.e., bundles) for the given client
@@ -566,7 +570,7 @@ class BaseCore(object):
(metadata.hostname, ':'.join(missing)))
return structures
- @track_statistics()
+ @Bcfg2.Server.Statistics.track_statistics()
def BindStructures(self, structures, metadata, config):
""" Given a list of structures (i.e. bundles), bind all the
entries in them and add the structures to the config.
@@ -587,7 +591,7 @@ class BaseCore(object):
except:
self.logger.error("error in BindStructure", exc_info=1)
- @track_statistics()
+ @Bcfg2.Server.Statistics.track_statistics()
def BindStructure(self, structure, metadata):
""" Bind all elements in a single structure (i.e., bundle).
@@ -662,10 +666,10 @@ class BaseCore(object):
raise PluginExecutionError("No matching generator: %s:%s" %
(entry.tag, entry.get('name')))
finally:
- Bcfg2.Statistics.stats.add_value("%s:Bind:%s" %
- (self.__class__.__name__,
- entry.tag),
- time.time() - start)
+ Bcfg2.Server.Statistics.stats.add_value("%s:Bind:%s" %
+ (self.__class__.__name__,
+ entry.tag),
+ time.time() - start)
def BuildConfiguration(self, client):
""" Build the complete configuration for a client.
@@ -824,7 +828,49 @@ class BaseCore(object):
% plugin.name, exc_info=1)
return result
- @track_statistics()
+ @Bcfg2.Server.Statistics.track_statistics()
+ def check_acls(self, address, rmi):
+ """ Check client IP address and metadata object against all
+ :class:`Bcfg2.Server.Plugin.interfaces.ClientACLs` plugins.
+ If any ACL plugin denies access, then access is denied. ACLs
+ are checked in two phases: First, with the client IP address;
+ and second, with the client metadata object. This lets an ACL
+ interface do a quick rejection based on IP before metadata is
+ ever built.
+
+ :param address: The address pair of the client to check ACLs for
+ :type address: tuple of (<ip address>, <port>)
+ :param rmi: The fully-qualified name of the RPC call
+        :type rmi: string
+ :returns: bool
+ """
+ plugins = self.plugins_by_type(Bcfg2.Server.Plugin.ClientACLs)
+ try:
+ ip_checks = [p.check_acl_ip(address, rmi) for p in plugins]
+ except:
+ self.logger.error("Unexpected error checking ACLs for %s for %s: "
+ "%s" % (address[0], rmi, sys.exc_info()[1]))
+ return False # failsafe
+
+ if all(ip_checks):
+ # if all ACL plugins return True (allow), then allow
+ return True
+ elif False in ip_checks:
+ # if any ACL plugin returned False (deny), then deny
+ return False
+ # else, no plugins returned False, but not all plugins
+ # returned True, so some plugin returned None (defer), so
+ # defer.
+
+ client, metadata = self.resolve_client(address)
+ try:
+ return all(p.check_acl_metadata(metadata, rmi) for p in plugins)
+ except:
+ self.logger.error("Unexpected error checking ACLs for %s for %s: "
+ "%s" % (client, rmi, sys.exc_info()[1]))
+ return False # failsafe
+
+ @Bcfg2.Server.Statistics.track_statistics()
def build_metadata(self, client_name):
""" Build initial client metadata for a client
@@ -888,7 +934,7 @@ class BaseCore(object):
:param address: The address pair of the client to get the
canonical hostname for.
- :type address: tuple of (<ip address>, <hostname>)
+ :type address: tuple of (<ip address>, <port>)
:param cleanup_cache: Tell the
:class:`Bcfg2.Server.Plugin.interfaces.Metadata`
plugin in :attr:`metadata` to clean up
@@ -967,21 +1013,23 @@ class BaseCore(object):
def listMethods(self, address): # pylint: disable=W0613
""" List all exposed methods, including plugin RMI.
- :param address: Client (address, hostname) pair
+ :param address: Client (address, port) pair
:type address: tuple
:returns: list of exposed method names
"""
methods = [name
for name, func in inspect.getmembers(self, callable)
- if getattr(func, "exposed", False)]
- methods.extend(self._get_rmi().keys())
+ if (getattr(func, "exposed", False) and
+ self.check_acls(address, name))]
+ methods.extend([m for m in self._get_rmi().keys()
+ if self.check_acls(address, m)])
return methods
@exposed
def methodHelp(self, address, method_name): # pylint: disable=W0613
""" Get help from the docstring of an exposed method
- :param address: Client (address, hostname) pair
+ :param address: Client (address, port) pair
:type address: tuple
:param method_name: The name of the method to get help on
:type method_name: string
@@ -998,7 +1046,7 @@ class BaseCore(object):
def DeclareVersion(self, address, version):
""" Declare the client version.
- :param address: Client (address, hostname) pair
+ :param address: Client (address, port) pair
:type address: tuple
:param version: The client's declared version
:type version: string
@@ -1020,7 +1068,7 @@ class BaseCore(object):
def GetProbes(self, address):
""" Fetch probes for the client.
- :param address: Client (address, hostname) pair
+ :param address: Client (address, port) pair
:type address: tuple
:returns: lxml.etree._Element - XML tree describing probes for
this client
@@ -1044,7 +1092,7 @@ class BaseCore(object):
def RecvProbeData(self, address, probedata):
""" Receive probe data from clients.
- :param address: Client (address, hostname) pair
+ :param address: Client (address, port) pair
:type address: tuple
:returns: bool - True on success
:raises: :exc:`xmlrpclib.Fault`
@@ -1091,7 +1139,7 @@ class BaseCore(object):
def AssertProfile(self, address, profile):
""" Set profile for a client.
- :param address: Client (address, hostname) pair
+ :param address: Client (address, port) pair
:type address: tuple
:returns: bool - True on success
:raises: :exc:`xmlrpclib.Fault`
@@ -1111,7 +1159,7 @@ class BaseCore(object):
""" Build config for a client by calling
:func:`BuildConfiguration`.
- :param address: Client (address, hostname) pair
+ :param address: Client (address, port) pair
:type address: tuple
:returns: lxml.etree._Element - The full configuration
document for the client
@@ -1129,7 +1177,7 @@ class BaseCore(object):
def RecvStats(self, address, stats):
""" Act on statistics upload with :func:`process_statistics`.
- :param address: Client (address, hostname) pair
+ :param address: Client (address, port) pair
:type address: tuple
:returns: bool - True on success
:raises: :exc:`xmlrpclib.Fault`
@@ -1150,7 +1198,7 @@ class BaseCore(object):
:type user: string
:param password: The password supplied by the client
:type password: string
- :param address: An address pair of ``(<ip address>, <hostname>)``
+ :param address: An address pair of ``(<ip address>, <port>)``
:type address: tuple
:return: bool - True if the authenticate succeeds, False otherwise
"""
@@ -1162,11 +1210,19 @@ class BaseCore(object):
return self.metadata.AuthenticateConnection(acert, user, password,
address)
+ def check_acls(self, client_ip):
+ """ Check if client IP is in list of accepted IPs """
+ try:
+ return self.plugins['Acl'].config.check_acl(client_ip)
+ except KeyError:
+ # No ACL means accept all incoming ips
+ return True
+
@exposed
def GetDecisionList(self, address, mode):
""" Get the decision list for the client with :func:`GetDecisions`.
- :param address: Client (address, hostname) pair
+ :param address: Client (address, port) pair
:type address: tuple
:returns: list of decision tuples
:raises: :exc:`xmlrpclib.Fault`
@@ -1183,17 +1239,17 @@ class BaseCore(object):
@exposed
def get_statistics(self, _):
""" Get current statistics about component execution from
- :attr:`Bcfg2.Statistics.stats`.
+ :attr:`Bcfg2.Server.Statistics.stats`.
:returns: dict - The statistics data as returned by
- :func:`Bcfg2.Statistics.Statistics.display` """
- return Bcfg2.Statistics.stats.display()
+ :func:`Bcfg2.Server.Statistics.Statistics.display` """
+ return Bcfg2.Server.Statistics.stats.display()
@exposed
def toggle_debug(self, address):
""" Toggle debug status of the FAM and all plugins
- :param address: Client (address, hostname) pair
+ :param address: Client (address, port) pair
:type address: tuple
:returns: bool - The new debug state of the FAM
"""
diff --git a/src/lib/Bcfg2/Encryption.py b/src/lib/Bcfg2/Server/Encryption.py
index b4674d72f..797b44ab9 100755
--- a/src/lib/Bcfg2/Encryption.py
+++ b/src/lib/Bcfg2/Server/Encryption.py
@@ -1,8 +1,9 @@
-""" Bcfg2.Encryption provides a number of convenience methods for
-handling encryption in Bcfg2. See :ref:`server-encryption` for more
-details. """
+""" Bcfg2.Server.Encryption provides a number of convenience methods
+for handling encryption in Bcfg2. See :ref:`server-encryption` for
+more details. """
import os
+import Bcfg2.Options
from M2Crypto import Rand
from M2Crypto.EVP import Cipher, EVPError
from Bcfg2.Compat import StringIO, md5, b64encode, b64decode
@@ -17,13 +18,6 @@ ENCRYPT = 1
#: makes our code more readable.
DECRYPT = 0
-#: Default cipher algorithm. To get a full list of valid algorithms,
-#: you can run::
-#:
-#: openssl list-cipher-algorithms | grep -v ' => ' | \
-#: tr 'A-Z-' 'a-z_' | sort -u
-ALGORITHM = "aes_256_cbc"
-
#: Default initialization vector. For best security, you should use a
#: unique IV for each message. :func:`ssl_encrypt` does this in an
#: automated fashion.
@@ -39,6 +33,17 @@ CFG_ALGORITHM = "algorithm"
#: The config option used to store the decryption strictness
CFG_DECRYPT = "decrypt"
+#: Default cipher algorithm. To get a full list of valid algorithms,
+#: you can run::
+#:
+#: openssl list-cipher-algorithms | grep -v ' => ' | \
+#: tr 'A-Z-' 'a-z_' | sort -u
+ALGORITHM = Bcfg2.Options.get_option_parser().cfp.get( # pylint: disable=E1103
+ CFG_SECTION,
+ CFG_ALGORITHM,
+ default="aes_256_cbc").lower().replace("-", "_")
+
+
Rand.rand_seed(os.urandom(1024))
@@ -116,11 +121,11 @@ def ssl_decrypt(data, passwd, algorithm=ALGORITHM):
# base64-decode the data
data = b64decode(data)
salt = data[8:16]
- # pylint: disable=E1101
+ # pylint: disable=E1101,E1121
hashes = [md5(passwd + salt).digest()]
for i in range(1, 3):
hashes.append(md5(hashes[i - 1] + passwd + salt).digest())
- # pylint: enable=E1101
+ # pylint: enable=E1101,E1121
key = hashes[0] + hashes[1]
iv = hashes[2]
@@ -146,11 +151,11 @@ def ssl_encrypt(plaintext, passwd, algorithm=ALGORITHM, salt=None):
if salt is None:
salt = Rand.rand_bytes(8)
- # pylint: disable=E1101
+ # pylint: disable=E1101,E1121
hashes = [md5(passwd + salt).digest()]
for i in range(1, 3):
hashes.append(md5(hashes[i - 1] + passwd + salt).digest())
- # pylint: enable=E1101
+ # pylint: enable=E1101,E1121
key = hashes[0] + hashes[1]
iv = hashes[2]
@@ -159,58 +164,37 @@ def ssl_encrypt(plaintext, passwd, algorithm=ALGORITHM, salt=None):
return b64encode("Salted__" + salt + crypted) + "\n"
-def get_algorithm(setup):
- """ Get the cipher algorithm from the config file. This is used
- in case someone uses the OpenSSL algorithm name (e.g.,
- "AES-256-CBC") instead of the M2Crypto name (e.g., "aes_256_cbc"),
- and to handle errors in a sensible way and deduplicate this code.
-
- :param setup: The Bcfg2 option set to extract passphrases from
- :type setup: Bcfg2.Options.OptionParser
- :returns: dict - a dict of ``<passphrase name>``: ``<passphrase>``
- """
- return setup.cfp.get(CFG_SECTION, CFG_ALGORITHM,
- default=ALGORITHM).lower().replace("-", "_")
-
-
-def get_passphrases(setup):
+def get_passphrases():
""" Get all candidate encryption passphrases from the config file.
- :param setup: The Bcfg2 option set to extract passphrases from
- :type setup: Bcfg2.Options.OptionParser
:returns: dict - a dict of ``<passphrase name>``: ``<passphrase>``
"""
- section = CFG_SECTION
- if setup.cfp.has_section(section):
- return dict([(o, setup.cfp.get(section, o))
- for o in setup.cfp.options(section)
+ setup = Bcfg2.Options.get_option_parser()
+ if setup.cfp.has_section(CFG_SECTION):
+ return dict([(o, setup.cfp.get(CFG_SECTION, o))
+ for o in setup.cfp.options(CFG_SECTION)
if o not in [CFG_ALGORITHM, CFG_DECRYPT]])
else:
return dict()
-def bruteforce_decrypt(crypted, passphrases=None, setup=None,
- algorithm=ALGORITHM):
+def bruteforce_decrypt(crypted, passphrases=None, algorithm=ALGORITHM):
""" Convenience method to decrypt the given encrypted string by
trying the given passphrases or all passphrases (as returned by
:func:`get_passphrases`) sequentially until one is found that
works.
- Either ``passphrases`` or ``setup`` must be provided.
-
:param crypted: The data to decrypt
:type crypted: string
:param passphrases: The passphrases to try.
:type passphrases: list
- :param setup: A Bcfg2 option set to extract passphrases from
- :type setup: Bcfg2.Options.OptionParser
:param algorithm: The cipher algorithm to use
:type algorithm: string
:returns: string - The decrypted data
:raises: :class:`M2Crypto.EVP.EVPError`, if the data cannot be decrypted
"""
if passphrases is None:
- passphrases = get_passphrases(setup).values()
+ passphrases = get_passphrases().values()
for passwd in passphrases:
try:
return ssl_decrypt(crypted, passwd, algorithm=algorithm)
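
The ``ssl_encrypt``/``ssl_decrypt`` hunks above keep the legacy OpenSSL key-derivation scheme: three chained MD5 digests over passphrase and salt, as used by ``openssl enc`` with ``-md md5``. The following is a minimal sketch of just that derivation, using only ``hashlib`` instead of M2Crypto and assuming byte-string inputs; the actual cipher operations in the module still go through ``M2Crypto.EVP.Cipher``::

    import os
    from hashlib import md5

    def derive_key_iv(passwd, salt):
        """Derive a 32-byte AES-256 key and 16-byte IV from passwd + salt."""
        hashes = [md5(passwd + salt).digest()]
        for i in range(1, 3):
            hashes.append(md5(hashes[i - 1] + passwd + salt).digest())
        return hashes[0] + hashes[1], hashes[2]   # (key, iv)

    if __name__ == "__main__":
        salt = os.urandom(8)   # ssl_encrypt stores this after "Salted__"
        key, iv = derive_key_iv(b"secret passphrase", salt)
        print(len(key), len(iv))                  # 32 16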
diff --git a/src/lib/Bcfg2/Server/FileMonitor/Fam.py b/src/lib/Bcfg2/Server/FileMonitor/Fam.py
deleted file mode 100644
index 09d41038e..000000000
--- a/src/lib/Bcfg2/Server/FileMonitor/Fam.py
+++ /dev/null
@@ -1,105 +0,0 @@
-""" File monitor backend with support for the `File Alteration Monitor
-<http://oss.sgi.com/projects/fam/>`_. The FAM backend is deprecated. """
-
-import os
-import _fam
-import stat
-import logging
-from time import time
-from Bcfg2.Server.FileMonitor import FileMonitor
-
-LOGGER = logging.getLogger(__name__)
-
-
-class Fam(FileMonitor):
- """ **Deprecated** file monitor backend with support for the `File
- Alteration Monitor <http://oss.sgi.com/projects/fam/>`_ (also
- abbreviated "FAM")."""
-
- #: FAM is the worst actual monitor backend, so give it a low
- #: priority.
- __priority__ = 10
-
- def __init__(self, ignore=None, debug=False):
- FileMonitor.__init__(self, ignore=ignore, debug=debug)
- self.filemonitor = _fam.open()
- self.users = {}
- LOGGER.warning("The Fam file monitor backend is deprecated. Please "
- "switch to a supported file monitor.")
- __init__.__doc__ = FileMonitor.__init__.__doc__
-
- def fileno(self):
- return self.filemonitor.fileno()
- fileno.__doc__ = FileMonitor.fileno.__doc__
-
- def handle_event_set(self, _=None):
- self.Service()
- handle_event_set.__doc__ = FileMonitor.handle_event_set.__doc__
-
- def handle_events_in_interval(self, interval):
- now = time()
- while (time() - now) < interval:
- if self.Service():
- now = time()
- handle_events_in_interval.__doc__ = \
- FileMonitor.handle_events_in_interval.__doc__
-
- def AddMonitor(self, path, obj, _=None):
- mode = os.stat(path)[stat.ST_MODE]
- if stat.S_ISDIR(mode):
- handle = self.filemonitor.monitorDirectory(path, None)
- else:
- handle = self.filemonitor.monitorFile(path, None)
- self.handles[handle.requestID()] = handle
- if obj is not None:
- self.users[handle.requestID()] = obj
- return handle.requestID()
- AddMonitor.__doc__ = FileMonitor.AddMonitor.__doc__
-
- def Service(self, interval=0.50):
- """ Handle events for the specified period of time (in
- seconds). This call will block for ``interval`` seconds.
-
- :param interval: The interval, in seconds, during which events
- should be handled. Any events that are
- already pending when :func:`Service` is
- called will also be handled.
- :type interval: int
- :returns: None
- """
- count = 0
- collapsed = 0
- rawevents = []
- start = time()
- now = time()
- while (time() - now) < interval:
- if self.filemonitor.pending():
- while self.filemonitor.pending():
- count += 1
- rawevents.append(self.filemonitor.nextEvent())
- now = time()
- unique = []
- bookkeeping = []
- for event in rawevents:
- if self.should_ignore(event):
- continue
- if event.code2str() != 'changed':
- # process all non-change events
- unique.append(event)
- else:
- if (event.filename, event.requestID) not in bookkeeping:
- bookkeeping.append((event.filename, event.requestID))
- unique.append(event)
- else:
- collapsed += 1
- for event in unique:
- if event.requestID in self.users:
- try:
- self.users[event.requestID].HandleEvent(event)
- except: # pylint: disable=W0702
- LOGGER.error("Handling event for file %s" % event.filename,
- exc_info=1)
- end = time()
- LOGGER.info("Processed %s fam events in %03.03f seconds. "
- "%s coalesced" % (count, (end - start), collapsed))
- return count
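
Although the Fam backend is removed outright, its ``Service()`` method was the one place that showed how duplicate ``changed`` events were coalesced. For reference, a simplified, self-contained illustration of that de-duplication, with a ``namedtuple`` standing in for ``_fam`` event objects::

    from collections import namedtuple

    Event = namedtuple("Event", ["filename", "requestID", "code"])

    def coalesce(rawevents):
        """Keep every non-change event; collapse repeated changes per file."""
        unique = []
        seen = set()
        collapsed = 0
        for event in rawevents:
            if event.code != "changed":
                unique.append(event)              # process all non-change events
            elif (event.filename, event.requestID) not in seen:
                seen.add((event.filename, event.requestID))
                unique.append(event)              # first change for this file
            else:
                collapsed += 1                    # duplicate change, dropped
        return unique, collapsed

    if __name__ == "__main__":
        events = [Event("bundler/base.xml", 1, "changed"),
                  Event("bundler/base.xml", 1, "changed"),
                  Event("bundler/base.xml", 1, "deleted"),
                  Event("Metadata/groups.xml", 2, "changed")]
        kept, dropped = coalesce(events)
        print(len(kept), "kept,", dropped, "coalesced")   # 3 kept, 1 coalesced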
diff --git a/src/lib/Bcfg2/Server/FileMonitor/__init__.py b/src/lib/Bcfg2/Server/FileMonitor/__init__.py
index e430e3160..522ddb705 100644
--- a/src/lib/Bcfg2/Server/FileMonitor/__init__.py
+++ b/src/lib/Bcfg2/Server/FileMonitor/__init__.py
@@ -325,6 +325,45 @@ class FileMonitor(Debuggable):
return rv
+#: A module-level FAM object that all plugins, etc., can use. This
+#: should not be used directly, but retrieved via :func:`get_fam`.
+_FAM = None
+
+
+def load_fam(filemonitor='default', ignore=None, debug=False):
+ """ Load a new :class:`Bcfg2.Server.FileMonitor.FileMonitor`
+ object, caching it in :attr:`_FAM` for later retrieval via
+ :func:`get_fam`.
+
+ :param filemonitor: Which filemonitor backend to use
+ :type filemonitor: string
+ :param ignore: A list of filenames to ignore
+ :type ignore: list of strings (filename globs)
+ :param debug: Produce debugging information
+ :type debug: bool
+ :returns: :class:`Bcfg2.Server.FileMonitor.FileMonitor`
+ """
+ global _FAM # pylint: disable=W0603
+ if _FAM is None:
+ if ignore is None:
+ ignore = []
+ _FAM = available[filemonitor](ignore=ignore, debug=debug)
+ return _FAM
+
+
+def get_fam():
+ """ Get an already-created
+ :class:`Bcfg2.Server.FileMonitor.FileMonitor` object. If
+ :attr:`_FAM` has not been populated, then a new default
+ FileMonitor will be created.
+
+ :returns: :class:`Bcfg2.Server.FileMonitor.FileMonitor`
+ """
+ if _FAM is None:
+ return load_fam('default')
+ return _FAM
+
+
#: A dict of all available FAM backends. Keys are the human-readable
#: names of the backends, which are used in bcfg2.conf to select a
#: backend; values are the backend classes. In addition, the
@@ -337,12 +376,6 @@ from Bcfg2.Server.FileMonitor.Pseudo import Pseudo
available['pseudo'] = Pseudo
try:
- from Bcfg2.Server.FileMonitor.Fam import Fam
- available['fam'] = Fam
-except ImportError:
- pass
-
-try:
from Bcfg2.Server.FileMonitor.Gamin import Gamin
available['gamin'] = Gamin
except ImportError:
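
The new ``load_fam()``/``get_fam()`` pair added to ``FileMonitor/__init__.py`` implements a conventional module-level singleton. A compact sketch of the same pattern follows; ``_StubMonitor`` and the registry dict are stand-ins for the real backend classes in ``available``::

    class _StubMonitor(object):
        """Hypothetical stand-in for a FileMonitor backend class."""
        def __init__(self, ignore=None, debug=False):
            self.ignore = ignore or []
            self.debug = debug

    available = {"default": _StubMonitor, "pseudo": _StubMonitor}
    _FAM = None   # the cached module-level monitor

    def load_fam(filemonitor="default", ignore=None, debug=False):
        """Create the shared monitor on first call; later calls reuse it."""
        global _FAM
        if _FAM is None:
            _FAM = available[filemonitor](ignore=ignore, debug=debug)
        return _FAM

    def get_fam():
        """Return the shared monitor, creating a default one if needed."""
        return _FAM if _FAM is not None else load_fam("default")

    if __name__ == "__main__":
        first = load_fam("pseudo", ignore=["*.swp"])
        assert get_fam() is first        # every caller sees the same instance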
diff --git a/src/lib/Bcfg2/Server/Hostbase/.gitignore b/src/lib/Bcfg2/Server/Hostbase/.gitignore
deleted file mode 100644
index 8e15b5395..000000000
--- a/src/lib/Bcfg2/Server/Hostbase/.gitignore
+++ /dev/null
@@ -1,3 +0,0 @@
-*.pyc
-dev.db
-bcfg2.conf
diff --git a/src/lib/Bcfg2/Server/Hostbase/__init__.py b/src/lib/Bcfg2/Server/Hostbase/__init__.py
deleted file mode 100644
index e69de29bb..000000000
--- a/src/lib/Bcfg2/Server/Hostbase/__init__.py
+++ /dev/null
diff --git a/src/lib/Bcfg2/Server/Hostbase/backends.py b/src/lib/Bcfg2/Server/Hostbase/backends.py
deleted file mode 100644
index cfa9e1e16..000000000
--- a/src/lib/Bcfg2/Server/Hostbase/backends.py
+++ /dev/null
@@ -1,63 +0,0 @@
-from django.contrib.auth.models import User
-#from ldapauth import *
-from nisauth import *
-
-## class LDAPBackend(object):
-
-## def authenticate(self,username=None,password=None):
-## try:
-
-## l = ldapauth(username,password)
-## temp_pass = User.objects.make_random_password(100)
-## ldap_user = dict(username=l.sAMAccountName,
-## )
-## user_session_obj = dict(
-## email=l.email,
-## first_name=l.name_f,
-## last_name=l.name_l,
-## uid=l.badge_no
-## )
-## #fixme: need to add this user session obj to session
-## user,created = User.objects.get_or_create(username=username)
-## return user
-
-## except LDAPAUTHError,e:
-## return None
-
-## def get_user(self,user_id):
-## try:
-## return User.objects.get(pk=user_id)
-## except User.DoesNotExist, e:
-## return None
-
-
-class NISBackend(object):
-
- def authenticate(self, username=None, password=None):
- try:
- n = nisauth(username, password)
- temp_pass = User.objects.make_random_password(100)
- nis_user = dict(username=username,
- )
-
- user_session_obj = dict(
- email = username + "@mcs.anl.gov",
- first_name = None,
- last_name = None,
- uid = n.uid
- )
- user, created = User.objects.get_or_create(username=username)
-
- return user
-
- except NISAUTHError:
- e = sys.exc_info()[1]
- return None
-
-
- def get_user(self, user_id):
- try:
- return User.objects.get(pk=user_id)
- except User.DoesNotExist:
- e = sys.exc_info()[1]
- return None
diff --git a/src/lib/Bcfg2/Server/Hostbase/hostbase/__init__.py b/src/lib/Bcfg2/Server/Hostbase/hostbase/__init__.py
deleted file mode 100644
index e69de29bb..000000000
--- a/src/lib/Bcfg2/Server/Hostbase/hostbase/__init__.py
+++ /dev/null
diff --git a/src/lib/Bcfg2/Server/Hostbase/hostbase/admin.py b/src/lib/Bcfg2/Server/Hostbase/hostbase/admin.py
deleted file mode 100644
index 70a2233cc..000000000
--- a/src/lib/Bcfg2/Server/Hostbase/hostbase/admin.py
+++ /dev/null
@@ -1,15 +0,0 @@
-from django.contrib import admin
-
-from models import Host, Interface, IP, MX, Name, CName, Nameserver, ZoneAddress, Zone, Log, ZoneLog
-
-admin.site.register(Host)
-admin.site.register(Interface)
-admin.site.register(IP)
-admin.site.register(MX)
-admin.site.register(Name)
-admin.site.register(CName)
-admin.site.register(Nameserver)
-admin.site.register(ZoneAddress)
-admin.site.register(Zone)
-admin.site.register(Log)
-admin.site.register(ZoneLog)
diff --git a/src/lib/Bcfg2/Server/Hostbase/hostbase/models.py b/src/lib/Bcfg2/Server/Hostbase/hostbase/models.py
deleted file mode 100644
index 3f08a09a0..000000000
--- a/src/lib/Bcfg2/Server/Hostbase/hostbase/models.py
+++ /dev/null
@@ -1,210 +0,0 @@
-from django.db import models
-
-# Create your models here.
-class Host(models.Model):
- NETGROUP_CHOICES = (
- ('none', 'none'),('cave', 'cave'),('ccst', 'ccst'),('mcs', 'mcs'),
- ('mmlab', 'mmlab'),('sp', 'sp'),('red', 'red'),('virtual', 'virtual'),
- ('win', 'win'),('xterm', 'xterm'),('lcrc', 'lcrc'),('anlext', 'anlext'),
- ('teragrid', 'teragrid')
- )
- STATUS_CHOICES = (
- ('active','active'),('dormant','dormant')
- )
- SUPPORT_CHOICES = (
- ('green','green'),('yellow','yellow'),('red','red')
- )
- CLASS_CHOICES = (
- ('scientific','scientific'),
- ('operations','operations'),('guest','guest'),
- ('confidential','confidential'),('public','public')
- )
- WHATAMI_CHOICES = (
- ('aix-3', 'aix-3'), ('aix-4', 'aix-4'),
- ('aix-5', 'aix-5'), ('baytech', 'baytech'),
- ('decserver', 'decserver'), ('dialup', 'dialup'),
- ('dos', 'dos'), ('freebsd', 'freebsd'),
- ('hpux', 'hpux'), ('irix-5', 'irix-5'),
- ('irix-6', 'irix-6'), ('linux', 'linux'),
- ('linux-2', 'linux-2'), ('linux-rh73', 'linux-rh73'),
- ('linux-rh8', 'linux-rh8'), ('linux-sles8', 'linux-sles8'),
- ('linux-sles8-64', 'linux-sles8-64'), ('linux-sles8-ia32', 'linux-sles8-ia32'),
- ('linux-sles8-ia64', 'linux-sles8-ia64'), ('mac', 'mac'),
- ('network', 'network'), ('next', 'next'),
- ('none', 'none'), ('osf', 'osf'), ('printer', 'printer'),
- ('robot', 'robot'), ('solaris-2', 'solaris-2'),
- ('sun4', 'sun4'), ('unknown', 'unknown'), ('virtual', 'virtual'),
- ('win31', 'win31'), ('win95', 'win95'),
- ('winNTs', 'winNTs'), ('winNTw', 'winNTw'),
- ('win2k', 'win2k'), ('winXP', 'winXP'), ('xterm', 'xterm')
- )
- hostname = models.CharField(max_length=64)
- whatami = models.CharField(max_length=16)
- netgroup = models.CharField(max_length=32, choices=NETGROUP_CHOICES)
- security_class = models.CharField('class', max_length=16)
- support = models.CharField(max_length=8, choices=SUPPORT_CHOICES)
- csi = models.CharField(max_length=32, blank=True)
- printq = models.CharField(max_length=32, blank=True)
- outbound_smtp = models.BooleanField()
- primary_user = models.EmailField()
- administrator = models.EmailField(blank=True)
- location = models.CharField(max_length=16)
- comments = models.TextField(blank=True)
- expiration_date = models.DateField(null=True, blank=True)
- last = models.DateField(auto_now=True, auto_now_add=True)
- status = models.CharField(max_length=7, choices=STATUS_CHOICES)
- dirty = models.BooleanField()
-
- class Admin:
- list_display = ('hostname', 'last')
- search_fields = ['hostname']
-
- def __str__(self):
- return self.hostname
-
- def get_logs(self):
- """
- Get host's log.
- """
- return Log.objects.filter(hostname=self.hostname)
-
-class Interface(models.Model):
- TYPE_CHOICES = (
- ('eth', 'ethernet'), ('wl', 'wireless'), ('virtual', 'virtual'), ('myr', 'myr'),
- ('mgmt', 'mgmt'), ('tape', 'tape'), ('fe', 'fe'), ('ge', 'ge'),
- )
- # FIXME: The new admin interface has change a lot.
- #host = models.ForeignKey(Host, edit_inline=models.TABULAR, num_in_admin=2)
- host = models.ForeignKey(Host)
- # FIXME: The new admin interface has change a lot.
- #mac_addr = models.CharField(max_length=32, core=True)
- mac_addr = models.CharField(max_length=32)
- hdwr_type = models.CharField('type', max_length=16, choices=TYPE_CHOICES, blank=True)
- # FIXME: The new admin interface has change a lot.
- # radio_admin=True, blank=True)
- dhcp = models.BooleanField()
-
- def __str__(self):
- return self.mac_addr
-
- class Admin:
- list_display = ('mac_addr', 'host')
- search_fields = ['mac_addr']
-
-class IP(models.Model):
- interface = models.ForeignKey(Interface)
- # FIXME: The new admin interface has change a lot.
- # edit_inline=models.TABULAR, num_in_admin=1)
- #ip_addr = models.IPAddressField(core=True)
- ip_addr = models.IPAddressField()
-
- def __str__(self):
- return self.ip_addr
-
- class Admin:
- pass
-
- class Meta:
- ordering = ('ip_addr', )
-
-class MX(models.Model):
- priority = models.IntegerField(blank=True)
- # FIXME: The new admin interface has change a lot.
- #mx = models.CharField(max_length=64, blank=True, core=True)
- mx = models.CharField(max_length=64, blank=True)
-
- def __str__(self):
- return (" ".join([str(self.priority), self.mx]))
-
- class Admin:
- pass
-
-class Name(models.Model):
- DNS_CHOICES = (
- ('global','global'),('internal','ANL internal'),
- ('private','private')
- )
- # FIXME: The new admin interface has change a lot.
- #ip = models.ForeignKey(IP, edit_inline=models.TABULAR, num_in_admin=1)
- ip = models.ForeignKey(IP)
- # FIXME: The new admin interface has change a lot.
- #name = models.CharField(max_length=64, core=True)
- name = models.CharField(max_length=64)
- dns_view = models.CharField(max_length=16, choices=DNS_CHOICES)
- only = models.BooleanField(blank=True)
- mxs = models.ManyToManyField(MX)
-
- def __str__(self):
- return self.name
-
- class Admin:
- pass
-
-class CName(models.Model):
- # FIXME: The new admin interface has change a lot.
- #name = models.ForeignKey(Name, edit_inline=models.TABULAR, num_in_admin=1)
- name = models.ForeignKey(Name)
- # FIXME: The new admin interface has change a lot.
- #cname = models.CharField(max_length=64, core=True)
- cname = models.CharField(max_length=64)
-
- def __str__(self):
- return self.cname
-
- class Admin:
- pass
-
-class Nameserver(models.Model):
- name = models.CharField(max_length=64, blank=True)
-
- def __str__(self):
- return self.name
-
- class Admin:
- pass
-
-class ZoneAddress(models.Model):
- ip_addr = models.IPAddressField(blank=True)
-
- def __str__(self):
- return self.ip_addr
-
- class Admin:
- pass
-
-class Zone(models.Model):
- zone = models.CharField(max_length=64)
- serial = models.IntegerField()
- admin = models.CharField(max_length=64)
- primary_master = models.CharField(max_length=64)
- expire = models.IntegerField()
- retry = models.IntegerField()
- refresh = models.IntegerField()
- ttl = models.IntegerField()
- nameservers = models.ManyToManyField(Nameserver, blank=True)
- mxs = models.ManyToManyField(MX, blank=True)
- addresses = models.ManyToManyField(ZoneAddress, blank=True)
- aux = models.TextField(blank=True)
-
- def __str__(self):
- return self.zone
-
- class Admin:
- pass
-
-class Log(models.Model):
- # FIXME: Proposal hostname = models.ForeignKey(Host)
- hostname = models.CharField(max_length=64)
- date = models.DateTimeField(auto_now=True, auto_now_add=True)
- log = models.TextField()
-
- def __str__(self):
- return self.hostname
-
-class ZoneLog(models.Model):
- zone = models.CharField(max_length=64)
- date = models.DateTimeField(auto_now=True, auto_now_add=True)
- log = models.TextField()
-
- def __str__(self):
- return self.zone
diff --git a/src/lib/Bcfg2/Server/Hostbase/hostbase/sql/zone.sql b/src/lib/Bcfg2/Server/Hostbase/hostbase/sql/zone.sql
deleted file mode 100644
index b78187ab2..000000000
--- a/src/lib/Bcfg2/Server/Hostbase/hostbase/sql/zone.sql
+++ /dev/null
@@ -1,2 +0,0 @@
-INSERT INTO hostbase_zone (zone, serial, admin, primary_master, expire, retry, refresh, ttl, aux)
-VALUES ('.rev', 0, '', '', 1209600, 1800, 7200, 7200, '');
\ No newline at end of file
diff --git a/src/lib/Bcfg2/Server/Hostbase/hostbase/urls.py b/src/lib/Bcfg2/Server/Hostbase/hostbase/urls.py
deleted file mode 100644
index 0ee204abe..000000000
--- a/src/lib/Bcfg2/Server/Hostbase/hostbase/urls.py
+++ /dev/null
@@ -1,68 +0,0 @@
-# -*- coding: utf-8 -*-
-from django.conf.urls.defaults import *
-from django.contrib.auth.decorators import login_required
-from django.core.urlresolvers import reverse
-from django.views.generic.create_update import create_object, update_object, delete_object
-from django.views.generic.list_detail import object_detail, object_list
-
-from models import Host, Zone, Log
-
-host_detail_dict = {
- 'queryset':Host.objects.all(),
- 'template_name':'host.html',
- 'template_object_name':'host',
-}
-
-host_delete_dict = {
- 'model':Host,
- 'post_delete_redirect':'/',
-}
-
-host_log_detail_dict = host_detail_dict.copy()
-host_log_detail_dict['template_name'] = 'logviewer.html'
-
-host_dns_detail_dict = host_detail_dict.copy()
-host_dns_detail_dict['template_name'] = 'dns.html'
-
-zone_new_dict = {
- 'model':Zone,
- 'template_name':'zonenew.html',
- 'post_save_redirect':'../%(id)s',
-}
-
-zones_list_dict = {
- 'queryset':Zone.objects.all(),
- 'template_name':'zones.html',
- 'template_object_name':'zone',
-}
-
-zone_detail_dict = {
- 'queryset':Zone.objects.all(),
- 'template_name':'zoneview.html',
- 'template_object_name':'zone',
-}
-
-urlpatterns = patterns('',
- (r'^(?P<object_id>\d+)/$', object_detail, host_detail_dict, 'host_detail'),
- (r'^zones/new/$', login_required(create_object), zone_new_dict, 'zone_new'),
- (r'^zones/(?P<object_id>\d+)/edit', login_required(update_object), zone_new_dict, 'zone_edit'),
- (r'^zones/$', object_list, zones_list_dict, 'zone_list'),
- (r'^zones/(?P<object_id>\d+)/$', object_detail, zone_detail_dict, 'zone_detail'),
- (r'^zones/(?P<object_id>\d+)/$', object_detail, zone_detail_dict, 'zone_detail'),
- (r'^\d+/logs/(?P<object_id>\d+)/', object_detail, { 'queryset':Log.objects.all() }, 'log_detail'),
- (r'^(?P<object_id>\d+)/logs/', object_detail, host_log_detail_dict, 'host_log_list'),
- (r'^(?P<object_id>\d+)/dns', object_detail, host_dns_detail_dict, 'host_dns_list'),
- (r'^(?P<object_id>\d+)/remove', login_required(delete_object), host_delete_dict, 'host_delete'),
-)
-
-urlpatterns += patterns('Bcfg2.Server.Hostbase.hostbase.views',
- (r'^$', 'search'),
- (r'^(?P<host_id>\d+)/edit', 'edit'),
- (r'^(?P<host_id>\d+)/(?P<item>\D+)/(?P<item_id>\d+)/confirm', 'confirm'),
- (r'^(?P<host_id>\d+)/(?P<item>\D+)/(?P<item_id>\d+)/(?P<name_id>\d+)/confirm', 'confirm'),
- (r'^(?P<host_id>\d+)/dns/edit', 'dnsedit'),
- (r'^new', 'new'),
- (r'^(?P<host_id>\d+)/copy', 'copy'),
-# (r'^hostinfo', 'hostinfo'),
- (r'^zones/(?P<zone_id>\d+)/(?P<item>\D+)/(?P<item_id>\d+)/confirm', 'confirm'),
-)
diff --git a/src/lib/Bcfg2/Server/Hostbase/hostbase/views.py b/src/lib/Bcfg2/Server/Hostbase/hostbase/views.py
deleted file mode 100644
index 57ef5eff8..000000000
--- a/src/lib/Bcfg2/Server/Hostbase/hostbase/views.py
+++ /dev/null
@@ -1,970 +0,0 @@
-"""Views.py
-Contains all the views associated with the hostbase app
-Also has does form validation
-"""
-from django.http import HttpResponse, HttpResponseRedirect
-
-from django.contrib.auth.decorators import login_required
-from django.contrib.auth import logout
-from django.template import RequestContext
-from Bcfg2.Server.Hostbase.hostbase.models import *
-from datetime import date
-from django.db import connection
-from django.shortcuts import render_to_response
-from django import forms
-from Bcfg2.Server.Hostbase import settings, regex
-import re, copy
-
-attribs = ['hostname', 'whatami', 'netgroup', 'security_class', 'support',
- 'csi', 'printq', 'primary_user', 'administrator', 'location',
- 'status', 'comments']
-
-zoneattribs = ['zone', 'admin', 'primary_master', 'expire', 'retry',
- 'refresh', 'ttl', 'aux']
-
-dispatch = {'mac_addr':'i.mac_addr LIKE \'%%%%%s%%%%\'',
- 'ip_addr':'p.ip_addr LIKE \'%%%%%s%%%%\'',
- 'name':'n.name LIKE \'%%%%%s%%%%\'',
-## 'hostname':'n.name LIKE \'%%%%%s%%%%\'',
-## 'cname':'n.name LIKE \'%%%%%s%%%%\'',
- 'mx':'m.mx LIKE \'%%%%%s%%%%\'',
- 'dns_view':'n.dns_view = \'%s\'',
- 'hdwr_type':'i.hdwr_type = \'%s\'',
- 'dhcp':'i.dhcp = \'%s\''}
-
-def search(request):
- """Search for hosts in the database
- If more than one field is entered, logical AND is used
- """
- if 'sub' in request.GET:
- querystring = """SELECT DISTINCT h.hostname, h.id, h.status
- FROM (((((hostbase_host h
- INNER JOIN hostbase_interface i ON h.id = i.host_id)
- INNER JOIN hostbase_ip p ON i.id = p.interface_id)
- INNER JOIN hostbase_name n ON p.id = n.ip_id)
- INNER JOIN hostbase_name_mxs x ON n.id = x.name_id)
- INNER JOIN hostbase_mx m ON m.id = x.mx_id)
- LEFT JOIN hostbase_cname c ON n.id = c.name_id
- WHERE """
-
- _and = False
- for field in request.POST:
- if request.POST[field] and field == 'hostname':
- if _and:
- querystring += ' AND '
- querystring += 'n.name LIKE \'%%%%%s%%%%\' or c.cname LIKE \'%%%%%s%%%%\'' % (request.POST[field], request.POST[field])
- _and = True
- elif request.POST[field] and field in dispatch:
- if _and:
- querystring += ' AND '
- querystring += dispatch[field] % request.POST[field]
- _and = True
- elif request.POST[field]:
- if _and:
- querystring += ' AND '
- querystring += "h.%s LIKE \'%%%%%s%%%%\'" % (field, request.POST[field])
- _and = True
-
- if not _and:
- cursor = connection.cursor()
- cursor.execute("""SELECT hostname, id, status
- FROM hostbase_host ORDER BY hostname""")
- results = cursor.fetchall()
- else:
- querystring += " ORDER BY h.hostname"
- cursor = connection.cursor()
- cursor.execute(querystring)
- results = cursor.fetchall()
-
- return render_to_response('results.html',
- {'hosts': results,
- 'logged_in': request.session.get('_auth_user_id', False)},
- context_instance = RequestContext(request))
- else:
- return render_to_response('search.html',
- {'TYPE_CHOICES': Interface.TYPE_CHOICES,
- 'DNS_CHOICES': Name.DNS_CHOICES,
- 'yesno': [(1, 'yes'), (0, 'no')],
- 'logged_in': request.session.get('_auth_user_id', False)},
- context_instance = RequestContext(request))
-
-
-def gethostdata(host_id, dnsdata=False):
- """Grabs the necessary data about a host
- Replaces a lot of repeated code"""
- hostdata = {}
- hostdata['ips'] = {}
- hostdata['names'] = {}
- hostdata['cnames'] = {}
- hostdata['mxs'] = {}
- hostdata['host'] = Host.objects.get(id=host_id)
- hostdata['interfaces'] = hostdata['host'].interface_set.all()
- for interface in hostdata['interfaces']:
- hostdata['ips'][interface.id] = interface.ip_set.all()
- if dnsdata:
- for ip in hostdata['ips'][interface.id]:
- hostdata['names'][ip.id] = ip.name_set.all()
- for name in hostdata['names'][ip.id]:
- hostdata['cnames'][name.id] = name.cname_set.all()
- hostdata['mxs'][name.id] = name.mxs.all()
- return hostdata
-
-def fill(template, hostdata, dnsdata=False):
- """Fills a generic template
- Replaces a lot of repeated code"""
- if dnsdata:
- template.names = hostdata['names']
- template.cnames = hostdata['cnames']
- template.mxs = hostdata['mxs']
- template.host = hostdata['host']
- template.interfaces = hostdata['interfaces']
- template.ips = hostdata['ips']
- return template
-
-def edit(request, host_id):
- """edit general host information"""
- manipulator = Host.ChangeManipulator(host_id)
- changename = False
- if request.method == 'POST':
- host = Host.objects.get(id=host_id)
- before = host.__dict__.copy()
- if request.POST['hostname'] != host.hostname:
- oldhostname = host.hostname.split(".")[0]
- changename = True
- interfaces = host.interface_set.all()
- old_interfaces = [interface.__dict__.copy() for interface in interfaces]
-
- new_data = request.POST.copy()
-
- errors = manipulator.get_validation_errors(new_data)
- if not errors:
-
- # somehow keep track of multiple interface change manipulators
- # as well as multiple ip chnage manipulators??? (add manipulators???)
- # change to many-to-many??????
-
- # dynamically look up mx records?
- text = ''
-
- for attrib in attribs:
- if host.__dict__[attrib] != request.POST[attrib]:
- text = do_log(text, attrib, host.__dict__[attrib], request.POST[attrib])
- host.__dict__[attrib] = request.POST[attrib]
-
- if 'expiration_date' in request.POST:
- ymd = request.POST['expiration_date'].split("-")
- if date(int(ymd[0]), int(ymd[1]), int(ymd[2])) != host.__dict__['expiration_date']:
- text = do_log(text, 'expiration_date', host.__dict__['expiration_date'],
- request.POST['expiration_date'])
- host.__dict__['expiration_date'] = date(int(ymd[0]), int(ymd[1]), int(ymd[2]))
-
- for inter in interfaces:
- changetype = False
- ips = IP.objects.filter(interface=inter.id)
- if inter.mac_addr != request.POST['mac_addr%d' % inter.id]:
- text = do_log(text, 'mac_addr', inter.mac_addr, request.POST['mac_addr%d' % inter.id])
- inter.mac_addr = request.POST['mac_addr%d' % inter.id].lower().replace('-',':')
- if inter.hdwr_type != request.POST['hdwr_type%d' % inter.id]:
- oldtype = inter.hdwr_type
- text = do_log(text, 'hdwr_type', oldtype, request.POST['hdwr_type%d' % inter.id])
- inter.hdwr_type = request.POST['hdwr_type%d' % inter.id]
- changetype = True
- if (('dhcp%d' % inter.id) in request.POST and not inter.dhcp or
- not ('dhcp%d' % inter.id) in request.POST and inter.dhcp):
- text = do_log(text, 'dhcp', inter.dhcp, int(not inter.dhcp))
- inter.dhcp = not inter.dhcp
- for ip in ips:
- names = ip.name_set.all()
- if not ip.ip_addr == request.POST['ip_addr%d' % ip.id]:
- oldip = ip.ip_addr
- oldsubnet = oldip.split(".")[2]
- ip.ip_addr = request.POST['ip_addr%d' % ip.id]
- ip.save()
- text = do_log(text, 'ip_addr', oldip, ip.ip_addr)
- for name in names:
- if name.name.split(".")[0].endswith('-%s' % oldsubnet):
- name.name = name.name.replace('-%s' % oldsubnet, '-%s' % ip.ip_addr.split(".")[2])
- name.save()
- if changetype:
- for name in names:
- if name.name.split(".")[0].endswith('-%s' % oldtype):
- name.name = name.name.replace('-%s' % oldtype, '-%s' % inter.hdwr_type)
- name.save()
- if changename:
- for name in names:
- if name.name.startswith(oldhostname):
- name.name = name.name.replace(oldhostname, host.hostname.split(".")[0])
- name.save()
- if request.POST['%dip_addr' % inter.id]:
- mx, created = MX.objects.get_or_create(priority=settings.PRIORITY, mx=settings.DEFAULT_MX)
- if created:
- mx.save()
- new_ip = IP(interface=inter, ip_addr=request.POST['%dip_addr' % inter.id])
- new_ip.save()
- text = do_log(text, '*new*', 'ip_addr', new_ip.ip_addr)
- new_name = "-".join([host.hostname.split(".")[0],
- new_ip.ip_addr.split(".")[2]])
- new_name += "." + host.hostname.split(".", 1)[1]
- name = Name(ip=new_ip, name=new_name,
- dns_view='global', only=False)
- name.save()
- name.mxs.add(mx)
- new_name = "-".join([host.hostname.split(".")[0],
- inter.hdwr_type])
- new_name += "." + host.hostname.split(".", 1)[1]
- name = Name(ip=new_ip, name=new_name,
- dns_view='global', only=False)
- name.save()
- name.mxs.add(mx)
- name = Name(ip=new_ip, name=host.hostname,
- dns_view='global', only=False)
- name.save()
- name.mxs.add(mx)
- inter.save()
- if request.POST['mac_addr_new']:
- new_inter = Interface(host=host,
- mac_addr=request.POST['mac_addr_new'].lower().replace('-',':'),
- hdwr_type=request.POST['hdwr_type_new'],
- dhcp=request.POST['dhcp_new'])
- text = do_log(text, '*new*', 'mac_addr', new_inter.mac_addr)
- new_inter.save()
- if request.POST['mac_addr_new'] and request.POST['ip_addr_new']:
- mx, created = MX.objects.get_or_create(priority=settings.PRIORITY, mx=settings.DEFAULT_MX)
- if created:
- mx.save()
- new_ip = IP(interface=new_inter, ip_addr=request.POST['ip_addr_new'])
- new_ip.save()
- text = do_log(text, '*new*', 'ip_addr', new_ip.ip_addr)
- new_name = "-".join([host.hostname.split(".")[0],
- new_ip.ip_addr.split(".")[2]])
- new_name += "." + host.hostname.split(".", 1)[1]
- name = Name(ip=new_ip, name=new_name,
- dns_view='global', only=False)
- name.save()
- name.mxs.add(mx)
- new_name = "-".join([host.hostname.split(".")[0],
- new_inter.hdwr_type])
- new_name += "." + host.hostname.split(".", 1)[1]
- name = Name(ip=new_ip, name=new_name,
- dns_view='global', only=False)
- name.save()
- name.mxs.add(mx)
- name = Name(ip=new_ip, name=host.hostname,
- dns_view='global', only=False)
- name.save()
- name.mxs.add(mx)
- if request.POST['ip_addr_new'] and not request.POST['mac_addr_new']:
- mx, created = MX.objects.get_or_create(priority=settings.PRIORITY, mx=settings.DEFAULT_MX)
- if created:
- mx.save()
- new_inter = Interface(host=host, mac_addr="",
- hdwr_type=request.POST['hdwr_type_new'],
- dhcp=False)
- new_inter.save()
- new_ip = IP(interface=new_inter, ip_addr=request.POST['ip_addr_new'])
- new_ip.save()
- text = do_log(text, '*new*', 'ip_addr', new_ip.ip_addr)
- new_name = "-".join([host.hostname.split(".")[0],
- new_ip.ip_addr.split(".")[2]])
- new_name += "." + host.hostname.split(".", 1)[1]
- name = Name(ip=new_ip, name=new_name,
- dns_view='global', only=False)
- name.save()
- name.mxs.add(mx)
- new_name = "-".join([host.hostname.split(".")[0],
- new_inter.hdwr_type])
- new_name += "." + host.hostname.split(".", 1)[1]
- name = Name(ip=new_ip, name=new_name,
- dns_view='global', only=False)
- name.save()
- name.mxs.add(mx)
- name = Name(ip=new_ip, name=host.hostname,
- dns_view='global', only=False)
- name.save()
- name.mxs.add(mx)
- if text:
- log = Log(hostname=host.hostname, log=text)
- log.save()
- host.save()
- return HttpResponseRedirect('/hostbase/%s/' % host.id)
- else:
- return render_to_response('errors.html',
- {'failures': errors,
- 'logged_in': request.session.get('_auth_user_id', False)},
- context_instance = RequestContext(request))
- else:
- host = Host.objects.get(id=host_id)
- interfaces = []
- for interface in host.interface_set.all():
- interfaces.append([interface, interface.ip_set.all()])
- return render_to_response('edit.html',
- {'host': host,
- 'interfaces': interfaces,
- 'TYPE_CHOICES': Interface.TYPE_CHOICES,
- 'logged_in': request.session.get('_auth_user_id', False)},
- context_instance = RequestContext(request))
-
-def confirm(request, item, item_id, host_id=None, name_id=None, zone_id=None):
- """Asks if the user is sure he/she wants to remove an item"""
- if 'sub' in request.GET:
- if item == 'interface':
- for ip in Interface.objects.get(id=item_id).ip_set.all():
- for name in ip.name_set.all():
- name.cname_set.all().delete()
- ip.name_set.all().delete()
- Interface.objects.get(id=item_id).ip_set.all().delete()
- Interface.objects.get(id=item_id).delete()
- elif item=='ip':
- for name in IP.objects.get(id=item_id).name_set.all():
- name.cname_set.all().delete()
- IP.objects.get(id=item_id).name_set.all().delete()
- IP.objects.get(id=item_id).delete()
- elif item=='cname':
- CName.objects.get(id=item_id).delete()
- elif item=='mx':
- mx = MX.objects.get(id=item_id)
- Name.objects.get(id=name_id).mxs.remove(mx)
- elif item=='name':
- Name.objects.get(id=item_id).cname_set.all().delete()
- Name.objects.get(id=item_id).delete()
- elif item=='nameserver':
- nameserver = Nameserver.objects.get(id=item_id)
- Zone.objects.get(id=zone_id).nameservers.remove(nameserver)
- elif item=='zonemx':
- mx = MX.objects.get(id=item_id)
- Zone.objects.get(id=zone_id).mxs.remove(mx)
- elif item=='address':
- address = ZoneAddress.objects.get(id=item_id)
- Zone.objects.get(id=zone_id).addresses.remove(address)
- if item == 'cname' or item == 'mx' or item == 'name':
- return HttpResponseRedirect('/hostbase/%s/dns/edit' % host_id)
- elif item == 'nameserver' or item == 'zonemx' or item == 'address':
- return HttpResponseRedirect('/hostbase/zones/%s/edit' % zone_id)
- else:
- return HttpResponseRedirect('/hostbase/%s/edit' % host_id)
- else:
- interface = None
- ips = []
- names = []
- cnames = []
- mxs = []
- zonemx = None
- nameserver = None
- address = None
- if item == 'interface':
- interface = Interface.objects.get(id=item_id)
- ips = interface.ip_set.all()
- for ip in ips:
- for name in ip.name_set.all():
- names.append((ip.id, name))
- for cname in name.cname_set.all():
- cnames.append((name.id, cname))
- for mx in name.mxs.all():
- mxs.append((name.id, mx))
- elif item=='ip':
- ips = [IP.objects.get(id=item_id)]
- for name in ips[0].name_set.all():
- names.append((ips[0].id, name))
- for cname in name.cname_set.all():
- cnames.append((name.id, cname))
- for mx in name.mxs.all():
- mxs.append((name.id, mx))
- elif item=='name':
- names = [Name.objects.get(id=item_id)]
- for cname in names[0].cname_set.all():
- cnames.append((names[0].id, cname))
- for mx in names[0].mxs.all():
- mxs.append((names[0].id, mx))
- elif item=='cname':
- cnames = [CName.objects.get(id=item_id)]
- elif item=='mx':
- mxs = [MX.objects.get(id=item_id)]
- elif item=='zonemx':
- zonemx = MX.objects.get(id=item_id)
- elif item=='nameserver':
- nameserver = Nameserver.objects.get(id=item_id)
- elif item=='address':
- address = ZoneAddress.objects.get(id=item_id)
- return render_to_response('confirm.html',
- {'interface': interface,
- 'ips': ips,
- 'names': names,
- 'cnames': cnames,
- 'id': item_id,
- 'type': item,
- 'host_id': host_id,
- 'mxs': mxs,
- 'zonemx': zonemx,
- 'nameserver': nameserver,
- 'address': address,
- 'zone_id': zone_id,
- 'logged_in': request.session.get('_auth_user_id', False)},
- context_instance = RequestContext(request))
-
-def dnsedit(request, host_id):
- """Edits specific DNS information
- Data is validated before committed to the database"""
- text = ''
- if 'sub' in request.GET:
- hostdata = gethostdata(host_id, True)
- for ip in hostdata['names']:
- ipaddr = IP.objects.get(id=ip)
- ipaddrstr = ipaddr.__str__()
- for name in hostdata['cnames']:
- for cname in hostdata['cnames'][name]:
- if regex.host.match(request.POST['cname%d' % cname.id]):
- text = do_log(text, 'cname', cname.cname, request.POST['cname%d' % cname.id])
- cname.cname = request.POST['cname%d' % cname.id]
- cname.save()
- for name in hostdata['mxs']:
- for mx in hostdata['mxs'][name]:
- if (mx.priority != request.POST['priority%d' % mx.id] and mx.mx != request.POST['mx%d' % mx.id]):
- text = do_log(text, 'mx', ' '.join([str(mx.priority), str(mx.mx)]),
- ' '.join([request.POST['priority%d' % mx.id], request.POST['mx%d' % mx.id]]))
- nameobject = Name.objects.get(id=name)
- nameobject.mxs.remove(mx)
- newmx, created = MX.objects.get_or_create(priority=request.POST['priority%d' % mx.id], mx=request.POST['mx%d' % mx.id])
- if created:
- newmx.save()
- nameobject.mxs.add(newmx)
- nameobject.save()
- for name in hostdata['names'][ip]:
- name.name = request.POST['name%d' % name.id]
- name.dns_view = request.POST['dns_view%d' % name.id]
- if (request.POST['%dcname' % name.id] and
- regex.host.match(request.POST['%dcname' % name.id])):
- cname = CName(name=name,
- cname=request.POST['%dcname' % name.id])
- text = do_log(text, '*new*', 'cname', cname.cname)
- cname.save()
- if (request.POST['%dpriority' % name.id] and
- request.POST['%dmx' % name.id]):
- mx, created = MX.objects.get_or_create(priority=request.POST['%dpriority' % name.id],
- mx=request.POST['%dmx' % name.id])
- if created:
- mx.save()
- text = do_log(text, '*new*', 'mx',
- ' '.join([request.POST['%dpriority' % name.id],
- request.POST['%dmx' % name.id]]))
- name.mxs.add(mx)
- name.save()
- if request.POST['%sname' % ipaddrstr]:
- name = Name(ip=ipaddr,
- dns_view=request.POST['%sdns_view' % ipaddrstr],
- name=request.POST['%sname' % ipaddrstr], only=False)
- text = do_log(text, '*new*', 'name', name.name)
- name.save()
- if (request.POST['%scname' % ipaddrstr] and
- regex.host.match(request.POST['%scname' % ipaddrstr])):
- cname = CName(name=name,
- cname=request.POST['%scname' % ipaddrstr])
- text = do_log(text, '*new*', 'cname', cname.cname)
- cname.save()
- if (request.POST['%smx' % ipaddrstr] and
- request.POST['%spriority' % ipaddrstr]):
- mx, created = MX.objects.get_or_create(priority=request.POST['%spriority' % ipaddrstr],
- mx=request.POST['%smx' % ipaddrstr])
- if created:
- mx.save()
- text = do_log(text, '*new*', 'mx',
- ' '.join([request.POST['%spriority' % ipaddrstr], request.POST['%smx' % ipaddrstr]]))
- name.mxs.add(mx)
- if text:
- log = Log(hostname=hostdata['host'].hostname, log=text)
- log.save()
- return HttpResponseRedirect('/hostbase/%s/dns' % host_id)
- else:
- host = Host.objects.get(id=host_id)
- ips = []
- info = []
- cnames = []
- mxs = []
- interfaces = host.interface_set.all()
- for interface in host.interface_set.all():
- ips.extend(interface.ip_set.all())
- for ip in ips:
- info.append([ip, ip.name_set.all()])
- for name in ip.name_set.all():
- cnames.extend(name.cname_set.all())
- mxs.append((name.id, name.mxs.all()))
- return render_to_response('dnsedit.html',
- {'host': host,
- 'info': info,
- 'cnames': cnames,
- 'mxs': mxs,
- 'request': request,
- 'interfaces': interfaces,
- 'DNS_CHOICES': Name.DNS_CHOICES,
- 'logged_in': request.session.get('_auth_user_id', False)},
- context_instance = RequestContext(request))
-
-def new(request):
- """Function for creating a new host in hostbase
- Data is validated before committed to the database"""
- if 'sub' in request.GET:
- try:
- Host.objects.get(hostname=request.POST['hostname'].lower())
- return render_to_response('errors.html',
- {'failures': ['%s already exists in hostbase' % request.POST['hostname']],
- 'logged_in': request.session.get('_auth_user_id', False)},
- context_instance = RequestContext(request))
- except:
- pass
- if not validate(request, True):
- if not request.POST['ip_addr_new'] and not request.POST['ip_addr_new2']:
- return render_to_response('errors.html',
- {'failures': ['ip_addr: You must enter an ip address'],
- 'logged_in': request.session.get('_auth_user_id', False)},
- context_instance = RequestContext(request))
- host = Host()
- # this is the stuff that validate() should take care of
- # examine the check boxes for any changes
- host.outbound_smtp = 'outbound_smtp' in request.POST
- for attrib in attribs:
- if attrib in request.POST:
- host.__dict__[attrib] = request.POST[attrib].lower()
- if 'comments' in request.POST:
- host.comments = request.POST['comments']
- if 'expiration_date' in request.POST:
-# ymd = request.POST['expiration_date'].split("-")
-# host.__dict__['expiration_date'] = date(int(ymd[0]), int(ymd[1]), int(ymd[2]))
- host.__dict__['expiration_date'] = date(2000, 1, 1)
- host.status = 'active'
- host.save()
- else:
- return render_to_response('errors.html',
- {'failures': validate(request, True),
- 'logged_in': request.session.get('_auth_user_id', False)},
- context_instance = RequestContext(request))
-
- if request.POST['mac_addr_new']:
- new_inter = Interface(host=host,
- mac_addr = request.POST['mac_addr_new'].lower().replace('-',':'),
- hdwr_type = request.POST['hdwr_type_new'],
- dhcp = 'dhcp_new' in request.POST)
- new_inter.save()
- if request.POST['mac_addr_new'] and request.POST['ip_addr_new']:
- new_ip = IP(interface=new_inter, ip_addr=request.POST['ip_addr_new'])
-# Change all this things. Use a "post_save" signal handler for model Host to create all sociate models
-# and use a generi view.
- new_ip.save()
- mx, created = MX.objects.get_or_create(priority=settings.PRIORITY, mx=settings.DEFAULT_MX)
- if created:
- mx.save()
- new_name = "-".join([host.hostname.split(".")[0],
- new_ip.ip_addr.split(".")[2]])
- new_name += "." + host.hostname.split(".", 1)[1]
- name = Name(ip=new_ip, name=new_name, dns_view='global', only=False)
- name.save()
- name.mxs.add(mx)
- new_name = "-".join([host.hostname.split(".")[0],
- new_inter.hdwr_type])
- new_name += "." + host.hostname.split(".", 1)[1]
- name = Name(ip=new_ip, name=new_name,
- dns_view='global', only=False)
- name.save()
- name.mxs.add(mx)
- name = Name(ip=new_ip, name=host.hostname,
- dns_view='global', only=False)
- name.save()
- name.mxs.add(mx)
- if request.POST['ip_addr_new'] and not request.POST['mac_addr_new']:
- new_inter = Interface(host=host,
- mac_addr="",
- hdwr_type=request.POST['hdwr_type_new'],
- dhcp=False)
- new_inter.save()
- new_ip = IP(interface=new_inter, ip_addr=request.POST['ip_addr_new'])
- new_ip.save()
- mx, created = MX.objects.get_or_create(priority=settings.PRIORITY, mx=settings.DEFAULT_MX)
- if created:
- mx.save()
- new_name = "-".join([host.hostname.split(".")[0],
- new_ip.ip_addr.split(".")[2]])
- new_name += "." + host.hostname.split(".", 1)[1]
- name = Name(ip=new_ip, name=new_name,
- dns_view='global', only=False)
- name.save()
- name.mxs.add(mx)
- new_name = "-".join([host.hostname.split(".")[0],
- new_inter.hdwr_type])
- new_name += "." + host.hostname.split(".", 1)[1]
- name = Name(ip=new_ip, name=new_name,
- dns_view='global', only=False)
- name.save()
- name.mxs.add(mx)
- name = Name(ip=new_ip, name=host.hostname,
- dns_view='global', only=False)
- name.save()
- name.mxs.add(mx)
- if request.POST['mac_addr_new2']:
- new_inter = Interface(host=host,
- mac_addr = request.POST['mac_addr_new2'].lower().replace('-',':'),
- hdwr_type = request.POST['hdwr_type_new2'],
- dhcp = 'dhcp_new2' in request.POST)
- new_inter.save()
- if request.POST['mac_addr_new2'] and request.POST['ip_addr_new2']:
- new_ip = IP(interface=new_inter, ip_addr=request.POST['ip_addr_new2'])
- new_ip.save()
- mx, created = MX.objects.get_or_create(priority=settings.PRIORITY, mx=settings.DEFAULT_MX)
- if created:
- mx.save()
- new_name = "-".join([host.hostname.split(".")[0],
- new_ip.ip_addr.split(".")[2]])
- new_name += "." + host.hostname.split(".", 1)[1]
- name = Name(ip=new_ip, name=new_name,
- dns_view='global', only=False)
- name.save()
- name.mxs.add(mx)
- new_name = "-".join([host.hostname.split(".")[0],
- new_inter.hdwr_type])
- new_name += "." + host.hostname.split(".", 1)[1]
- name = Name(ip=new_ip, name=new_name,
- dns_view='global', only=False)
- name.save()
- name.mxs.add(mx)
- name = Name(ip=new_ip, name=host.hostname,
- dns_view='global', only=False)
- name.save()
- name.mxs.add(mx)
- if request.POST['ip_addr_new2'] and not request.POST['mac_addr_new2']:
- new_inter = Interface(host=host,
- mac_addr="",
- hdwr_type=request.POST['hdwr_type_new2'],
- dhcp=False)
- new_inter.save()
- new_ip = IP(interface=new_inter, ip_addr=request.POST['ip_addr_new2'])
- new_ip.save()
- mx, created = MX.objects.get_or_create(priority=settings.PRIORITY, mx=settings.DEFAULT_MX)
- if created:
- mx.save()
- new_name = "-".join([host.hostname.split(".")[0],
- new_ip.ip_addr.split(".")[2]])
- new_name += "." + host.hostname.split(".", 1)[1]
- name = Name(ip=new_ip, name=new_name,
- dns_view='global', only=False)
- name.save()
- name.mxs.add(mx)
- new_name = "-".join([host.hostname.split(".")[0],
- new_inter.hdwr_type])
- new_name += "." + host.hostname.split(".", 1)[1]
- name = Name(ip=new_ip, name=new_name,
- dns_view='global', only=False)
- name.save()
- name.mxs.add(mx)
- name = Name(ip=new_ip, name=host.hostname,
- dns_view='global', only=False)
- name.save()
- name.mxs.add(mx)
- host.save()
- return HttpResponseRedirect('/hostbase/%s/' % host.id)
- else:
- return render_to_response('new.html',
- {'TYPE_CHOICES': Interface.TYPE_CHOICES,
- 'NETGROUP_CHOICES': Host.NETGROUP_CHOICES,
- 'CLASS_CHOICES': Host.CLASS_CHOICES,
- 'SUPPORT_CHOICES': Host.SUPPORT_CHOICES,
- 'WHATAMI_CHOICES': Host.WHATAMI_CHOICES,
- 'logged_in': request.session.get('_auth_user_id', False)},
- context_instance = RequestContext(request))
-
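The comment in new() above suggests replacing the hand-rolled Interface/IP/Name/MX bookkeeping with a "post_save" signal handler plus a generic view. A minimal sketch of the signal half, assuming the Host/Interface/IP/Name/MX models and the settings.PRIORITY / settings.DEFAULT_MX values used above; it attaches to IP rather than Host so the address already exists when the handler fires:

from django.db.models.signals import post_save

def add_default_dns(sender, instance, created, **kwargs):
    """Create the default Name and MX records for a newly saved IP."""
    if not created:
        return
    host = instance.interface.host
    mx, _ = MX.objects.get_or_create(priority=settings.PRIORITY,
                                     mx=settings.DEFAULT_MX)
    name = Name(ip=instance, name=host.hostname, dns_view='global', only=False)
    name.save()
    name.mxs.add(mx)

post_save.connect(add_default_dns, sender=IP)

The hostname-subnet and hostname-hdwr_type aliases built above could be added in the same handler; the sketch only covers the bare hostname record.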
-def copy(request, host_id):
- """Function for creating a new host in hostbase
- Data is validated before committed to the database"""
- if 'sub' in request.GET:
- try:
- Host.objects.get(hostname=request.POST['hostname'].lower())
- return render_to_response('errors.html',
- {'failures': ['%s already exists in hostbase' % request.POST['hostname']],
- 'logged_in': request.session.get('_auth_user_id', False)},
- context_instance = RequestContext(request))
- except:
- pass
- if not validate(request, True):
- if not request.POST['ip_addr_new'] and not request.POST['ip_addr_new2']:
- return render_to_response('errors.html',
- {'failures': ['ip_addr: You must enter an ip address'],
- 'logged_in': request.session.get('_auth_user_id', False)},
- context_instance = RequestContext(request))
- host = Host()
- # this is the stuff that validate() should take care of
- # examine the check boxes for any changes
- host.outbound_smtp = 'outbound_smtp' in request.POST
- for attrib in attribs:
- if attrib in request.POST:
- host.__dict__[attrib] = request.POST[attrib].lower()
- if 'comments' in request.POST:
- host.comments = request.POST['comments']
- if 'expiration_date' in request.POST:
-# ymd = request.POST['expiration_date'].split("-")
-# host.__dict__['expiration_date'] = date(int(ymd[0]), int(ymd[1]), int(ymd[2]))
- host.__dict__['expiration_date'] = date(2000, 1, 1)
- host.status = 'active'
- host.save()
- else:
- return render_to_response('errors.html',
- {'failures': validate(request, True),
- 'logged_in': request.session.get('_auth_user_id', False)},
- context_instance = RequestContext(request))
-
- if request.POST['mac_addr_new']:
- new_inter = Interface(host=host,
- mac_addr = request.POST['mac_addr_new'].lower().replace('-',':'),
- hdwr_type = request.POST['hdwr_type_new'],
- dhcp = 'dhcp_new' in request.POST)
- new_inter.save()
- if request.POST['mac_addr_new'] and request.POST['ip_addr_new']:
- new_ip = IP(interface=new_inter, ip_addr=request.POST['ip_addr_new'])
- new_ip.save()
- mx, created = MX.objects.get_or_create(priority=settings.PRIORITY, mx=settings.DEFAULT_MX)
- if created:
- mx.save()
- new_name = "-".join([host.hostname.split(".")[0],
- new_ip.ip_addr.split(".")[2]])
- new_name += "." + host.hostname.split(".", 1)[1]
- name = Name(ip=new_ip, name=new_name, dns_view='global', only=False)
- name.save()
- name.mxs.add(mx)
- new_name = "-".join([host.hostname.split(".")[0],
- new_inter.hdwr_type])
- new_name += "." + host.hostname.split(".", 1)[1]
- name = Name(ip=new_ip, name=new_name,
- dns_view='global', only=False)
- name.save()
- name.mxs.add(mx)
- name = Name(ip=new_ip, name=host.hostname,
- dns_view='global', only=False)
- name.save()
- name.mxs.add(mx)
- if request.POST['ip_addr_new'] and not request.POST['mac_addr_new']:
- new_inter = Interface(host=host,
- mac_addr="",
- hdwr_type=request.POST['hdwr_type_new'],
- dhcp=False)
- new_inter.save()
- new_ip = IP(interface=new_inter, ip_addr=request.POST['ip_addr_new'])
- new_ip.save()
- mx, created = MX.objects.get_or_create(priority=settings.PRIORITY, mx=settings.DEFAULT_MX)
- if created:
- mx.save()
- new_name = "-".join([host.hostname.split(".")[0],
- new_ip.ip_addr.split(".")[2]])
- new_name += "." + host.hostname.split(".", 1)[1]
- name = Name(ip=new_ip, name=new_name,
- dns_view='global', only=False)
- name.save()
- name.mxs.add(mx)
- new_name = "-".join([host.hostname.split(".")[0],
- new_inter.hdwr_type])
- new_name += "." + host.hostname.split(".", 1)[1]
- name = Name(ip=new_ip, name=new_name,
- dns_view='global', only=False)
- name.save()
- name.mxs.add(mx)
- name = Name(ip=new_ip, name=host.hostname,
- dns_view='global', only=False)
- name.save()
- name.mxs.add(mx)
- if request.POST['mac_addr_new2']:
- new_inter = Interface(host=host,
- mac_addr = request.POST['mac_addr_new2'].lower().replace('-',':'),
- hdwr_type = request.POST['hdwr_type_new2'],
- dhcp = 'dhcp_new2' in request.POST)
- new_inter.save()
- if request.POST['mac_addr_new2'] and request.POST['ip_addr_new2']:
- new_ip = IP(interface=new_inter, ip_addr=request.POST['ip_addr_new2'])
- new_ip.save()
- mx, created = MX.objects.get_or_create(priority=settings.PRIORITY, mx=settings.DEFAULT_MX)
- if created:
- mx.save()
- new_name = "-".join([host.hostname.split(".")[0],
- new_ip.ip_addr.split(".")[2]])
- new_name += "." + host.hostname.split(".", 1)[1]
- name = Name(ip=new_ip, name=new_name,
- dns_view='global', only=False)
- name.save()
- name.mxs.add(mx)
- new_name = "-".join([host.hostname.split(".")[0],
- new_inter.hdwr_type])
- new_name += "." + host.hostname.split(".", 1)[1]
- name = Name(ip=new_ip, name=new_name,
- dns_view='global', only=False)
- name.save()
- name.mxs.add(mx)
- name = Name(ip=new_ip, name=host.hostname,
- dns_view='global', only=False)
- name.save()
- name.mxs.add(mx)
- if request.POST['ip_addr_new2'] and not request.POST['mac_addr_new2']:
- new_inter = Interface(host=host,
- mac_addr="",
- hdwr_type=request.POST['hdwr_type_new2'],
- dhcp=False)
- new_inter.save()
- new_ip = IP(interface=new_inter, ip_addr=request.POST['ip_addr_new2'])
- new_ip.save()
- mx, created = MX.objects.get_or_create(priority=settings.PRIORITY, mx=settings.DEFAULT_MX)
- if created:
- mx.save()
- new_name = "-".join([host.hostname.split(".")[0],
- new_ip.ip_addr.split(".")[2]])
- new_name += "." + host.hostname.split(".", 1)[1]
- name = Name(ip=new_ip, name=new_name,
- dns_view='global', only=False)
- name.save()
- name.mxs.add(mx)
- new_name = "-".join([host.hostname.split(".")[0],
- new_inter.hdwr_type])
- new_name += "." + host.hostname.split(".", 1)[1]
- name = Name(ip=new_ip, name=new_name,
- dns_view='global', only=False)
- name.save()
- name.mxs.add(mx)
- name = Name(ip=new_ip, name=host.hostname,
- dns_view='global', only=False)
- name.save()
- name.mxs.add(mx)
- host.save()
- return HttpResponseRedirect('/hostbase/%s/' % host.id)
- else:
- host = Host.objects.get(id=host_id)
- return render_to_response('copy.html',
- {'host': host,
- 'TYPE_CHOICES': Interface.TYPE_CHOICES,
- 'NETGROUP_CHOICES': Host.NETGROUP_CHOICES,
- 'CLASS_CHOICES': Host.CLASS_CHOICES,
- 'SUPPORT_CHOICES': Host.SUPPORT_CHOICES,
- 'WHATAMI_CHOICES': Host.WHATAMI_CHOICES,
- 'logged_in': request.session.get('_auth_user_id', False)},
- context_instance = RequestContext(request))
-
-# FIXME: delete all of these related objects in a "pre_delete" signal handler (see sketch below)
-#def remove(request, host_id):
-# host = Host.objects.get(id=host_id)
-# if 'sub' in request:
-# for interface in host.interface_set.all():
-# for ip in interface.ip_set.all():
-# for name in ip.name_set.all():
-# name.cname_set.all().delete()
-# ip.name_set.all().delete()
-# interface.ip_set.all().delete()
-# interface.delete()
-# host.delete()
-
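The FIXME above asks for this cleanup to live in a "pre_delete" signal handler rather than a view. A sketch that simply mirrors the commented-out remove() cascade, assuming the models above:

from django.db.models.signals import pre_delete

def cascade_host_delete(sender, instance, **kwargs):
    """Delete the cnames, names, IPs and interfaces attached to a Host."""
    for interface in instance.interface_set.all():
        for ip in interface.ip_set.all():
            for name in ip.name_set.all():
                name.cname_set.all().delete()
            ip.name_set.all().delete()
        interface.ip_set.all().delete()
        interface.delete()

pre_delete.connect(cascade_host_delete, sender=Host)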
-def validate(request, new=False, host_id=None):
- """Function for checking form data"""
- failures = []
- if (request.POST['expiration_date']
- and regex.date.match(request.POST['expiration_date'])):
- try:
- (year, month, day) = request.POST['expiration_date'].split("-")
- date(int(year), int(month), int(day))
- except (ValueError):
- failures.append('expiration_date')
- elif request.POST['expiration_date']:
- failures.append('expiration_date')
-
- if not (request.POST['hostname']
- and regex.host.match(request.POST['hostname'])):
- failures.append('hostname')
-
-## if not regex.printq.match(request.POST['printq']) and request.POST['printq']:
-## failures.append('printq')
-
-## if not regex.user.match(request.POST['primary_user']):
-## failures.append('primary_user')
-
-## if (not regex.user.match(request.POST['administrator'])
-## and request.POST['administrator']):
-## failures.append('administrator')
-
-## if not (request.POST['location']
-## and regex.location.match(request.POST['location'])):
-## failures.append('location')
-
- if new:
- if (not regex.macaddr.match(request.POST['mac_addr_new'])
- and request.POST['mac_addr_new']):
- failures.append('mac_addr (#1)')
- if ((request.POST['mac_addr_new'] or request.POST['ip_addr_new']) and
- not 'hdwr_type_new' in request.REQUEST):
- failures.append('hdwr_type (#1)')
- if ((request.POST['mac_addr_new2'] or request.POST['ip_addr_new2']) and
- not 'hdwr_type_new2' in request.REQUEST):
- failures.append('hdwr_type (#2)')
-
- if (not regex.macaddr.match(request.POST['mac_addr_new2'])
- and request.POST['mac_addr_new2']):
- failures.append('mac_addr (#2)')
-
- if (not regex.ipaddr.match(request.POST['ip_addr_new'])
- and request.POST['ip_addr_new']):
- failures.append('ip_addr (#1)')
-        if (not regex.ipaddr.match(request.POST['ip_addr_new2'])
- and request.POST['ip_addr_new2']):
- failures.append('ip_addr (#2)')
-
- [failures.append('ip_addr (#1)') for number in
- request.POST['ip_addr_new'].split(".")
- if number.isdigit() and int(number) > 255
- and 'ip_addr (#1)' not in failures]
- [failures.append('ip_addr (#2)') for number in
- request.POST['ip_addr_new2'].split(".")
- if number.isdigit() and int(number) > 255
- and 'ip_addr (#2)' not in failures]
-
- elif host_id:
- interfaces = Interface.objects.filter(host=host_id)
- for interface in interfaces:
- if (not regex.macaddr.match(request.POST['mac_addr%d' % interface.id])
- and request.POST['mac_addr%d' % interface.id]):
- failures.append('mac_addr (%s)' % request.POST['mac_addr%d' % interface.id])
- for ip in interface.ip_set.all():
- if not regex.ipaddr.match(request.POST['ip_addr%d' % ip.id]):
- failures.append('ip_addr (%s)' % request.POST['ip_addr%d' % ip.id])
- [failures.append('ip_addr (%s)' % request.POST['ip_addr%d' % ip.id])
- for number in request.POST['ip_addr%d' % ip.id].split(".")
- if (number.isdigit() and int(number) > 255 and
- 'ip_addr (%s)' % request.POST['ip_addr%d' % ip.id] not in failures)]
- if (request.POST['%dip_addr' % interface.id]
- and not regex.ipaddr.match(request.POST['%dip_addr' % interface.id])):
- failures.append('ip_addr (%s)' % request.POST['%dip_addr' % interface.id])
- if (request.POST['mac_addr_new']
- and not regex.macaddr.match(request.POST['mac_addr_new'])):
- failures.append('mac_addr (%s)' % request.POST['mac_addr_new'])
- if (request.POST['ip_addr_new']
- and not regex.ipaddr.match(request.POST['ip_addr_new'])):
- failures.append('ip_addr (%s)' % request.POST['ip_addr_new'])
-
- if not failures:
- return 0
- return failures
-
-def do_log(text, attribute, previous, new):
- if previous != new:
- text += "%-20s%-20s -> %s\n" % (attribute, previous, new)
- return text
-
-## login required stuff
-## uncomment the views below that you would like to restrict access to
-
-## uncomment the lines below this point to restrict access to pages that modify the database
-## anonymous users can still view data in Hostbase
-
-edit = login_required(edit)
-confirm = login_required(confirm)
-dnsedit = login_required(dnsedit)
-new = login_required(new)
-copy = login_required(copy)
-#remove = login_required(remove)
-#zoneedit = login_required(zoneedit)
-#zonenew = login_required(zonenew)
-
-## uncomment the lines below this point to restrict access to all of hostbase
-
-## search = login_required(search)
-## look = login_required(look)
-## dns = login_required(dns)
-## zones = login_required(zones)
-## zoneview = login_required(zoneview)
-
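The module-level view = login_required(view) assignments above keep the access restrictions in one place so they can be commented in and out; the same effect can be written with decorator syntax at the definition site. A sketch of the equivalent form (shown for edit only; the body is unchanged):

from django.contrib.auth.decorators import login_required

@login_required
def edit(request, host_id):
    """Exactly as defined above; only the decoration differs."""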
diff --git a/src/lib/Bcfg2/Server/Hostbase/hostbase/webtemplates/base.html b/src/lib/Bcfg2/Server/Hostbase/hostbase/webtemplates/base.html
deleted file mode 100644
index 1d7c5565b..000000000
--- a/src/lib/Bcfg2/Server/Hostbase/hostbase/webtemplates/base.html
+++ /dev/null
@@ -1,34 +0,0 @@
-<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"
- "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
-<html xmlns="http://www.w3.org/1999/xhtml" xml:lang="en" lang="en">
-<head>
- <title>{% block title %}BCFG2 - Hostbase{% endblock %}</title>
- <link rel="stylesheet" type="text/css" href="{{ MEDIA_URL }}/boxypastel.css" />
- <link rel="stylesheet" type="text/css" href="{{ MEDIA_URL }}/base.css" />
- <!--<script type="text/javascript" src="http://hostbase.mcs.anl.gov/site_media/main.js"> -->
- {% block extra_header_info %}{% endblock %}
-</head>
-
-<body>
- <div id="header">
- <div id="branding">
- <h1>BCFG2</h1>
- </div>
- <div id="user-tools">...Change is Coming...</div>
- </div>
- <div id="sidebar">
- {% block sidebar %}
- <ul class="sidebar">
- </ul>
- {% endblock %}
- </div>
-
- <div id="content-main">
- <div id="container">
- {% block pagebanner %}{% endblock %}
- {% block content %}{% endblock %}
-
- </div>
- </div>
-</body>
-</html>
diff --git a/src/lib/Bcfg2/Server/Hostbase/hostbase/webtemplates/confirm.html b/src/lib/Bcfg2/Server/Hostbase/hostbase/webtemplates/confirm.html
deleted file mode 100644
index ca8b0cc07..000000000
--- a/src/lib/Bcfg2/Server/Hostbase/hostbase/webtemplates/confirm.html
+++ /dev/null
@@ -1,117 +0,0 @@
-{% extends "base.html" %}
-
-{% block pagebanner %}
- <div class="header">
- <h2>Confirm Removal</h2>
- </div>
- <br/>
-{% endblock %}
-
-{% block sidebar %}
-{% include "navbar.tmpl" %}
-{% include "logout.tmpl" %}
-{% endblock %}
-
-{% block content %}
-
-<form name="input" action="confirm.html?sub=true" method="post">
-Are you sure you want to remove these items?
-
-{% if interface %}
-<ul>
-<li> interface: {{ interface.mac_addr }} </li>
-{% endif %}
-
-
-{% if ips %}
-<ul>
-{% for ip in ips %}
-<li> ip: {{ ip.ip_addr }} </li>
-<ul>
-{% for name in names %}
-{% ifequal name.0 ip.id %}
-<li> name: {{ name.1.name }} </li>
-<ul>
-{% endifequal %}
-{% for cname in cnames %}
-{% ifequal cname.0 name.1.id %}
-<li> cname: {{ cname.1.name }} </li>
-{% endifequal %}
-{% endfor %}
-</ul>
-<ul>
-{% for mx in mxs %}
-{% ifequal mx.0 name.1.id %}
-<li> mx: {{ mx.1.priority }} {{ mx.1.mx }} </li>
-{% endifequal %}
-{% endfor %}
-</ul>
-{% endfor %}
-</ul>
-{% endfor %}
-</ul>
-{% endif %}
-
-{% if names and not ips %}
-<ul>
-{% for name in names %}
-<li> name: {{ name.name }} </li>
-<ul>
-{% for cname in cnames %}
-{% ifequal cname.0 name.id %}
-<li> cname: {{ cname.1.cname }} </li>
-{% endifequal %}
-{% endfor %}
-</ul>
-<ul>
-{% for mx in mxs %}
-{% ifequal mx.0 name.id %}
-<li> mx: {{ mx.1.priority }} {{ mx.1.mx }} </li>
-{% endifequal %}
-{% endfor %}
-</ul>
-{% endfor %}
-</ul>
-{% endif %}
-
-{% if cnames and not names %}
-<ul>
-{% for cname in cnames %}
-<li> cname: {{ cname.cname }} </li>
-{% endfor %}
-</ul>
-{% endif %}
-
-{% if mxs and not names %}
-<ul>
-{% for mx in mxs %}
-<li> mx: {{ mx.priority }} {{ mx.mx }} </li>
-{% endfor %}
-</ul>
-{% endif %}
-
-{% if interface %}
-</ul>
-{% endif %}
-
-{% if zone_id %}
-<ul>
-{% ifequal type 'zonemx' %}
-<li> mx: {{ zonemx.priority }} {{ zonemx.mx }} </li>
-{% endifequal %}
-
-{% ifequal type 'nameserver' %}
-<li> nameserver: {{ nameserver.name }} </li>
-{% endifequal %}
-
-{% ifequal type 'address' %}
-<li> address: {{ address.ip_addr }} </li>
-{% endifequal %}
-</ul>
-{% endif %}
-
-<input type="submit" value="confirm">
-<input type="reset" value="cancel" onclick="history.back()">
-</form>
-
-{% endblock %}
diff --git a/src/lib/Bcfg2/Server/Hostbase/hostbase/webtemplates/copy.html b/src/lib/Bcfg2/Server/Hostbase/hostbase/webtemplates/copy.html
deleted file mode 100644
index 400ef58f2..000000000
--- a/src/lib/Bcfg2/Server/Hostbase/hostbase/webtemplates/copy.html
+++ /dev/null
@@ -1,122 +0,0 @@
-{% extends "base.html" %}
-
-{% block pagebanner %}
- <div class="header">
- <h2>new host information</h2>
- </div>
- <br/>
-{% endblock %}
-
-{% block sidebar %}
-<a href="/hostbase/" class="sidebar">search hostbase</a>
-{% include "logout.tmpl" %}
-{% endblock %}
-
-{% block content %}
-
-<form name="hostdata" action="?sub=true" method="post">
-<input type="hidden" name="host">
-<table border="0" width="100%">
- <colgroup>
- <col width="150">
- <col width="*">
- <tr> <td> <b>hostname</b></td>
- <td> <input name="hostname" type="text" value="{{ host.hostname }}" ></td></tr>
- <tr> <td> <b>whatami</b></td>
- <td>
- <select name="whatami">
- {% for choice in WHATAMI_CHOICES %}
- {% ifequal host.whatami choice.0 %}
- <option value="{{ choice.0 }}" selected="selected" >{{ choice.1 }}
- {% else %}
- <option value="{{ choice.0 }}">{{ choice.1 }}
- {% endifequal %}
- {% endfor %}
- </select>
- </td></tr>
- <tr> <td> <b>netgroup</b></td>
- <td>
- <select name="netgroup">
- {% for choice in NETGROUP_CHOICES %}
- {% ifequal host.netgroup choice.0 %}
- <option value="{{ choice.0 }}" selected="selected" >{{ choice.1 }}
- {% else %}
- <option value="{{ choice.0 }}">{{ choice.1 }}
- {% endifequal %}
- {% endfor %}
- </select>
- </td></tr>
- <tr> <td> <b>class</b></td>
- <td>
- <select name="security_class">
- {% for choice in CLASS_CHOICES %}
- {% ifequal host.security_class choice.0 %}
- <option value="{{ choice.0 }}" selected="selected" >{{ choice.1 }}
- {% else %}
- <option value="{{ choice.0 }}">{{ choice.1 }}
- {% endifequal %}
- {% endfor %}
- </select></td></tr>
- <tr> <td> <b>support</b></td>
- <td>
- <select name="support">
- {% for choice in SUPPORT_CHOICES %}
- {% ifequal host.support choice.0 %}
- <option value="{{ choice.0 }}" selected="selected" >{{ choice.1 }}
- {% else %}
- <option value="{{ choice.0 }}">{{ choice.1 }}
- {% endifequal %}
- {% endfor %}
- </select></td></tr>
- <tr> <td> <b>csi</b></td>
- <td> <input name="csi" type="text" value="{{ host.csi }}" ></td></tr>
- <tr> <td> <b>printq</b></td>
- <td> <input name="printq" type="text" value="{{ host.printq }}" ></td></tr>
- <tr> <td> <b>outbound_smtp</b></td>
- <td>
- {% if host.outbound_smtp %}
- <input type="checkbox" name="outbound_smtp" checked="checked" ></td></tr>
- {% else %}
- <input type="checkbox" name="outbound_smtp" ></td></tr>
- {% endif %}
- <tr> <td> <b>primary_user</b></td>
- <td> <input name="primary_user" type="text" size="32" value="{{ host.primary_user }}"> (email address)</td></tr>
- <tr> <td> <b>administrator</b></td>
- <td> <input name="administrator" type="text" size="32" value="{{ host.administrator }}"> (email address)</td></tr>
- <tr> <td> <b>location</b></td>
- <td> <input name="location" type="text" value="{{ host.location }}"></td></tr>
- <tr> <td> <b>expiration_date</b></td>
- <td> <input name="expiration_date" type="text" size="10" value="{{ host.expiration_date }}">YYYY-MM-DD</td></tr>
- <tr> <td><br><b>Interface</b></td><td><br>
- {% for choice in TYPE_CHOICES %}
- <input type="radio" name="hdwr_type_new" value="{{ choice.0 }}" >{{ choice.1 }}
- {% endfor %}
- </td></tr>
- <tr> <td> <b>dhcp</b></td>
- <td>
- <input type="checkbox" name="dhcp_new"></td></tr>
- <tr> <td> <b>mac_addr</b></td>
- <td> <input name="mac_addr_new" type="text"></td></tr>
- <tr> <td> <b>ip_addr</b></td>
- <td> <input name="ip_addr_new" type="text"></td></tr>
- <tr> <td><br><b>Interface</b></td><td><br>
- {% for choice in TYPE_CHOICES %}
- <input type="radio" name="hdwr_type_new2" value="{{ choice.0 }}" >{{ choice.1 }}
- {% endfor %}
- </td></tr>
- <tr> <td> <b>dhcp</b></td>
- <td>
- <input type="checkbox" name="dhcp_new2"></td></tr>
- <tr> <td> <b>mac_addr</b></td>
- <td> <input name="mac_addr_new2" type="text"></td></tr>
- <tr> <td> <b>ip_addr</b></td>
- <td> <input name="ip_addr_new2" type="text"></td></tr>
- <tr> <td> <b>comments</b></td>
- <td> <textarea rows="10" cols="50" name="comments"></textarea></td></tr>
-</table>
-<br>
-<p><input type="submit" value="Submit">
-</form>
-
-{% endblock %}
-
diff --git a/src/lib/Bcfg2/Server/Hostbase/hostbase/webtemplates/dns.html b/src/lib/Bcfg2/Server/Hostbase/hostbase/webtemplates/dns.html
deleted file mode 100644
index da179e5a1..000000000
--- a/src/lib/Bcfg2/Server/Hostbase/hostbase/webtemplates/dns.html
+++ /dev/null
@@ -1,40 +0,0 @@
-{% extends "base.html" %}
-
-{% block pagebanner %}
- <div class="header">
- <h2>dns info for {{ host.hostname }}</h2>
- </div>
- <br/>
-{% endblock %}
-
-{% block sidebar %}
-{% include "navbar.tmpl" %}
-<ul class="sidebar">
- <li><a href="/hostbase/{{ host.id }}/" class="sidebar">host info</a></li>
- <li><a href="/hostbase/{{ host.id }}/edit/" class="sidebar">edit host info</a></li>
- <li><a href="edit/" class="sidebar">edit dns info</a></li>
- <li><a href="/hostbase/{{ host.id }}/logs/" class="sidebar">change logs</a></li>
-</ul>
-{% include "logout.tmpl" %}
-{% endblock %}
-
-{% block content %}
-
-{% for interface in host.interface_set.all %}
- {% for ip in interface.ip_set.all %}
- <ul><li> <b>ip_addr:</b> {{ ip.ip_addr }}</li>
- {% for name in ip.name_set.all %}
- <ul> <li><b>name:</b> {{ name.name }}</li> <ul>
- {% for cname in name.cname_set.all %}
- <li> <b>cname:</b> {{ cname.cname }}</li>
- {% endfor %}
- {% for mx in name.mxs.all %}
- <li> <b>mx:</b> {{ mx.priority }} {{ mx.mx }}</li>
- {% endfor %}
- </ul></ul>
- {% endfor %}
- </ul>
- {% endfor %}
-{% endfor %}
-{% endblock %}
-
diff --git a/src/lib/Bcfg2/Server/Hostbase/hostbase/webtemplates/dnsedit.html b/src/lib/Bcfg2/Server/Hostbase/hostbase/webtemplates/dnsedit.html
deleted file mode 100644
index b1b71ab67..000000000
--- a/src/lib/Bcfg2/Server/Hostbase/hostbase/webtemplates/dnsedit.html
+++ /dev/null
@@ -1,98 +0,0 @@
-{% extends "base.html" %}
-
-{% block pagebanner %}
- <div class="header">
- <h2>dns info for {{ host.hostname }}</h2>
- </div>
- <br/>
-{% endblock %}
-
-{% block sidebar %}
-{% include "navbar.tmpl" %}
-<ul class="sidebar">
- <li><a href="/hostbase/{{ host.id }}/" class="sidebar">host info</a></li>
- <li><a href="/hostbase/{{ host.id }}/edit/" class="sidebar">edit host info</a></li>
- <li><a href="/hostbase/{{ host.id }}/dns/" class="sidebar">see dns info</a></li>
- <li><a href="/hostbase/{{ host.id }}/logs/" class="sidebar">change logs</a></li>
-</ul>
-{% include "logout.tmpl" %}
-{% endblock %}
-
-{% block content %}
-
-<form name="dns" action="?sub=true" method="post">
-<input type="hidden" name="host" value="{{ host.id }}">
-<table border="0" width="100%">
- <colgroup>
- <col width="150">
- <col width="*">
- {% for interface in interfaces %}
- <tr><td><br></td></tr>
- <tr> <td> <b>interface type</b> </td>
- <td> {{ interface.hdwr_type }} </td></tr>
- <tr> <td> <b>mac_addr</b> </td>
- <td> {{ interface.mac_addr }} </td></tr>
- <tr><td><hr></td><td><hr></td></tr>
- {% for ip in info %}
- {% ifequal ip.0.interface interface %}
- <tr> <td> <b>ip_addr</b></td>
- <td>{{ ip.0.ip_addr }}</td></tr>
- {% for name in ip.1 %}
- <tr> <td><b>name(dns)</b></td>
- <td> <input name="name{{ name.id }}" type="text" value="{{ name.name }}">
- <select name="dns_view{{ name.id }}">
- {% for choice in DNS_CHOICES %}
- {% ifequal name.dns_view choice.0 %}
- <option value="{{ choice.0 }}" selected="selected">{{ choice.1 }}
- {% else %}
- <option value="{{ choice.0 }}">{{ choice.1 }}
- {% endifequal %}
- {% endfor %}
- </select>
- <a style="font-size:75%" href="/hostbase/{{ host.id }}/name/{{ name.id }}/confirm">remove</a></td></tr>
- {% for cname in cnames %}
- {% ifequal name cname.name %}
- <tr> <td> <b>cname</b></td>
- <td> <input name="cname{{ cname.id }}" type="text" value="{{ cname.cname }}">
- <a style="font-size:75%" href="/hostbase/{{ host.id }}/cname/{{ cname.id }}/confirm">remove</a></td></tr>
- {% endifequal %}
- {% endfor %}
- <tr> <td> <b>cname</b></td>
- <td> <input name="{{ name.id }}cname" type="text"></td></tr>
- {% for mx in mxs %}
- {% ifequal mx.0 name.id %}
- {% for record in mx.1 %}
- <tr> <td> <b>mx</b></td>
- <td> <input name="priority{{ record.id }}" type="text" size="6" value="{{ record.priority }}">
- <input name="mx{{ record.id }}" type="text" value="{{ record.mx }}">
- <a style="font-size:75%" href="/hostbase/{{ host.id }}/mx/{{ record.id }}/{{ name.id }}/confirm">remove</a></td></tr>
- {% endfor %}
- {% endifequal %}
- {% endfor %}
- <tr> <td> <b>mx</b></td>
- <td> <input name="{{ name.id }}priority" type="text" size="6">
- <input name="{{ name.id }}mx" type="text"></td></tr>
- {% endfor %}
- <tr> <td> <b>name</b></td>
- <td> <input name="{{ ip.0.ip_addr }}name" type="text">
- <select name="{{ ip.0.ip_addr }}dns_view">
- {% for choice in DNS_CHOICES %}
- <option value="{{ choice.0 }}">{{ choice.1 }}
- {% endfor %}
- </select></td></tr>
- <tr> <td> <b>cname</b></td>
- <td> <input name="{{ ip.0.ip_addr }}cname" type="text"></td></tr>
- <tr> <td> <b>mx</b></td>
- <td> <input name="{{ ip.0.ip_addr }}priority" type="text" size="6">
- <input name="{{ ip.0.ip_addr }}mx" type="text"></td></tr>
- <tr><td></td></tr>
- <tr><td><hr></td><td><hr></td></tr>
- {% endifequal %}
- {% endfor %}
- {% endfor %}
- </table>
-
-<p><input type="submit" value="Submit">
-</form>
-
-{% endblock %}
diff --git a/src/lib/Bcfg2/Server/Hostbase/hostbase/webtemplates/edit.html b/src/lib/Bcfg2/Server/Hostbase/hostbase/webtemplates/edit.html
deleted file mode 100644
index 961c9d143..000000000
--- a/src/lib/Bcfg2/Server/Hostbase/hostbase/webtemplates/edit.html
+++ /dev/null
@@ -1,191 +0,0 @@
-{% extends "base.html" %}
-
-{% block pagebanner %}
- <div class="header">
- <h2>{{ host.hostname }}</h2>
- </div>
- <br/>
-{% endblock %}
-
-{% block sidebar %}
-{% include "navbar.tmpl" %}
-<ul class="sidebar">
-<li><a href="/hostbase/{{ host.id }}/" class="sidebar">host info</a></li>
-<li><a href="/hostbase/{{ host.id }}/dns/" class="sidebar">detailed dns info</a></li>
-<li><a href="/hostbase/{{ host.id }}/dns/edit/" class="sidebar">edit dns info</a></li>
-<li><a href="/hostbase/{{ host.id }}/logs/" class="sidebar">change logs</a></li>
-</ul>
-{% include "logout.tmpl" %}
-{% endblock %}
-
-{% block content %}
-
-<script language="JavaScript" type="text/Javascript">
-function toggleAddr(interface_id){
- if(document.getElementById){
- var style = document.getElementById('ipaddr'+interface_id).style;
- style.display = style.display? "":"block";
- }
-}
-function toggleInter(){
- if(document.getElementById){
- var style = document.getElementById('interface').style;
- style.display = style.display? "":"block";
- }
-}
-</script>
-
-<style type=text/css>
-{% for interface in interfaces %}
-div#ipaddr{{ interface.0.id }}{
- display: none;
-}
-{% endfor %}
-div#interface{
- display: none;
-}
-</style>
-
-<form name="hostdata" action="" method="post">
-<fieldset class="module aligned ()">
-<input type="hidden" name="host" value="{{ host.id }}">
- <label for="id_hostname">hostname:</label>
- <input name="hostname" value="{{ host.hostname }}"><br>
- <label for="id_whatami">whatami:</label>
- <select name="whatami">
- {% for choice in host.WHATAMI_CHOICES %}
- {% ifequal host.whatami choice.0 %}
- <option value="{{ choice.0 }}" selected="selected">{{ choice.1 }}
- {% else %}
- <option value="{{ choice.0 }}">{{ choice.1 }}
- {% endifequal %}
- {% endfor %}
- </select><br>
- <label for="id_netgroup">netgroup:</label>
- <select name="netgroup">
- {% for choice in host.NETGROUP_CHOICES %}
- {% ifequal host.netgroup choice.0 %}
- <option value="{{ choice.0 }}" selected="selected">{{ choice.1 }}
- {% else %}
- <option value="{{ choice.0 }}">{{ choice.1 }}
- {% endifequal %}
- {% endfor %}
- </select><br>
- <label for="id_security_class">class:</label>
- <select name="security_class">
- {% for choice in host.CLASS_CHOICES %}
- {% ifequal host.security_class choice.0 %}
- <option value="{{ choice.0 }}" selected="selected">{{ choice.1 }}
- {% else %}
- <option value="{{ choice.0 }}">{{ choice.1 }}
- {% endifequal %}
- {% endfor %}
- </select><br>
- <label for="id_support">support:</label>
- <select name="support">
- {% for choice in host.SUPPORT_CHOICES %}
- {% ifequal host.support choice.0 %}
- <option value="{{ choice.0 }}" selected="selected">{{ choice.1 }}
- {% else %}
- <option value="{{ choice.0 }}">{{ choice.1 }}
- {% endifequal %}
- {% endfor %}
- </select><br>
- <label for="id_csi">csi:</label>
- <input name="csi" type="text" value="{{ host.csi }}"><br>
- <label for="id_printq">printq:</label>
- <input name="printq" type="text" value="{{ host.printq }}"><br>
- <label for="id_outbound_smtp">outbound_smtp:</label>
- {% if host.outbound_smtp %}
- <input type="checkbox" checked="checked" name="outbound_smtp">
- {% else %}
- <input type="checkbox" name="outbound_smtp">
- {% endif %}<br>
- <label for="id_primary_user">primary_user:</label>
- <input name="primary_user" type="text" size="32" value="{{ host.primary_user }}"><br>
- <label for="id_administrator">administrator:</label>
- <input name="administrator" type="text" size="32" value="{{ host.administrator }}"><br>
- <label for="id_location">location:</label>
- <input name="location" type="text" value="{{ host.location }}"><br>
- <label for="id_expiration_date">expiration_date:</label>
- <input name="expiration_date" type="text" value="{{ host.expiration_date }}"> YYYY-MM-DD<br>
- {% for interface in interfaces %}
- <label for="id_interface">Interface:</label>
- <select name="hdwr_type{{ interface.0.id }}">
- {% for choice in interface.0.TYPE_CHOICES %}
- {% ifequal interface.0.hdwr_type choice.0 %}
- <option value="{{ choice.0 }}" selected="selected">{{ choice.1 }}
- {% else %}
- <option value="{{ choice.0 }}">{{ choice.1 }}
- {% endifequal %}
- {% endfor %}
- </select><br>
- <label for="id_dhcp">dhcp:</label>
- {% if interface.0.dhcp %}
- <input type="checkbox" checked="checked" name="dhcp{{ interface.0.id }}">
- {% else %}
- <input type="checkbox" name="dhcp{{ interface.0.id }}">
- {% endif %}<br>
- <label for="id_mac_addr">mac_addr:</label>
- <input name="mac_addr{{ interface.0.id }}" type="text" value="{{ interface.0.mac_addr }}">
- <a style="font-size:75%" href="/hostbase/{{ host.id }}/interface/{{ interface.0.id }}/confirm">remove</a><br>
- {% for ip in interface.1 %}
- <label for="id_ip_addr">ip_addr:</label>
- <input name="ip_addr{{ ip.id }}" type="text" value="{{ ip.ip_addr }}">
- <a style="font-size:75%" href="/hostbase/{{ host.id }}/ip/{{ ip.id }}/confirm">remove</a><br>
- {% endfor %}
-
-<!-- Section for adding a new IP address to an existing interface -->
-<!-- By default, section is hidden -->
- <div id=ipaddr{{ interface.0.id }}>
- <label for="id_ip_addr">ip_addr:</label>
- <input name="{{ interface.0.id }}ip_addr" type="text"><br>
- </div>
- <a style="font-size:75%" href=# onclick="toggleAddr({{ interface.0.id }})">Add a New IP Address</a><br>
- {% endfor %}
-<!-- End section for new IP address -->
-
-<!-- Section for add an entirely new interface to a host -->
-<!-- By default, section is hidden -->
- <div id=interface>
- <label for="id_interface">Interface:</label>
- <select name="hdwr_type_new">
- {% for choice in TYPE_CHOICES %}
- <option value="{{ choice.0 }}">{{ choice.1 }}
- {% endfor %}
- </select><br>
- <label for="id_dhcp">dhcp:</label>
- {% if host.dhcp %}
- <input type="checkbox" checked="checked" name="dhcp_new">
- {% else %}
- <input type="checkbox" name="dhcp_new">
- {% endif %}<br>
- <label for="id_mac_addr">mac_addr:</label>
-    <input name="mac_addr_new" type="text"><br>
- <label for="id_ip_addr">ip_addr:</label>
-    <input name="ip_addr_new" type="text"><br>
-</div>
-<a style="font-size:75%" href=# onclick="toggleInter()">Add a New Interface</a><br>
-<!-- End new interface section -->
-
-
-<label for="id_comments">comments:</label>
-<textarea rows="10" cols="50" name="comments">{{ host.comments }}</textarea><br>
-<a style="font-size:75%" href="/hostbase/{{ host.id }}/dns/edit">edit detailed DNS information for this host</a>
-<br>
-this host is
-<select name="status">
-{% for choice in host.STATUS_CHOICES %}
-{% ifequal host.status choice.0 %}
-<option value="{{ choice.0 }}" selected="selected">{{ choice.1 }}
-{% else %}
-<option value="{{ choice.0 }}">{{ choice.1 }}
-{% endifequal %}
-{% endfor %}
-</select><br>
-last update on {{ host.last }}<br>
-<input type="submit" value="submit">
-<input type="reset" value="cancel" onclick="history.back()">
-</form>
-
-{% endblock %}
diff --git a/src/lib/Bcfg2/Server/Hostbase/hostbase/webtemplates/errors.html b/src/lib/Bcfg2/Server/Hostbase/hostbase/webtemplates/errors.html
deleted file mode 100644
index e5429b86c..000000000
--- a/src/lib/Bcfg2/Server/Hostbase/hostbase/webtemplates/errors.html
+++ /dev/null
@@ -1,31 +0,0 @@
-{% extends "base.html" %}
-
-{% block pagebanner %}
- <div class="header">
- <h2>Search Results</h2>
- </div>
- <br/>
-{% endblock %}
-
-{% block sidebar %}
-{% include "navbar.tmpl" %}
-{% include "logout.tmpl" %}
-{% endblock %}
-
-{% block content %}
-
-{% if failures %}
-There were errors in the following fields<br><br>
-{% for failure in failures %}
-
-<font color="#FF0000">{{ failure }}</font><br>
-{% comment %}
-{{ failure.1|join:", " }}
-{% endcomment %}
-
-{% endfor %}
-{% endif %}
-<br>
-Press the back button on your browser and edit those field(s)
-
-{% endblock %}
diff --git a/src/lib/Bcfg2/Server/Hostbase/hostbase/webtemplates/host.html b/src/lib/Bcfg2/Server/Hostbase/hostbase/webtemplates/host.html
deleted file mode 100644
index d6b8873bc..000000000
--- a/src/lib/Bcfg2/Server/Hostbase/hostbase/webtemplates/host.html
+++ /dev/null
@@ -1,80 +0,0 @@
-{% extends "base.html" %}
-
-{% block pagebanner %}
- <div class="header">
- <h2>{{ host.hostname }}</h2>
- </div>
- <br/>
-{% endblock %}
-
-{% block sidebar %}
-{% include "navbar.tmpl" %}
-<ul class="sidebar">
- <li><a href="dns/" class="sidebar">detailed dns info</a></li>
- <li><a href="edit/" class="sidebar">edit host info</a></li>
- <li><a href="dns/edit/" class="sidebar">edit dns info</a></li>
- <li><a href="logs/" class="sidebar">change logs</a></li>
-</ul>
-{% include "logout.tmpl" %}
-{% endblock %}
-
-{% block content %}
-
-<table border="0" width="100%">
- <colgroup>
- <col width="150">
- <col width="*">
- <tr> <td> <b>hostname</b></td>
- <td> {{ host.hostname }}</td></tr>
- <tr> <td> <b>whatami</b></td>
- <td> {{ host.whatami }}</td></tr>
- <tr> <td> <b>netgroup</b></td>
- <td> {{ host.netgroup }}</td></tr>
- <tr> <td> <b>class</b></td>
- <td> {{ host.security_class }}</td></tr>
- <tr> <td> <b>support</b></td>
- <td> {{ host.support }}</td></tr>
- <tr> <td> <b>csi</b></td>
- <td> {{ host.csi }}</td></tr>
- <tr> <td> <b>printq</b></td>
- <td> {{ host.printq }}</td></tr>
- <tr> <td> <b>outbound_smtp</b></td>
- {% if host.outbound_smtp %}
- <td> y </td></tr>
- {% else %}
- <td> n </td></tr>
- {% endif %}
- <tr> <td> <b>primary_user</b></td>
- <td> {{ host.primary_user }}</td></tr>
- <tr> <td> <b>administrator</b></td>
- <td> {{ host.administrator }}</td></tr>
- <tr> <td> <b>location</b></td>
- <td> {{ host.location }}</td></tr>
- <tr> <td> <b>expiration_date</b></td>
- <td> {{ host.expiration_date }}</td></tr>
-  {% for interface in host.interface_set.all %}
- <tr> <td><br><b>Interface</b></td>
- {% ifnotequal interface.0.hdwr_type 'no' %}
- <td><br>{{ interface.0.hdwr_type }}</td></tr>
- {% endifnotequal %}
- {% if interface.0.dhcp %}
- <tr> <td> <b>mac_addr</b></td>
- <td> {{ interface.0.mac_addr }}</b></td></tr>
- {% endif %}
- {% for ip in interface.1 %}
- <tr> <td> <b>ip_addr</b></td>
- <td> {{ ip.ip_addr }}</td></tr>
- {% endfor %}
- {% endfor %}
- <tr> <td valign="top"> <b>comments</b></td>
- <td>
- {{ host.comments|linebreaksbr }}<br>
- </td></tr>
-
-</table>
-<a style="font-size:75%" href="/hostbase/{{ host.id }}/dns/">see detailed DNS information for this host</a>
-<br><br>
-this host is {{ host.status }}<br>
-last update on {{ host.last }}<br>
-
-{% endblock %}
diff --git a/src/lib/Bcfg2/Server/Hostbase/hostbase/webtemplates/hostbase/host_confirm_delete.html b/src/lib/Bcfg2/Server/Hostbase/hostbase/webtemplates/hostbase/host_confirm_delete.html
deleted file mode 100644
index b5d794b50..000000000
--- a/src/lib/Bcfg2/Server/Hostbase/hostbase/webtemplates/hostbase/host_confirm_delete.html
+++ /dev/null
@@ -1,89 +0,0 @@
-{% extends "base.html" %}
-
-{% block pagebanner %}
- <div class="header">
- <h2>Are you sure you want to remove {{ object.hostname }}?</h2>
- </div>
- <br/>
-{% endblock %}
-
-{% block sidebar %}
-{% include "navbar.tmpl" %}
-<ul class="sidebar">
- <li><a href="dns/" class="sidebar">detailed dns info</a></li>
- <li><a href="edit/" class="sidebar">edit host info</a></li>
- <li><a href="dns/edit/" class="sidebar">edit dns info</a></li>
-</ul>
-{% include "logout.tmpl" %}
-{% endblock %}
-
-{% block content %}
-
-<table border="0" width="100%">
- <colgroup>
- <col width="150">
- <col width="*">
- <tr> <td> <b>hostname</b></td>
- <td> {{ object.hostname }}</td></tr>
- <tr> <td> <b>whatami</b></td>
- <td> {{ object.whatami }}</td></tr>
- <tr> <td> <b>netgroup</b></td>
- <td> {{ object.netgroup }}</td></tr>
- <tr> <td> <b>class</b></td>
- <td> {{ object.security_class }}</td></tr>
- <tr> <td> <b>support</b></td>
- <td> {{ object.support }}</td></tr>
- <tr> <td> <b>csi</b></td>
- <td> {{ object.csi }}</td></tr>
- <tr> <td> <b>printq</b></td>
- <td> {{ object.printq }}</td></tr>
- <tr> <td> <b>dhcp</b></td>
- {% if host.dhcp %}
- <td> y </td></tr>
- {% else %}
- <td> n </td></tr>
- {% endif %}
- <tr> <td> <b>outbound_smtp</b></td>
- {% if host.outbound_smtp %}
- <td> y </td></tr>
- {% else %}
- <td> n </td></tr>
- {% endif %}
- <tr> <td> <b>primary_user</b></td>
- <td> {{ object.primary_user }}</td></tr>
- <tr> <td> <b>administrator</b></td>
- <td> {{ object.administrator }}</td></tr>
- <tr> <td> <b>location</b></td>
- <td> {{ object.location }}</td></tr>
- <tr> <td> <b>expiration_date</b></td>
- <td> {{ object.expiration_date }}</td></tr>
- {% for interface in interfaces %}
- <tr> <td><br><b>Interface</b></td>
- {% ifnotequal interface.0.hdwr_type 'no' %}
- <td><br>{{ interface.0.hdwr_type }}</td></tr>
- {% endifnotequal %}
- <tr> <td> <b>mac_addr</b></td>
- <td> {{ interface.0.mac_addr }}</b></td></tr>
- {% for ip in interface.1 %}
- <tr> <td> <b>ip_addr</b></td>
- <td> {{ ip.ip_addr }}</td></tr>
- {% endfor %}
- {% endfor %}
- <tr> <td valign="top"> <b>comments</b></td>
- <td>
- {{ object.comments|linebreaksbr }}<br>
- </td></tr>
-
-</table>
-<a style="font-size:75%" href="/hostbase/{{ object.id }}/dns/">see detailed DNS information for this host</a>
-<br><br>
-this host is {{ object.status }}<br>
-last update on {{ object.last }}<br>
-
-<form name="input" action="remove.html?sub=true" method="post">
-<input type="submit" value="remove">
-<input type="reset" value="cancel" onclick="history.back()">
-</form>
-
-{% endblock %}
-
diff --git a/src/lib/Bcfg2/Server/Hostbase/hostbase/webtemplates/hostbase/log_detail.html b/src/lib/Bcfg2/Server/Hostbase/hostbase/webtemplates/hostbase/log_detail.html
deleted file mode 100644
index aa9679cbd..000000000
--- a/src/lib/Bcfg2/Server/Hostbase/hostbase/webtemplates/hostbase/log_detail.html
+++ /dev/null
@@ -1,23 +0,0 @@
-{% extends "base.html" %}
-
-{% block pagebanner %}
- <div class="header">
- <h2>Change Logs for {{ object.hostname }}</h2>
- </div>
- <br/>
-{% endblock %}
-
-{% block sidebar %}
-{% include "navbar.tmpl" %}
-{% include "logout.tmpl" %}
-{% endblock %}
-
-{% block content %}
-
-<ul>
-<li><b>Hostname:</b>{{ object.hostname }}</li>
-<li><b>Date:</b>{{ object.date }}</li>
-<li><b>Log:</b>{{ object.log }}</li>
-</ul>
-
-{% endblock %}
diff --git a/src/lib/Bcfg2/Server/Hostbase/hostbase/webtemplates/index.html b/src/lib/Bcfg2/Server/Hostbase/hostbase/webtemplates/index.html
deleted file mode 100644
index 92258b648..000000000
--- a/src/lib/Bcfg2/Server/Hostbase/hostbase/webtemplates/index.html
+++ /dev/null
@@ -1,16 +0,0 @@
-{% extends "base.html" %}
-{% block pagebanner %}
- <div class="header">
- <h2>Welcome to Hostbase!</h2>
-    <p>Hostbase is a web-based management tool for Bcfg2 hosts</p>
- </div>
- <br/>
-{% endblock %}
-{% block sidebar %}
-<a href="/login/" class="sidebar">login to hostbase</a><br>
-<a href="/hostbase/" class="sidebar">search for hosts</a><br>
-<a href="hostbase/zones/" class="sidebar">zone file information</a>
-{% endblock %}
-{% block content %}
-{% endblock %}
-
diff --git a/src/lib/Bcfg2/Server/Hostbase/hostbase/webtemplates/login.html b/src/lib/Bcfg2/Server/Hostbase/hostbase/webtemplates/login.html
deleted file mode 100644
index ec24a0fc0..000000000
--- a/src/lib/Bcfg2/Server/Hostbase/hostbase/webtemplates/login.html
+++ /dev/null
@@ -1,37 +0,0 @@
-{% extends "base.html" %}
-{% block pagebanner %}
- <div class="header">
- <h2>Login to Hostbase!</h2>
-    <p>You must log in to manage hosts</p>
- </div>
- <br/>
-{% endblock %}
-{% block sidebar %}
-<a href="/hostbase/" class="sidebar">search for hosts</a><br>
-<a href="/hostbase/new" class="sidebar">add a new host</a><br>
-<a href="hostbase/zones/" class="sidebar">zone file information</a>
-{% endblock %}
-{% block content %}
- {% if form.has_errors %}
- {{ form.username.errors|join:", " }}
- <p>Login Failed.</p>
- {% endif %}
- {% if user.is_authenticated %}
- <p>Welcome, {{ user.username }}. Thanks for logging in.</p>
- {% else %}
- <p>Welcome, user. Please log in.</p>
- <form name="input" action="." method="post">
- <input name="username" type="text">
- <br />
- <input name="password" type="password">
- <br />
- <input type="submit" value="Login">
- {% if next %}
- <input type="hidden" name="next" value="{{ next }}" />
- {% else %}
- <input type="hidden" name="next" value="/hostbase/" />
- {% endif %}
-
- </form>
- {% endif %}
-{% endblock %}
diff --git a/src/lib/Bcfg2/Server/Hostbase/hostbase/webtemplates/logout.html b/src/lib/Bcfg2/Server/Hostbase/hostbase/webtemplates/logout.html
deleted file mode 100644
index 994f631a8..000000000
--- a/src/lib/Bcfg2/Server/Hostbase/hostbase/webtemplates/logout.html
+++ /dev/null
@@ -1,13 +0,0 @@
-{% extends "base.html" %}
-{% block pagebanner %}
- <div class="header">
- <h2>You are logged out of Hostbase!</h2>
- </div>
- <br/>
-{% endblock %}
-{% block sidebar %}
-<a href="/login/" class="sidebar">Login to Hostbase</a>
-{% endblock %}
-{% block content %}
-{% endblock %}
-
diff --git a/src/lib/Bcfg2/Server/Hostbase/hostbase/webtemplates/logout.tmpl b/src/lib/Bcfg2/Server/Hostbase/hostbase/webtemplates/logout.tmpl
deleted file mode 100644
index e71e90e76..000000000
--- a/src/lib/Bcfg2/Server/Hostbase/hostbase/webtemplates/logout.tmpl
+++ /dev/null
@@ -1,6 +0,0 @@
-<p>
-{% if logged_in %}
-<a href="/logout/" class="sidebar">logout</a>
-{% else %}
-<a href="/login/" class="sidebar">login</a>
-{% endif %}
diff --git a/src/lib/Bcfg2/Server/Hostbase/hostbase/webtemplates/logviewer.html b/src/lib/Bcfg2/Server/Hostbase/hostbase/webtemplates/logviewer.html
deleted file mode 100644
index 806ccd63d..000000000
--- a/src/lib/Bcfg2/Server/Hostbase/hostbase/webtemplates/logviewer.html
+++ /dev/null
@@ -1,27 +0,0 @@
-{% extends "base.html" %}
-
-{% block pagebanner %}
- <div class="header">
- <h2>Change Logs for {{ hostname }}</h2>
- </div>
- <br/>
-{% endblock %}
-
-{% block sidebar %}
-{% include "navbar.tmpl" %}
-{% include "logout.tmpl" %}
-{% endblock %}
-
-{% block content %}
-
-{% if host.get_logs %}
-<ul>
-{% for log in host.get_logs %}
-<li><a href="{{ log.id }}/">{{ log.date }}</a></li>
-{% endfor %}
-</ul>
-{% else %}
-There are no logs for this host<br>
-{% endif %}
-
-{% endblock %}
diff --git a/src/lib/Bcfg2/Server/Hostbase/hostbase/webtemplates/navbar.tmpl b/src/lib/Bcfg2/Server/Hostbase/hostbase/webtemplates/navbar.tmpl
deleted file mode 100644
index 877d427d0..000000000
--- a/src/lib/Bcfg2/Server/Hostbase/hostbase/webtemplates/navbar.tmpl
+++ /dev/null
@@ -1,5 +0,0 @@
-<a href="/hostbase/" class="sidebar">host search</a><br>
-<a href="/hostbase/new" class="sidebar">add a new host</a><br>
-<a href="/hostbase/zones" class="sidebar">zone file information</a><br>
-<a href="/hostbase/zones/new" class="sidebar">add a zone</a><br>
-
diff --git a/src/lib/Bcfg2/Server/Hostbase/hostbase/webtemplates/new.html b/src/lib/Bcfg2/Server/Hostbase/hostbase/webtemplates/new.html
deleted file mode 100644
index 2dcd6271f..000000000
--- a/src/lib/Bcfg2/Server/Hostbase/hostbase/webtemplates/new.html
+++ /dev/null
@@ -1,102 +0,0 @@
-{% extends "base.html" %}
-
-{% block pagebanner %}
- <div class="header">
- <h2>new host information</h2>
- </div>
- <br/>
-{% endblock %}
-
-{% block sidebar %}
-<a href="/hostbase/" class="sidebar">search hostbase</a>
-{% include "logout.tmpl" %}
-{% endblock %}
-
-{% block content %}
-
-<form name="hostdata" action="?sub=true" method="post">
-<input type="hidden" name="host">
-<table border="0" width="100%">
- <colgroup>
- <col width="150">
- <col width="*">
- <tr> <td> <b>hostname</b></td>
- <td> <input name="hostname" type="text" ></td></tr>
- <tr> <td> <b>whatami</b></td>
- <td>
- <select name="whatami">
- {% for choice in WHATAMI_CHOICES %}
- <option value="{{ choice.0 }}">{{ choice.1 }}
- {% endfor %}
- </select>
- </td></tr>
- <tr> <td> <b>netgroup</b></td>
- <td>
- <select name="netgroup">
- {% for choice in NETGROUP_CHOICES %}
- <option value="{{ choice.0 }}">{{ choice.1 }}
- {% endfor %}
- </select>
- </td></tr>
- <tr> <td> <b>class</b></td>
- <td>
- <select name="security_class">
- {% for choice in CLASS_CHOICES %}
- <option value="{{ choice.0 }}">{{ choice.1 }}
- {% endfor %}
- </select></td></tr>
- <tr> <td> <b>support</b></td>
- <td>
- <select name="support">
- {% for choice in SUPPORT_CHOICES %}
- <option value="{{ choice.0 }}">{{ choice.1 }}
- {% endfor %}
- </select></td></tr>
- <tr> <td> <b>csi</b></td>
- <td> <input name="csi" type="text" ></td></tr>
- <tr> <td> <b>printq</b></td>
- <td> <input name="printq" type="text" ></td></tr>
- <tr> <td> <b>outbound_smtp</b></td>
- <td>
- <input type="checkbox" name="outbound_smtp"></td></tr>
- <tr> <td> <b>primary_user</b></td>
- <td> <input name="primary_user" type="text" size="32" > (email address)</td></tr>
- <tr> <td> <b>administrator</b></td>
- <td> <input name="administrator" type="text" size="32" > (email address)</td></tr>
- <tr> <td> <b>location</b></td>
- <td> <input name="location" type="text" ></td></tr>
- <tr> <td> <b>expiration_date</b></td>
- <td> <input name="expiration_date" type="text" size="10" >YYYY-MM-DD</td></tr>
- <tr> <td><br><b>Interface</b></td><td><br>
- {% for choice in TYPE_CHOICES %}
- <input type="radio" name="hdwr_type_new" value="{{ choice.0 }}" >{{ choice.1 }}
- {% endfor %}
- </td></tr>
- <tr> <td> <b>dhcp</b></td>
- <td>
- <input type="checkbox" name="dhcp_new"></td></tr>
- <tr> <td> <b>mac_addr</b></td>
- <td> <input name="mac_addr_new" type="text"></td></tr>
- <tr> <td> <b>ip_addr</b></td>
- <td> <input name="ip_addr_new" type="text"></td></tr>
- <tr> <td><br><b>Interface</b></td><td><br>
- {% for choice in TYPE_CHOICES %}
- <input type="radio" name="hdwr_type_new2" value="{{ choice.0 }}" >{{ choice.1 }}
- {% endfor %}
- </td></tr>
- <tr> <td> <b>dhcp</b></td>
- <td>
- <input type="checkbox" name="dhcp_new2"></td></tr>
- <tr> <td> <b>mac_addr</b></td>
- <td> <input name="mac_addr_new2" type="text"></td></tr>
- <tr> <td> <b>ip_addr</b></td>
- <td> <input name="ip_addr_new2" type="text"></td></tr>
- <tr> <td> <b>comments</b></td>
- <td> <textarea rows="10" cols="50" name="comments"></textarea></td></tr>
-</table>
-<br>
-<p><input type="submit" value="Submit">
-</form>
-
-{% endblock %}
-
diff --git a/src/lib/Bcfg2/Server/Hostbase/hostbase/webtemplates/remove.html b/src/lib/Bcfg2/Server/Hostbase/hostbase/webtemplates/remove.html
deleted file mode 100644
index 4329200dd..000000000
--- a/src/lib/Bcfg2/Server/Hostbase/hostbase/webtemplates/remove.html
+++ /dev/null
@@ -1,89 +0,0 @@
-{% extends "base.html" %}
-
-{% block pagebanner %}
- <div class="header">
- <h2>Are you sure you want to remove {{ host.hostname }}?</h2>
- </div>
- <br/>
-{% endblock %}
-
-{% block sidebar %}
-{% include "navbar.tmpl" %}
-<ul class="sidebar">
- <li><a href="dns/" class="sidebar">detailed dns info</a></li>
- <li><a href="edit/" class="sidebar">edit host info</a></li>
- <li><a href="dns/edit/" class="sidebar">edit dns info</a></li>
-</ul>
-{% include "logout.tmpl" %}
-{% endblock %}
-
-{% block content %}
-
-<table border="0" width="100%">
- <colgroup>
- <col width="150">
- <col width="*">
- <tr> <td> <b>hostname</b></td>
- <td> {{ host.hostname }}</td></tr>
- <tr> <td> <b>whatami</b></td>
- <td> {{ host.whatami }}</td></tr>
- <tr> <td> <b>netgroup</b></td>
- <td> {{ host.netgroup }}</td></tr>
- <tr> <td> <b>class</b></td>
- <td> {{ host.security_class }}</td></tr>
- <tr> <td> <b>support</b></td>
- <td> {{ host.support }}</td></tr>
- <tr> <td> <b>csi</b></td>
- <td> {{ host.csi }}</td></tr>
- <tr> <td> <b>printq</b></td>
- <td> {{ host.printq }}</td></tr>
- <tr> <td> <b>dhcp</b></td>
- {% if host.dhcp %}
- <td> y </td></tr>
- {% else %}
- <td> n </td></tr>
- {% endif %}
- <tr> <td> <b>outbound_smtp</b></td>
- {% if host.outbound_smtp %}
- <td> y </td></tr>
- {% else %}
- <td> n </td></tr>
- {% endif %}
- <tr> <td> <b>primary_user</b></td>
- <td> {{ host.primary_user }}</td></tr>
- <tr> <td> <b>administrator</b></td>
- <td> {{ host.administrator }}</td></tr>
- <tr> <td> <b>location</b></td>
- <td> {{ host.location }}</td></tr>
- <tr> <td> <b>expiration_date</b></td>
- <td> {{ host.expiration_date }}</td></tr>
- {% for interface in interfaces %}
- <tr> <td><br><b>Interface</b></td>
- {% ifnotequal interface.0.hdwr_type 'no' %}
- <td><br>{{ interface.0.hdwr_type }}</td></tr>
- {% endifnotequal %}
- <tr> <td> <b>mac_addr</b></td>
- <td> {{ interface.0.mac_addr }}</b></td></tr>
- {% for ip in interface.1 %}
- <tr> <td> <b>ip_addr</b></td>
- <td> {{ ip.ip_addr }}</td></tr>
- {% endfor %}
- {% endfor %}
- <tr> <td valign="top"> <b>comments</b></td>
- <td>
- {{ host.comments|linebreaksbr }}<br>
- </td></tr>
-
-</table>
-<a style="font-size:75%" href="/hostbase/{{ host.id }}/dns/">see detailed DNS information for this host</a>
-<br><br>
-this host is {{ host.status }}<br>
-last update on {{ host.last }}<br>
-
-<form name="input" action="remove.html?sub=true" method="post">
-<input type="submit" value="remove">
-<input type="reset" value="cancel" onclick="history.back()">
-</form>
-
-{% endblock %}
-
diff --git a/src/lib/Bcfg2/Server/Hostbase/hostbase/webtemplates/results.html b/src/lib/Bcfg2/Server/Hostbase/hostbase/webtemplates/results.html
deleted file mode 100644
index 45b22058d..000000000
--- a/src/lib/Bcfg2/Server/Hostbase/hostbase/webtemplates/results.html
+++ /dev/null
@@ -1,45 +0,0 @@
-{% extends "base.html" %}
-
-{% block pagebanner %}
- <div class="header">
- <h2>Search Results</h2>
- </div>
- <br/>
-{% endblock %}
-
-{% block sidebar %}
-{% include "navbar.tmpl" %}
-{% include "logout.tmpl" %}
-{% endblock %}
-
-{% block content %}
-
-{% if hosts %}
-<table border="0" width="100%">
- <colgroup>
- <col width="200">
- <col width="75">
- <col width="50">
- <col width="50">
- <col width="50">
- <col width="*">
- <tr> <td><b>hostname</b></td>
- <td> <b>status</b> </td>
- </tr>
- {% for host in hosts %}
- <tr> <td>{{ host.0 }}</td>
- <td> {{ host.2 }} </td>
- <td> <a href="{{ host.1 }}">view</a> </td>
- <td> <a href="{{ host.1 }}/edit">edit</a> </td>
- <td> <a href="{{ host.1 }}/copy">copy</a> </td>
- <td> <a href="{{ host.1 }}/logs">logs</a> </td>
-<!-- <td> <a href="{{ host.1 }}/remove">remove</a> </td> -->
- </tr>
- {% endfor %}
-</table>
-{% else %}
-No hosts matched your query<br>
-Click the back button on your browser to edit your search
-{% endif %}
-
-{% endblock %}
diff --git a/src/lib/Bcfg2/Server/Hostbase/hostbase/webtemplates/search.html b/src/lib/Bcfg2/Server/Hostbase/hostbase/webtemplates/search.html
deleted file mode 100644
index 409d418fe..000000000
--- a/src/lib/Bcfg2/Server/Hostbase/hostbase/webtemplates/search.html
+++ /dev/null
@@ -1,57 +0,0 @@
-{% extends "base.html" %}
-
-{% block pagebanner %}
- <div class="header">
- <h2>Welcome to Hostbase!</h2>
- <p>search for hosts using one or more of the fields below
- </div>
- <br/>
-{% endblock %}
-
-{% block sidebar %}
-<a href="/hostbase/new" class="sidebar">add a new host</a><br>
-<a href="/hostbase/zones" class="sidebar">zone file information</a><br>
-{% include "logout.tmpl" %}
-{% endblock %}
-
-{% block content %}
-{% comment %}
- ...or go to <a href="hostinfo">this</a>
- page to enter hostinfo-like queries<br><br>
-{% endcomment %}
-
-<form name="input" action="?sub=true" method="post">
- <fieldset class="module aligned ()">
- <label for="hostname">hostname:</label><input name="hostname" type="text" ><br>
- <label for="netgroup">netgroup:</label><input name="netgroup" type="text" ><br>
- <label for="security_class">class:</label><input name="security_class" type="text" ><br>
- <label for="support">support:</label><input name="support" type="text" ><br>
- <label for="csi">csi:</label><input name="csi" type="text" ><br>
- <label for="printq">printq:</label><input name="printq" type="text" ><br>
- <label for="outbound_smtp">outbound_smtp:</label>
- {% for choice in yesno %}
- <input type="radio" name="outbound_smtp" value="{{ choice.0 }}" >{{ choice.1 }}
- {% endfor %}<br>
- <label for="primary_user">primary_user:</label><input name="primary_user" type="text" ><br>
- <label for="administrator">administrator:</label><input name="administrator" type="text" ><br>
- <label for="location">location:</label><input name="location" type="text" ><br>
- <label for="expiration_date">expiration_date:</label><input name="expiration_date" type="text" ><br>
- <br><label for="Interface">Interface:</label>
- {% for choice in TYPE_CHOICES %}
- <input type="radio" name="hdwr_type" value="{{ choice.0 }}" >{{ choice.1 }}
- {% endfor %}<br>
- <label for="dhcp">dhcp:</label>
- {% for choice in yesno %}
- <input type="radio" name="dhcp" value="{{ choice.0 }}" >{{ choice.1 }}
- {% endfor %}<br>
- <label for="mac_addr">mac_addr:</label><input name="mac_addr" type="text" ><br>
- <label for="ip_addr">ip_addr:</label><input name="ip_addr" type="text" ><br>
- <label for="dns_view">dns_viewer:</label>
- {% for choice in DNS_CHOICES %}
- <input type="radio" name="dns_view" value="{{ choice.0 }}" >{{ choice.1 }}
- {% endfor %}<br>
- <label for="mx">mx:</label><input name="mx" type="text" ><br>
-<p>
-<input type="submit" value="Search">
-</form>
-{% endblock %}
diff --git a/src/lib/Bcfg2/Server/Hostbase/hostbase/webtemplates/zoneedit.html b/src/lib/Bcfg2/Server/Hostbase/hostbase/webtemplates/zoneedit.html
deleted file mode 100644
index ee355ee87..000000000
--- a/src/lib/Bcfg2/Server/Hostbase/hostbase/webtemplates/zoneedit.html
+++ /dev/null
@@ -1,81 +0,0 @@
-{% extends "base.html" %}
-
-{% block pagebanner %}
- <div class="header">
- <h2>Zones</h2>
- <p>Edit information for {{ zone }}
- </div>
- <br/>
-{% endblock %}
-
-{% block sidebar %}
-{% include "navbar.tmpl" %}
-<ul>
-<li><a href="/hostbase/zones/{{ zone_id }}/" class="sidebar">view zone</a><br>
-</li>
-</ul>
-{% include "logout.tmpl" %}
-{% endblock %}
-
-{% block content %}
-
-<script language="JavaScript" type="text/Javascript">
-function toggleField(fieldname){
- if(document.getElementById){
- var style = document.getElementById(fieldname).style;
- style.display = style.display? "":"block";
- }
-}
-</script>
-
-<style type=text/css>
-div#nameserver{
- display: none;
-}
-div#mx{
- display: none;
-}
-div#address{
- display: none;
-}
-</style>
-
-<form name="zonedata" action="" method="post">
- <fieldset class="module aligned ()">
-<label for="id_zone">zone:</label></td> <td>{{ form.zone }}<br>
-<label for="id_admin">admin:</label></td> <td>{{ form.admin }}<br>
-<label for="id_primary_master">primary_master:</label></td> <td>{{ form.primary_master }}<br>
-<label for="id_expire">expire:</label></td> <td>{{ form.expire }}<br>
-<label for="id_retry">retry:</label></td> <td>{{ form.retry }}<br>
-<label for="id_refresh">refresh:</label></td> <td>{{ form.refresh }}<br>
-<label for="id_ttl">ttl:</label></td> <td>{{ form.ttl }}<br>
-{% for ns in nsforms %}
-<label for="id_name">nameserver:</label></td> <td>{{ ns.name }}<br>
-{% endfor %}
-</table>
-<div id=nameserver>
- <label for="id_name">nameserver:</label></td> <td>{{ nsadd.name }}<br>
- <label for="id_name">nameserver:</label></td> <td>{{ nsadd.name }}<br>
-</div>
-<a style="font-size:75%" href=# onclick="toggleField('nameserver')">Add NS records</a><br>
-{% for mx in mxforms %}
-<label for="id_mx">mx:</label></td> <td>{{ mx.priority }} {{ mx.mx }}<br>
-{% endfor %}
-<div id=mx>
- <label for="id_mx">mx:</label></td> <td>{{ mxadd.priority }} {{ mxadd.mx }}<br>
- <label for="id_mx">mx:</label></td> <td>{{ mxadd.priority }} {{ mxadd.mx }}<br>
-</div>
-<a style="font-size:75%" href=# onclick="toggleField('mx')">Add MX records</a><br>
-{% for a in aforms %}
-<label for="id_address">ip address:</label></td> <td>{{ a.ip_addr }}<br>
-{% endfor %}
-<div id=address>
- <label for="id_address">ip address:</label></td> <td>{{ addadd.ip_addr }}<br>
- <label for="id_address">ip address:</label></td> <td>{{ addadd.ip_addr }}<br>
-</div>
-<a style="font-size:75%" href=# onclick="toggleField('address')">Add A records</a><br>
-<label for="id_aux">aux:</label></td> <td>{{ form.aux }}<br>
-<p><input type="submit" value="Submit">
-</form>
-
-{% endblock %}
diff --git a/src/lib/Bcfg2/Server/Hostbase/hostbase/webtemplates/zonenew.html b/src/lib/Bcfg2/Server/Hostbase/hostbase/webtemplates/zonenew.html
deleted file mode 100644
index b59fa9e3c..000000000
--- a/src/lib/Bcfg2/Server/Hostbase/hostbase/webtemplates/zonenew.html
+++ /dev/null
@@ -1,43 +0,0 @@
-{% extends "base.html" %}
-
-{% block pagebanner %}
- <div class="header">
- <h2>Zones</h2>
- <p>Enter information for a new zone to be generated by Hostbase
- </div>
- <br/>
-{% endblock %}
-
-{% block sidebar %}
-{% include "navbar.tmpl" %}
-{% include "logout.tmpl" %}
-{% endblock %}
-
-{% block content %}
-<form name="zonedata" action="" method="post">
- <fieldset class="module aligned ()">
- {{ form.as_p}}
-<!--
- <label for="id_zone">zone:</label>{{ form.zone }}<br>
- <label for="id_admin">admin:</label>{{ form.admin }}<br>
- <label for="id_primary_master">primary_master:</label>{{ form.primary_master }}<br>
- <label for="id_expire">expire:</label>{{ form.expire }}<br>
- <label for="id_retry">retry:</label>{{ form.retry }}<br>
- <label for="id_refresh">refresh:</label>{{ form.refresh }}<br>
- <label for="id_ttl">ttl:</label>{{ form.ttl }}<br>
- <label for="id_name">nameserver:</label>{{ nsform.name }}<br>
- <label for="id_name">nameserver:</label>{{ nsform.name }}<br>
- <label for="id_name">nameserver:</label>{{ nsform.name }}<br>
- <label for="id_name">nameserver:</label>{{ nsform.name }}<br>
- <label for="id_mx">mx:</label>{{ mxform.priority }} {{ mxform.mx }}<br>
- <label for="id_mx">mx:</label>{{ mxform.priority }} {{ mxform.mx }}<br>
- <label for="id_mx">ip address:</label>{{ aform.ip_addr }}<br>
- <label for="id_mx">ip address:</label>{{ aform.ip_addr }}<br>
- <label for="id_aux">aux:
-(information not generated from Hostbase)</label>{{ form.aux }}<br>
---!>
- <p><input type="submit" value="Submit">
- </fieldset>
-</form>
-{% endblock %}
-
diff --git a/src/lib/Bcfg2/Server/Hostbase/hostbase/webtemplates/zones.html b/src/lib/Bcfg2/Server/Hostbase/hostbase/webtemplates/zones.html
deleted file mode 100644
index c773e7922..000000000
--- a/src/lib/Bcfg2/Server/Hostbase/hostbase/webtemplates/zones.html
+++ /dev/null
@@ -1,37 +0,0 @@
-{% extends "base.html" %}
-
-{% block pagebanner %}
- <div class="header">
- <h2>Zones</h2>
- <p>Hostbase generates DNS zone files for the following zones.
- </div>
- <br/>
-{% endblock %}
-
-{% block sidebar %}
-{% include "navbar.tmpl" %}
-{% include "logout.tmpl" %}
-{% endblock %}
-
-{% block content %}
-{% if zone_list %}
-<table border="0" width="100%">
- <colgroup>
- <col width="200">
- <col width="75">
- <col width="50">
- <col width="*">
- <tr> <td><b>zone</b></td>
- </tr>
- {% for zone in zone_list|dictsort:"zone" %}
- <tr> <td> {{ zone.zone }}</td>
- <td> <a href="{{ zone.id }}">view</a> </td>
- <td> <a href="{{ zone.id }}/edit">edit</a> </td>
- </tr>
- {% endfor %}
-</table>
-{% else %}
-There is no zone data currently in the database<br>
-{% endif %}
-{% endblock %}
-
diff --git a/src/lib/Bcfg2/Server/Hostbase/hostbase/webtemplates/zoneview.html b/src/lib/Bcfg2/Server/Hostbase/hostbase/webtemplates/zoneview.html
deleted file mode 100644
index fa12e3ec5..000000000
--- a/src/lib/Bcfg2/Server/Hostbase/hostbase/webtemplates/zoneview.html
+++ /dev/null
@@ -1,71 +0,0 @@
-{% extends "base.html" %}
-
-{% block pagebanner %}
- <div class="header">
- <h2>Zones</h2>
- <p>Hostbase generates DNS zone files for the following zones.
- </div>
- <br/>
-{% endblock %}
-
-{% block sidebar %}
-{% include "navbar.tmpl" %}
-<ul class="sidebar">
-<li><a href="/hostbase/zones/{{ zone.id }}/edit/" class="sidebar">edit zone</a><br>
-</li>
-</ul>
-{% include "logout.tmpl" %}
-{% endblock %}
-
-{% block content %}
-<table border="0" width="100%">
- <colgroup>
- <col width="200">
- <col width="*">
- <tr> <td> <b>zone</b></td>
- <td> {{ zone.zone }}</td></tr>
- <tr> <td> <b>serial</b></td>
- <td> {{ zone.serial }}</td></tr>
- <tr> <td> <b>admin</b></td>
- <td> {{ zone.admin }}</td></tr>
- <tr> <td> <b>primary_master</b></td>
- <td> {{ zone.primary_master }}</td></tr>
- <tr> <td> <b>expire</b></td>
- <td> {{ zone.expire }}</td></tr>
- <tr> <td> <b>retry</b></td>
- <td> {{ zone.retry }}</td></tr>
- <tr> <td> <b>refresh</b></td>
- <td> {{ zone.refresh }}</td></tr>
- <tr> <td> <b>ttl</b></td>
- <td> {{ zone.ttl }}</td></tr>
-
- <tr><td valign="top"> <b>nameservers</b></td>
- <td>
- {% for nameserver in zone.nameservers.all %}
- {{ nameserver.name }}<br>
- {% endfor %}
- </td></tr>
- <tr><td valign="top"> <b>mxs</b></td>
- <td>
- {% for mx in zone.mxs.all %}
- {{ mx.priority }} {{ mx.mx }}<br>
- {% endfor %}
- </td></tr>
- {% if addresses %}
- <tr><td valign="top"> <b>A records</b></td>
- <td>
- {% for address in sof.addresses.all %}
- {{ address.ip_addr }}<br>
- {% endfor %}
- </td></tr>
- {% endif %}
-
- <tr> <td valign="top"> <b>aux</b></td>
- <td>
- {{ zone.aux|linebreaksbr }}
- </td></tr>
-
-</table>
-<br><br>
-{% endblock %}
-
diff --git a/src/lib/Bcfg2/Server/Hostbase/ldapauth.py b/src/lib/Bcfg2/Server/Hostbase/ldapauth.py
deleted file mode 100644
index fc2ca1bf1..000000000
--- a/src/lib/Bcfg2/Server/Hostbase/ldapauth.py
+++ /dev/null
@@ -1,179 +0,0 @@
-"""
-Checks with LDAP (ActiveDirectory) to see if the current user is an LDAP(AD)
-user, and returns a subset of the user's profile that is needed by Argonne/CIS
-to set user level privileges in Django
-"""
-
-import os
-import ldap
-
-
-class LDAPAUTHError(Exception):
- """LDAPAUTHError is raised when somehting goes boom."""
- pass
-
-
-class ldapauth(object):
- group_test = False
- check_member_of = os.environ['LDAP_CHECK_MBR_OF_GRP']
- securitylevel = 0
- distinguishedName = None
- sAMAccountName = None
- telephoneNumber = None
- title = None
- memberOf = None
- department = None # this will be a list
- mail = None
- extensionAttribute1 = None # badgenumber
- badge_no = None
-
- def __init__(self, login, passwd):
- """get username (if using ldap as auth the
- apache env var REMOTE_USER should be used)
- from username get user profile from AD/LDAP
- """
- #p = self.user_profile(login,passwd)
- d = self.user_dn(login) # success, distname
- print(d[1])
- if d[0] == 'success':
- pass
- p = self.user_bind(d[1], passwd)
- if p[0] == 'success':
- #parse results
- parsed = self.parse_results(p[2])
- print(self.department)
- self.group_test = self.member_of()
- securitylevel = self.security_level()
- print("ACCESS LEVEL: " + str(securitylevel))
- else:
- raise LDAPAUTHError(p[2])
- else:
- raise LDAPAUTHError(p[2])
-
- def user_profile(self, login, passwd=None):
- """NOT USED RIGHT NOW"""
- ldap_login = "CN=%s" % login
- svc_acct = os.environ['LDAP_SVC_ACCT_NAME']
- svc_pass = os.environ['LDAP_SVC_ACCT_PASS']
- #svc_acct = 'CN=%s,DC=anl,DC=gov' % login
- #svc_pass = passwd
-
- search_pth = os.environ['LDAP_SEARCH_PTH']
-
- try:
- conn = ldap.initialize(os.environ['LDAP_URI'])
- conn.bind(svc_acct, svc_pass, ldap.AUTH_SIMPLE)
- result_id = conn.search(search_pth,
- ldap.SCOPE_SUBTREE,
- ldap_login,
- None)
- result_type, result_data = conn.result(result_id, 0)
- return ('success', 'User profile found', result_data,)
- except ldap.LDAPError:
- e = sys.exc_info()[1]
- #connection failed
- return ('error', 'LDAP connect failed', e,)
-
- def user_bind(self, distinguishedName, passwd):
- """Binds to LDAP Server"""
- search_pth = os.environ['LDAP_SEARCH_PTH']
- try:
- conn = ldap.initialize(os.environ['LDAP_URI'])
- conn.bind(distinguishedName, passwd, ldap.AUTH_SIMPLE)
- cn = distinguishedName.split(",")
- result_id = conn.search(search_pth,
- ldap.SCOPE_SUBTREE,
- cn[0],
- None)
- result_type, result_data = conn.result(result_id, 0)
- return ('success', 'User profile found', result_data,)
- except ldap.LDAPError:
- e = sys.exc_info()[1]
- #connection failed
- return ('error', 'LDAP connect failed', e,)
-
- def user_dn(self, cn):
- """Uses Service Account to get distinguishedName"""
- ldap_login = "CN=%s" % cn
- svc_acct = os.environ['LDAP_SVC_ACCT_NAME']
- svc_pass = os.environ['LDAP_SVC_ACCT_PASS']
- search_pth = os.environ['LDAP_SEARCH_PTH']
-
- try:
- conn = ldap.initialize(os.environ['LDAP_URI'])
- conn.bind(svc_acct, svc_pass, ldap.AUTH_SIMPLE)
- result_id = conn.search(search_pth,
- ldap.SCOPE_SUBTREE,
- ldap_login,
- None)
- result_type, result_data = conn.result(result_id, 0)
- raw_obj = result_data[0][1]
- distinguishedName = raw_obj['distinguishedName']
- return ('success', distinguishedName[0],)
- except ldap.LDAPError:
- e = sys.exc_info()[1]
- #connection failed
- return ('error', 'LDAP connect failed', e,)
-
- def parse_results(self, user_obj):
- """Clean up the huge ugly object handed to us in the LDAP query"""
- #user_obj is a list formatted like this:
- #[('LDAP_DN',{user_dict},),]
- try:
- raw_obj = user_obj[0][1]
- self.memberOf = raw_obj['memberOf']
- self.sAMAccountName = raw_obj['sAMAccountName'][0]
- self.distinguishedName = raw_obj['distinguishedName'][0]
- self.telephoneNumber = raw_obj['telephoneNumber'][0]
- self.title = raw_obj['title'][0]
- self.department = raw_obj['department'][0]
- self.mail = raw_obj['mail'][0]
- self.badge_no = raw_obj['extensionAttribute1'][0]
- self.email = raw_obj['extensionAttribute2'][0]
- display_name = raw_obj['displayName'][0].split(",")
- self.name_f = raw_obj['givenName'][0]
- self.name_l = display_name[0]
- self.is_staff = False
- self.is_superuser = False
-
- return
- except KeyError:
- e = sys.exc_info()[1]
- raise LDAPAUTHError("Portions of the LDAP User profile not present")
-
- def member_of(self):
- """See if this user is in our group that is allowed to login"""
- m = [g for g in self.memberOf if g == self.check_member_of]
- if len(m) == 1:
- return True
- else:
- return False
-
- def security_level(self):
- level = self.securitylevel
-
- user = os.environ['LDAP_GROUP_USER']
- m = [g for g in self.memberOf if g == user]
- if len(m) == 1:
- if level < 1:
- level = 1
-
- cspr = os.environ['LDAP_GROUP_SECURITY_LOW']
- m = [g for g in self.memberOf if g == cspr]
- if len(m) == 1:
- if level < 2:
- level = 2
-
- cspo = os.environ['LDAP_GROUP_SECURITY_HIGH']
- m = [g for g in self.memberOf if g == cspo]
- if len(m) == 1:
- if level < 3:
- level = 3
-
- admin = os.environ['LDAP_GROUP_ADMIN']
- m = [g for g in self.memberOf if g == admin]
- if len(m) == 1:
- if level < 4:
- level = 4
-
- return level
diff --git a/src/lib/Bcfg2/Server/Hostbase/manage.py b/src/lib/Bcfg2/Server/Hostbase/manage.py
deleted file mode 100755
index 5e78ea979..000000000
--- a/src/lib/Bcfg2/Server/Hostbase/manage.py
+++ /dev/null
@@ -1,11 +0,0 @@
-#!/usr/bin/env python
-from django.core.management import execute_manager
-try:
- import settings # Assumed to be in the same directory.
-except ImportError:
- import sys
- sys.stderr.write("Error: Can't find the file 'settings.py' in the directory containing %r. It appears you've customized things.\nYou'll have to run django-admin.py, passing it your settings module.\n(If the file settings.py does indeed exist, it's causing an ImportError somehow.)\n" % __file__)
- sys.exit(1)
-
-if __name__ == "__main__":
- execute_manager(settings)
diff --git a/src/lib/Bcfg2/Server/Hostbase/media/base.css b/src/lib/Bcfg2/Server/Hostbase/media/base.css
deleted file mode 100644
index ddbf02165..000000000
--- a/src/lib/Bcfg2/Server/Hostbase/media/base.css
+++ /dev/null
@@ -1,5 +0,0 @@
-
-/* Import other styles */
-@import url('global.css');
-@import url('layout.css');
-@import url('boxypastel.css');
diff --git a/src/lib/Bcfg2/Server/Hostbase/media/boxypastel.css b/src/lib/Bcfg2/Server/Hostbase/media/boxypastel.css
deleted file mode 100644
index 7ae0684ef..000000000
--- a/src/lib/Bcfg2/Server/Hostbase/media/boxypastel.css
+++ /dev/null
@@ -1,179 +0,0 @@
-body {
- background-color: #fff;
- color: #000;
- font: 12px 'Lucida Grande', Arial, Helvetica, sans-serif;
- margin-left:0px;
- margin-right:100px;
-}
-/* links */
-a:link {
- color: #00f;
- text-decoration: none;
-}
-a:visited {
- color: #00a;
- text-decoration: none;
-}
-a:hover {
- color: #00a;
- text-decoration: underline;
-}
-a:active {
- color: #00a;
- text-decoration: underline;
-}
-/* divs*/
-div.bad {
- border: 1px solid #660000;
- background: #FF6A6A;
- margin: 10px 0;
- padding: 8px;
- text-align: left;
- margin-left:50px;
- margin-right:50px;
-}
-div.modified {
- border: 1px solid #CC9900;
- background: #FFEC8B;
- margin: 10px 0;
- padding: 8px;
- text-align: left;
- margin-left:50px;
- margin-right:50px;
-}
-div.clean {
- border: 1px solid #006600;
- background: #9AFF9A;
- margin: 10px 0;
- padding: 8px;
- text-align: left;
- margin-left:50px;
- margin-right:50px;
-}
-div.extra {
- border: 1px solid #006600;
- background: #6699CC;
- margin: 10px 0;
- padding: 8px;
- text-align: left;
- margin-left:50px;
- margin-right:50px;
-}
-div.warning {
- border: 1px
- solid #CC3300;
- background: #FF9933;
- margin: 10px 0;
- padding: 8px;
- text-align: left;
- margin-left:50px;
- margin-right:50px;
-}
-div.all-warning {
- border: 1px solid #DD5544;
- background: #FFD9A2;
- margin: 10px 0;
- padding: 8px;
- text-align: left;
- margin-left:50px;
- margin-right:50px;
-}
-div.down {
- border: 1px
- solid #999;
- background-color: #DDD;
- margin: 10px 0;
- padding: 8px;
- text-align: left;
- margin-left:50px;
- margin-right:50px;
-}
-div.items{
- display: none;
-}
-div.nodebox {
- border: 1px solid #c7cfd5;
- background: #f1f5f9;
- margin: 20px 0;
- padding: 8px 8px 16px 8px;
- text-align: left;
- position:relative;
-}
-div.header {
- background-color: #DDD;
- padding: 8px;
- text-indent:50px;
- position:relative;
-}
-
-/*Spans*/
-.nodename {
- font-style: italic;
-}
-.nodelisttitle {
- font-size: 14px;
-}
-
-h2{
- font-size: 16px;
- color: #000;
-}
-
-ul.plain {
- list-style-type:none;
- text-align: left;
-}
-
-.notebox {
- position: absolute;
- top: 0px;
- right: 0px;
- padding: 1px;
- text-indent:0px;
- border: 1px solid #FFF;
- background: #999;
- color: #FFF;
-}
-
-.configbox {
- position: absolute;
- bottom: 0px;
- right: 0px;
- padding: 1px;
- text-indent:0px;
- border: 1px solid #999;
- background: #FFF;
- color: #999;
-}
-
-p.indented{
- text-indent: 50px
-}
-
-/*
- Sortable tables */
-table.sortable a.sortheader {
- background-color:#dfd;
- font-weight: bold;
- text-decoration: none;
- display: block;
-}
-table.sortable {
- padding: 2px 4px 2px 4px;
- border: 1px solid #000000;
- border-spacing: 0px
-}
-td.sortable{
- padding: 2px 8px 2px 8px;
-}
-
-th.sortable{
- background-color:#F3DD91;
- border: 1px solid #FFFFFF;
-}
-tr.tablelist {
- background-color:#EDF3FE;
-}
-tr.tablelist-alt{
- background-color:#FFFFFF;
-}
diff --git a/src/lib/Bcfg2/Server/Hostbase/media/global.css b/src/lib/Bcfg2/Server/Hostbase/media/global.css
deleted file mode 100644
index 73451e1bc..000000000
--- a/src/lib/Bcfg2/Server/Hostbase/media/global.css
+++ /dev/null
@@ -1,8 +0,0 @@
-body {
- margin:0;
- padding:0;
- font-size:12px;
- font-family:"Lucida Grande","Bitstream Vera Sans",Verdana,Arial,sans-serif;
- color:#000;
- background:#fff;
- }
diff --git a/src/lib/Bcfg2/Server/Hostbase/media/layout.css b/src/lib/Bcfg2/Server/Hostbase/media/layout.css
deleted file mode 100644
index 9085cc220..000000000
--- a/src/lib/Bcfg2/Server/Hostbase/media/layout.css
+++ /dev/null
@@ -1,62 +0,0 @@
-/* Page Structure */
-#container { position:absolute; top: 3em; margin-left:1em; margin-right:2em; padding:0; margin-top:1.5em; min-width:
- 650px; }
-#header { width:100%; }
-#content-main { float:left; }
-
-/* HEADER */
-#header {
-background:#000;
-color:#ffc;
-position:absolute;
-}
-#header a:link, #header a:visited { color:white; }
-#header a:hover { text-decoration:underline; }
-#branding h1 { padding:0 10px; font-size:18px; margin:8px 0; font-weight:normal; color:#f4f379; }
-#branding h2 { padding:0 10px; font-size:14px; margin:-8px 0 8px 0; font-weight:normal; color:#ffc; }
-#user-tools { position:absolute; top:0; right:0; padding:1.2em 10px; font-size:11px; text-align:right; }
-
-/*SIDEBAR*/
-#sidebar {
- float:left;
- position: relative;
- width: auto;
- height: 100%;
- margin-top: 3em;
- padding-right: 1.5em;
- padding-left: 1.5em;
- padding-top: 1em;
- padding-bottom:3em;
- background: #000;
- color:ffc;
-}
-
-a.sidebar:link {color: #fff;}
-a.sidebar:active {color: #fff;}
-a.sidebar:visited {color: #fff;}
-a.sidebar:hover {color: #fff;}
-
-ul.sidebar {
- color: #ffc;
- text-decoration: none;
- list-style-type: none;
- text-indent: -1em;
-}
-ul.sidebar-level2 {
- text-indent: -2em;
- list-style-type: none;
- font-size: 11px;
-}
-
-/* ALIGNED FIELDSETS */
-.aligned label { display:block; padding:0 1em 3px 0; float:left; width:8em; }
-.aligned label.inline { display:inline; float:none; }
-.colMS .aligned .vLargeTextField, .colMS .aligned .vXMLLargeTextField { width:350px; }
-form .aligned p, form .aligned ul { margin-left:7em; padding-left:30px; }
-form .aligned table p { margin-left:0; padding-left:0; }
-form .aligned p.help { padding-left:38px; }
-.aligned .vCheckboxLabel { float:none !important; display:inline; padding-left:4px; }
-.colM .aligned .vLargeTextField, colM .aligned .vXMLLargeTextField { width:610px; }
-.checkbox-row p.help { margin-left:0; padding-left:0 !important; }
-
-
diff --git a/src/lib/Bcfg2/Server/Hostbase/nisauth.py b/src/lib/Bcfg2/Server/Hostbase/nisauth.py
deleted file mode 100644
index ae4c6c021..000000000
--- a/src/lib/Bcfg2/Server/Hostbase/nisauth.py
+++ /dev/null
@@ -1,40 +0,0 @@
-"""Checks with NIS to see if the current user is in the support group"""
-import os
-import crypt, nis
-from Bcfg2.Server.Hostbase.settings import AUTHORIZED_GROUP
-
-
-class NISAUTHError(Exception):
- """NISAUTHError is raised when somehting goes boom."""
- pass
-
-class nisauth(object):
- group_test = False
-# check_member_of = os.environ['LDAP_CHECK_MBR_OF_GRP']
- samAcctName = None
- distinguishedName = None
- sAMAccountName = None
- telephoneNumber = None
- title = None
- memberOf = None
- department = None #this will be a list
- mail = None
- extensionAttribute1 = None #badgenumber
- badge_no = None
- uid = None
-
- def __init__(self,login,passwd=None):
- """get user profile from NIS"""
- try:
- p = nis.match(login, 'passwd.byname').split(":")
- except:
- raise NISAUTHError('username')
- # check user password using crypt and 2 character salt from passwd file
- if p[1] == crypt.crypt(passwd, p[1][:2]):
- # check to see if user is in valid support groups
- # will have to include these groups in a settings file eventually
- if not login in nis.match(AUTHORIZED_GROUP, 'group.byname').split(':')[-1].split(',') and p[3] != nis.match(AUTHORIZED_GROUP, 'group.byname').split(':')[2]:
- raise NISAUTHError('group')
- self.uid = p[2]
- else:
- raise NISAUTHError('password')
diff --git a/src/lib/Bcfg2/Server/Hostbase/regex.py b/src/lib/Bcfg2/Server/Hostbase/regex.py
deleted file mode 100644
index 41cc0f6f0..000000000
--- a/src/lib/Bcfg2/Server/Hostbase/regex.py
+++ /dev/null
@@ -1,6 +0,0 @@
-import re
-
-date = re.compile('^[0-9]{4}-[0-9]{2}-[0-9]{2}$')
-host = re.compile('^[a-z0-9-_]+(\.[a-z0-9-_]+)+$')
-macaddr = re.compile('^[0-9abcdefABCDEF]{2}(:[0-9abcdefABCDEF]{2}){5}$|virtual')
-ipaddr = re.compile('^[0-9]{1,3}(\.[0-9]{1,3}){3}$')
diff --git a/src/lib/Bcfg2/Server/Hostbase/settings.py b/src/lib/Bcfg2/Server/Hostbase/settings.py
deleted file mode 100644
index 7660e1bdc..000000000
--- a/src/lib/Bcfg2/Server/Hostbase/settings.py
+++ /dev/null
@@ -1,143 +0,0 @@
-import os.path
-# Compatibility import
-from Bcfg2.Compat import ConfigParser
-
-PROJECT_ROOT = os.path.abspath(os.path.dirname(__file__))
-
-c = ConfigParser.ConfigParser()
-#This needs to be configurable one day somehow
-c.read(['./bcfg2.conf'])
-
-defaults = {'database_engine':'sqlite3',
- 'database_name':'./dev.db',
- 'database_user':'',
- 'database_password':'',
- 'database_host':'',
- 'database_port':3306,
- 'default_mx':'localhost',
- 'priority':10,
- 'authorized_group':'admins',
- }
-
-if c.has_section('hostbase'):
- options = dict(c.items('hostbase'))
-else:
- options = defaults
-
-# Django settings for Hostbase project.
-DEBUG = True
-TEMPLATE_DEBUG = DEBUG
-ADMINS = (
- ('Root', 'root'),
-)
-MANAGERS = ADMINS
-
-# 'postgresql', 'mysql', 'sqlite3' or 'ado_mssql'.
-DATABASE_ENGINE = options['database_engine']
-# Or path to database file if using sqlite3.
-DATABASE_NAME = options['database_name']
-# Not used with sqlite3.
-DATABASE_USER = options['database_user']
-# Not used with sqlite3.
-DATABASE_PASSWORD = options['database_password']
-# Set to empty string for localhost. Not used with sqlite3.
-DATABASE_HOST = options['database_host']
-# Set to empty string for default. Not used with sqlite3.
-DATABASE_PORT = int(options['database_port'])
-# Local time zone for this installation. All choices can be found here:
-# http://docs.djangoproject.com/en/dev/ref/settings/#time-zone
-try:
- TIME_ZONE = c.get('statistics', 'time_zone')
-except:
- TIME_ZONE = None
-
-# enter the default MX record machines will get in Hostbase
-# this setting may move elsewhere eventually
-DEFAULT_MX = options['default_mx']
-PRIORITY = int(options['priority'])
-
-SESSION_EXPIRE_AT_BROWSER_CLOSE = True
-
-# Uncomment a backend below if you would like to use it for authentication
-AUTHENTICATION_BACKENDS = ('django.contrib.auth.backends.ModelBackend',
- 'Bcfg2.Server.Hostbase.backends.NISBackend',
- #'Bcfg2.Server.Hostbase.backends.LDAPBacken',
- )
-# enter an NIS group name you'd like to give access to edit hostbase records
-AUTHORIZED_GROUP = options['authorized_group']
-
-#create login url area:
-import django.contrib.auth
-django.contrib.auth.LOGIN_URL = '/login'
-# Absolute path to the directory that holds media.
-# Example: "/home/media/media.lawrence.com/"
-MEDIA_ROOT = os.path.join(PROJECT_ROOT, 'media')
-# Just for development
-SERVE_MEDIA = DEBUG
-
-# Language code for this installation. All choices can be found here:
-# http://www.w3.org/TR/REC-html40/struct/dirlang.html#langcodes
-# http://blogs.law.harvard.edu/tech/stories/storyReader$15
-LANGUAGE_CODE = 'en-us'
-SITE_ID = 1
-# URL that handles the media served from MEDIA_ROOT.
-# Example: "http://media.lawrence.com"
-MEDIA_URL = '/site_media/'
-# URL prefix for admin media -- CSS, JavaScript and images. Make sure to use a
-# trailing slash.
-# Examples: "http://foo.com/media/", "/media/".
-ADMIN_MEDIA_PREFIX = '/media/'
-# Make this unique, and don't share it with anybody.
-SECRET_KEY = '*%=fv=yh9zur&gvt4&*d#84o(cy^-*$ox-v1e9%32pzf2*qu#s'
-# List of callables that know how to import templates from various sources.
-TEMPLATE_LOADERS = (
- 'django.template.loaders.filesystem.load_template_source',
- 'django.template.loaders.app_directories.load_template_source',
-# 'django.template.loaders.eggs.load_template_source',
-)
-
-TEMPLATE_CONTEXT_PROCESSORS = (
- "django.core.context_processors.auth",
- "django.core.context_processors.debug",
- "django.core.context_processors.i18n",
- "django.core.context_processors.request",
- "django.core.context_processors.media",
-# Django development version.
-# "django.core.context_processors.csrf",
-)
-
-
-MIDDLEWARE_CLASSES = (
- 'django.middleware.common.CommonMiddleware',
- 'django.contrib.sessions.middleware.SessionMiddleware',
- 'django.middleware.locale.LocaleMiddleware',
- 'django.contrib.auth.middleware.AuthenticationMiddleware',
- 'django.middleware.doc.XViewMiddleware',
-)
-
-ROOT_URLCONF = 'Bcfg2.Server.Hostbase.urls'
-
-TEMPLATE_DIRS = (
- # Put strings here, like "/home/html/django_templates".
- # Always use forward slashes, even on Windows.
- '/usr/lib/python2.3/site-packages/Bcfg2/Server/Hostbase/hostbase/webtemplates',
- '/usr/lib/python2.4/site-packages/Bcfg2/Server/Hostbase/hostbase/webtemplates',
- '/usr/lib/python2.3/site-packages/Bcfg2/Server/Hostbase/templates',
- '/usr/lib/python2.4/site-packages/Bcfg2/Server/Hostbase/templates',
- '/usr/share/bcfg2/Hostbase/templates',
- os.path.join(PROJECT_ROOT, 'templates'),
- os.path.join(PROJECT_ROOT, 'hostbase/webtemplates'),
-)
-
-INSTALLED_APPS = (
- 'django.contrib.admin',
- 'django.contrib.admindocs',
- 'django.contrib.auth',
- 'django.contrib.contenttypes',
- 'django.contrib.sessions',
- 'django.contrib.sites',
- 'django.contrib.humanize',
- 'Bcfg2.Server.Hostbase.hostbase',
-)
-
-LOGIN_URL = '/login/'
diff --git a/src/lib/Bcfg2/Server/Hostbase/templates/batchadd.tmpl b/src/lib/Bcfg2/Server/Hostbase/templates/batchadd.tmpl
deleted file mode 100644
index 74ea3c047..000000000
--- a/src/lib/Bcfg2/Server/Hostbase/templates/batchadd.tmpl
+++ /dev/null
@@ -1,29 +0,0 @@
-#mx ->
-#priority ->
-
-hostname ->
-whatami ->
-netgroup ->
-security_class ->
-support ->
-csi ->
-printq ->
-dhcp ->
-outbound_smtp ->
-primary_user ->
-administrator ->
-location ->
-expiration_date -> YYYY-MM-DD
-comments ->
-
-mac_addr ->
-hdwr_type ->
-ip_addr ->
-#ip_addr ->
-cname ->
-#cname ->
-
-#mac_addr ->
-#hdwr_type ->
-#ip_addr ->
-#cname ->
diff --git a/src/lib/Bcfg2/Server/Hostbase/templates/dhcpd.conf.head b/src/lib/Bcfg2/Server/Hostbase/templates/dhcpd.conf.head
deleted file mode 100644
index a3d19547e..000000000
--- a/src/lib/Bcfg2/Server/Hostbase/templates/dhcpd.conf.head
+++ /dev/null
@@ -1,5 +0,0 @@
-#
-# dhcpd.conf
-#
-# Configuration file for ISC dhcpd
-#
diff --git a/src/lib/Bcfg2/Server/Hostbase/templates/dhcpd.tmpl b/src/lib/Bcfg2/Server/Hostbase/templates/dhcpd.tmpl
deleted file mode 100644
index 757b263cd..000000000
--- a/src/lib/Bcfg2/Server/Hostbase/templates/dhcpd.tmpl
+++ /dev/null
@@ -1,17 +0,0 @@
-#
-# This file is automatically generated.
-# DO NOT EDIT IT BY HAND!
-#
-# This file contains {{ numips }} IP addresses
-# Generated on: {% now "r" %}
-#
-
-{% include "dhcpd.conf.head" %}
-
-# Hosts which require special configuration options can be listed in
-# host statements. If no address is specified, the address will be
-# allocated dynamically (if possible), but the host-specific information
-# will still come from the host declaration.
-
-{% for host in hosts %}host {{ host.0 }} {hardware ethernet {{ host.1 }};fixed-address {{ host.2 }};}
-{% endfor %}
diff --git a/src/lib/Bcfg2/Server/Hostbase/templates/hosts.tmpl b/src/lib/Bcfg2/Server/Hostbase/templates/hosts.tmpl
deleted file mode 100644
index 251cb5a79..000000000
--- a/src/lib/Bcfg2/Server/Hostbase/templates/hosts.tmpl
+++ /dev/null
@@ -1,26 +0,0 @@
-##############################################################################
-# MCS hosts file
-#
-# This file is generated automatically - DO NOT EDIT IT.
-#
-# Generated on: {% now "r" %}
-#
-
-127.0.0.1 localhost.mcs.anl.gov localhost
-
-# This file lists hosts in these domains:
-{% for domain in domain_data %}# {{ domain.0 }}: {{ domain.1 }}
-{% endfor %}
-#
-# This file lists hosts on these networks:
-#
-# Network Hosts
-# ---------------------------------------------------------------------
-{% for octet in two_octets_data %}# {{ octet.0 }} {{octet.1 }}
-{% endfor %}
-#
-{% for octet in three_octets_data %}# {{ octet.0 }} {{ octet.1 }}
-{% endfor %}
-#
-# Total host interfaces (ip addresses) in this file: {{ num_ips }}
-
diff --git a/src/lib/Bcfg2/Server/Hostbase/templates/hostsappend.tmpl b/src/lib/Bcfg2/Server/Hostbase/templates/hostsappend.tmpl
deleted file mode 100644
index 00e0d5d04..000000000
--- a/src/lib/Bcfg2/Server/Hostbase/templates/hostsappend.tmpl
+++ /dev/null
@@ -1,5 +0,0 @@
-##########################################################################
-# Hosts on subnet: {{ subnet.0 }}
-# total hosts: {{ subnet.1 }}
-{% for ip in ips %}{{ ip.0 }} {{ ip.1 }}{% if ip.4 and not ip.3 %} # {{ ip.5 }}{% else %}{% for name in ip.2 %} {{ name }}{% endfor %}{% for cname in ip.3 %} {{ cname }}{% endfor %} # {{ ip.5 }}{% endif %}
-{% endfor %}
diff --git a/src/lib/Bcfg2/Server/Hostbase/templates/named.tmpl b/src/lib/Bcfg2/Server/Hostbase/templates/named.tmpl
deleted file mode 100644
index 03e054198..000000000
--- a/src/lib/Bcfg2/Server/Hostbase/templates/named.tmpl
+++ /dev/null
@@ -1,69 +0,0 @@
-// This is the primary configuration file for the BIND DNS server named.
-//
-// Please read /usr/share/doc/bind9/README.Debian.gz for information on the
-// structure of BIND configuration files in Debian, *BEFORE* you customize
-// this configuration file.
-//
-
-include "/etc/bind/named.conf.options";
-
-include "/etc/bind/rndc.key";
-
-// prime the server with knowledge of the root servers
-zone "." {
- type hint;
- file "/etc/bind/db.root";
-};
-
-// be authoritative for the localhost forward and reverse zones, and for
-// broadcast zones as per RFC 1912
-{% for zone in zones %}
-zone "{{ zone.1 }}" {
- type master;
- file "/etc/bind/hostbase/{{ zone.1 }}";
- notify no;
- also-notify { 140.221.9.6;140.221.8.10; };
-};{% endfor %}
-
-zone "localhost" {
- type master;
- file "/etc/bind/db.local";
-};
-
-zone "127.in-addr.arpa" {
- type master;
- file "/etc/bind/db.127";
-};
-
-zone "0.in-addr.arpa" {
- type master;
- file "/etc/bind/db.0";
-};
-
-zone "255.in-addr.arpa" {
- type master;
- file "/etc/bind/db.255";
-};
-{% for reverse in reverses %}
-zone "{{ reverse.0 }}.in-addr.arpa" {
- type master;
- file "/etc/bind/hostbase/{{ reverse.0 }}.rev";
- notify no;
- also-notify { 140.221.9.6;140.221.8.10; };
-};{% endfor %}
-
-// zone "com" { type delegation-only; };
-// zone "net" { type delegation-only; };
-
-// From the release notes:
-// Because many of our users are uncomfortable receiving undelegated answers
-// from root or top level domains, other than a few for whom that behaviour
-// has been trusted and expected for quite some length of time, we have now
-// introduced the "root-delegations-only" feature which applies delegation-only
-// logic to all top level domains, and to the root domain. An exception list
-// should be specified, including "MUSEUM" and "DE", and any other top level
-// domains from whom undelegated responses are expected and trusted.
-// root-delegation-only exclude { "DE"; "MUSEUM"; };
-
-include "/etc/bind/named.conf.local";
-include "/etc/bind/named.conf.static";
diff --git a/src/lib/Bcfg2/Server/Hostbase/templates/namedviews.tmpl b/src/lib/Bcfg2/Server/Hostbase/templates/namedviews.tmpl
deleted file mode 100644
index 52021620e..000000000
--- a/src/lib/Bcfg2/Server/Hostbase/templates/namedviews.tmpl
+++ /dev/null
@@ -1,92 +0,0 @@
-// This is the primary configuration file for the BIND DNS server named.
-//
-// Please read /usr/share/doc/bind9/README.Debian.gz for information on the
-// structure of BIND configuration files in Debian, *BEFORE* you customize
-// this configuration file.
-//
-
-include "/etc/bind/named.conf.options";
-
-include "/etc/bind/rndc.key";
-
-view "internal" {
- match-clients { 140.221.9.6;140.221.8.10;140.221.8.88;140.221.8.15; };
- recursion yes;
- // prime the server with knowledge of the root servers
- zone "." {
- type hint;
- file "/etc/bind/db.root";
- };
- {% for zone in zones %}
- zone "{{ zone.1 }}" {
- type master;
- file "/etc/bind/hostbase/{{ zone.1 }}";
- notify no;
- also-notify { 140.221.9.6;140.221.8.10;140.221.8.88;140.221.8.15; };
- };{% endfor %}
- // be authoritative for the localhost forward and reverse zones, and for
- // broadcast zones as per RFC 1912
-
- zone "localhost" {
- type master;
- file "/etc/bind/db.local";
- };
-
- zone "127.in-addr.arpa" {
- type master;
- file "/etc/bind/db.127";
- };
-
- zone "0.in-addr.arpa" {
- type master;
- file "/etc/bind/db.0";
- };
-
- zone "255.in-addr.arpa" {
- type master;
- file "/etc/bind/db.255";
- };
- {% for reverse in reverses %}
- zone "{{ reverse.0 }}.in-addr.arpa" {
- type master;
- file "/etc/bind/hostbase/{{ reverse.0 }}.rev";
- notify no;
- also-notify { 140.221.9.6;140.221.8.10;140.221.8.88; };
- };{% endfor %}
- include "/etc/bind/named.conf.static";
-};
-
-view "external" {
- match-clients { any; };
- recursion no;
- {% for zone in zones %}
- zone "{{ zone.1 }}" {
- type master;
- file "/etc/bind/hostbase/{{ zone.1 }}.external";
- notify no;
- };{% endfor %}
-
- {% for reverse in reverses %}
- zone "{{ reverse.0 }}.in-addr.arpa" {
- type master;
- file "/etc/bind/hostbase/{{ reverse.0 }}.rev.external";
- notify no;
- };{% endfor %}
- include "/etc/bind/named.conf.static";
-};
-
-
-// zone "com" { type delegation-only; };
-// zone "net" { type delegation-only; };
-
-// From the release notes:
-// Because many of our users are uncomfortable receiving undelegated answers
-// from root or top level domains, other than a few for whom that behaviour
-// has been trusted and expected for quite some length of time, we have now
-// introduced the "root-delegations-only" feature which applies delegation-only
-// logic to all top level domains, and to the root domain. An exception list
-// should be specified, including "MUSEUM" and "DE", and any other top level
-// domains from whom undelegated responses are expected and trusted.
-// root-delegation-only exclude { "DE"; "MUSEUM"; };
-
-include "/etc/bind/named.conf.local";
diff --git a/src/lib/Bcfg2/Server/Hostbase/templates/reverseappend.tmpl b/src/lib/Bcfg2/Server/Hostbase/templates/reverseappend.tmpl
deleted file mode 100644
index 6ed520c98..000000000
--- a/src/lib/Bcfg2/Server/Hostbase/templates/reverseappend.tmpl
+++ /dev/null
@@ -1,4 +0,0 @@
-{% if fileorigin %}$ORIGIN {{ fileorigin }}.in-addr.arpa.{% endif %}
-$ORIGIN {{ inaddr }}.in-addr.arpa.
-{% for host in hosts %}{{ host.0.3 }} PTR {{ host.1 }}.
-{% endfor %}
diff --git a/src/lib/Bcfg2/Server/Hostbase/templates/reversesoa.tmpl b/src/lib/Bcfg2/Server/Hostbase/templates/reversesoa.tmpl
deleted file mode 100644
index d142eaf7f..000000000
--- a/src/lib/Bcfg2/Server/Hostbase/templates/reversesoa.tmpl
+++ /dev/null
@@ -1,13 +0,0 @@
-$ORIGIN .
-$TTL {{ zone.8 }}
-{{ inaddr }}.in-addr.arpa IN SOA {{ zone.4 }}. {{ zone.3 }} (
- {{ zone.2 }} ; serial
- {{ zone.7 }} ; refresh interval
- {{ zone.6 }} ; retry interval
- {{ zone.5 }} ; expire interval
- {{ zone.8 }} ; min ttl
- )
-
- {% for ns in nameservers %}NS {{ ns.0 }}
- {% endfor %}
-
diff --git a/src/lib/Bcfg2/Server/Hostbase/templates/zone.tmpl b/src/lib/Bcfg2/Server/Hostbase/templates/zone.tmpl
deleted file mode 100644
index aad48d179..000000000
--- a/src/lib/Bcfg2/Server/Hostbase/templates/zone.tmpl
+++ /dev/null
@@ -1,18 +0,0 @@
-$ORIGIN .
-$TTL {{ zone.8 }}
-{{ zone.1 }}. IN SOA {{ zone.4 }}. {{ zone.3 }}. (
- {{ zone.2 }} ; serial
- {{ zone.7 }} ; refresh interval
- {{ zone.6 }} ; retry interval
- {{ zone.5 }} ; expire interval
- {{ zone.8 }} ; min ttl
- )
-
- {% for ns in nameservers %}NS {{ ns.0 }}
- {% endfor %}
- {% for a in addresses %}A {{ a.0 }}
- {% endfor %}
- {% for mx in mxs %}MX {{ mx.0 }} {{ mx.1 }}
- {% endfor %}
-$ORIGIN {{ zone.1 }}.
-localhost A 127.0.0.1
diff --git a/src/lib/Bcfg2/Server/Hostbase/urls.py b/src/lib/Bcfg2/Server/Hostbase/urls.py
deleted file mode 100644
index 01fe97d4f..000000000
--- a/src/lib/Bcfg2/Server/Hostbase/urls.py
+++ /dev/null
@@ -1,27 +0,0 @@
-from django.conf.urls.defaults import *
-from django.conf import settings
-from django.views.generic.simple import direct_to_template
-from django.contrib import admin
-
-
-admin.autodiscover()
-
-
-urlpatterns = patterns('',
- # Uncomment the admin/doc line below and add 'django.contrib.admindocs'
- # to INSTALLED_APPS to enable admin documentation:
- (r'^admin/doc/', include('django.contrib.admindocs.urls')),
-
- # Uncomment the next line to enable the admin:
- (r'^admin/', include(admin.site.urls)),
-
- (r'^$',direct_to_template, {'template':'index.html'}, 'index'),
- (r'^hostbase/', include('hostbase.urls')),
- (r'^login/$', 'django.contrib.auth.views.login', {'template_name': 'login.html'}),
- (r'^logout/$', 'django.contrib.auth.views.logout', {'template_name': 'logout.html'})
-)
-
-if settings.SERVE_MEDIA:
- urlpatterns += patterns('',
- (r'^site_media/(?P<path>.*)$', 'django.views.static.serve',
- dict(document_root=settings.MEDIA_ROOT)),)
diff --git a/src/lib/Bcfg2/Server/Lint/Genshi.py b/src/lib/Bcfg2/Server/Lint/Genshi.py
index 7edeb8a49..a1d0b7fa1 100755
--- a/src/lib/Bcfg2/Server/Lint/Genshi.py
+++ b/src/lib/Bcfg2/Server/Lint/Genshi.py
@@ -4,7 +4,6 @@ import sys
import Bcfg2.Server.Lint
from genshi.template import TemplateLoader, NewTextTemplate, MarkupTemplate, \
TemplateSyntaxError
-from Bcfg2.Server.Plugins.Bundler import BundleTemplateFile
from Bcfg2.Server.Plugins.Cfg.CfgGenshiGenerator import CfgGenshiGenerator
@@ -14,8 +13,6 @@ class Genshi(Bcfg2.Server.Lint.ServerPlugin):
def Run(self):
if 'Cfg' in self.core.plugins:
self.check_cfg()
- if 'TGenshi' in self.core.plugins:
- self.check_tgenshi()
if 'Bundler' in self.core.plugins:
self.check_bundler()
@@ -38,27 +35,13 @@ class Genshi(Bcfg2.Server.Lint.ServerPlugin):
self.LintError("genshi-syntax-error",
"Genshi syntax error: %s" % err)
- def check_tgenshi(self):
- """ Check templates in TGenshi for syntax errors. """
- loader = TemplateLoader()
-
- for eset in self.core.plugins['TGenshi'].entries.values():
- for fname, sdata in list(eset.entries.items()):
- if self.HandlesFile(fname):
- try:
- loader.load(sdata.name, cls=NewTextTemplate)
- except TemplateSyntaxError:
- err = sys.exc_info()[1]
- self.LintError("genshi-syntax-error",
- "Genshi syntax error: %s" % err)
-
def check_bundler(self):
""" Check templates in Bundler for syntax errors. """
loader = TemplateLoader()
for entry in self.core.plugins['Bundler'].entries.values():
if (self.HandlesFile(entry.name) and
- isinstance(entry, BundleTemplateFile)):
+ entry.template is not None):
try:
loader.load(entry.name, cls=MarkupTemplate)
except TemplateSyntaxError:
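The check above now keys off the entry's template attribute rather than an isinstance() test against BundleTemplateFile, which matches the StructFile changes later in this diff (template is None for static XML bundles and a Genshi template object otherwise). A minimal sketch of that check, assuming only the name and template attributes visible in the hunk; lint_bundles and report_error are illustrative names, not Bcfg2 API:

    from genshi.template import TemplateLoader, MarkupTemplate, TemplateSyntaxError

    def lint_bundles(entries, report_error):
        # entries: objects exposing .name and .template as in the hunk above
        loader = TemplateLoader()
        for entry in entries:
            if entry.template is None:      # static XML bundle, nothing to render
                continue
            try:
                loader.load(entry.name, cls=MarkupTemplate)
            except TemplateSyntaxError as err:
                report_error("genshi-syntax-error",
                             "Genshi syntax error: %s" % err)
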
diff --git a/src/lib/Bcfg2/Server/Lint/GroupNames.py b/src/lib/Bcfg2/Server/Lint/GroupNames.py
index b180083d5..730f32750 100644
--- a/src/lib/Bcfg2/Server/Lint/GroupNames.py
+++ b/src/lib/Bcfg2/Server/Lint/GroupNames.py
@@ -3,11 +3,6 @@
import os
import re
import Bcfg2.Server.Lint
-try:
- from Bcfg2.Server.Plugins.Bundler import BundleTemplateFile
- HAS_GENSHI = True
-except ImportError:
- HAS_GENSHI = False
class GroupNames(Bcfg2.Server.Lint.ServerPlugin):
@@ -49,9 +44,7 @@ class GroupNames(Bcfg2.Server.Lint.ServerPlugin):
def check_bundles(self):
""" Check groups used in the Bundler plugin for validity. """
for bundle in self.core.plugins['Bundler'].entries.values():
- if (self.HandlesFile(bundle.name) and
- (not HAS_GENSHI or
- not isinstance(bundle, BundleTemplateFile))):
+ if self.HandlesFile(bundle.name) and bundle.template is None:
self.check_entries(bundle.xdata.xpath("//Group"),
bundle.name)
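GroupNames follows the same pattern: templated bundles (bundle.template is not None) are skipped and the <Group> tags are read straight from bundle.xdata. A hedged sketch of the name check; the no-whitespace regex and the error label are assumptions for illustration, the real ones live elsewhere in GroupNames.py:

    import re
    import lxml.etree

    VALID_NAME = re.compile(r'\S+$')    # assumed: group names contain no whitespace

    def check_group_names(xdata, filename, report_error):
        for grp in xdata.xpath("//Group"):
            if not VALID_NAME.match(grp.get("name", "")):
                report_error("invalid-group-name",
                             "Invalid group name in %s: %r" %
                             (filename, grp.get("name")))

    xdata = lxml.etree.XML('<Bundle><Group name="bad name"/></Bundle>')
    check_group_names(xdata, "example.xml", print)
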
diff --git a/src/lib/Bcfg2/Server/Lint/InfoXML.py b/src/lib/Bcfg2/Server/Lint/InfoXML.py
index 95657317e..184f657b7 100644
--- a/src/lib/Bcfg2/Server/Lint/InfoXML.py
+++ b/src/lib/Bcfg2/Server/Lint/InfoXML.py
@@ -4,7 +4,6 @@ import os
import Bcfg2.Options
import Bcfg2.Server.Lint
from Bcfg2.Server.Plugins.Cfg.CfgInfoXML import CfgInfoXML
-from Bcfg2.Server.Plugins.Cfg.CfgLegacyInfo import CfgLegacyInfo
class InfoXML(Bcfg2.Server.Lint.ServerPlugin):
@@ -33,19 +32,9 @@ class InfoXML(Bcfg2.Server.Lint.ServerPlugin):
self.LintError("no-infoxml",
"No info.xml found for %s" % filename)
- for entry in entryset.entries.values():
- if isinstance(entry, CfgLegacyInfo):
- if not self.HandlesFile(entry.path):
- continue
- self.LintError("deprecated-info-file",
- "Deprecated %s file found at %s" %
- (os.path.basename(entry.name),
- entry.path))
-
@classmethod
def Errors(cls):
return {"no-infoxml": "warning",
- "deprecated-info-file": "warning",
"paranoid-false": "warning",
"required-infoxml-attrs-missing": "error"}
diff --git a/src/lib/Bcfg2/Server/Lint/RequiredAttrs.py b/src/lib/Bcfg2/Server/Lint/RequiredAttrs.py
index 6ffdd33a0..83b00bcb3 100644
--- a/src/lib/Bcfg2/Server/Lint/RequiredAttrs.py
+++ b/src/lib/Bcfg2/Server/Lint/RequiredAttrs.py
@@ -3,16 +3,10 @@ verified with an XML schema alone. """
import os
import re
-import lxml.etree
import Bcfg2.Server.Lint
import Bcfg2.Client.Tools.VCS
from Bcfg2.Server.Plugins.Packages import Apt, Yum
from Bcfg2.Client.Tools.POSIX.base import device_map
-try:
- from Bcfg2.Server.Plugins.Bundler import BundleTemplateFile
- HAS_GENSHI = True
-except ImportError:
- HAS_GENSHI = False
# format verifying functions. TODO: These should be moved into XML
@@ -183,17 +177,9 @@ class RequiredAttrs(Bcfg2.Server.Lint.ServerPlugin):
return
for bundle in self.core.plugins['Bundler'].entries.values():
- if (self.HandlesFile(bundle.name) and
- (not HAS_GENSHI or
- not isinstance(bundle, BundleTemplateFile))):
- try:
- xdata = lxml.etree.XML(bundle.data)
- except (lxml.etree.XMLSyntaxError, AttributeError):
- xdata = \
- lxml.etree.parse(bundle.template.filepath).getroot()
-
- for path in \
- xdata.xpath("//*[substring(name(), 1, 5) = 'Bound']"):
+ if self.HandlesFile(bundle.name) and bundle.template is None:
+ for path in bundle.xdata.xpath(
+ "//*[substring(name(), 1, 5) = 'Bound']"):
self.check_entry(path, bundle.name)
def check_entry(self, entry, filename):
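For reference, the XPath above selects every element whose tag starts with "Bound" directly from the parsed bundle, with no re-parse of the raw template data. A self-contained illustration (the XML snippet is invented):

    import lxml.etree

    xdata = lxml.etree.XML(
        '<Bundle name="example">'
        '  <BoundPath name="/etc/motd" type="file"/>'
        '  <Package name="openssh-server"/>'
        '</Bundle>')

    bound = xdata.xpath("//*[substring(name(), 1, 5) = 'Bound']")
    print([el.tag for el in bound])     # ['BoundPath']
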
diff --git a/src/lib/Bcfg2/Server/Lint/Validate.py b/src/lib/Bcfg2/Server/Lint/Validate.py
index 09f3f3d25..ca9f138ef 100644
--- a/src/lib/Bcfg2/Server/Lint/Validate.py
+++ b/src/lib/Bcfg2/Server/Lint/Validate.py
@@ -6,8 +6,8 @@ import sys
import glob
import fnmatch
import lxml.etree
-from subprocess import Popen, PIPE, STDOUT
import Bcfg2.Server.Lint
+from Bcfg2.Utils import Executor
class Validate(Bcfg2.Server.Lint.ServerlessPlugin):
@@ -39,7 +39,6 @@ class Validate(Bcfg2.Server.Lint.ServerlessPlugin):
"Bundler/*.xml": "bundle.xsd",
"Bundler/*.genshi": "bundle.xsd",
"Pkgmgr/*.xml": "pkglist.xsd",
- "Base/*.xml": "base.xsd",
"Rules/*.xml": "rules.xsd",
"Defaults/*.xml": "defaults.xsd",
"etc/report-configuration.xml": "report-configuration.xsd",
@@ -56,6 +55,7 @@ class Validate(Bcfg2.Server.Lint.ServerlessPlugin):
self.filelists = {}
self.get_filelists()
+ self.cmd = Executor()
def Run(self):
schemadir = self.config['schema']
@@ -108,11 +108,10 @@ class Validate(Bcfg2.Server.Lint.ServerlessPlugin):
try:
return lxml.etree.parse(filename)
except SyntaxError:
- lint = Popen(["xmllint", filename], stdout=PIPE, stderr=STDOUT)
+ result = self.cmd.run(["xmllint", filename])
self.LintError("xml-failed-to-parse",
- "%s fails to parse:\n%s" % (filename,
- lint.communicate()[0]))
- lint.wait()
+ "%s fails to parse:\n%s" %
+ (filename, result.stdout + result.stderr))
return False
except IOError:
self.LintError("xml-failed-to-read",
@@ -145,14 +144,11 @@ class Validate(Bcfg2.Server.Lint.ServerlessPlugin):
if self.files is None:
cmd.append("--xinclude")
cmd.extend(["--noout", "--schema", schemafile, filename])
- lint = Popen(cmd, stdout=PIPE, stderr=STDOUT)
- output = lint.communicate()[0]
- # py3k fix
- if not isinstance(output, str):
- output = output.decode('utf-8')
- if lint.wait():
+ result = self.cmd.run(cmd)
+ if not result.success:
self.LintError("xml-failed-to-verify",
- "%s fails to verify:\n%s" % (filename, output))
+ "%s fails to verify:\n%s" %
+ (filename, result.stdout + result.stderr))
return False
return True
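Validate now shells out through Bcfg2.Utils.Executor instead of juggling Popen objects; the lint code relies only on the result's success, stdout and stderr attributes, as the hunks above show. A rough standard-library stand-in for that call pattern (ExecutorResult and run are hypothetical names, not the real Bcfg2.Utils API):

    import subprocess
    from collections import namedtuple

    ExecutorResult = namedtuple("ExecutorResult", "success stdout stderr")

    def run(cmd):
        proc = subprocess.Popen(cmd, stdout=subprocess.PIPE,
                                stderr=subprocess.PIPE)
        out, err = proc.communicate()
        return ExecutorResult(proc.returncode == 0,
                              out.decode("utf-8", "replace"),
                              err.decode("utf-8", "replace"))

    result = run(["xmllint", "--noout", "--schema", "bundle.xsd", "bundle.xml"])
    if not result.success:
        print("bundle.xml fails to verify:\n%s" % (result.stdout + result.stderr))
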
diff --git a/src/lib/Bcfg2/Server/Plugin/helpers.py b/src/lib/Bcfg2/Server/Plugin/helpers.py
index 81dc1d736..1ee14d76e 100644
--- a/src/lib/Bcfg2/Server/Plugin/helpers.py
+++ b/src/lib/Bcfg2/Server/Plugin/helpers.py
@@ -3,15 +3,15 @@
import os
import re
import sys
-import copy
import time
+import copy
import glob
-import logging
+import genshi
import operator
import lxml.etree
import Bcfg2.Server
import Bcfg2.Options
-import Bcfg2.Statistics
+import Bcfg2.Server.FileMonitor
from Bcfg2.Compat import CmpMixin, wraps
from Bcfg2.Server.Plugin.base import Debuggable, Plugin
from Bcfg2.Server.Plugin.interfaces import Generator
@@ -19,6 +19,12 @@ from Bcfg2.Server.Plugin.exceptions import SpecificityError, \
PluginExecutionError
try:
+ import Bcfg2.Server.Encryption
+ HAS_CRYPTO = True
+except ImportError:
+ HAS_CRYPTO = False
+
+try:
import django # pylint: disable=W0611
HAS_DJANGO = True
except ImportError:
@@ -111,11 +117,40 @@ class track_statistics(object): # pylint: disable=C0103
try:
return func(obj, *args, **kwargs)
finally:
- Bcfg2.Statistics.stats.add_value(name, time.time() - start)
+ Bcfg2.Server.Statistics.stats.add_value(name,
+ time.time() - start)
return inner
+def removecomment(stream):
+ """ A Genshi filter that removes comments from the stream. This
+ function is a generator.
+
+ :param stream: The Genshi stream to remove comments from
+ :type stream: genshi.core.Stream
+ :returns: tuple of ``(kind, data, pos)``, as when iterating
+ through a Genshi stream
+ """
+ for kind, data, pos in stream:
+ if kind is genshi.core.COMMENT:
+ continue
+ yield kind, data, pos
+
+
+def default_path_metadata():
+ """ Get the default Path entry metadata from the config.
+
+ :returns: dict of metadata attributes and their default values
+ """
+ attrs = Bcfg2.Options.PATH_METADATA_OPTIONS.keys()
+ setup = Bcfg2.Options.get_option_parser()
+ if not set(attrs).issubset(setup.keys()):
+ setup.add_options(Bcfg2.Options.PATH_METADATA_OPTIONS)
+ setup.reparse(argv=[Bcfg2.Options.CFILE.cmd, Bcfg2.Options.CFILE])
+ return dict([(k, setup[k]) for k in attrs])
+
+
class DatabaseBacked(Plugin):
""" Provides capabilities for a plugin to read and write to a
database.
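removecomment() above is an ordinary Genshi stream filter; _render(), added further down in this file, chains it between template generation and re-parsing the output with lxml. A small usage sketch with an invented template string (only the generate/filter/render chain is taken from this diff):

    import genshi.core
    import lxml.etree
    from genshi.template import MarkupTemplate

    def removecomment(stream):
        """Drop comment events from a Genshi stream, as in the filter above."""
        for kind, data, pos in stream:
            if kind is genshi.core.COMMENT:
                continue
            yield kind, data, pos

    tmpl = MarkupTemplate('<Bundle xmlns:py="http://genshi.edgewall.org/">'
                          '<!-- not emitted --><Package name="${pkg}"/></Bundle>')
    stream = tmpl.generate(pkg="openssh-server").filter(removecomment)
    xdata = lxml.etree.XML(stream.render("xml"))
    print(lxml.etree.tostring(xdata))   # rendered output contains no comment node
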
@@ -198,13 +233,10 @@ class FileBacked(Debuggable):
principally meant to be used as a part of
:class:`Bcfg2.Server.Plugin.helpers.DirectoryBacked`. """
- def __init__(self, name, fam=None):
+ def __init__(self, name):
"""
:param name: The full path to the file to cache and monitor
:type name: string
- :param fam: The FAM object used to receive notifications of
- changes
- :type fam: Bcfg2.Server.FileMonitor.FileMonitor
"""
Debuggable.__init__(self)
@@ -215,7 +247,7 @@ class FileBacked(Debuggable):
self.name = name
#: The FAM object used to receive notifications of changes
- self.fam = fam
+ self.fam = Bcfg2.Server.FileMonitor.get_fam()
def HandleEvent(self, event=None):
""" HandleEvent is called whenever the FAM registers an event.
@@ -268,14 +300,11 @@ class DirectoryBacked(Debuggable):
#: :attr:`patterns` or ``ignore``, then a warning will be produced.
ignore = None
- def __init__(self, data, fam):
+ def __init__(self, data):
"""
:param data: The path to the data directory that will be
monitored
:type data: string
- :param fam: The FAM object used to receive notifications of
- changes
- :type fam: Bcfg2.Server.FileMonitor.FileMonitor
.. -----
.. autoattribute:: __child__
@@ -283,7 +312,7 @@ class DirectoryBacked(Debuggable):
Debuggable.__init__(self)
self.data = os.path.normpath(data)
- self.fam = fam
+ self.fam = Bcfg2.Server.FileMonitor.get_fam()
#: self.entries contains information about the files monitored
#: by this object. The keys of the dict are the relative
@@ -355,8 +384,7 @@ class DirectoryBacked(Debuggable):
:returns: None
"""
self.entries[relative] = self.__child__(os.path.join(self.data,
- relative),
- self.fam)
+ relative))
self.entries[relative].HandleEvent(event)
def HandleEvent(self, event): # pylint: disable=R0912
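FileBacked and DirectoryBacked (and XMLFileBacked below) no longer take the file monitor as a constructor argument; they look it up with Bcfg2.Server.FileMonitor.get_fam(), presumably a process-wide singleton. A generic sketch of that lookup pattern with invented names (only get_fam() itself appears in this diff):

    _FAM = None

    def set_fam(fam):
        """Record the shared file monitor once, at startup."""
        global _FAM
        _FAM = fam

    def get_fam():
        if _FAM is None:
            raise RuntimeError("file monitor has not been set up yet")
        return _FAM

    class FileBacked(object):
        def __init__(self, name):       # note: no fam= argument any more
            self.name = name
            self.fam = get_fam()
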
@@ -481,13 +509,10 @@ class XMLFileBacked(FileBacked):
#: to the constructor.
create = None
- def __init__(self, filename, fam=None, should_monitor=False, create=None):
+ def __init__(self, filename, should_monitor=False, create=None):
"""
:param filename: The full path to the file to cache and monitor
:type filename: string
- :param fam: The FAM object used to receive notifications of
- changes
- :type fam: Bcfg2.Server.FileMonitor.FileMonitor
:param should_monitor: Whether or not to monitor this file for
changes. It may be useful to disable
monitoring when, for instance, the file
@@ -507,7 +532,7 @@ class XMLFileBacked(FileBacked):
.. -----
.. autoattribute:: __identifier__
"""
- FileBacked.__init__(self, filename, fam=fam)
+ FileBacked.__init__(self, filename)
#: The raw XML data contained in the file as an
#: :class:`lxml.etree.ElementTree` object, with XIncludes
@@ -543,7 +568,7 @@ class XMLFileBacked(FileBacked):
#: Whether or not to monitor this file for changes.
self.should_monitor = should_monitor
- if fam and should_monitor:
+ if should_monitor:
self.fam.AddMonitor(filename, self)
def _follow_xincludes(self, fname=None, xdata=None):
@@ -614,7 +639,7 @@ class XMLFileBacked(FileBacked):
:returns: None
"""
self.extra_monitors.append(fpath)
- if self.fam and self.should_monitor:
+ if self.should_monitor:
self.fam.AddMonitor(fpath, self)
def __iter__(self):
@@ -627,44 +652,188 @@ class XMLFileBacked(FileBacked):
class StructFile(XMLFileBacked):
""" StructFiles are XML files that contain a set of structure file
formatting logic for handling ``<Group>`` and ``<Client>``
- tags. """
+ tags.
+
+ .. -----
+ .. autoattribute:: __identifier__
+ .. automethod:: _include_element
+ """
#: If ``__identifier__`` is not None, then it must be the name of
#: an XML attribute that will be required on the top-level tag of
#: the file being cached
__identifier__ = None
- def _include_element(self, item, metadata):
- """ determine if an XML element matches the metadata """
+ #: Callbacks used to determine if children of items with the given
+ #: tags should be included in the return value of
+ #: :func:`Bcfg2.Server.Plugin.helpers.StructFile.Match` and
+ #: :func:`Bcfg2.Server.Plugin.helpers.StructFile.XMLMatch`. Each
+ #: callback is passed the same arguments as
+ #: :func:`Bcfg2.Server.Plugin.helpers.StructFile._include_element`.
+ #: It should return True if children of the element should be
+ #: included in the match, False otherwise. The callback does
+ #: *not* need to consider negation; that will be handled in
+ #: :func:`Bcfg2.Server.Plugin.helpers.StructFile._include_element`
+ _include_tests = \
+ dict(Group=lambda el, md, *args: el.get('name') in md.groups,
+ Client=lambda el, md, *args: el.get('name') == md.hostname)
+
+ def __init__(self, filename, should_monitor=False):
+ XMLFileBacked.__init__(self, filename, should_monitor=should_monitor)
+ self.setup = Bcfg2.Options.get_option_parser()
+ self.encoding = self.setup['encoding']
+ self.template = None
+
+ def Index(self):
+ XMLFileBacked.Index(self)
+ if (self.name.endswith('.genshi') or
+ ('py' in self.xdata.nsmap and
+ self.xdata.nsmap['py'] == 'http://genshi.edgewall.org/')):
+ try:
+ loader = genshi.template.TemplateLoader()
+ self.template = loader.load(self.name,
+ cls=genshi.template.MarkupTemplate,
+ encoding=self.encoding)
+ except LookupError:
+ err = sys.exc_info()[1]
+ self.logger.error('Genshi lookup error in %s: %s' % (self.name,
+ err))
+ except genshi.template.TemplateError:
+ err = sys.exc_info()[1]
+ self.logger.error('Genshi template error in %s: %s' %
+ (self.name, err))
+ except genshi.input.ParseError:
+ err = sys.exc_info()[1]
+ self.logger.error('Genshi parse error in %s: %s' % (self.name,
+ err))
+
+ if HAS_CRYPTO:
+ strict = self.xdata.get(
+ "decrypt",
+ self.setup.cfp.get(Bcfg2.Server.Encryption.CFG_SECTION,
+ "decrypt", default="strict")) == "strict"
+ for el in self.xdata.xpath("//*[@encrypted]"):
+ try:
+ el.text = self._decrypt(el).encode('ascii',
+ 'xmlcharrefreplace')
+ except UnicodeDecodeError:
+ self.logger.info("%s: Decrypted %s to gibberish, skipping"
+ % (self.name, el.tag))
+ except Bcfg2.Server.Encryption.EVPError:
+ msg = "Failed to decrypt %s element in %s" % (el.tag,
+ self.name)
+ if strict:
+ raise PluginExecutionError(msg)
+ else:
+ self.logger.warning(msg)
+ Index.__doc__ = XMLFileBacked.Index.__doc__
+
+ def _decrypt(self, element):
+ """ Decrypt a single encrypted properties file element """
+ if not element.text or not element.text.strip():
+ return
+ passes = Bcfg2.Server.Encryption.get_passphrases()
+ try:
+ passphrase = passes[element.get("encrypted")]
+ try:
+ return Bcfg2.Server.Encryption.ssl_decrypt(element.text,
+ passphrase)
+ except Bcfg2.Server.Encryption.EVPError:
+ # error is raised below
+ pass
+ except KeyError:
+ # bruteforce_decrypt raises an EVPError with a sensible
+ # error message, so we just let it propagate up the stack
+ return Bcfg2.Server.Encryption.bruteforce_decrypt(element.text)
+ raise Bcfg2.Server.Encryption.EVPError("Failed to decrypt")
+
+ def _include_element(self, item, metadata, *args):
+ """ Determine if an XML element matches the other arguments.
+
+ The first argument is always the XML element to match, and the
+ second will always be a single
+ :class:`Bcfg2.Server.Plugins.Metadata.ClientMetadata` object
+ representing the metadata to match against. Subsequent
+ arguments are as given to
+ :func:`Bcfg2.Server.Plugin.helpers.StructFile.Match` or
+ :func:`Bcfg2.Server.Plugin.helpers.StructFile.XMLMatch`. In
+ the base StructFile implementation, there are no additional
+ arguments; in classes that inherit from StructFile, see the
+ :func:`Match` and :func:`XMLMatch` method signatures."""
if isinstance(item, lxml.etree._Comment): # pylint: disable=W0212
return False
- negate = item.get('negate', 'false').lower() == 'true'
- if item.tag == 'Group':
- return negate == (item.get('name') not in metadata.groups)
- elif item.tag == 'Client':
- return negate == (item.get('name') != metadata.hostname)
+ if item.tag in self._include_tests:
+ negate = item.get('negate', 'false').lower() == 'true'
+ return negate != self._include_tests[item.tag](item, metadata,
+ *args)
else:
return True
- def _match(self, item, metadata):
- """ recursive helper for Match() """
- if self._include_element(item, metadata):
- if item.tag == 'Group' or item.tag == 'Client':
+ def _render(self, metadata):
+ """ Render the template for the given client metadata
+
+ :param metadata: Client metadata to match against.
+ :type metadata: Bcfg2.Server.Plugins.Metadata.ClientMetadata
+ :returns: lxml.etree._Element object representing the rendered
+ XML data
+ """
+ stream = self.template.generate(
+ metadata=metadata,
+ repo=self.setup['repo']).filter(removecomment)
+ return lxml.etree.XML(stream.render('xml',
+ strip_whitespace=False),
+ parser=Bcfg2.Server.XMLParser)
+
+ def _match(self, item, metadata, *args):
+ """ recursive helper for
+ :func:`Bcfg2.Server.Plugin.helpers.StructFile.Match` """
+ if self._include_element(item, metadata, *args):
+ if item.tag in self._include_tests.keys():
rv = []
- if self._include_element(item, metadata):
+ if self._include_element(item, metadata, *args):
for child in item.iterchildren():
- rv.extend(self._match(child, metadata))
+ rv.extend(self._match(child, metadata, *args))
return rv
else:
rv = copy.deepcopy(item)
for child in rv.iterchildren():
rv.remove(child)
for child in item.iterchildren():
- rv.extend(self._match(child, metadata))
+ rv.extend(self._match(child, metadata, *args))
return [rv]
else:
return []
+ def _do_match(self, metadata, *args):
+ """ Helper for
+ :func:`Bcfg2.Server.Plugin.helpers.StructFile.Match` that lets
+ a subclass of StructFile easily redefine the public Match()
+ interface to accept a different number of arguments. This
+ provides a sane prototype for the Match() function while
+ keeping the internals consistent. """
+ rv = []
+ if self.template is None:
+ entries = self.entries
+ else:
+ entries = self._render(metadata).getchildren()
+ for child in entries:
+ rv.extend(self._match(child, metadata, *args))
+ return rv
+
def Match(self, metadata):
""" Return matching fragments of the data in this file. A tag
is considered to match if all ``<Group>`` and ``<Client>``
@@ -675,22 +844,22 @@ class StructFile(XMLFileBacked):
Match() (and *not* their descendants) should be considered to
match the metadata.
+ Match() returns matching fragments in document order.
+
:param metadata: Client metadata to match against.
:type metadata: Bcfg2.Server.Plugins.Metadata.ClientMetadata
:returns: list of lxml.etree._Element objects """
- rv = []
- for child in self.entries:
- rv.extend(self._match(child, metadata))
- return rv
+ return self._do_match(metadata)
- def _xml_match(self, item, metadata):
- """ recursive helper for XMLMatch """
- if self._include_element(item, metadata):
- if item.tag == 'Group' or item.tag == 'Client':
+ def _xml_match(self, item, metadata, *args):
+ """ recursive helper for
+ :func:`Bcfg2.Server.Plugin.helpers.StructFile.XMLMatch` """
+ if self._include_element(item, metadata, *args):
+ if item.tag in self._include_tests.keys():
for child in item.iterchildren():
item.remove(child)
item.getparent().append(child)
- self._xml_match(child, metadata)
+ self._xml_match(child, metadata, *args)
if item.text:
if item.getparent().text is None:
item.getparent().text = item.text
@@ -699,10 +868,25 @@ class StructFile(XMLFileBacked):
item.getparent().remove(item)
else:
for child in item.iterchildren():
- self._xml_match(child, metadata)
+ self._xml_match(child, metadata, *args)
else:
item.getparent().remove(item)
+ def _do_xmlmatch(self, metadata, *args):
+ """ Helper for
+ :func:`Bcfg2.Server.Plugin.helpers.StructFile.XMLMatch` that lets
+ a subclass of StructFile easily redefine the public XMLMatch()
+ interface to accept a different number of arguments. This
+ provides a sane prototype for the XMLMatch() function while
+ keeping the internals consistent. """
+ if self.template is None:
+ rv = copy.deepcopy(self.xdata)
+ else:
+ rv = self._render(metadata)
+ for child in rv.iterchildren():
+ self._xml_match(child, metadata, *args)
+ return rv
+
def XMLMatch(self, metadata):
""" Return a rebuilt XML document that only contains the
matching portions of the original file. A tag is considered
@@ -712,176 +896,58 @@ class StructFile(XMLFileBacked):
All ``<Group>`` and ``<Client>`` tags will have been stripped
out.
+ The new document produced by XMLMatch() is not necessarily in
+ the same order as the original document.
+
:param metadata: Client metadata to match against.
:type metadata: Bcfg2.Server.Plugins.Metadata.ClientMetadata
:returns: lxml.etree._Element """
- rv = copy.deepcopy(self.xdata)
- for child in rv.iterchildren():
- self._xml_match(child, metadata)
- return rv
-
-
-class INode(object):
- """ INodes provide lists of things available at a particular group
- intersection. INodes are deprecated; new plugins should use
- :class:`Bcfg2.Server.Plugin.helpers.StructFile` instead. """
-
- raw = dict(
- Client="lambda m, e:'%(name)s' == m.hostname and predicate(m, e)",
- Group="lambda m, e:'%(name)s' in m.groups and predicate(m, e)")
- nraw = dict(
- Client="lambda m, e:'%(name)s' != m.hostname and predicate(m, e)",
- Group="lambda m, e:'%(name)s' not in m.groups and predicate(m, e)")
- containers = ['Group', 'Client']
- ignore = []
-
- def __init__(self, data, idict, parent=None):
- self.data = data
- self.contents = {}
- if parent is None:
- self.predicate = lambda m, e: True
- else:
- predicate = parent.predicate
- if data.get('negate', 'false').lower() == 'true':
- psrc = self.nraw
- else:
- psrc = self.raw
- if data.tag in list(psrc.keys()):
- self.predicate = eval(psrc[data.tag] %
- {'name': data.get('name')},
- {'predicate': predicate})
- else:
- raise PluginExecutionError("Unknown tag: %s" % data.tag)
- self.children = []
- self._load_children(data, idict)
+ return self._do_xmlmatch(metadata)
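XMLMatch() walks the (possibly template-rendered) tree, hoists the children of matching predicate tags up to their parent, and drops non-matching subtrees entirely, which is why the result can be reordered relative to the source file. A toy illustration of that hoist-and-prune step on a hand-built tree (plain lxml, not the StructFile class itself):

import lxml.etree

def prune(el, groups):
    """Keep children of matching Group tags, drop non-matching subtrees."""
    for child in list(el):
        prune(child, groups)
    if el.tag == 'Group':
        parent = el.getparent()
        if el.get('name') in groups:
            for child in list(el):
                parent.append(child)  # hoist surviving children
        parent.remove(el)

doc = lxml.etree.XML('<Bundle>'
                     '<Group name="web"><Package name="httpd"/></Group>'
                     '<Group name="db"><Package name="postgresql"/></Group>'
                     '<Service name="sshd"/></Bundle>')
for child in list(doc):
    prune(child, groups={'web'})
print(lxml.etree.tostring(doc).decode())
# <Bundle><Service name="sshd"/><Package name="httpd"/></Bundle>
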
- def _load_children(self, data, idict):
- """ load children """
- for item in data.getchildren():
- if item.tag in self.ignore:
- continue
- elif item.tag in self.containers:
- self.children.append(self.__class__(item, idict, self))
- else:
- try:
- self.contents[item.tag][item.get('name')] = \
- dict(item.attrib)
- except KeyError:
- self.contents[item.tag] = \
- {item.get('name'): dict(item.attrib)}
- if item.text:
- self.contents[item.tag][item.get('name')]['__text__'] = \
- item.text
- if item.getchildren():
- self.contents[item.tag][item.get('name')]['__children__'] \
- = item.getchildren()
- try:
- idict[item.tag].append(item.get('name'))
- except KeyError:
- idict[item.tag] = [item.get('name')]
-
- def Match(self, metadata, data, entry=lxml.etree.Element("None")):
- """Return a dictionary of package mappings."""
- if self.predicate(metadata, entry):
- for key in self.contents:
- try:
- data[key].update(self.contents[key])
- except: # pylint: disable=W0702
- data[key] = {}
- data[key].update(self.contents[key])
- for child in self.children:
- child.Match(metadata, data, entry=entry)
-
-
-class InfoNode (INode):
- """ :class:`Bcfg2.Server.Plugin.helpers.INode` implementation that
- includes ``<Path>`` tags, suitable for use with :file:`info.xml`
- files."""
-
- raw = dict(
- Client="lambda m, e: '%(name)s' == m.hostname and predicate(m, e)",
- Group="lambda m, e: '%(name)s' in m.groups and predicate(m, e)",
- Path="lambda m, e: ('%(name)s' == e.get('name') or " +
- "'%(name)s' == e.get('realname')) and " +
- "predicate(m, e)")
- nraw = dict(
- Client="lambda m, e: '%(name)s' != m.hostname and predicate(m, e)",
- Group="lambda m, e: '%(name)s' not in m.groups and predicate(m, e)",
- Path="lambda m, e: '%(name)s' != e.get('name') and " +
- "'%(name)s' != e.get('realname') and " +
- "predicate(m, e)")
- containers = ['Group', 'Client', 'Path']
-
-
-class XMLSrc(XMLFileBacked):
- """ XMLSrc files contain a
- :class:`Bcfg2.Server.Plugin.helpers.INode` hierarchy that returns
- matching entries. XMLSrc objects are deprecated and
- :class:`Bcfg2.Server.Plugin.helpers.StructFile` should be
- preferred where possible."""
- __node__ = INode
- __cacheobj__ = dict
- __priority_required__ = True
-
- def __init__(self, filename, fam=None, should_monitor=False, create=None):
- XMLFileBacked.__init__(self, filename, fam, should_monitor, create)
- self.items = {}
- self.cache = None
- self.pnode = None
- self.priority = -1
- def HandleEvent(self, _=None):
- """Read file upon update."""
- try:
- data = open(self.name).read()
- except IOError:
- msg = "Failed to read file %s: %s" % (self.name, sys.exc_info()[1])
- self.logger.error(msg)
- raise PluginExecutionError(msg)
- self.items = {}
- try:
- xdata = lxml.etree.XML(data, parser=Bcfg2.Server.XMLParser)
- except lxml.etree.XMLSyntaxError:
- msg = "Failed to parse file %s: %s" % (self.name,
- sys.exc_info()[1])
- self.logger.error(msg)
- raise PluginExecutionError(msg)
- self.pnode = self.__node__(xdata, self.items)
- self.cache = None
- try:
- self.priority = int(xdata.get('priority'))
- except (ValueError, TypeError):
- if self.__priority_required__:
- msg = "Got bogus priority %s for file %s" % \
- (xdata.get('priority'), self.name)
- self.logger.error(msg)
- raise PluginExecutionError(msg)
+class InfoXML(StructFile):
+ """ InfoXML files contain Group, Client, and Path tags to set the
+ metadata (permissions, owner, etc.) of files. """
+ encryption = False
- del xdata, data
+ _include_tests = StructFile._include_tests
+ _include_tests['Path'] = lambda el, md, entry, *args: \
+ entry.get("name") == el.get("name")
- def Cache(self, metadata):
- """Build a package dict for a given host."""
- if self.cache is None or self.cache[0] != metadata:
- cache = (metadata, self.__cacheobj__())
- if self.pnode is None:
- self.logger.error("Cache method called early for %s; "
- "forcing data load" % self.name)
- self.HandleEvent()
- return
- self.pnode.Match(metadata, cache[1])
- self.cache = cache
+ def Match(self, metadata, entry): # pylint: disable=W0221
+ """ Implementation of
+ :func:`Bcfg2.Server.Plugin.helpers.StructFile.Match` that
+ considers Path tags to allow ``info.xml`` files to set
+ different file metadata for different file paths. """
+ return self._do_match(metadata, entry)
- def __str__(self):
- return str(self.items)
+ def XMLMatch(self, metadata, entry): # pylint: disable=W0221
+ """ Implementation of
+ :func:`Bcfg2.Server.Plugin.helpers.StructFile.XMLMatch` that
+ considers Path tags to allow ``info.xml`` files to set
+ different file metadata for different file paths. """
+ return self._do_xmlmatch(metadata, entry)
+ def BindEntry(self, entry, metadata):
+ """ Bind the matching file metadata for this client and entry
+ to the entry.
-class InfoXML(XMLSrc):
- """ InfoXML files contain a
- :class:`Bcfg2.Server.Plugin.helpers.InfoNode` hierarchy that
- returns matching entries, suitable for use with :file:`info.xml`
- files."""
- __node__ = InfoNode
- __priority_required__ = False
+ :param entry: The abstract entry to bind the info to. This
+ will be modified in place
+ :type entry: lxml.etree._Element
+ :param metadata: The client metadata to get info for
+ :type metadata: Bcfg2.Server.Plugins.Metadata.ClientMetadata
+ :returns: None
+ """
+ fileinfo = self.Match(metadata, entry)
+ if len(fileinfo) == 0:
+ raise PluginExecutionError("No metadata found in %s for %s" %
+ (self.name, entry.get('name')))
+ elif len(fileinfo) > 1:
+ self.logger.warning("Multiple file metadata found in %s for %s" %
+ (self.name, entry.get('name')))
+ for attr, val in fileinfo[0].attrib.items():
+ entry.set(attr, val)
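BindEntry() boils down to copying the attributes of the best-matching Info tag onto the abstract Path entry. A simplified illustration of that final attribute copy, skipping the Group/Client/Path matching that selects the Info tag (the FileInfo/Info layout mirrors a typical info.xml):

import lxml.etree

info = lxml.etree.XML('<FileInfo>'
                      '<Info owner="root" group="root" mode="0644"/>'
                      '</FileInfo>')
entry = lxml.etree.Element('Path', name='/etc/motd')

for attr, val in info.find('Info').attrib.items():
    entry.set(attr, val)  # bound entry now carries the file metadata
print(lxml.etree.tostring(entry).decode())
# <Path name="/etc/motd" owner="root" group="root" mode="0644"/>
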
class XMLDirectoryBacked(DirectoryBacked):
@@ -897,6 +963,24 @@ class XMLDirectoryBacked(DirectoryBacked):
__child__ = XMLFileBacked
+class PriorityStructFile(StructFile):
+ """ A StructFile where each file has a priority, given as a
+ top-level XML attribute. """
+
+ def __init__(self, filename, should_monitor=False):
+ StructFile.__init__(self, filename, should_monitor=should_monitor)
+ self.priority = -1
+ __init__.__doc__ = StructFile.__init__.__doc__
+
+ def Index(self):
+ try:
+ self.priority = int(self.xdata.get('priority'))
+ except (ValueError, TypeError):
+ raise PluginExecutionError("Got bogus priority %s for file %s" %
+ (self.xdata.get('priority'), self.name))
+ Index.__doc__ = StructFile.Index.__doc__
+
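PriorityStructFile adds one piece of state: the integer priority read from the file's top-level priority attribute, with a hard failure on a missing or non-numeric value. That parse, in isolation:

import lxml.etree

def read_priority(xdata, name):
    """Return the integer priority attribute, or raise on a bogus value."""
    try:
        return int(xdata.get('priority'))
    except (ValueError, TypeError):
        raise ValueError("Got bogus priority %s for file %s" %
                         (xdata.get('priority'), name))

print(read_priority(lxml.etree.XML('<Rules priority="10"/>'), 'rules.xml'))  # 10
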
+
class PrioDir(Plugin, Generator, XMLDirectoryBacked):
""" PrioDir handles a directory of XML files where each file has a
set priority.
@@ -907,13 +991,13 @@ class PrioDir(Plugin, Generator, XMLDirectoryBacked):
#: The type of child objects to create for files contained within
#: the directory that is tracked. Default is
- #: :class:`Bcfg2.Server.Plugin.helpers.XMLSrc`
- __child__ = XMLSrc
+ #: :class:`Bcfg2.Server.Plugin.helpers.PriorityStructFile`
+ __child__ = PriorityStructFile
def __init__(self, core, datastore):
Plugin.__init__(self, core, datastore)
Generator.__init__(self)
- XMLDirectoryBacked.__init__(self, self.data, self.core.fam)
+ XMLDirectoryBacked.__init__(self, self.data)
__init__.__doc__ = Plugin.__init__.__doc__
def HandleEvent(self, event):
@@ -928,21 +1012,22 @@ class PrioDir(Plugin, Generator, XMLDirectoryBacked):
self.Entries[itype] = {child: self.BindEntry}
HandleEvent.__doc__ = XMLDirectoryBacked.HandleEvent.__doc__
- def _matches(self, entry, metadata, rules): # pylint: disable=W0613
- """ Whether or not a given entry has a matching entry in this
- PrioDir. By default this does strict matching (i.e., the
- entry name is in ``rules.keys()``), but this can be overridden
- to provide regex matching, etc.
+ def _matches(self, entry, metadata, candidate): # pylint: disable=W0613
+ """ Whether or not a given candidate matches the abstract
+ entry given. By default this does strict matching (i.e., the
+ entry name matches the candidate name), but this can be
+ overridden to provide regex matching, etc.
:param entry: The entry to find a match for
:type entry: lxml.etree._Element
:param metadata: The metadata to get attributes for
:type metadata: Bcfg2.Server.Plugins.Metadata.ClientMetadata
- :rules: A dict of rules to look in for a matching rule
- :type rules: dict
+ :param candidate: A candidate concrete entry to match with
+ :type candidate: lxml.etree._Element
:returns: bool
"""
- return entry.get('name') in rules
+ return (entry.tag == candidate.tag and
+ entry.get('name') == candidate.get('name'))
def BindEntry(self, entry, metadata):
""" Bind the attributes that apply to an entry to it. The
@@ -954,71 +1039,40 @@ class PrioDir(Plugin, Generator, XMLDirectoryBacked):
:type metadata: Bcfg2.Server.Plugins.Metadata.ClientMetadata
:returns: None
"""
- attrs = self.get_attrs(entry, metadata)
- for key, val in list(attrs.items()):
- entry.attrib[key] = val
-
- def get_attrs(self, entry, metadata):
- """ Get a list of attributes to add to the entry during the
- bind. This is a complex method, in that it both modifies the
- entry, and returns attributes that need to be added to the
- entry. That seems sub-optimal, and should probably be changed
- at some point. Namely:
-
- * The return value includes all XML attributes that need to be
- added to the entry, but it does not add them.
- * If text contents or child tags need to be added to the
- entry, they are added to the entry in place.
-
- :param entry: The entry to add attributes to.
- :type entry: lxml.etree._Element
- :param metadata: The metadata to get attributes for
- :type metadata: Bcfg2.Server.Plugins.Metadata.ClientMetadata
- :returns: dict of <attr name>:<attr value>
- :raises: :class:`Bcfg2.Server.Plugin.exceptions.PluginExecutionError`
- """
+ matching = []
for src in self.entries.values():
- src.Cache(metadata)
-
- matching = [src for src in list(self.entries.values())
- if (src.cache and
- entry.tag in src.cache[1] and
- self._matches(entry, metadata,
- src.cache[1][entry.tag]))]
+ for candidate in src.XMLMatch(metadata).xpath("//%s" % entry.tag):
+ if self._matches(entry, metadata, candidate):
+ matching.append((src, candidate))
if len(matching) == 0:
raise PluginExecutionError("No matching source for entry when "
- "retrieving attributes for %s(%s)" %
- (entry.tag, entry.attrib.get('name')))
+ "retrieving attributes for %s:%s" %
+ (entry.tag, entry.get('name')))
elif len(matching) == 1:
- index = 0
+ data = matching[0][1]
else:
- prio = [int(src.priority) for src in matching]
- if prio.count(max(prio)) > 1:
- msg = "Found conflicting sources with same priority for " + \
- "%s:%s for %s" % (entry.tag, entry.get("name"),
- metadata.hostname)
+ prio = [int(m[0].priority) for m in matching]
+ priority = max(prio)
+ if prio.count(priority) > 1:
+ msg = "Found conflicting sources with same priority (%s) " \
+ "for %s:%s for %s" % (priority, entry.tag,
+ entry.get("name"), metadata.hostname)
self.logger.error(msg)
- self.logger.error([item.name for item in matching])
- self.logger.error("Priority was %s" % max(prio))
+ self.logger.error([m[0].name for m in matching])
raise PluginExecutionError(msg)
- index = prio.index(max(prio))
- for rname in list(matching[index].cache[1][entry.tag].keys()):
- if self._matches(entry, metadata, [rname]):
- data = matching[index].cache[1][entry.tag][rname]
- break
- else:
- # Fall back on __getitem__. Required if override used
- data = matching[index].cache[1][entry.tag][entry.get('name')]
- if '__text__' in data:
- entry.text = data['__text__']
- if '__children__' in data:
- for item in data['__children__']:
- entry.append(copy.copy(item))
+ for src, candidate in matching:
+ if int(src.priority) == priority:
+ data = candidate
+ break
+
+ entry.text = data.text
+ for item in data.getchildren():
+ entry.append(copy.copy(item))
- return dict([(key, data[key])
- for key in list(data.keys())
- if not key.startswith('__')])
+ for key, val in list(data.attrib.items()):
+ if key not in entry.attrib:
+ entry.attrib[key] = val
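With the move to XMLMatch(), PrioDir resolves conflicts on concrete candidates directly: the candidate from the highest-priority source wins, and a tie at the top priority is a hard error. The selection logic, reduced to (priority, candidate) pairs (a simplification of the (source, element) pairs collected above):

def pick_candidate(matching):
    """Pick the candidate from the highest-priority source; a tie is an error."""
    if not matching:
        raise ValueError("no matching source")
    best = max(prio for prio, _ in matching)
    winners = [cand for prio, cand in matching if prio == best]
    if len(winners) > 1:
        raise ValueError("conflicting sources with same priority %s" % best)
    return winners[0]

print(pick_candidate([(10, 'a'), (50, 'b'), (30, 'c')]))  # b
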
class Specificity(CmpMixin):
@@ -1107,7 +1161,7 @@ class Specificity(CmpMixin):
return "".join(rv)
-class SpecificData(object):
+class SpecificData(Debuggable):
""" A file that is specific to certain clients, groups, or all
clients. """
@@ -1123,6 +1177,7 @@ class SpecificData(object):
:param encoding: The encoding to use for data in this file
:type encoding: string
"""
+ Debuggable.__init__(self)
self.name = name
self.specific = specific
self.data = None
@@ -1144,7 +1199,7 @@ class SpecificData(object):
except UnicodeDecodeError:
self.data = open(self.name, mode='rb').read()
except: # pylint: disable=W0201
- LOGGER.error("Failed to read file %s" % self.name)
+ self.logger.error("Failed to read file %s" % self.name)
class EntrySet(Debuggable):
@@ -1213,7 +1268,7 @@ class EntrySet(Debuggable):
self.path = path
self.entry_type = entry_type
self.entries = {}
- self.metadata = DEFAULT_FILE_METADATA.copy()
+ self.metadata = default_path_metadata()
self.infoxml = None
self.encoding = encoding
@@ -1290,7 +1345,7 @@ class EntrySet(Debuggable):
"""
action = event.code2str()
- if event.filename in ['info', 'info.xml', ':info']:
+ if event.filename == 'info.xml':
if action in ['exists', 'created', 'changed']:
self.update_metadata(event)
elif action == 'deleted':
@@ -1395,8 +1450,8 @@ class EntrySet(Debuggable):
return Specificity(**kwargs)
def update_metadata(self, event):
- """ Process changes to or creation of info, :info, and
- info.xml files for the EntrySet.
+ """ Process changes to or creation of info.xml files for the
+ EntrySet.
:param event: An event that applies to an info handled by this
EntrySet
@@ -1408,24 +1463,9 @@ class EntrySet(Debuggable):
if not self.infoxml:
self.infoxml = InfoXML(fpath)
self.infoxml.HandleEvent(event)
- elif event.filename in [':info', 'info']:
- for line in open(fpath).readlines():
- match = INFO_REGEX.match(line)
- if not match:
- self.logger.warning("Failed to match line in %s: %s" %
- (fpath, line))
- continue
- else:
- mgd = match.groupdict()
- for key, value in list(mgd.items()):
- if value:
- self.metadata[key] = value
- if len(self.metadata['mode']) == 3:
- self.metadata['mode'] = "0%s" % self.metadata['mode']
def reset_metadata(self, event):
- """ Reset metadata to defaults if info. :info, or info.xml are
- removed.
+ """ Reset metadata to defaults if info.xml is removed.
:param event: An event that applies to an info handled by this
EntrySet
@@ -1434,12 +1474,10 @@ class EntrySet(Debuggable):
"""
if event.filename == 'info.xml':
self.infoxml = None
- elif event.filename in [':info', 'info']:
- self.metadata = DEFAULT_FILE_METADATA.copy()
def bind_info_to_entry(self, entry, metadata):
- """ Shortcut to call :func:`bind_info` with the base
- info/info.xml for this EntrySet.
+ """ Bind the metadata for the given client in the base
+ info.xml for this EntrySet to the entry.
:param entry: The abstract entry to bind the info to. This
will be modified in place
@@ -1448,7 +1486,10 @@ class EntrySet(Debuggable):
:type metadata: Bcfg2.Server.Plugins.Metadata.ClientMetadata
:returns: None
"""
- bind_info(entry, metadata, infoxml=self.infoxml, default=self.metadata)
+ for attr, val in list(self.metadata.items()):
+ entry.set(attr, val)
+ if self.infoxml is not None:
+ self.infoxml.BindEntry(entry, metadata)
def bind_entry(self, entry, metadata):
""" Return the single best fully-bound entry from the set of
@@ -1498,6 +1539,8 @@ class GroupSpool(Plugin, Generator):
Plugin.__init__(self, core, datastore)
Generator.__init__(self)
+ self.fam = Bcfg2.Server.FileMonitor.get_fam()
+
#: See :class:`Bcfg2.Server.Plugins.interfaces.Generator` for
#: details on the Entries attribute.
self.Entries[self.entry_type] = {}
@@ -1644,5 +1687,5 @@ class GroupSpool(Plugin, Generator):
if not os.path.isdir(name):
self.logger.error("Failed to open directory %s" % name)
return
- reqid = self.core.fam.AddMonitor(name, self)
+ reqid = self.fam.AddMonitor(name, self)
self.handles[reqid] = relative
diff --git a/src/lib/Bcfg2/Server/Plugin/interfaces.py b/src/lib/Bcfg2/Server/Plugin/interfaces.py
index 0fd711be9..d460cc45d 100644
--- a/src/lib/Bcfg2/Server/Plugin/interfaces.py
+++ b/src/lib/Bcfg2/Server/Plugin/interfaces.py
@@ -531,6 +531,8 @@ class Version(Plugin):
#: be ".svn"
__vcs_metadata_path__ = None
+ __rmi__ = Plugin.__rmi__ + ['get_revision']
+
def __init__(self, core, datastore):
Plugin.__init__(self, core, datastore)
@@ -594,3 +596,33 @@ class ClientRunHooks(object):
:returns: None
"""
pass
+
+
+class ClientACLs(object):
+ """ ClientACLs are used to grant or deny access to different
+ XML-RPC calls based on client IP or metadata. """
+
+ def check_acl_ip(self, address, rmi): # pylint: disable=W0613
+ """ Check if the given IP address is authorized to make the
+ named XML-RPC call.
+
+ :param address: The address pair of the client to check ACLs for
+ :type address: tuple of (<ip address>, <port>)
+ :param rmi: The fully-qualified name of the RPC call
+ :type rmi: string
+ :returns: bool or None - True to allow, False to deny, None to
+ defer to metadata ACLs
+ """
+ return True
+
+ def check_acl_metadata(self, metadata, rmi): # pylint: disable=W0613
+ """ Check if the given client is authorized to make the named
+ XML-RPC call.
+
+ :param metadata: The client metadata
+ :type metadata: Bcfg2.Server.Plugins.Metadata.ClientMetadata
+ :param rmi: The fully-qualified name of the RPC call
+ :type rmi: string
+ :returns: bool
+ """
+ return True
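Taken together, the two hooks give a two-stage decision: the IP check runs first and can short-circuit with True or False, and only a None (defer) result falls through to the metadata check. A rough sketch of that combination with stand-in callables (the exact core wiring is not part of this diff):

def client_allowed(address, metadata, rmi, check_ip, check_metadata):
    """IP ACLs first; metadata ACLs only when the IP check defers (None)."""
    verdict = check_ip(address, rmi)
    if verdict is not None:
        return verdict  # True allows, False denies
    return check_metadata(metadata, rmi)
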
diff --git a/src/lib/Bcfg2/Server/Plugins/ACL.py b/src/lib/Bcfg2/Server/Plugins/ACL.py
new file mode 100644
index 000000000..3de3f767c
--- /dev/null
+++ b/src/lib/Bcfg2/Server/Plugins/ACL.py
@@ -0,0 +1,145 @@
+""" Support for client ACLs based on IP address and client metadata """
+
+import os
+import struct
+import socket
+import Bcfg2.Server.Plugin
+
+
+def rmi_names_equal(first, second):
+ """ Compare two XML-RPC method names and see if they match.
+ Resolves some limited wildcards; see
+ :ref:`server-plugins-misc-acl-wildcards` for details.
+
+ :param first: One of the ACLs to compare
+ :type first: string
+ :param second: The other ACL to compare
+ :type second: string
+ :returns: bool """
+ if first == second:
+ # identical names (wildcards included) trivially match
+ return True
+ if first is None or second is None:
+ return False
+ if '*' not in first + second:
+ # no wildcards, and not exactly equal
+ return False
+ first_parts = first.split('.')
+ second_parts = second.split('.')
+ if len(first_parts) != len(second_parts):
+ return False
+ for i in range(len(first_parts)):
+ if (first_parts[i] != second_parts[i] and first_parts[i] != '*' and
+ second_parts[i] != '*'):
+ return False
+ return True
+
+
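A few illustrative comparisons against the wildcard rules above (the method names are just example strings; the import path matches the new plugin module):

from Bcfg2.Server.Plugins.ACL import rmi_names_equal

print(rmi_names_equal('Probes.xmlrpc_call', 'Probes.*'))        # True
print(rmi_names_equal('Packages.Refresh', 'Probes.*'))          # False
print(rmi_names_equal('Packages.Refresh', 'Packages.Refresh'))  # True
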
+def ip2int(ip):
+ """ convert a dotted-quad IP address into an integer
+ representation of the same """
+ return struct.unpack('>L', socket.inet_pton(socket.AF_INET, ip))[0]
+
+
+def ip_matches(ip, entry):
+ """ Return True if the given IP matches the IP or IP and netmask
+ in the given ACL entry; False otherwise """
+ if entry.get("netmask"):
+ try:
+ mask = int("1" * int(entry.get("netmask")) +
+ "0" * (32 - int(entry.get("netmask"))), 2)
+ except ValueError:
+ mask = ip2int(entry.get("netmask"))
+ return ip2int(ip) & mask == ip2int(entry.get("address")) & mask
+ elif entry.get("address") is None:
+ # no address, no netmask -- match all
+ return True
+ elif ip == entry.get("address"):
+ # just a plain ip address
+ return True
+ return False
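The netmask handling accepts either a prefix length or a dotted-quad mask and compares the masked addresses. Illustrative checks against an Allow entry like those in ip.xml (tag and attribute names as defined above):

import lxml.etree
from Bcfg2.Server.Plugins.ACL import ip_matches

allow = lxml.etree.Element('Allow', address='192.168.1.0', netmask='24')
print(ip_matches('192.168.1.10', allow))  # True: same /24 network
print(ip_matches('192.168.2.10', allow))  # False: outside the masked range
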
+
+
+class IPACLFile(Bcfg2.Server.Plugin.XMLFileBacked):
+ """ representation of ACL ip.xml, for IP-based ACLs """
+ actions = dict(Allow=True,
+ Deny=False,
+ Defer=None)
+
+ def check_acl(self, address, rmi):
+ """ Check a client address against the ACL list """
+ if not len(self.entries):
+ # default defer if no ACLs are defined.
+ self.debug_log("ACL: %s requests %s: No IP ACLs, defer" %
+ (address, rmi))
+ return self.actions["Defer"]
+ for entry in self.entries:
+ if (ip_matches(address, entry) and
+ rmi_names_equal(entry.get("method"), rmi)):
+ self.debug_log("ACL: %s requests %s: Found matching IP ACL, "
+ "%s" % (address, rmi, entry.tag.lower()))
+ return self.actions[entry.tag]
+ if address == "127.0.0.1":
+ self.debug_log("ACL: %s requests %s: No matching IP ACLs, "
+ "localhost allowed" % (address, rmi))
+ return self.actions['Allow'] # default allow for localhost
+
+ self.debug_log("ACL: %s requests %s: No matching IP ACLs, defer" %
+ (address, rmi))
+ return self.actions["Defer"] # default defer for other machines
+
+
+class MetadataACLFile(Bcfg2.Server.Plugin.StructFile):
+ """ representation of ACL metadata.xml, for metadata-based ACLs """
+ def check_acl(self, metadata, rmi):
+ """ check client metadata against the ACL list """
+ if not len(self.entries):
+ # default allow if no ACLs are defined.
+ self.debug_log("ACL: %s requests %s: No metadata ACLs, allow" %
+ (metadata.hostname, rmi))
+ return True
+ for el in self.Match(metadata):
+ if rmi_names_equal(el.get("method"), rmi):
+ self.debug_log("ACL: %s requests %s: Found matching metadata "
+ "ACL, %s" % (metadata.hostname, rmi,
+ el.tag.lower()))
+ return el.tag == "Allow"
+ if metadata.hostname in ['localhost', 'localhost.localdomain']:
+ # default allow for localhost
+ self.debug_log("ACL: %s requests %s: No matching metadata ACLs, "
+ "localhost allowed" % (metadata.hostname, rmi))
+ return True
+ self.debug_log("ACL: %s requests %s: No matching metadata ACLs, deny" %
+ (metadata.hostname, rmi))
+ return False # default deny for other machines
+
+
+class ACL(Bcfg2.Server.Plugin.Plugin,
+ Bcfg2.Server.Plugin.ClientACLs):
+ """ allow connections to bcfg-server based on IP address """
+
+ def __init__(self, core, datastore):
+ Bcfg2.Server.Plugin.Plugin.__init__(self, core, datastore)
+ Bcfg2.Server.Plugin.ClientACLs.__init__(self)
+ self.ip_acls = IPACLFile(os.path.join(self.data, 'ip.xml'),
+ should_monitor=True)
+ self.metadata_acls = MetadataACLFile(os.path.join(self.data,
+ 'metadata.xml'),
+ should_monitor=True)
+
+ def check_acl_ip(self, address, rmi):
+ self.debug_log("ACL: %s requests %s: Checking IP ACLs" %
+ (address[0], rmi))
+ return self.ip_acls.check_acl(address[0], rmi)
+
+ def check_acl_metadata(self, metadata, rmi):
+ self.debug_log("ACL: %s requests %s: Checking metadata ACLs" %
+ (metadata.hostname, rmi))
+ return self.metadata_acls.check_acl(metadata, rmi)
+
+ def set_debug(self, debug):
+ rv = Bcfg2.Server.Plugin.Plugin.set_debug(self, debug)
+ self.ip_acls.set_debug(debug)
+ self.metadata_acls.set_debug(debug)
+ return rv
+ set_debug.__doc__ = Bcfg2.Server.Plugin.Plugin.set_debug.__doc__
diff --git a/src/lib/Bcfg2/Server/Plugins/Account.py b/src/lib/Bcfg2/Server/Plugins/Account.py
deleted file mode 100644
index fd49d3655..000000000
--- a/src/lib/Bcfg2/Server/Plugins/Account.py
+++ /dev/null
@@ -1,102 +0,0 @@
-"""This handles authentication setup."""
-
-import Bcfg2.Server.Plugin
-
-
-class Account(Bcfg2.Server.Plugin.Plugin,
- Bcfg2.Server.Plugin.Generator):
- """This module generates account config files,
- based on an internal data repo:
- static.(passwd|group|limits.conf) -> static entries
- dyn.(passwd|group) -> dynamic entries (usually acquired from yp or somesuch)
- useraccess -> users to be granted login access on some hosts
- superusers -> users to be granted root privs on all hosts
- rootlike -> users to be granted root privs on some hosts
-
- """
- name = 'Account'
- __author__ = 'bcfg-dev@mcs.anl.gov'
- deprecated = True
-
- def __init__(self, core, datastore):
- Bcfg2.Server.Plugin.Plugin.__init__(self, core, datastore)
- Bcfg2.Server.Plugin.Generator.__init__(self)
- self.Entries = {'ConfigFile': {'/etc/passwd': self.from_yp_cb,
- '/etc/group': self.from_yp_cb,
- '/etc/security/limits.conf': self.gen_limits_cb,
- '/root/.ssh/authorized_keys': self.gen_root_keys_cb,
- '/etc/sudoers': self.gen_sudoers}}
- try:
- self.repository = Bcfg2.Server.Plugin.DirectoryBacked(self.data,
- self.core.fam)
- except:
- self.logger.error("Failed to load repos: %s, %s" % \
- (self.data, "%s/ssh" % (self.data)))
- raise Bcfg2.Server.Plugin.PluginInitError
-
- def from_yp_cb(self, entry, metadata):
- """Build password file from cached yp data."""
- fname = entry.attrib['name'].split('/')[-1]
- entry.text = self.repository.entries["static.%s" % (fname)].data
- entry.text += self.repository.entries["dyn.%s" % (fname)].data
- perms = {'owner': 'root',
- 'group': 'root',
- 'mode': '0644'}
- [entry.attrib.__setitem__(key, value) for (key, value) in \
- list(perms.items())]
-
- def gen_limits_cb(self, entry, metadata):
- """Build limits entries based on current ACLs."""
- entry.text = self.repository.entries["static.limits.conf"].data
- superusers = self.repository.entries["superusers"].data.split()
- useraccess = [line.split(':') for line in \
- self.repository.entries["useraccess"].data.split()]
- users = [user for (user, host) in \
- useraccess if host == metadata.hostname.split('.')[0]]
- perms = {'owner': 'root',
- 'group': 'root',
- 'mode': '0600'}
- [entry.attrib.__setitem__(key, value) for (key, value) in \
- list(perms.items())]
- entry.text += "".join(["%s hard maxlogins 1024\n" % uname for uname in superusers + users])
- if "*" not in users:
- entry.text += "* hard maxlogins 0\n"
-
- def gen_root_keys_cb(self, entry, metadata):
- """Build root authorized keys file based on current ACLs."""
- superusers = self.repository.entries['superusers'].data.split()
- try:
- rootlike = [line.split(':', 1) for line in \
- self.repository.entries['rootlike'].data.split()]
- superusers += [user for (user, host) in rootlike \
- if host == metadata.hostname.split('.')[0]]
- except:
- pass
- rdata = self.repository.entries
- entry.text = "".join([rdata["%s.key" % user].data for user \
- in superusers if \
- ("%s.key" % user) in rdata])
- perms = {'owner': 'root',
- 'group': 'root',
- 'mode': '0600'}
- [entry.attrib.__setitem__(key, value) for (key, value) \
- in list(perms.items())]
-
- def gen_sudoers(self, entry, metadata):
- """Build root authorized keys file based on current ACLs."""
- superusers = self.repository.entries['superusers'].data.split()
- try:
- rootlike = [line.split(':', 1) for line in \
- self.repository.entries['rootlike'].data.split()]
- superusers += [user for (user, host) in rootlike \
- if host == metadata.hostname.split('.')[0]]
- except:
- pass
- entry.text = self.repository.entries['static.sudoers'].data
- entry.text += "".join(["%s ALL=(ALL) ALL\n" % uname \
- for uname in superusers])
- perms = {'owner': 'root',
- 'group': 'root',
- 'mode': '0440'}
- [entry.attrib.__setitem__(key, value) for (key, value) \
- in list(perms.items())]
diff --git a/src/lib/Bcfg2/Server/Plugins/Base.py b/src/lib/Bcfg2/Server/Plugins/Base.py
deleted file mode 100644
index a18204d60..000000000
--- a/src/lib/Bcfg2/Server/Plugins/Base.py
+++ /dev/null
@@ -1,33 +0,0 @@
-"""This module sets up a base list of configuration entries."""
-
-import copy
-import lxml.etree
-import Bcfg2.Server.Plugin
-from itertools import chain
-
-
-class Base(Bcfg2.Server.Plugin.Plugin,
- Bcfg2.Server.Plugin.Structure,
- Bcfg2.Server.Plugin.XMLDirectoryBacked):
- """This Structure is good for the pile of independent configs
- needed for most actual systems.
- """
- name = 'Base'
- __author__ = 'bcfg-dev@mcs.anl.gov'
- __child__ = Bcfg2.Server.Plugin.StructFile
- deprecated = True
-
- def __init__(self, core, datastore):
- Bcfg2.Server.Plugin.Plugin.__init__(self, core, datastore)
- Bcfg2.Server.Plugin.Structure.__init__(self)
- Bcfg2.Server.Plugin.XMLDirectoryBacked.__init__(self, self.data,
- self.core.fam)
-
- def BuildStructures(self, metadata):
- """Build structures for client described by metadata."""
- ret = lxml.etree.Element("Independent", version='2.0')
- fragments = list(chain(*[base.Match(metadata)
- for base in list(self.entries.values())]))
- for frag in fragments:
- ret.append(copy.copy(frag))
- return [ret]
diff --git a/src/lib/Bcfg2/Server/Plugins/Bundler.py b/src/lib/Bcfg2/Server/Plugins/Bundler.py
index eef176cca..2473a3ed2 100644
--- a/src/lib/Bcfg2/Server/Plugins/Bundler.py
+++ b/src/lib/Bcfg2/Server/Plugins/Bundler.py
@@ -1,80 +1,41 @@
"""This provides bundle clauses with translation functionality."""
-import copy
-import logging
-import lxml.etree
import os
-import os.path
import re
import sys
+import copy
import Bcfg2.Server
import Bcfg2.Server.Plugin
import Bcfg2.Server.Lint
-
-try:
- import genshi.template.base
- from Bcfg2.Server.Plugins.TGenshi import removecomment, TemplateFile
- HAS_GENSHI = True
-except ImportError:
- HAS_GENSHI = False
-
-
-SETUP = None
+from genshi.template import TemplateError
class BundleFile(Bcfg2.Server.Plugin.StructFile):
""" Representation of a bundle XML file """
- def get_xml_value(self, metadata):
- """ get the XML data that applies to the given client """
- bundlename = os.path.splitext(os.path.basename(self.name))[0]
- bundle = lxml.etree.Element('Bundle', name=bundlename)
- for item in self.Match(metadata):
- bundle.append(copy.copy(item))
- return bundle
-
-
-if HAS_GENSHI:
- class BundleTemplateFile(TemplateFile,
- Bcfg2.Server.Plugin.StructFile):
- """ Representation of a Genshi-templated bundle XML file """
-
- def __init__(self, name, specific, encoding):
- TemplateFile.__init__(self, name, specific, encoding)
- Bcfg2.Server.Plugin.StructFile.__init__(self, name)
- self.logger = logging.getLogger(name)
-
- def get_xml_value(self, metadata):
- """ get the rendered XML data that applies to the given
- client """
- if not hasattr(self, 'template'):
- msg = "No parsed template information for %s" % self.name
- self.logger.error(msg)
- raise Bcfg2.Server.Plugin.PluginExecutionError(msg)
- stream = self.template.generate(
- metadata=metadata,
- repo=SETUP['repo']).filter(removecomment)
- data = lxml.etree.XML(stream.render('xml',
- strip_whitespace=False),
- parser=Bcfg2.Server.XMLParser)
- bundlename = os.path.splitext(os.path.basename(self.name))[0]
- bundle = lxml.etree.Element('Bundle', name=bundlename)
- for item in self.Match(metadata, data):
- bundle.append(copy.deepcopy(item))
- return bundle
-
- def Match(self, metadata, xdata): # pylint: disable=W0221
- """Return matching fragments of parsed template."""
- rv = []
- for child in xdata.getchildren():
- rv.extend(self._match(child, metadata))
- self.logger.debug("File %s got %d match(es)" % (self.name,
- len(rv)))
- return rv
-
- class SGenshiTemplateFile(BundleTemplateFile):
- """ provided for backwards compat with the deprecated SGenshi
- plugin """
- pass
+ bundle_name_re = re.compile(r'^(?P<name>.*)\.(xml|genshi)$')
+
+ def __init__(self, filename, should_monitor=False):
+ Bcfg2.Server.Plugin.StructFile.__init__(self, filename,
+ should_monitor=should_monitor)
+ if self.name.endswith(".genshi"):
+ self.logger.warning("Bundler: %s: Bundle filenames ending with "
+ ".genshi are deprecated; add the Genshi XML "
+ "namespace to a .xml bundle instead" %
+ self.name)
+ __init__.__doc__ = Bcfg2.Server.Plugin.StructFile.__init__.__doc__
+
+ def Index(self):
+ Bcfg2.Server.Plugin.StructFile.Index(self)
+ if self.xdata.get("name"):
+ self.logger.warning("Bundler: %s: Explicitly specifying bundle "
+ "names is deprecated" % self.name)
+ Index.__doc__ = Bcfg2.Server.Plugin.StructFile.Index.__doc__
+
+ @property
+ def bundle_name(self):
+ """ The name of the bundle, as determined from the filename """
+ return self.bundle_name_re.match(
+ os.path.basename(self.name)).group("name")
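The bundle name is now derived purely from the filename, so ssh.xml and ssh.genshi both map to the bundle "ssh". The regex in isolation:

import re

bundle_name_re = re.compile(r'^(?P<name>.*)\.(xml|genshi)$')
print(bundle_name_re.match('ssh.xml').group('name'))     # ssh
print(bundle_name_re.match('ssh.genshi').group('name'))  # ssh
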
class Bundler(Bcfg2.Server.Plugin.Plugin,
@@ -83,64 +44,85 @@ class Bundler(Bcfg2.Server.Plugin.Plugin,
""" The bundler creates dependent clauses based on the
bundle/translation scheme from Bcfg1. """
__author__ = 'bcfg-dev@mcs.anl.gov'
- patterns = re.compile(r'^(?P<name>.*)\.(xml|genshi)$')
+ __child__ = BundleFile
def __init__(self, core, datastore):
Bcfg2.Server.Plugin.Plugin.__init__(self, core, datastore)
Bcfg2.Server.Plugin.Structure.__init__(self)
- self.encoding = core.setup['encoding']
- self.__child__ = self.template_dispatch
- Bcfg2.Server.Plugin.XMLDirectoryBacked.__init__(self, self.data,
- self.core.fam)
- global SETUP
- SETUP = core.setup
-
- def template_dispatch(self, name, _):
- """ Add the correct child entry type to Bundler depending on
- whether the XML file in question is a plain XML file or a
- templated bundle """
- bundle = lxml.etree.parse(name, parser=Bcfg2.Server.XMLParser)
- nsmap = bundle.getroot().nsmap
- if (name.endswith('.genshi') or
- ('py' in nsmap and
- nsmap['py'] == 'http://genshi.edgewall.org/')):
- if HAS_GENSHI:
- spec = Bcfg2.Server.Plugin.Specificity()
- return BundleTemplateFile(name, spec, self.encoding)
- else:
- raise Bcfg2.Server.Plugin.PluginExecutionError("Genshi not "
- "available: %s"
- % name)
- else:
- return BundleFile(name, self.fam)
+ Bcfg2.Server.Plugin.XMLDirectoryBacked.__init__(self, self.data)
+ #: Bundles by bundle name, rather than filename
+ self.bundles = dict()
+ __init__.__doc__ = Bcfg2.Server.Plugin.Plugin.__init__.__doc__
- def BuildStructures(self, metadata):
- """Build all structures for client (metadata)."""
- bundleset = []
+ def HandleEvent(self, event):
+ Bcfg2.Server.Plugin.XMLDirectoryBacked.HandleEvent(self, event)
- bundle_entries = {}
- for key, item in self.entries.items():
- bundle_entries.setdefault(
- self.patterns.match(os.path.basename(key)).group('name'),
- []).append(item)
+ self.bundles = dict([(b.bundle_name, b)
+ for b in self.entries.values()])
+ HandleEvent.__doc__ = \
+ Bcfg2.Server.Plugin.XMLDirectoryBacked.HandleEvent.__doc__
- for bundlename in metadata.bundles:
+ def BuildStructures(self, metadata):
+ bundleset = []
+ bundles = copy.copy(metadata.bundles)
+ bundles_added = set(bundles)
+ while bundles:
+ bundlename = bundles.pop()
try:
- entries = bundle_entries[bundlename]
+ bundle = self.bundles[bundlename]
except KeyError:
self.logger.error("Bundler: Bundle %s does not exist" %
bundlename)
continue
+
try:
- bundleset.append(entries[0].get_xml_value(metadata))
- except genshi.template.base.TemplateError:
+ data = bundle.XMLMatch(metadata)
+ except TemplateError:
err = sys.exc_info()[1]
self.logger.error("Bundler: Failed to render templated bundle "
"%s: %s" % (bundlename, err))
+ continue
except:
self.logger.error("Bundler: Unexpected bundler error for %s" %
bundlename, exc_info=1)
+ continue
+
+ if data.get("independent", "false").lower() == "true":
+ data.tag = "Independent"
+ del data.attrib['independent']
+
+ data.set("name", bundlename)
+
+ for child in data.findall("Bundle"):
+ if child.getchildren():
+ # XInclude'd bundle -- "flatten" it so there
+ # aren't extra Bundle tags, since other bits in
+ # Bcfg2 only handle the direct children of the
+ # top-level Bundle tag
+ if data.get("name"):
+ self.logger.warning("Bundler: In file XIncluded from "
+ "%s: Explicitly specifying "
+ "bundle names is deprecated" %
+ self.name)
+ for el in child.getchildren():
+ data.append(el)
+ data.remove(child)
+ elif child.get("name"):
+ # dependent bundle -- add it to the list of
+ # bundles for this client
+ if child.get("name") not in bundles_added:
+ bundles.append(child.get("name"))
+ bundles_added.add(child.get("name"))
+ data.remove(child)
+ else:
+ # neither a name nor children -- a useless empty tag
+ self.logger.warning("Bundler: Useless empty Bundle tag "
+ "in %s" % self.name)
+ data.remove(child)
+ bundleset.append(data)
return bundleset
+ BuildStructures.__doc__ = \
+ Bcfg2.Server.Plugin.Structure.BuildStructures.__doc__
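Dependent bundles (child <Bundle name="..."/> tags) are handled with a simple worklist: a name is pushed onto the list only the first time it is seen, so every bundle is rendered exactly once even with repeated or circular references. The traversal, reduced to plain data (the dependency mapping here is illustrative, not the real API):

def resolve_bundles(initial, dependencies):
    """Render order for bundles, pulling in each dependency exactly once."""
    bundles = list(initial)
    added = set(bundles)
    built = []
    while bundles:
        name = bundles.pop()
        built.append(name)
        for dep in dependencies.get(name, ()):
            if dep not in added:
                bundles.append(dep)
                added.add(dep)
    return built

print(resolve_bundles(['web'], {'web': ['ssh', 'ntp'], 'ssh': ['ntp']}))
# ['web', 'ntp', 'ssh'] -- ntp is pulled in once despite two references
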
class BundlerLint(Bcfg2.Server.Lint.ServerPlugin):
@@ -150,15 +132,15 @@ class BundlerLint(Bcfg2.Server.Lint.ServerPlugin):
def Run(self):
self.missing_bundles()
for bundle in self.core.plugins['Bundler'].entries.values():
- if (self.HandlesFile(bundle.name) and
- (not HAS_GENSHI or
- not isinstance(bundle, BundleTemplateFile))):
+ if self.HandlesFile(bundle.name):
self.bundle_names(bundle)
@classmethod
def Errors(cls):
return {"bundle-not-found": "error",
- "inconsistent-bundle-name": "warning"}
+ "unused-bundle": "warning",
+ "explicit-bundle-name": "error",
+ "genshi-extension-bundle": "error"}
def missing_bundles(self):
""" Find bundles listed in Metadata but not implemented in
@@ -170,31 +152,28 @@ class BundlerLint(Bcfg2.Server.Lint.ServerPlugin):
ref_bundles = set([b.get("name")
for b in groupdata.findall("//Bundle")])
- allbundles = self.core.plugins['Bundler'].entries.keys()
+ allbundles = self.core.plugins['Bundler'].bundles.keys()
for bundle in ref_bundles:
- xmlbundle = "%s.xml" % bundle
- genshibundle = "%s.genshi" % bundle
- if (xmlbundle not in allbundles and
- genshibundle not in allbundles):
+ if bundle not in allbundles:
self.LintError("bundle-not-found",
"Bundle %s referenced, but does not exist" %
bundle)
+ for bundle in allbundles:
+ if bundle not in ref_bundles:
+ self.LintError("unused-bundle",
+ "Bundle %s defined, but is not referenced "
+ "in Metadata" % bundle)
+
def bundle_names(self, bundle):
- """ Verify bundle name attribute matches filename.
-
- :param bundle: The bundle to verify
- :type bundle: Bcfg2.Server.Plugins.Bundler.BundleFile
- """
- try:
- xdata = lxml.etree.XML(bundle.data)
- except AttributeError:
- # genshi template
- xdata = lxml.etree.parse(bundle.template.filepath).getroot()
-
- fname = os.path.splitext(os.path.basename(bundle.name))[0]
- bname = xdata.get('name')
- if fname != bname:
- self.LintError("inconsistent-bundle-name",
- "Inconsistent bundle name: filename is %s, "
- "bundle name is %s" % (fname, bname))
+ """ Verify that deprecated bundle .genshi bundles and explicit
+ bundle names aren't used """
+ if bundle.xdata.get('name'):
+ self.LintError("explicit-bundle-name",
+ "Deprecated explicit bundle name in %s" %
+ bundle.name)
+
+ if bundle.name.endswith(".genshi"):
+ self.LintError("genshi-extension-bundle",
+ "Bundle %s uses deprecated .genshi extension" %
+ bundle.name)
diff --git a/src/lib/Bcfg2/Server/Plugins/Cfg/CfgAuthorizedKeysGenerator.py b/src/lib/Bcfg2/Server/Plugins/Cfg/CfgAuthorizedKeysGenerator.py
index 824d01023..a859da0ba 100644
--- a/src/lib/Bcfg2/Server/Plugins/Cfg/CfgAuthorizedKeysGenerator.py
+++ b/src/lib/Bcfg2/Server/Plugins/Cfg/CfgAuthorizedKeysGenerator.py
@@ -4,7 +4,7 @@ access. """
import lxml.etree
from Bcfg2.Server.Plugin import StructFile, PluginExecutionError
-from Bcfg2.Server.Plugins.Cfg import CfgGenerator, SETUP, CFG
+from Bcfg2.Server.Plugins.Cfg import CfgGenerator, CFG
from Bcfg2.Server.Plugins.Metadata import ClientMetadata
@@ -20,10 +20,6 @@ class CfgAuthorizedKeysGenerator(CfgGenerator, StructFile):
#: Handle authorized keys XML files
__basenames__ = ['authorizedkeys.xml', 'authorized_keys.xml']
- #: This handler is experimental, in part because it depends upon
- #: the (experimental) CfgPrivateKeyCreator handler
- experimental = True
-
def __init__(self, fname):
CfgGenerator.__init__(self, fname, None, None)
StructFile.__init__(self, fname)
@@ -35,9 +31,9 @@ class CfgAuthorizedKeysGenerator(CfgGenerator, StructFile):
def category(self):
""" The name of the metadata category that generated keys are
specific to """
- if (SETUP.cfp.has_section("sshkeys") and
- SETUP.cfp.has_option("sshkeys", "category")):
- return SETUP.cfp.get("sshkeys", "category")
+ if (self.setup.cfp.has_section("sshkeys") and
+ self.setup.cfp.has_option("sshkeys", "category")):
+ return self.setup.cfp.get("sshkeys", "category")
return None
def handle_event(self, event):
diff --git a/src/lib/Bcfg2/Server/Plugins/Cfg/CfgCatFilter.py b/src/lib/Bcfg2/Server/Plugins/Cfg/CfgCatFilter.py
deleted file mode 100644
index 49a5a85b3..000000000
--- a/src/lib/Bcfg2/Server/Plugins/Cfg/CfgCatFilter.py
+++ /dev/null
@@ -1,28 +0,0 @@
-""" Handle .cat files, which append lines to and remove lines from
-plaintext files """
-
-from Bcfg2.Server.Plugins.Cfg import CfgFilter
-
-
-class CfgCatFilter(CfgFilter):
- """ CfgCatFilter appends lines to and remove lines from plaintext
- :ref:`server-plugins-generators-Cfg` files"""
-
- #: Handle .cat files
- __extensions__ = ['cat']
-
- #: .cat files are deprecated
- deprecated = True
-
- def modify_data(self, entry, metadata, data):
- datalines = data.strip().split('\n')
- for line in self.data.split('\n'):
- if not line:
- continue
- if line.startswith('+'):
- datalines.append(line[1:])
- elif line.startswith('-'):
- if line[1:] in datalines:
- datalines.remove(line[1:])
- return "\n".join(datalines) + "\n"
- modify_data.__doc__ = CfgFilter.modify_data.__doc__
diff --git a/src/lib/Bcfg2/Server/Plugins/Cfg/CfgCheetahGenerator.py b/src/lib/Bcfg2/Server/Plugins/Cfg/CfgCheetahGenerator.py
index 724164cf5..4c8adceec 100644
--- a/src/lib/Bcfg2/Server/Plugins/Cfg/CfgCheetahGenerator.py
+++ b/src/lib/Bcfg2/Server/Plugins/Cfg/CfgCheetahGenerator.py
@@ -3,7 +3,7 @@
:ref:`server-plugins-generators-cfg` files. """
from Bcfg2.Server.Plugin import PluginExecutionError
-from Bcfg2.Server.Plugins.Cfg import CfgGenerator, SETUP
+from Bcfg2.Server.Plugins.Cfg import CfgGenerator
try:
from Cheetah.Template import Template
@@ -40,6 +40,6 @@ class CfgCheetahGenerator(CfgGenerator):
template.name = entry.get('realname', entry.get('name'))
template.path = entry.get('realname', entry.get('name'))
template.source_path = self.name
- template.repo = SETUP['repo']
+ template.repo = self.setup['repo']
return template.respond()
get_data.__doc__ = CfgGenerator.get_data.__doc__
diff --git a/src/lib/Bcfg2/Server/Plugins/Cfg/CfgDiffFilter.py b/src/lib/Bcfg2/Server/Plugins/Cfg/CfgDiffFilter.py
deleted file mode 100644
index da506a195..000000000
--- a/src/lib/Bcfg2/Server/Plugins/Cfg/CfgDiffFilter.py
+++ /dev/null
@@ -1,35 +0,0 @@
-""" Handle .diff files, which apply diffs to plaintext files """
-
-import os
-import tempfile
-from Bcfg2.Server.Plugin import PluginExecutionError
-from subprocess import Popen, PIPE
-from Bcfg2.Server.Plugins.Cfg import CfgFilter
-
-
-class CfgDiffFilter(CfgFilter):
- """ CfgDiffFilter applies diffs to plaintext
- :ref:`server-plugins-generators-Cfg` files """
-
- #: Handle .diff files
- __extensions__ = ['diff']
-
- #: .diff files are deprecated
- deprecated = True
-
- def modify_data(self, entry, metadata, data):
- basehandle, basename = tempfile.mkstemp()
- open(basename, 'w').write(data)
- os.close(basehandle)
-
- cmd = ["patch", "-u", "-f", basename]
- patch = Popen(cmd, stdin=PIPE, stdout=PIPE, stderr=PIPE)
- stderr = patch.communicate(input=self.data)[1]
- ret = patch.wait()
- output = open(basename, 'r').read()
- os.unlink(basename)
- if ret != 0:
- raise PluginExecutionError("Error applying diff %s: %s" %
- (self.name, stderr))
- return output
- modify_data.__doc__ = CfgFilter.modify_data.__doc__
diff --git a/src/lib/Bcfg2/Server/Plugins/Cfg/CfgEncryptedGenerator.py b/src/lib/Bcfg2/Server/Plugins/Cfg/CfgEncryptedGenerator.py
index 3b4703ddb..516eba2f6 100644
--- a/src/lib/Bcfg2/Server/Plugins/Cfg/CfgEncryptedGenerator.py
+++ b/src/lib/Bcfg2/Server/Plugins/Cfg/CfgEncryptedGenerator.py
@@ -2,10 +2,9 @@
:ref:`server-plugins-generators-cfg` files on the server. """
from Bcfg2.Server.Plugin import PluginExecutionError
-from Bcfg2.Server.Plugins.Cfg import CfgGenerator, SETUP
+from Bcfg2.Server.Plugins.Cfg import CfgGenerator
try:
- from Bcfg2.Encryption import bruteforce_decrypt, EVPError, \
- get_algorithm
+ from Bcfg2.Server.Encryption import bruteforce_decrypt, EVPError
HAS_CRYPTO = True
except ImportError:
HAS_CRYPTO = False
@@ -34,8 +33,7 @@ class CfgEncryptedGenerator(CfgGenerator):
return
# todo: let the user specify a passphrase by name
try:
- self.data = bruteforce_decrypt(self.data, setup=SETUP,
- algorithm=get_algorithm(SETUP))
+ self.data = bruteforce_decrypt(self.data)
except EVPError:
raise PluginExecutionError("Failed to decrypt %s" % self.name)
handle_event.__doc__ = CfgGenerator.handle_event.__doc__
diff --git a/src/lib/Bcfg2/Server/Plugins/Cfg/CfgEncryptedGenshiGenerator.py b/src/lib/Bcfg2/Server/Plugins/Cfg/CfgEncryptedGenshiGenerator.py
index 130652aef..0521485e8 100644
--- a/src/lib/Bcfg2/Server/Plugins/Cfg/CfgEncryptedGenshiGenerator.py
+++ b/src/lib/Bcfg2/Server/Plugins/Cfg/CfgEncryptedGenshiGenerator.py
@@ -1,32 +1,24 @@
""" Handle encrypted Genshi templates (.crypt.genshi or .genshi.crypt
files) """
+from genshi.template import TemplateLoader
from Bcfg2.Compat import StringIO
from Bcfg2.Server.Plugin import PluginExecutionError
-from Bcfg2.Server.Plugins.Cfg import SETUP
from Bcfg2.Server.Plugins.Cfg.CfgGenshiGenerator import CfgGenshiGenerator
try:
- from Bcfg2.Encryption import bruteforce_decrypt, get_algorithm
+ from Bcfg2.Server.Encryption import bruteforce_decrypt
HAS_CRYPTO = True
except ImportError:
HAS_CRYPTO = False
-try:
- from genshi.template import TemplateLoader
-except ImportError:
- # CfgGenshiGenerator will raise errors if genshi doesn't exist
- TemplateLoader = object # pylint: disable=C0103
-
class EncryptedTemplateLoader(TemplateLoader):
""" Subclass :class:`genshi.template.TemplateLoader` to decrypt
the data on the fly as it's read in using
- :func:`Bcfg2.Encryption.bruteforce_decrypt` """
+ :func:`Bcfg2.Server.Encryption.bruteforce_decrypt` """
def _instantiate(self, cls, fileobj, filepath, filename, encoding=None):
- plaintext = \
- StringIO(bruteforce_decrypt(fileobj.read(),
- algorithm=get_algorithm(SETUP)))
+ plaintext = StringIO(bruteforce_decrypt(fileobj.read()))
return TemplateLoader._instantiate(self, cls, plaintext, filepath,
filename, encoding=encoding)
diff --git a/src/lib/Bcfg2/Server/Plugins/Cfg/CfgExternalCommandVerifier.py b/src/lib/Bcfg2/Server/Plugins/Cfg/CfgExternalCommandVerifier.py
index 313e53ee9..d06b864ac 100644
--- a/src/lib/Bcfg2/Server/Plugins/Cfg/CfgExternalCommandVerifier.py
+++ b/src/lib/Bcfg2/Server/Plugins/Cfg/CfgExternalCommandVerifier.py
@@ -3,8 +3,8 @@
import os
import sys
import shlex
+from Bcfg2.Utils import Executor
from Bcfg2.Server.Plugin import PluginExecutionError
-from subprocess import Popen, PIPE
from Bcfg2.Server.Plugins.Cfg import CfgVerifier, CfgVerificationError
@@ -18,24 +18,16 @@ class CfgExternalCommandVerifier(CfgVerifier):
def __init__(self, name, specific, encoding):
CfgVerifier.__init__(self, name, specific, encoding)
self.cmd = []
+ self.exc = Executor(timeout=30)
__init__.__doc__ = CfgVerifier.__init__.__doc__
def verify_entry(self, entry, metadata, data):
try:
- proc = Popen(self.cmd, stdin=PIPE, stdout=PIPE, stderr=PIPE)
- out, err = proc.communicate(input=data)
- rv = proc.wait()
- if rv != 0:
- # pylint: disable=E1103
- raise CfgVerificationError(err.strip() or out.strip() or
- "Non-zero return value %s" % rv)
- # pylint: enable=E1103
- except CfgVerificationError:
- raise
- except:
- err = sys.exc_info()[1]
- raise CfgVerificationError("Error running external command "
- "verifier: %s" % err)
+ result = self.exc.run(self.cmd, inputdata=data)
+ if not result.success:
+ raise CfgVerificationError(result.error)
+ except OSError:
+ raise CfgVerificationError(sys.exc_info()[1])
verify_entry.__doc__ = CfgVerifier.verify_entry.__doc__
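The Executor call replaces the hand-rolled Popen plumbing that used to live here; its observable behaviour for this verifier is roughly the subprocess sketch below (a simplification under assumed semantics, not the Bcfg2.Utils.Executor API):

import subprocess

def verify_with_command(cmd, data):
    """Feed file data (bytes) to an external verifier; non-zero exit fails.

    The real Executor also enforces a 30-second timeout, omitted here.
    """
    proc = subprocess.Popen(cmd, stdin=subprocess.PIPE,
                            stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    out, err = proc.communicate(input=data)
    if proc.returncode != 0:
        raise ValueError(err.strip() or out.strip() or
                         "Non-zero return value %s" % proc.returncode)
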
def handle_event(self, event):
diff --git a/src/lib/Bcfg2/Server/Plugins/Cfg/CfgGenshiGenerator.py b/src/lib/Bcfg2/Server/Plugins/Cfg/CfgGenshiGenerator.py
index 83a5c1165..e056c871a 100644
--- a/src/lib/Bcfg2/Server/Plugins/Cfg/CfgGenshiGenerator.py
+++ b/src/lib/Bcfg2/Server/Plugins/Cfg/CfgGenshiGenerator.py
@@ -5,63 +5,41 @@
import re
import sys
import traceback
-from Bcfg2.Server.Plugin import PluginExecutionError
-from Bcfg2.Server.Plugins.Cfg import CfgGenerator, SETUP
-
-try:
- import genshi.core
- from genshi.template import TemplateLoader, NewTextTemplate
- from genshi.template.eval import UndefinedError, Suite
- #: True if Genshi libraries are available
- HAS_GENSHI = True
-
- def _genshi_removes_blank_lines():
- """ Genshi 0.5 uses the Python :mod:`compiler` package to
- compile genshi snippets to AST. Genshi 0.6 uses some bespoke
- magic, because compiler has been deprecated.
- :func:`compiler.parse` produces an AST that removes all excess
- whitespace (e.g., blank lines), while
- :func:`genshi.template.astutil.parse` does not. In order to
- determine which actual line of code an error occurs on, we
- need to know which is in use and how it treats blank lines.
- I've beat my head against this for hours and the best/only way
- I can find is to compile some genshi code with an error and
- see which line it's on."""
- code = """d = dict()
-
+from Bcfg2.Server.Plugin import PluginExecutionError, removecomment
+from Bcfg2.Server.Plugins.Cfg import CfgGenerator
+
+from genshi.template import TemplateLoader, NewTextTemplate
+from genshi.template.eval import UndefinedError, Suite
+
+
+def _genshi_removes_blank_lines():
+ """ Genshi 0.5 uses the Python :mod:`compiler` package to
+ compile genshi snippets to AST. Genshi 0.6 uses some bespoke
+ magic, because compiler has been deprecated.
+ :func:`compiler.parse` produces an AST that removes all excess
+ whitespace (e.g., blank lines), while
+ :func:`genshi.template.astutil.parse` does not. In order to
+ determine which actual line of code an error occurs on, we
+ need to know which is in use and how it treats blank lines.
+ I've beat my head against this for hours and the best/only way
+ I can find is to compile some genshi code with an error and
+ see which line it's on."""
+ code = """d = dict()
d['a']"""
- try:
- Suite(code).execute(dict())
- except KeyError:
- line = traceback.extract_tb(sys.exc_info()[2])[-1][1]
- if line == 2:
- return True
- else:
- return False
-
- #: True if Genshi removes all blank lines from a code block before
- #: executing it; False indicates that Genshi only removes leading
- #: and trailing blank lines. See
- #: :func:`_genshi_removes_blank_lines` for an explanation of this.
- GENSHI_REMOVES_BLANK_LINES = _genshi_removes_blank_lines()
-except ImportError:
- TemplateLoader = None # pylint: disable=C0103
- HAS_GENSHI = False
-
-
-def removecomment(stream):
- """ A Genshi filter that removes comments from the stream. This
- function is a generator.
-
- :param stream: The Genshi stream to remove comments from
- :type stream: genshi.core.Stream
- :returns: tuple of ``(kind, data, pos)``, as when iterating
- through a Genshi stream
- """
- for kind, data, pos in stream:
- if kind is genshi.core.COMMENT:
- continue
- yield kind, data, pos
+ try:
+ Suite(code).execute(dict())
+ except KeyError:
+ line = traceback.extract_tb(sys.exc_info()[2])[-1][1]
+ if line == 2:
+ return True
+ else:
+ return False
+
+#: True if Genshi removes all blank lines from a code block before
+#: executing it; False indicates that Genshi only removes leading
+#: and trailing blank lines. See
+#: :func:`_genshi_removes_blank_lines` for an explanation of this.
+GENSHI_REMOVES_BLANK_LINES = _genshi_removes_blank_lines()
class CfgGenshiGenerator(CfgGenerator):
@@ -94,8 +72,6 @@ class CfgGenshiGenerator(CfgGenerator):
def __init__(self, fname, spec, encoding):
CfgGenerator.__init__(self, fname, spec, encoding)
- if not HAS_GENSHI:
- raise PluginExecutionError("Genshi is not available")
self.template = None
self.loader = self.__loader_cls__(max_cache_size=0)
__init__.__doc__ = CfgGenerator.__init__.__doc__
@@ -106,12 +82,12 @@ class CfgGenshiGenerator(CfgGenerator):
self.name)
fname = entry.get('realname', entry.get('name'))
- stream = \
- self.template.generate(name=fname,
- metadata=metadata,
- path=self.name,
- source_path=self.name,
- repo=SETUP['repo']).filter(removecomment)
+ stream = self.template.generate(
+ name=fname,
+ metadata=metadata,
+ path=self.name,
+ source_path=self.name,
+ repo=self.setup['repo']).filter(removecomment)
try:
try:
return stream.render('text', encoding=self.encoding,
diff --git a/src/lib/Bcfg2/Server/Plugins/Cfg/CfgInfoXML.py b/src/lib/Bcfg2/Server/Plugins/Cfg/CfgInfoXML.py
index 3b6fc8fa0..886b3993b 100644
--- a/src/lib/Bcfg2/Server/Plugins/Cfg/CfgInfoXML.py
+++ b/src/lib/Bcfg2/Server/Plugins/Cfg/CfgInfoXML.py
@@ -1,6 +1,6 @@
""" Handle info.xml files """
-from Bcfg2.Server.Plugin import PluginExecutionError, InfoXML
+from Bcfg2.Server.Plugin import InfoXML
from Bcfg2.Server.Plugins.Cfg import CfgInfo
@@ -17,21 +17,9 @@ class CfgInfoXML(CfgInfo):
__init__.__doc__ = CfgInfo.__init__.__doc__
def bind_info_to_entry(self, entry, metadata):
- mdata = dict()
- self.infoxml.pnode.Match(metadata, mdata, entry=entry)
- if 'Info' not in mdata:
- raise PluginExecutionError("Failed to set metadata for file %s" %
- entry.get('name'))
- self._set_info(entry, mdata['Info'][None])
+ self.infoxml.BindEntry(entry, metadata)
bind_info_to_entry.__doc__ = CfgInfo.bind_info_to_entry.__doc__
def handle_event(self, event):
self.infoxml.HandleEvent()
handle_event.__doc__ = CfgInfo.handle_event.__doc__
-
- def _set_info(self, entry, info):
- CfgInfo._set_info(self, entry, info)
- if '__children__' in info:
- for child in info['__children__']:
- entry.append(child)
- _set_info.__doc__ = CfgInfo._set_info.__doc__
diff --git a/src/lib/Bcfg2/Server/Plugins/Cfg/CfgLegacyInfo.py b/src/lib/Bcfg2/Server/Plugins/Cfg/CfgLegacyInfo.py
deleted file mode 100644
index 5122d9aa1..000000000
--- a/src/lib/Bcfg2/Server/Plugins/Cfg/CfgLegacyInfo.py
+++ /dev/null
@@ -1,46 +0,0 @@
-""" Handle info and :info files """
-
-import Bcfg2.Server.Plugin
-from Bcfg2.Server.Plugins.Cfg import CfgInfo
-
-
-class CfgLegacyInfo(CfgInfo):
- """ CfgLegacyInfo handles :file:`info` and :file:`:info` files for
- :ref:`server-plugins-generators-cfg` """
-
- #: Handle :file:`info` and :file:`:info`
- __basenames__ = ['info', ':info']
-
- #: CfgLegacyInfo is deprecated. Use
- #: :class:`Bcfg2.Server.Plugins.Cfg.CfgInfoXML.CfgInfoXML` instead.
- deprecated = True
-
- def __init__(self, path):
- CfgInfo.__init__(self, path)
- self.path = path
-
- #: The set of info metadata stored in the file
- self.metadata = None
- __init__.__doc__ = CfgInfo.__init__.__doc__
-
- def bind_info_to_entry(self, entry, metadata):
- self._set_info(entry, self.metadata)
- bind_info_to_entry.__doc__ = CfgInfo.bind_info_to_entry.__doc__
-
- def handle_event(self, event):
- if event.code2str() == 'deleted':
- return
- self.metadata = dict()
- for line in open(self.path).readlines():
- match = Bcfg2.Server.Plugin.INFO_REGEX.match(line)
- if not match:
- self.logger.warning("Failed to parse line in %s: %s" %
- (event.filename, line))
- continue
- else:
- for key, value in list(match.groupdict().items()):
- if value:
- self.metadata[key] = value
- if ('mode' in self.metadata and len(self.metadata['mode']) == 3):
- self.metadata['mode'] = "0%s" % self.metadata['mode']
- handle_event.__doc__ = CfgInfo.handle_event.__doc__
diff --git a/src/lib/Bcfg2/Server/Plugins/Cfg/CfgPrivateKeyCreator.py b/src/lib/Bcfg2/Server/Plugins/Cfg/CfgPrivateKeyCreator.py
index c7b62f352..862726788 100644
--- a/src/lib/Bcfg2/Server/Plugins/Cfg/CfgPrivateKeyCreator.py
+++ b/src/lib/Bcfg2/Server/Plugins/Cfg/CfgPrivateKeyCreator.py
@@ -3,12 +3,13 @@
import os
import shutil
import tempfile
-import subprocess
-from Bcfg2.Server.Plugin import PluginExecutionError, StructFile
-from Bcfg2.Server.Plugins.Cfg import CfgCreator, CfgCreationError, SETUP
+from Bcfg2.Utils import Executor
+from Bcfg2.Options import get_option_parser
+from Bcfg2.Server.Plugin import StructFile
+from Bcfg2.Server.Plugins.Cfg import CfgCreator, CfgCreationError
from Bcfg2.Server.Plugins.Cfg.CfgPublicKeyCreator import CfgPublicKeyCreator
try:
- import Bcfg2.Encryption
+ import Bcfg2.Server.Encryption
HAS_CRYPTO = True
except ImportError:
HAS_CRYPTO = False
@@ -31,25 +32,27 @@ class CfgPrivateKeyCreator(CfgCreator, StructFile):
pubkey_path = os.path.dirname(self.name) + ".pub"
pubkey_name = os.path.join(pubkey_path, os.path.basename(pubkey_path))
self.pubkey_creator = CfgPublicKeyCreator(pubkey_name)
+ self.setup = get_option_parser()
+ self.cmd = Executor()
__init__.__doc__ = CfgCreator.__init__.__doc__
@property
def category(self):
""" The name of the metadata category that generated keys are
specific to """
- if (SETUP.cfp.has_section("sshkeys") and
- SETUP.cfp.has_option("sshkeys", "category")):
- return SETUP.cfp.get("sshkeys", "category")
+ if (self.setup.cfp.has_section("sshkeys") and
+ self.setup.cfp.has_option("sshkeys", "category")):
+ return self.setup.cfp.get("sshkeys", "category")
return None
@property
def passphrase(self):
""" The passphrase used to encrypt private keys """
if (HAS_CRYPTO and
- SETUP.cfp.has_section("sshkeys") and
- SETUP.cfp.has_option("sshkeys", "passphrase")):
- return Bcfg2.Encryption.get_passphrases(SETUP)[
- SETUP.cfp.get("sshkeys", "passphrase")]
+ self.setup.cfp.has_section("sshkeys") and
+ self.setup.cfp.has_option("sshkeys", "passphrase")):
+ return Bcfg2.Server.Encryption.get_passphrases()[
+ self.setup.cfp.get("sshkeys", "passphrase")]
return None
def handle_event(self, event):
@@ -102,18 +105,17 @@ class CfgPrivateKeyCreator(CfgCreator, StructFile):
log_cmd.append("''")
self.debug_log("Cfg: Generating new SSH key pair: %s" %
" ".join(log_cmd))
- proc = subprocess.Popen(cmd, stdout=subprocess.PIPE,
- stderr=subprocess.PIPE)
- err = proc.communicate()[1]
- if proc.wait():
+ result = self.cmd.run(cmd)
+ if not result.success:
raise CfgCreationError("Cfg: Failed to generate SSH key pair "
"at %s for %s: %s" %
- (filename, metadata.hostname, err))
- elif err:
+ (filename, metadata.hostname,
+ result.error))
+ elif result.stderr:
self.logger.warning("Cfg: Generated SSH key pair at %s for %s "
"with errors: %s" % (filename,
metadata.hostname,
- err))
+ result.stderr))
return filename
except:
shutil.rmtree(tempdir)
@@ -194,10 +196,8 @@ class CfgPrivateKeyCreator(CfgCreator, StructFile):
privkey = open(filename).read()
if HAS_CRYPTO and self.passphrase:
self.debug_log("Cfg: Encrypting key data at %s" % filename)
- privkey = Bcfg2.Encryption.ssl_encrypt(
- privkey,
- self.passphrase,
- algorithm=Bcfg2.Encryption.get_algorithm(SETUP))
+ privkey = Bcfg2.Server.Encryption.ssl_encrypt(privkey,
+ self.passphrase)
specificity['ext'] = '.crypt'
self.write_data(privkey, **specificity)
@@ -209,50 +209,3 @@ class CfgPrivateKeyCreator(CfgCreator, StructFile):
finally:
shutil.rmtree(os.path.dirname(filename))
# pylint: enable=W0221
-
- def Index(self):
- StructFile.Index(self)
- if HAS_CRYPTO:
- strict = self.xdata.get(
- "decrypt",
- SETUP.cfp.get(Bcfg2.Encryption.CFG_SECTION, "decrypt",
- default="strict")) == "strict"
- for el in self.xdata.xpath("//*[@encrypted]"):
- try:
- el.text = self._decrypt(el).encode('ascii',
- 'xmlcharrefreplace')
- except UnicodeDecodeError:
- self.logger.info("Cfg: Decrypted %s to gibberish, skipping"
- % el.tag)
- except Bcfg2.Encryption.EVPError:
- msg = "Cfg: Failed to decrypt %s element in %s" % \
- (el.tag, self.name)
- if strict:
- raise PluginExecutionError(msg)
- else:
- self.logger.warning(msg)
- Index.__doc__ = StructFile.Index.__doc__
-
- def _decrypt(self, element):
- """ Decrypt a single encrypted element """
- if not element.text or not element.text.strip():
- return
- passes = Bcfg2.Encryption.get_passphrases(SETUP)
- try:
- passphrase = passes[element.get("encrypted")]
- try:
- return Bcfg2.Encryption.ssl_decrypt(
- element.text,
- passphrase,
- algorithm=Bcfg2.Encryption.get_algorithm(SETUP))
- except Bcfg2.Encryption.EVPError:
- # error is raised below
- pass
- except KeyError:
- # bruteforce_decrypt raises an EVPError with a sensible
- # error message, so we just let it propagate up the stack
- return Bcfg2.Encryption.bruteforce_decrypt(
- element.text,
- passphrases=passes.values(),
- algorithm=Bcfg2.Encryption.get_algorithm(SETUP))
- raise Bcfg2.Encryption.EVPError("Failed to decrypt")
diff --git a/src/lib/Bcfg2/Server/Plugins/Cfg/CfgPublicKeyCreator.py b/src/lib/Bcfg2/Server/Plugins/Cfg/CfgPublicKeyCreator.py
index 6be438462..4c61e338e 100644
--- a/src/lib/Bcfg2/Server/Plugins/Cfg/CfgPublicKeyCreator.py
+++ b/src/lib/Bcfg2/Server/Plugins/Cfg/CfgPublicKeyCreator.py
@@ -23,6 +23,9 @@ class CfgPublicKeyCreator(CfgCreator, StructFile):
#: Handle XML specifications of private keys
__basenames__ = ['pubkey.xml']
+ #: No text content on any tags, so encryption support disabled
+ encryption = False
+
def __init__(self, fname):
CfgCreator.__init__(self, fname)
StructFile.__init__(self, fname)
diff --git a/src/lib/Bcfg2/Server/Plugins/Cfg/__init__.py b/src/lib/Bcfg2/Server/Plugins/Cfg/__init__.py
index c6ac9d8dc..7af69ec81 100644
--- a/src/lib/Bcfg2/Server/Plugins/Cfg/__init__.py
+++ b/src/lib/Bcfg2/Server/Plugins/Cfg/__init__.py
@@ -16,17 +16,6 @@ from Bcfg2.Compat import u_str, unicode, b64encode, walk_packages, \
any, oct_mode
# pylint: enable=W0622
-#: SETUP contains a reference to the
-#: :class:`Bcfg2.Options.OptionParser` created by the Bcfg2 core for
-#: parsing command-line and config file options.
-#: :class:`Bcfg2.Server.Plugins.Cfg.Cfg` stores it in a module global
-#: so that the handler objects can access it, because there is no other
-#: facility for passing a setup object from a
-#: :class:`Bcfg2.Server.Plugin.helpers.GroupSpool` to its
-#: :class:`Bcfg2.Server.Plugin.helpers.EntrySet` objects and thence to
-#: the EntrySet children.
-SETUP = None
-
#: CFG is a reference to the :class:`Bcfg2.Server.Plugins.Cfg.Cfg`
#: plugin object created by the Bcfg2 core. This is provided so that
#: the handler objects can access it as necessary, since the existing
@@ -86,6 +75,7 @@ class CfgBaseFileMatcher(Bcfg2.Server.Plugin.SpecificData,
encoding)
Bcfg2.Server.Plugin.Debuggable.__init__(self)
self.encoding = encoding
+ self.setup = Bcfg2.Options.get_option_parser()
__init__.__doc__ = Bcfg2.Server.Plugin.SpecificData.__init__.__doc__ + \
"""
.. -----
@@ -228,10 +218,7 @@ class CfgFilter(CfgBaseFileMatcher):
class CfgInfo(CfgBaseFileMatcher):
""" CfgInfo handlers provide metadata (owner, group, paranoid,
- etc.) for a file entry.
-
- .. private-include: _set_info
- """
+ etc.) for a file entry. """
#: Whether or not the files handled by this handler are permitted
#: to have specificity indicators in their filenames -- e.g.,
@@ -261,20 +248,6 @@ class CfgInfo(CfgBaseFileMatcher):
"""
raise NotImplementedError
- def _set_info(self, entry, info):
- """ Helper function to assign a dict of info attributes to an
- entry object. ``entry`` is modified in-place.
-
- :param entry: The abstract entry to bind the info to
- :type entry: lxml.etree._Element
- :param info: A dict of attribute: value pairs
- :type info: dict
- :returns: None
- """
- for key, value in list(info.items()):
- if not key.startswith("__"):
- entry.attrib[key] = value
-
class CfgVerifier(CfgBaseFileMatcher):
""" CfgVerifier handlers validate entry data once it has been
@@ -317,9 +290,6 @@ class CfgCreator(CfgBaseFileMatcher):
#: file, and are thus not specific
__specific__ = False
- #: The CfgCreator interface is experimental at this time
- experimental = True
-
def __init__(self, fname):
"""
:param name: The full path to the file
@@ -432,22 +402,15 @@ class CfgDefaultInfo(CfgInfo):
""" :class:`Bcfg2.Server.Plugins.Cfg.Cfg` handler that supplies a
default set of file metadata """
- def __init__(self, defaults):
+ def __init__(self):
CfgInfo.__init__(self, '')
- self.defaults = defaults
__init__.__doc__ = CfgInfo.__init__.__doc__.split(".. -----")[0]
- def bind_info_to_entry(self, entry, metadata):
- self._set_info(entry, self.defaults)
+ def bind_info_to_entry(self, entry, _):
+ for key, value in Bcfg2.Server.Plugin.default_path_metadata().items():
+ entry.attrib[key] = value
bind_info_to_entry.__doc__ = CfgInfo.bind_info_to_entry.__doc__
-#: A :class:`CfgDefaultInfo` object instantiated with
-#: :attr:`Bcfg2.Server.Plugin.helper.DEFAULT_FILE_METADATA` as its
-#: default metadata. This is used to set a default file metadata set
-#: on an entry before a "real" :class:`CfgInfo` handler applies its
-#: metadata to the entry.
-DEFAULT_INFO = CfgDefaultInfo(Bcfg2.Server.Plugin.DEFAULT_FILE_METADATA)
-
class CfgEntrySet(Bcfg2.Server.Plugin.EntrySet,
Bcfg2.Server.Plugin.Debuggable):
@@ -460,6 +423,7 @@ class CfgEntrySet(Bcfg2.Server.Plugin.EntrySet,
Bcfg2.Server.Plugin.Debuggable.__init__(self)
self.specific = None
self._handlers = None
+ self.setup = Bcfg2.Options.get_option_parser()
__init__.__doc__ = Bcfg2.Server.Plugin.EntrySet.__doc__
def set_debug(self, debug):
@@ -587,7 +551,7 @@ class CfgEntrySet(Bcfg2.Server.Plugin.EntrySet,
for fltr in self.get_handlers(metadata, CfgFilter):
data = fltr.modify_data(entry, metadata, data)
- if SETUP['validate']:
+ if self.setup['validate']:
try:
self._validate_data(entry, metadata, data)
except CfgVerificationError:
@@ -658,7 +622,7 @@ class CfgEntrySet(Bcfg2.Server.Plugin.EntrySet,
:returns: None
"""
info_handlers = self.get_handlers(metadata, CfgInfo)
- DEFAULT_INFO.bind_info_to_entry(entry, metadata)
+ CfgDefaultInfo().bind_info_to_entry(entry, metadata)
if len(info_handlers) > 1:
self.logger.error("More than one info supplier found for %s: %s" %
(entry.get("name"), info_handlers))
@@ -705,13 +669,6 @@ class CfgEntrySet(Bcfg2.Server.Plugin.EntrySet,
# raises an appropriate exception
return self._create_data(entry, metadata)
- if entry.get('mode').lower() == 'inherit':
- # use on-disk permissions
- self.logger.warning("Cfg: %s: Use of mode='inherit' is deprecated"
- % entry.get("name"))
- fname = os.path.join(self.path, generator.name)
- entry.set('mode',
- oct_mode(stat.S_IMODE(os.stat(fname).st_mode)))
try:
return generator.get_data(entry, metadata)
except:
@@ -800,13 +757,6 @@ class CfgEntrySet(Bcfg2.Server.Plugin.EntrySet,
badattr = [attr for attr in ['owner', 'group', 'mode']
if attr in new_entry]
if badattr:
- # check for info files and inform user of their removal
- for ifile in ['info', ':info']:
- info = os.path.join(self.path, ifile)
- if os.path.exists(info):
- self.logger.info("Removing %s and replacing with info.xml"
- % info)
- os.remove(info)
metadata_updates = {}
metadata_updates.update(self.metadata)
for attr in badattr:
@@ -836,16 +786,16 @@ class Cfg(Bcfg2.Server.Plugin.GroupSpool,
es_child_cls = Bcfg2.Server.Plugin.SpecificData
def __init__(self, core, datastore):
- global SETUP, CFG # pylint: disable=W0603
+ global CFG # pylint: disable=W0603
Bcfg2.Server.Plugin.GroupSpool.__init__(self, core, datastore)
Bcfg2.Server.Plugin.PullTarget.__init__(self)
CFG = self
- SETUP = core.setup
- if 'validate' not in SETUP:
- SETUP.add_option('validate', Bcfg2.Options.CFG_VALIDATION)
- SETUP.reparse()
+ setup = Bcfg2.Options.get_option_parser()
+ if 'validate' not in setup:
+ setup.add_option('validate', Bcfg2.Options.CFG_VALIDATION)
+ setup.reparse()
__init__.__doc__ = Bcfg2.Server.Plugin.GroupSpool.__init__.__doc__
def has_generator(self, entry, metadata):
@@ -886,26 +836,11 @@ class CfgLint(Bcfg2.Server.Lint.ServerPlugin):
def Run(self):
for basename, entry in list(self.core.plugins['Cfg'].entries.items()):
- self.check_delta(basename, entry)
self.check_pubkey(basename, entry)
@classmethod
def Errors(cls):
- return {"cat-file-used": "warning",
- "diff-file-used": "warning",
- "no-pubkey-xml": "warning"}
-
- def check_delta(self, basename, entry):
- """ check that no .cat or .diff files are in use """
- for fname, handler in entry.entries.items():
- path = handler.name
- if self.HandlesFile(path) and isinstance(handler, CfgFilter):
- extension = fname.split(".")[-1]
- if extension in ["cat", "diff"]:
- self.LintError("%s-file-used" % extension,
- "%s file used on %s: %s" % (extension,
- basename,
- fname))
+ return {"no-pubkey-xml": "warning"}
def check_pubkey(self, basename, entry):
""" check that privkey.xml files have corresponding pubkey.xml
diff --git a/src/lib/Bcfg2/Server/Plugins/Cvs.py b/src/lib/Bcfg2/Server/Plugins/Cvs.py
index 22cacaa76..0054a8a37 100644
--- a/src/lib/Bcfg2/Server/Plugins/Cvs.py
+++ b/src/lib/Bcfg2/Server/Plugins/Cvs.py
@@ -1,7 +1,7 @@
""" The Cvs plugin provides a revision interface for Bcfg2 repos using
cvs. """
-from subprocess import Popen, PIPE
+from Bcfg2.Utils import Executor
import Bcfg2.Server.Plugin
@@ -13,20 +13,17 @@ class Cvs(Bcfg2.Server.Plugin.Version):
def __init__(self, core, datastore):
Bcfg2.Server.Plugin.Version.__init__(self, core, datastore)
+ self.cmd = Executor()
self.logger.debug("Initialized cvs plugin with CVS directory %s" %
self.vcs_path)
def get_revision(self):
"""Read cvs revision information for the Bcfg2 repository."""
+ result = self.cmd.run(["env LC_ALL=C", "cvs", "log"],
+ shell=True, cwd=self.vcs_root)
try:
- data = Popen("env LC_ALL=C cvs log",
- shell=True,
- cwd=self.vcs_root,
- stdout=PIPE).stdout.readlines()
- return data[3].strip('\n')
- except IndexError:
- msg = "Failed to read CVS log"
+ return result.stdout.splitlines()[0].strip()
+ except (IndexError, AttributeError):
+ msg = "Failed to read revision from CVS: %s" % result.error
self.logger.error(msg)
- self.logger.error('Ran command "cvs log" from directory %s' %
- self.vcs_root)
raise Bcfg2.Server.Plugin.PluginExecutionError(msg)
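
This hunk and the following Version-plugin hunks (Darcs, Fossil, Git) all switch
from raw subprocess calls to Bcfg2.Utils.Executor. The Executor implementation is
not part of this diff, so the stand-in below is only a sketch of the contract the
call sites rely on: run() accepting shell/cwd keywords and returning an object
with success, stdout, stderr and error attributes. The names ExecutorResult and
run are illustrative, not the real API.

    import subprocess


    class ExecutorResult(object):
        """Hypothetical stand-in for the result object the call sites
        above expect: success, stdout, stderr, and an error string."""

        def __init__(self, retval, stdout, stderr):
            self.success = retval == 0
            self.stdout = stdout
            self.stderr = stderr
            self.error = stderr or "command returned %s" % retval


    def run(command, shell=False, cwd=None):
        """Run a command, never raise, and return an ExecutorResult."""
        if shell and isinstance(command, list):
            command = " ".join(command)
        proc = subprocess.Popen(command, shell=shell, cwd=cwd,
                                stdout=subprocess.PIPE,
                                stderr=subprocess.PIPE)
        stdout, stderr = proc.communicate()
        return ExecutorResult(proc.returncode, stdout, stderr)


    # Usage mirroring the Cvs plugin above:
    # result = run(["env LC_ALL=C", "cvs", "log"], shell=True, cwd="/repo")
    # if result.success:
    #     print(result.stdout.splitlines()[0].strip())
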
diff --git a/src/lib/Bcfg2/Server/Plugins/Darcs.py b/src/lib/Bcfg2/Server/Plugins/Darcs.py
index b4abafb0e..2c6dde393 100644
--- a/src/lib/Bcfg2/Server/Plugins/Darcs.py
+++ b/src/lib/Bcfg2/Server/Plugins/Darcs.py
@@ -1,7 +1,7 @@
""" Darcs is a version plugin for dealing with Bcfg2 repos stored in the
Darcs VCS. """
-from subprocess import Popen, PIPE
+from Bcfg2.Utils import Executor
import Bcfg2.Server.Plugin
@@ -13,21 +13,17 @@ class Darcs(Bcfg2.Server.Plugin.Version):
def __init__(self, core, datastore):
Bcfg2.Server.Plugin.Version.__init__(self, core, datastore)
+ self.cmd = Executor()
self.logger.debug("Initialized Darcs plugin with darcs directory %s" %
self.vcs_path)
def get_revision(self):
"""Read Darcs changeset information for the Bcfg2 repository."""
- try:
- data = Popen("env LC_ALL=C darcs changes",
- shell=True,
- cwd=self.vcs_root,
- stdout=PIPE).stdout.readlines()
- revision = data[0].strip('\n')
- except:
- msg = "Failed to read darcs repository"
+ result = self.cmd.run(["env LC_ALL=C", "darcs", "changes"],
+ shell=True, cwd=self.vcs_root)
+ if result.success:
+ return result.stdout.splitlines()[0].strip()
+ else:
+ msg = "Failed to read revision from darcs: %s" % result.error
self.logger.error(msg)
- self.logger.error('Ran command "darcs changes" from directory %s' %
- self.vcs_root)
raise Bcfg2.Server.Plugin.PluginExecutionError(msg)
- return revision
diff --git a/src/lib/Bcfg2/Server/Plugins/Decisions.py b/src/lib/Bcfg2/Server/Plugins/Decisions.py
index 66f299bc9..a67a356d4 100644
--- a/src/lib/Bcfg2/Server/Plugins/Decisions.py
+++ b/src/lib/Bcfg2/Server/Plugins/Decisions.py
@@ -2,57 +2,33 @@
blacklist certain entries. """
import os
-import lxml.etree
import Bcfg2.Server.Plugin
+import Bcfg2.Server.FileMonitor
-class DecisionFile(Bcfg2.Server.Plugin.SpecificData):
+class DecisionFile(Bcfg2.Server.Plugin.StructFile):
""" Representation of a Decisions XML file """
- def __init__(self, name, specific, encoding):
- Bcfg2.Server.Plugin.SpecificData.__init__(self, name, specific,
- encoding)
- self.contents = None
-
- def handle_event(self, event):
- Bcfg2.Server.Plugin.SpecificData.handle_event(self, event)
- self.contents = lxml.etree.XML(self.data)
-
- def get_decisions(self):
+ def get_decisions(self, metadata):
""" Get a list of whitelist or blacklist tuples """
+ if self.xdata is None:
+ # no white/blacklist has been read yet, probably because
+ # it doesn't exist
+ return []
return [(x.get('type'), x.get('name'))
- for x in self.contents.xpath('.//Decision')]
+ for x in self.XMLMatch(metadata).xpath('.//Decision')]
-class Decisions(Bcfg2.Server.Plugin.EntrySet,
- Bcfg2.Server.Plugin.Plugin,
+class Decisions(Bcfg2.Server.Plugin.Plugin,
Bcfg2.Server.Plugin.Decision):
- """ Decisions plugin
-
- Arguments:
- - `core`: Bcfg2.Core instance
- - `datastore`: File repository location
- """
- basename_is_regex = True
+ """ Decisions plugin """
__author__ = 'bcfg-dev@mcs.anl.gov'
def __init__(self, core, datastore):
Bcfg2.Server.Plugin.Plugin.__init__(self, core, datastore)
Bcfg2.Server.Plugin.Decision.__init__(self)
- Bcfg2.Server.Plugin.EntrySet.__init__(self, '(white|black)list',
- self.data, DecisionFile,
- core.setup['encoding'])
- core.fam.AddMonitor(self.data, self)
-
- def HandleEvent(self, event):
- """ Handle events on Decision files by passing them off to
- EntrySet.handle_event """
- if event.filename != self.path:
- return self.handle_event(event)
+ self.whitelist = DecisionFile(os.path.join(self.data, "whitelist.xml"))
+ self.blacklist = DecisionFile(os.path.join(self.data, "blacklist.xml"))
def GetDecisions(self, metadata, mode):
- ret = []
- for cdt in self.get_matching(metadata):
- if os.path.basename(cdt.name).startswith(mode):
- ret.extend(cdt.get_decisions())
- return ret
+        return getattr(self, mode).get_decisions(metadata)
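
A concrete file makes the new Decisions layout clearer. The schema itself is not
shown in this diff, so the root tag below is an assumption; only the Decision
elements with type and name attributes come from the code above, and the sketch
skips the XMLMatch() client filtering:

    import lxml.etree

    # Hypothetical whitelist.xml; the root tag is an assumption.
    WHITELIST = """<Decisions>
      <Decision type="Package" name="openssh-server"/>
      <Decision type="Service" name="sshd"/>
    </Decisions>"""


    def get_decisions(xdata):
        """Mirror DecisionFile.get_decisions(): (type, name) tuples for
        every Decision element, or [] if no file has been read yet."""
        if xdata is None:
            return []
        return [(x.get('type'), x.get('name'))
                for x in xdata.xpath('.//Decision')]


    print(get_decisions(lxml.etree.XML(WHITELIST)))
    # -> [('Package', 'openssh-server'), ('Service', 'sshd')]
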
diff --git a/src/lib/Bcfg2/Server/Plugins/Deps.py b/src/lib/Bcfg2/Server/Plugins/Deps.py
index d3a1ee871..312b03bae 100644
--- a/src/lib/Bcfg2/Server/Plugins/Deps.py
+++ b/src/lib/Bcfg2/Server/Plugins/Deps.py
@@ -1,35 +1,12 @@
"""This plugin provides automatic dependency handling."""
import lxml.etree
-
import Bcfg2.Server.Plugin
-
-
-class DNode(Bcfg2.Server.Plugin.INode):
- """DNode provides supports for single predicate types for dependencies."""
- def _load_children(self, data, idict):
- for item in data.getchildren():
- if item.tag in self.containers:
- self.children.append(self.__class__(item, idict, self))
- else:
- data = [(child.tag, child.get('name'))
- for child in item.getchildren()]
- try:
- self.contents[item.tag][item.get('name')] = data
- except KeyError:
- self.contents[item.tag] = {item.get('name'): data}
-
-
-class DepXMLSrc(Bcfg2.Server.Plugin.XMLSrc):
- __node__ = DNode
+from Bcfg2.Server.Plugin import PluginExecutionError
class Deps(Bcfg2.Server.Plugin.PrioDir,
Bcfg2.Server.Plugin.StructureValidator):
- name = 'Deps'
- __author__ = 'bcfg-dev@mcs.anl.gov'
- __child__ = DepXMLSrc
-
# Override the default sort_order (of 500) so that this plugin
# gets handled after others running at the default. In particular,
# we want to run after Packages, so we can see the final set of
@@ -55,63 +32,58 @@ class Deps(Bcfg2.Server.Plugin.PrioDir,
tag = entry.tag
if tag.startswith('Bound'):
tag = tag[5:]
- if (tag, entry.get('name')) not in entries \
- and not isinstance(entry, lxml.etree._Comment):
+ if ((tag, entry.get('name')) not in entries
+ and not isinstance(entry, lxml.etree._Comment)):
entries.append((tag, entry.get('name')))
entries.sort()
entries = tuple(entries)
- gdata = list(metadata.groups)
- gdata.sort()
- gdata = tuple(gdata)
+ groups = list(metadata.groups)
+ groups.sort()
+ groups = tuple(groups)
# Check to see if we have cached the prereqs already
- if (entries, gdata) in self.cache:
- prereqs = self.cache[(entries, gdata)]
+ if (entries, groups) in self.cache:
+ prereqs = self.cache[(entries, groups)]
else:
prereqs = self.calculate_prereqs(metadata, entries)
- self.cache[(entries, gdata)] = prereqs
+ self.cache[(entries, groups)] = prereqs
newstruct = lxml.etree.Element("Independent")
for tag, name in prereqs:
- try:
- lxml.etree.SubElement(newstruct, tag, name=name)
- except:
- self.logger.error("Failed to add dep entry for %s:%s" % (tag, name))
+ lxml.etree.SubElement(newstruct, tag, name=name)
structures.append(newstruct)
-
def calculate_prereqs(self, metadata, entries):
"""Calculate the prerequisites defined in Deps for the passed
set of entries.
"""
prereqs = []
- [src.Cache(metadata) for src in self.entries.values()]
-
toexamine = list(entries[:])
while toexamine:
entry = toexamine.pop()
- matching = [src for src in list(self.entries.values())
- if src.cache and entry[0] in src.cache[1]
- and entry[1] in src.cache[1][entry[0]]]
+ # tuples of (PriorityStructFile, element) for each
+ # matching element and the structfile that contains it
+ matching = []
+ for deps in self.entries.values():
+                el = deps.find("/%s[@name='%s']" % (entry[0], entry[1]))
+                if el is not None:
+ matching.append((deps, el))
if len(matching) > 1:
- prio = [int(src.priority) for src in matching]
+ prio = [int(m[0].priority) for m in matching]
if prio.count(max(prio)) > 1:
- self.logger.error("Found conflicting %s sources with same priority for %s, pkg %s" %
- (entry[0].lower(), metadata.hostname, entry[1]))
- raise Bcfg2.Server.Plugin.PluginExecutionError
+ raise PluginExecutionError(
+ "Deps: Found conflicting dependencies with same "
+ "priority for %s:%s for %s: %s" %
+                        (entry[0], entry[1],
+ metadata.hostname, [m[0].name for m in matching]))
index = prio.index(max(prio))
matching = [matching[index]]
- elif len(matching) == 1:
- for prq in matching[0].cache[1][entry[0]][entry[1]]:
- # XML comments seem to show up in the cache as a
- # tuple with item 0 being callable. The logic
- # below filters them out. Would be better to
- # exclude them when we load the cache in the first
- # place.
- if prq not in prereqs and prq not in entries and not callable(prq[0]):
- toexamine.append(prq)
- prereqs.append(prq)
- else:
+ if not matching:
continue
+            for prq in matching[0][1].getchildren():
+                prq = (prq.tag, prq.get("name"))
+                if prq not in prereqs and prq not in entries:
+                    toexamine.append(prq)
+                    prereqs.append(prq)
return prereqs
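
The priority resolution above reads more easily in isolation. The sketch below is
a simplified model rather than the plugin code: plain dicts stand in for the
PriorityStructFile sources, and entries are the same (tag, name) tuples the
caller builds.

    def calculate_prereqs(sources, entries):
        """sources: list of (priority, mapping) pairs, where mapping maps a
        (tag, name) tuple to the list of (tag, name) prerequisites it adds.
        entries: (tag, name) tuples already in the client's structures.
        Returns the transitive prerequisites, honouring source priority."""
        prereqs = []
        toexamine = list(entries)
        while toexamine:
            entry = toexamine.pop()
            matching = [(prio, deps[entry])
                        for prio, deps in sources if entry in deps]
            if not matching:
                continue
            best = max(prio for prio, _ in matching)
            if sum(1 for prio, _ in matching if prio == best) > 1:
                raise ValueError("conflicting sources with priority %s for "
                                 "%s:%s" % (best, entry[0], entry[1]))
            winner = next(deps for prio, deps in matching if prio == best)
            for prq in winner:
                if prq not in prereqs and prq not in entries:
                    toexamine.append(prq)
                    prereqs.append(prq)
        return prereqs


    # calculate_prereqs([(10, {('Package', 'httpd'): [('Service', 'httpd')]})],
    #                   [('Package', 'httpd')])
    # -> [('Service', 'httpd')]
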
diff --git a/src/lib/Bcfg2/Server/Plugins/Editor.py b/src/lib/Bcfg2/Server/Plugins/Editor.py
deleted file mode 100644
index f82e0f1dd..000000000
--- a/src/lib/Bcfg2/Server/Plugins/Editor.py
+++ /dev/null
@@ -1,80 +0,0 @@
-import Bcfg2.Server.Plugin
-import re
-import lxml.etree
-
-
-def linesub(pattern, repl, filestring):
- """Substitutes instances of pattern with repl in filestring."""
- if filestring == None:
- filestring = ''
- output = list()
- fileread = filestring.split('\n')
- for line in fileread:
- output.append(re.sub(pattern, repl, filestring))
- return '\n'.join(output)
-
-
-class EditDirectives(Bcfg2.Server.Plugin.SpecificData):
- """This object handles the editing directives."""
- def ProcessDirectives(self, input):
- """Processes a list of edit directives on input."""
- temp = input
- for directive in self.data.split('\n'):
- directive = directive.split(',')
- temp = linesub(directive[0], directive[1], temp)
- return temp
-
-
-class EditEntrySet(Bcfg2.Server.Plugin.EntrySet):
- def __init__(self, basename, path, entry_type, encoding):
- self.ignore = re.compile("^(\.#.*|.*~|\\..*\\.(tmp|sw[px])|%s\.H_.*)$" % path.split('/')[-1])
- Bcfg2.Server.Plugin.EntrySet.__init__(self,
- basename,
- path,
- entry_type,
- encoding)
- self.inputs = dict()
-
- def bind_entry(self, entry, metadata):
- client = metadata.hostname
- filename = entry.get('name')
- permdata = {'owner': 'root',
- 'group': 'root',
- 'mode': '0644'}
- [entry.attrib.__setitem__(key, permdata[key]) for key in permdata]
- entry.text = self.entries['edits'].ProcessDirectives(self.get_client_data(client))
- if not entry.text:
- entry.set('empty', 'true')
- try:
- f = open('%s/%s.H_%s' % (self.path, filename.split('/')[-1], client), 'w')
- f.write(entry.text)
- f.close()
- except:
- pass
-
- def get_client_data(self, client):
- return self.inputs[client]
-
-
-class Editor(Bcfg2.Server.Plugin.GroupSpool,
- Bcfg2.Server.Plugin.Probing):
- name = 'Editor'
- __author__ = 'bcfg2-dev@mcs.anl.gov'
- filename_pattern = 'edits'
- es_child_cls = EditDirectives
- es_cls = EditEntrySet
-
- def GetProbes(self, _):
- '''Return a set of probes for execution on client'''
- probelist = list()
- for name in list(self.entries.keys()):
- probe = lxml.etree.Element('probe')
- probe.set('name', name)
- probe.set('source', "Editor")
- probe.text = "cat %s" % name
- probelist.append(probe)
- return probelist
-
- def ReceiveData(self, client, datalist):
- for data in datalist:
- self.entries[data.get('name')].inputs[client.hostname] = data.text
diff --git a/src/lib/Bcfg2/Server/Plugins/FileProbes.py b/src/lib/Bcfg2/Server/Plugins/FileProbes.py
index 8e074118f..a3bba14f3 100644
--- a/src/lib/Bcfg2/Server/Plugins/FileProbes.py
+++ b/src/lib/Bcfg2/Server/Plugins/FileProbes.py
@@ -11,6 +11,7 @@ import lxml.etree
import Bcfg2.Options
import Bcfg2.Server
import Bcfg2.Server.Plugin
+import Bcfg2.Server.FileMonitor
from Bcfg2.Compat import b64decode
#: The probe we send to clients to get the file data. Returns an XML
@@ -70,7 +71,6 @@ class FileProbes(Bcfg2.Server.Plugin.Plugin,
self.config = \
Bcfg2.Server.Plugin.StructFile(os.path.join(self.data,
'config.xml'),
- fam=core.fam,
should_monitor=True,
create=self.name)
self.entries = dict()
@@ -198,7 +198,7 @@ class FileProbes(Bcfg2.Server.Plugin.Plugin,
if tries >= 10:
self.logger.error("%s still not registered" % filename)
return
- self.core.fam.handle_events_in_interval(1)
+ Bcfg2.Server.FileMonitor.get_fam().handle_events_in_interval(1)
try:
cfg.entries[filename].bind_entry(entry, metadata)
except Bcfg2.Server.Plugin.PluginExecutionError:
diff --git a/src/lib/Bcfg2/Server/Plugins/Fossil.py b/src/lib/Bcfg2/Server/Plugins/Fossil.py
index 6165ac651..05cf4e5d4 100644
--- a/src/lib/Bcfg2/Server/Plugins/Fossil.py
+++ b/src/lib/Bcfg2/Server/Plugins/Fossil.py
@@ -1,7 +1,7 @@
""" The Fossil plugin provides a revision interface for Bcfg2 repos
using fossil."""
-from subprocess import Popen, PIPE
+from Bcfg2.Utils import Executor
import Bcfg2.Server.Plugin
@@ -13,22 +13,22 @@ class Fossil(Bcfg2.Server.Plugin.Version):
def __init__(self, core, datastore):
Bcfg2.Server.Plugin.Version.__init__(self, core, datastore)
+ self.cmd = Executor()
self.logger.debug("Initialized Fossil plugin with fossil directory %s"
% self.vcs_path)
def get_revision(self):
"""Read fossil revision information for the Bcfg2 repository."""
+ result = self.cmd.run(["env LC_ALL=C", "fossil", "info"],
+ shell=True, cwd=self.vcs_root)
try:
- data = Popen("env LC_ALL=C fossil info",
- shell=True,
- cwd=self.vcs_root,
- stdout=PIPE).stdout.readlines()
- revline = [line.split(': ')[1].strip() for line in data
- if line.split(': ')[0].strip() == 'checkout'][-1]
- return revline.split(' ')[0]
- except IndexError:
- msg = "Failed to read fossil info"
+ revision = None
+ for line in result.stdout.splitlines():
+ ldata = line.split(': ')
+ if ldata[0].strip() == 'checkout':
+                    revision = ldata[1].strip().split(' ')[0]
+ return revision
+ except (IndexError, AttributeError):
+ msg = "Failed to read revision from Fossil: %s" % result.error
self.logger.error(msg)
- self.logger.error('Ran command "fossil info" from directory "%s"' %
- self.vcs_root)
raise Bcfg2.Server.Plugin.PluginExecutionError(msg)
diff --git a/src/lib/Bcfg2/Server/Plugins/Git.py b/src/lib/Bcfg2/Server/Plugins/Git.py
index 44971aba7..58a5c58f0 100644
--- a/src/lib/Bcfg2/Server/Plugins/Git.py
+++ b/src/lib/Bcfg2/Server/Plugins/Git.py
@@ -3,12 +3,12 @@ git. """
import sys
from Bcfg2.Server.Plugin import Version, PluginExecutionError
-from subprocess import Popen, PIPE
try:
import git
HAS_GITPYTHON = True
except ImportError:
+ from Bcfg2.Utils import Executor
HAS_GITPYTHON = False
@@ -24,10 +24,12 @@ class Git(Version):
Version.__init__(self, core, datastore)
if HAS_GITPYTHON:
self.repo = git.Repo(self.vcs_root)
+ self.cmd = None
else:
self.logger.debug("Git: GitPython not found, using CLI interface "
"to Git")
self.repo = None
+ self.cmd = Executor()
self.logger.debug("Initialized git plugin with git directory %s" %
self.vcs_path)
@@ -45,11 +47,10 @@ class Git(Version):
cmd = ["git", "--git-dir", self.vcs_path,
"--work-tree", self.vcs_root, "rev-parse", "HEAD"]
self.debug_log("Git: Running %s" % cmd)
- proc = Popen(cmd, stdout=PIPE, stderr=PIPE)
- rv, err = proc.communicate()
- if proc.wait():
- raise Exception(err)
- return rv
+ result = self.cmd.run(cmd)
+ if not result.success:
+ raise Exception(result.stderr)
+ return result.stdout
except:
raise PluginExecutionError("Git: Error getting revision from %s: "
"%s" % (self.vcs_root,
diff --git a/src/lib/Bcfg2/Server/Plugins/GroupLogic.py b/src/lib/Bcfg2/Server/Plugins/GroupLogic.py
index 810b273af..aa336ff23 100644
--- a/src/lib/Bcfg2/Server/Plugins/GroupLogic.py
+++ b/src/lib/Bcfg2/Server/Plugins/GroupLogic.py
@@ -4,30 +4,17 @@ template to dynamically set additional groups for clients. """
import os
import lxml.etree
import Bcfg2.Server.Plugin
-try:
- from Bcfg2.Server.Plugins.Bundler import BundleTemplateFile
-except ImportError:
- # BundleTemplateFile missing means that genshi is missing. we
- # import genshi to get the _real_ error
- import genshi # pylint: disable=W0611
-class GroupLogicConfig(BundleTemplateFile):
+class GroupLogicConfig(Bcfg2.Server.Plugin.StructFile):
""" Representation of the GroupLogic groups.xml file """
create = lxml.etree.Element("GroupLogic",
nsmap=dict(py="http://genshi.edgewall.org/"))
- def __init__(self, name, fam):
- BundleTemplateFile.__init__(self, name,
- Bcfg2.Server.Plugin.Specificity(), None)
- self.fam = fam
- self.should_monitor = True
- self.fam.AddMonitor(self.name, self)
-
def _match(self, item, metadata):
if item.tag == 'Group' and not len(item.getchildren()):
return [item]
- return BundleTemplateFile._match(self, item, metadata)
+ return Bcfg2.Server.Plugin.StructFile._match(self, item, metadata)
class GroupLogic(Bcfg2.Server.Plugin.Plugin,
@@ -40,8 +27,8 @@ class GroupLogic(Bcfg2.Server.Plugin.Plugin,
Bcfg2.Server.Plugin.Plugin.__init__(self, core, datastore)
Bcfg2.Server.Plugin.Connector.__init__(self)
self.config = GroupLogicConfig(os.path.join(self.data, "groups.xml"),
- core.fam)
+ should_monitor=True)
def get_additional_groups(self, metadata):
return [el.get("name")
- for el in self.config.get_xml_value(metadata).findall("Group")]
+ for el in self.config.XMLMatch(metadata).findall("Group")]
diff --git a/src/lib/Bcfg2/Server/Plugins/GroupPatterns.py b/src/lib/Bcfg2/Server/Plugins/GroupPatterns.py
index 09685d972..3e5508160 100644
--- a/src/lib/Bcfg2/Server/Plugins/GroupPatterns.py
+++ b/src/lib/Bcfg2/Server/Plugins/GroupPatterns.py
@@ -69,11 +69,7 @@ class PatternFile(Bcfg2.Server.Plugin.XMLFileBacked):
create = 'GroupPatterns'
def __init__(self, filename, core=None):
- try:
- fam = core.fam
- except AttributeError:
- fam = None
- Bcfg2.Server.Plugin.XMLFileBacked.__init__(self, filename, fam=fam,
+ Bcfg2.Server.Plugin.XMLFileBacked.__init__(self, filename,
should_monitor=True)
self.core = core
self.patterns = []
diff --git a/src/lib/Bcfg2/Server/Plugins/Guppy.py b/src/lib/Bcfg2/Server/Plugins/Guppy.py
index 4f2601f15..6d6df3cc3 100644
--- a/src/lib/Bcfg2/Server/Plugins/Guppy.py
+++ b/src/lib/Bcfg2/Server/Plugins/Guppy.py
@@ -32,10 +32,7 @@ from guppy.heapy import Remote
class Guppy(Bcfg2.Server.Plugin.Plugin):
"""Guppy is a debugging plugin to help trace memory leaks"""
- name = 'Guppy'
__author__ = 'bcfg-dev@mcs.anl.gov'
-
- experimental = True
__rmi__ = Bcfg2.Server.Plugin.Plugin.__rmi__ + ['Enable', 'Disable']
def __init__(self, core, datastore):
diff --git a/src/lib/Bcfg2/Server/Plugins/Hostbase.py b/src/lib/Bcfg2/Server/Plugins/Hostbase.py
deleted file mode 100644
index 55757e0b4..000000000
--- a/src/lib/Bcfg2/Server/Plugins/Hostbase.py
+++ /dev/null
@@ -1,599 +0,0 @@
-"""
-This file provides the Hostbase plugin.
-It manages dns/dhcp/nis host information
-"""
-
-from lxml.etree import Element, SubElement
-import os
-import re
-from time import strftime
-os.environ['DJANGO_SETTINGS_MODULE'] = 'Bcfg2.Server.Hostbase.settings'
-import Bcfg2.Server.Plugin
-from Bcfg2.Server.Plugin import PluginExecutionError, PluginInitError
-from django.template import Context, loader
-from django.db import connection
-# Compatibility imports
-from Bcfg2.Compat import StringIO
-
-try:
- set
-except NameError:
- # deprecated since python 2.6
- from sets import Set as set
-
-
-class Hostbase(Bcfg2.Server.Plugin.Plugin,
- Bcfg2.Server.Plugin.Structure,
- Bcfg2.Server.Plugin.Generator):
- """The Hostbase plugin handles host/network info."""
- name = 'Hostbase'
- __author__ = 'bcfg-dev@mcs.anl.gov'
- filepath = '/my/adm/hostbase/files/bind'
- deprecated = True
-
- def __init__(self, core, datastore):
-
- self.ready = False
- Bcfg2.Server.Plugin.Plugin.__init__(self, core, datastore)
- Bcfg2.Server.Plugin.Structure.__init__(self)
- Bcfg2.Server.Plugin.Generator.__init__(self)
- files = ['zone.tmpl',
- 'reversesoa.tmpl',
- 'named.tmpl',
- 'reverseappend.tmpl',
- 'dhcpd.tmpl',
- 'hosts.tmpl',
- 'hostsappend.tmpl']
- self.filedata = {}
- self.dnsservers = []
- self.dhcpservers = []
- self.templates = {'zone': loader.get_template('zone.tmpl'),
- 'reversesoa': loader.get_template('reversesoa.tmpl'),
- 'named': loader.get_template('named.tmpl'),
- 'namedviews': loader.get_template('namedviews.tmpl'),
- 'reverseapp': loader.get_template('reverseappend.tmpl'),
- 'dhcp': loader.get_template('dhcpd.tmpl'),
- 'hosts': loader.get_template('hosts.tmpl'),
- 'hostsapp': loader.get_template('hostsappend.tmpl'),
- }
- self.Entries['ConfigFile'] = {}
- self.__rmi__ = ['rebuildState']
- try:
- self.rebuildState(None)
- except:
- raise PluginInitError
-
- def FetchFile(self, entry, metadata):
- """Return prebuilt file data."""
- fname = entry.get('name').split('/')[-1]
- if not fname in self.filedata:
- raise PluginExecutionError
- perms = {'owner': 'root',
- 'group': 'root',
- 'mode': '644'}
- [entry.attrib.__setitem__(key, value)
- for (key, value) in list(perms.items())]
- entry.text = self.filedata[fname]
-
- def BuildStructures(self, metadata):
- """Build hostbase bundle."""
- if metadata.hostname not in self.dnsservers or metadata.hostname not in self.dhcpservers:
- return []
- output = Element("Bundle", name='hostbase')
- if metadata.hostname in self.dnsservers:
- for configfile in self.Entries['ConfigFile']:
- if re.search('/etc/bind/', configfile):
- SubElement(output, "ConfigFile", name=configfile)
- if metadata.hostname in self.dhcpservers:
- SubElement(output, "ConfigFile", name="/etc/dhcp3/dhcpd.conf")
- return [output]
-
- def rebuildState(self, _):
- """Pre-cache all state information for hostbase config files
- callable as an XMLRPC function.
-
- """
- self.buildZones()
- self.buildDHCP()
- self.buildHosts()
- self.buildHostsLPD()
- self.buildPrinters()
- self.buildNetgroups()
- return True
-
- def buildZones(self):
- """Pre-build and stash zone files."""
- cursor = connection.cursor()
-
- cursor.execute("SELECT id, serial FROM hostbase_zone")
- zones = cursor.fetchall()
-
- for zone in zones:
- # update the serial number for all zone files
- todaydate = (strftime('%Y%m%d'))
- try:
- if todaydate == str(zone[1])[:8]:
- serial = zone[1] + 1
- else:
- serial = int(todaydate) * 100
- except (KeyError):
- serial = int(todaydate) * 100
- cursor.execute("""UPDATE hostbase_zone SET serial = \'%s\' WHERE id = \'%s\'""" % (str(serial), zone[0]))
-
- cursor.execute("SELECT * FROM hostbase_zone WHERE zone NOT LIKE \'%%.rev\'")
- zones = cursor.fetchall()
-
- iplist = []
- hosts = {}
-
- for zone in zones:
- zonefile = StringIO()
- externalzonefile = StringIO()
- cursor.execute("""SELECT n.name FROM hostbase_zone_nameservers z
- INNER JOIN hostbase_nameserver n ON z.nameserver_id = n.id
- WHERE z.zone_id = \'%s\'""" % zone[0])
- nameservers = cursor.fetchall()
- cursor.execute("""SELECT i.ip_addr FROM hostbase_zone_addresses z
- INNER JOIN hostbase_zoneaddress i ON z.zoneaddress_id = i.id
- WHERE z.zone_id = \'%s\'""" % zone[0])
- addresses = cursor.fetchall()
- cursor.execute("""SELECT m.priority, m.mx FROM hostbase_zone_mxs z
- INNER JOIN hostbase_mx m ON z.mx_id = m.id
- WHERE z.zone_id = \'%s\'""" % zone[0])
- mxs = cursor.fetchall()
- context = Context({
- 'zone': zone,
- 'nameservers': nameservers,
- 'addresses': addresses,
- 'mxs': mxs
- })
- zonefile.write(self.templates['zone'].render(context))
- externalzonefile.write(self.templates['zone'].render(context))
-
- querystring = """SELECT h.hostname, p.ip_addr,
- n.name, c.cname, m.priority, m.mx, n.dns_view
- FROM (((((hostbase_host h INNER JOIN hostbase_interface i ON h.id = i.host_id)
- INNER JOIN hostbase_ip p ON i.id = p.interface_id)
- INNER JOIN hostbase_name n ON p.id = n.ip_id)
- INNER JOIN hostbase_name_mxs x ON n.id = x.name_id)
- INNER JOIN hostbase_mx m ON m.id = x.mx_id)
- LEFT JOIN hostbase_cname c ON n.id = c.name_id
- WHERE n.name LIKE '%%%%%s'
- AND h.status = 'active'
- ORDER BY h.hostname, n.name, p.ip_addr
- """ % zone[1]
- cursor.execute(querystring)
- zonehosts = cursor.fetchall()
- prevhost = (None, None, None, None)
- cnames = StringIO()
- cnamesexternal = StringIO()
- for host in zonehosts:
- if not host[2].split(".", 1)[1] == zone[1]:
- zonefile.write(cnames.getvalue())
- externalzonefile.write(cnamesexternal.getvalue())
- cnames = StringIO()
- cnamesexternal = StringIO()
- continue
- if not prevhost[1] == host[1] or not prevhost[2] == host[2]:
- zonefile.write(cnames.getvalue())
- externalzonefile.write(cnamesexternal.getvalue())
- cnames = StringIO()
- cnamesexternal = StringIO()
- zonefile.write("%-32s%-10s%-32s\n" %
- (host[2].split(".", 1)[0], 'A', host[1]))
- zonefile.write("%-32s%-10s%-3s%s.\n" %
- ('', 'MX', host[4], host[5]))
- if host[6] == 'global':
- externalzonefile.write("%-32s%-10s%-32s\n" %
- (host[2].split(".", 1)[0], 'A', host[1]))
- externalzonefile.write("%-32s%-10s%-3s%s.\n" %
- ('', 'MX', host[4], host[5]))
- elif not prevhost[5] == host[5]:
- zonefile.write("%-32s%-10s%-3s%s.\n" %
- ('', 'MX', host[4], host[5]))
- if host[6] == 'global':
- externalzonefile.write("%-32s%-10s%-3s%s.\n" %
- ('', 'MX', host[4], host[5]))
-
- if host[3]:
- try:
- if host[3].split(".", 1)[1] == zone[1]:
- cnames.write("%-32s%-10s%-32s\n" %
- (host[3].split(".", 1)[0],
- 'CNAME', host[2].split(".", 1)[0]))
- if host[6] == 'global':
- cnamesexternal.write("%-32s%-10s%-32s\n" %
- (host[3].split(".", 1)[0],
- 'CNAME', host[2].split(".", 1)[0]))
- else:
- cnames.write("%-32s%-10s%-32s\n" %
- (host[3] + ".",
- 'CNAME',
- host[2].split(".", 1)[0]))
- if host[6] == 'global':
- cnamesexternal.write("%-32s%-10s%-32s\n" %
- (host[3] + ".",
- 'CNAME',
- host[2].split(".", 1)[0]))
-
- except:
- pass
- prevhost = host
- zonefile.write(cnames.getvalue())
- externalzonefile.write(cnamesexternal.getvalue())
- zonefile.write("\n\n%s" % zone[9])
- externalzonefile.write("\n\n%s" % zone[9])
- self.filedata[zone[1]] = zonefile.getvalue()
- self.filedata[zone[1] + ".external"] = externalzonefile.getvalue()
- zonefile.close()
- externalzonefile.close()
- self.Entries['ConfigFile']["%s/%s" % (self.filepath, zone[1])] = self.FetchFile
- self.Entries['ConfigFile']["%s/%s.external" % (self.filepath, zone[1])] = self.FetchFile
-
- cursor.execute("SELECT * FROM hostbase_zone WHERE zone LIKE \'%%.rev\' AND zone <> \'.rev\'")
- reversezones = cursor.fetchall()
-
- reversenames = []
- for reversezone in reversezones:
- cursor.execute("""SELECT n.name FROM hostbase_zone_nameservers z
- INNER JOIN hostbase_nameserver n ON z.nameserver_id = n.id
- WHERE z.zone_id = \'%s\'""" % reversezone[0])
- reverse_nameservers = cursor.fetchall()
-
- context = Context({
- 'inaddr': reversezone[1].rstrip('.rev'),
- 'zone': reversezone,
- 'nameservers': reverse_nameservers,
- })
-
- self.filedata[reversezone[1]] = self.templates['reversesoa'].render(context)
- self.filedata[reversezone[1] + '.external'] = self.templates['reversesoa'].render(context)
- self.filedata[reversezone[1]] += reversezone[9]
- self.filedata[reversezone[1] + '.external'] += reversezone[9]
-
- subnet = reversezone[1].split(".")
- subnet.reverse()
- reversenames.append((reversezone[1].rstrip('.rev'), ".".join(subnet[1:])))
-
- for filename in reversenames:
- cursor.execute("""
- SELECT DISTINCT h.hostname, p.ip_addr, n.dns_view FROM ((hostbase_host h
- INNER JOIN hostbase_interface i ON h.id = i.host_id)
- INNER JOIN hostbase_ip p ON i.id = p.interface_id)
- INNER JOIN hostbase_name n ON n.ip_id = p.id
- WHERE p.ip_addr LIKE '%s%%%%' AND h.status = 'active' ORDER BY p.ip_addr
- """ % filename[1])
- reversehosts = cursor.fetchall()
- zonefile = StringIO()
- externalzonefile = StringIO()
- if len(filename[0].split(".")) == 2:
- originlist = []
- [originlist.append((".".join([ip[1].split(".")[2], filename[0]]),
- ".".join([filename[1], ip[1].split(".")[2]])))
- for ip in reversehosts
- if (".".join([ip[1].split(".")[2], filename[0]]),
- ".".join([filename[1], ip[1].split(".")[2]])) not in originlist]
- for origin in originlist:
- hosts = [(host[1].split("."), host[0])
- for host in reversehosts
- if host[1].rstrip('0123456789').rstrip('.') == origin[1]]
- hosts_external = [(host[1].split("."), host[0])
- for host in reversehosts
- if (host[1].rstrip('0123456789').rstrip('.') == origin[1]
- and host[2] == 'global')]
- context = Context({
- 'hosts': hosts,
- 'inaddr': origin[0],
- 'fileorigin': filename[0],
- })
- zonefile.write(self.templates['reverseapp'].render(context))
- context = Context({
- 'hosts': hosts_external,
- 'inaddr': origin[0],
- 'fileorigin': filename[0],
- })
- externalzonefile.write(self.templates['reverseapp'].render(context))
- else:
- originlist = [filename[0]]
- hosts = [(host[1].split("."), host[0])
- for host in reversehosts
- if (host[1].split("."), host[0]) not in hosts]
- hosts_external = [(host[1].split("."), host[0])
- for host in reversehosts
- if ((host[1].split("."), host[0]) not in hosts_external
- and host[2] == 'global')]
- context = Context({
- 'hosts': hosts,
- 'inaddr': filename[0],
- 'fileorigin': None,
- })
- zonefile.write(self.templates['reverseapp'].render(context))
- context = Context({
- 'hosts': hosts_external,
- 'inaddr': filename[0],
- 'fileorigin': None,
- })
- externalzonefile.write(self.templates['reverseapp'].render(context))
- self.filedata['%s.rev' % filename[0]] += zonefile.getvalue()
- self.filedata['%s.rev.external' % filename[0]] += externalzonefile.getvalue()
- zonefile.close()
- externalzonefile.close()
- self.Entries['ConfigFile']['%s/%s.rev' % (self.filepath, filename[0])] = self.FetchFile
- self.Entries['ConfigFile']['%s/%s.rev.external' % (self.filepath, filename[0])] = self.FetchFile
-
- ## here's where the named.conf file gets written
- context = Context({
- 'zones': zones,
- 'reverses': reversenames,
- })
- self.filedata['named.conf'] = self.templates['named'].render(context)
- self.Entries['ConfigFile']['/my/adm/hostbase/files/named.conf'] = self.FetchFile
- self.filedata['named.conf.views'] = self.templates['namedviews'].render(context)
- self.Entries['ConfigFile']['/my/adm/hostbase/files/named.conf.views'] = self.FetchFile
-
- def buildDHCP(self):
- """Pre-build dhcpd.conf and stash in the filedata table."""
-
- # fetches all the hosts with DHCP == True
- cursor = connection.cursor()
- cursor.execute("""
- SELECT hostname, mac_addr, ip_addr
- FROM (hostbase_host h INNER JOIN hostbase_interface i ON h.id = i.host_id)
- INNER JOIN hostbase_ip ip ON i.id = ip.interface_id
- WHERE i.dhcp=1 AND h.status='active' AND i.mac_addr <> ''
- AND i.mac_addr <> 'float' AND i.mac_addr <> 'unknown'
- ORDER BY h.hostname, i.mac_addr
- """)
-
- dhcphosts = cursor.fetchall()
- count = 0
- hosts = []
- hostdata = [dhcphosts[0][0], dhcphosts[0][1], dhcphosts[0][2]]
- if len(dhcphosts) > 1:
- for x in range(1, len(dhcphosts)):
- # if an interface has 2 or more ip addresses
- # adds the ip to the current interface
- if hostdata[0].split(".")[0] == dhcphosts[x][0].split(".")[0] and hostdata[1] == dhcphosts[x][1]:
- hostdata[2] = ", ".join([hostdata[2], dhcphosts[x][2]])
- # if a host has 2 or more interfaces
- # writes the current one and grabs the next
- elif hostdata[0].split(".")[0] == dhcphosts[x][0].split(".")[0]:
- hosts.append(hostdata)
- count += 1
- hostdata = ["-".join([dhcphosts[x][0], str(count)]), dhcphosts[x][1], dhcphosts[x][2]]
- # new host found, writes current data to the template
- else:
- hosts.append(hostdata)
- count = 0
- hostdata = [dhcphosts[x][0], dhcphosts[x][1], dhcphosts[x][2]]
- #makes sure the last of the data gets written out
- if hostdata not in hosts:
- hosts.append(hostdata)
-
- context = Context({
- 'hosts': hosts,
- 'numips': len(hosts),
- })
-
- self.filedata['dhcpd.conf'] = self.templates['dhcp'].render(context)
- self.Entries['ConfigFile']['/my/adm/hostbase/files/dhcpd.conf'] = self.FetchFile
-
- def buildHosts(self):
- """Pre-build and stash /etc/hosts file."""
-
- append_data = []
-
- cursor = connection.cursor()
- cursor.execute("""
- SELECT hostname FROM hostbase_host ORDER BY hostname
- """)
- hostbase = cursor.fetchall()
- domains = [host[0].split(".", 1)[1] for host in hostbase]
- domains_set = set(domains)
- domain_data = [(domain, domains.count(domain)) for domain in domains_set]
- domain_data.sort()
-
- cursor.execute("""
- SELECT ip_addr FROM hostbase_ip ORDER BY ip_addr
- """)
- ips = cursor.fetchall()
- three_octets = [ip[0].rstrip('0123456789').rstrip('.') \
- for ip in ips]
- three_octets_set = set(three_octets)
- three_octets_data = [(octet, three_octets.count(octet)) \
- for octet in three_octets_set]
- three_octets_data.sort()
-
- for three_octet in three_octets_data:
- querystring = """SELECT h.hostname, h.primary_user,
- p.ip_addr, n.name, c.cname
- FROM (((hostbase_host h INNER JOIN hostbase_interface i ON h.id = i.host_id)
- INNER JOIN hostbase_ip p ON i.id = p.interface_id)
- INNER JOIN hostbase_name n ON p.id = n.ip_id)
- LEFT JOIN hostbase_cname c ON n.id = c.name_id
- WHERE p.ip_addr LIKE \'%s.%%%%\' AND h.status = 'active'""" % three_octet[0]
- cursor.execute(querystring)
- tosort = list(cursor.fetchall())
- tosort.sort(lambda x, y: cmp(int(x[2].split(".")[-1]), int(y[2].split(".")[-1])))
- append_data.append((three_octet, tuple(tosort)))
-
- two_octets = [ip.rstrip('0123456789').rstrip('.') for ip in three_octets]
- two_octets_set = set(two_octets)
- two_octets_data = [(octet, two_octets.count(octet))
- for octet in two_octets_set]
- two_octets_data.sort()
-
- context = Context({
- 'domain_data': domain_data,
- 'three_octets_data': three_octets_data,
- 'two_octets_data': two_octets_data,
- 'three_octets': three_octets,
- 'num_ips': len(three_octets),
- })
-
- self.filedata['hosts'] = self.templates['hosts'].render(context)
-
- for subnet in append_data:
- ips = []
- simple = True
- namelist = [name.split('.', 1)[0] for name in [subnet[1][0][3]]]
- cnamelist = []
- if subnet[1][0][4]:
- cnamelist.append(subnet[1][0][4].split('.', 1)[0])
- simple = False
- appenddata = subnet[1][0]
- for ip in subnet[1][1:]:
- if appenddata[2] == ip[2]:
- namelist.append(ip[3].split('.', 1)[0])
- if ip[4]:
- cnamelist.append(ip[4].split('.', 1)[0])
- simple = False
- appenddata = ip
- else:
- if appenddata[0] == ip[0]:
- simple = False
- ips.append((appenddata[2], appenddata[0], set(namelist),
- cnamelist, simple, appenddata[1]))
- appenddata = ip
- simple = True
- namelist = [ip[3].split('.', 1)[0]]
- cnamelist = []
- if ip[4]:
- cnamelist.append(ip[4].split('.', 1)[0])
- simple = False
- ips.append((appenddata[2], appenddata[0], set(namelist),
- cnamelist, simple, appenddata[1]))
- context = Context({
- 'subnet': subnet[0],
- 'ips': ips,
- })
- self.filedata['hosts'] += self.templates['hostsapp'].render(context)
- self.Entries['ConfigFile']['/mcs/etc/hosts'] = self.FetchFile
-
- def buildPrinters(self):
- """The /mcs/etc/printers.data file"""
- header = """# This file is automatically generated. DO NOT EDIT IT!
-#
-Name Room User Type Notes
-============== ========== ============================== ======================== ====================
-"""
-
- cursor = connection.cursor()
- # fetches all the printers from the database
- cursor.execute("""
- SELECT printq, location, primary_user, comments
- FROM hostbase_host
- WHERE whatami='printer' AND printq <> '' AND status = 'active'
- ORDER BY printq
- """)
- printers = cursor.fetchall()
-
- printersfile = header
- for printer in printers:
- # splits up the printq line and gets the
- # correct description out of the comments section
- temp = printer[3].split('\n')
- for printq in re.split(',[ ]*', printer[0]):
- if len(temp) > 1:
- printersfile += ("%-16s%-12s%-32s%-26s%s\n" %
- (printq, printer[1], printer[2], temp[1], temp[0]))
- else:
- printersfile += ("%-16s%-12s%-32s%-26s%s\n" %
- (printq, printer[1], printer[2], '', printer[3]))
- self.filedata['printers.data'] = printersfile
- self.Entries['ConfigFile']['/mcs/etc/printers.data'] = self.FetchFile
-
- def buildHostsLPD(self):
- """Creates the /mcs/etc/hosts.lpd file"""
-
- # this header needs to be changed to be more generic
- header = """+@machines
-+@all-machines
-achilles.ctd.anl.gov
-raven.ops.anl.gov
-seagull.hr.anl.gov
-parrot.ops.anl.gov
-condor.ops.anl.gov
-delphi.esh.anl.gov
-anlcv1.ctd.anl.gov
-anlvms.ctd.anl.gov
-olivia.ctd.anl.gov\n\n"""
-
- cursor = connection.cursor()
- cursor.execute("""
- SELECT hostname FROM hostbase_host WHERE netgroup=\"red\" AND status = 'active'
- ORDER BY hostname""")
- redmachines = list(cursor.fetchall())
- cursor.execute("""
- SELECT n.name FROM ((hostbase_host h INNER JOIN hostbase_interface i ON h.id = i.host_id)
- INNER JOIN hostbase_ip p ON i.id = p.interface_id) INNER JOIN hostbase_name n ON p.id = n.ip_id
- WHERE netgroup=\"red\" AND n.only=1 AND h.status = 'active'
- """)
- redmachines.extend(list(cursor.fetchall()))
- cursor.execute("""
- SELECT hostname FROM hostbase_host WHERE netgroup=\"win\" AND status = 'active'
- ORDER BY hostname""")
- winmachines = list(cursor.fetchall())
- cursor.execute("""
- SELECT n.name FROM ((hostbase_host h INNER JOIN hostbase_interface i ON h.id = i.host_id)
- INNER JOIN hostbase_ip p ON i.id = p.interface_id) INNER JOIN hostbase_name n ON p.id = n.ip_id
- WHERE netgroup=\"win\" AND n.only=1 AND h.status = 'active'
- """)
- winmachines.__add__(list(cursor.fetchall()))
- hostslpdfile = header
- for machine in redmachines:
- hostslpdfile += machine[0] + "\n"
- hostslpdfile += "\n"
- for machine in winmachines:
- hostslpdfile += machine[0] + "\n"
- self.filedata['hosts.lpd'] = hostslpdfile
- self.Entries['ConfigFile']['/mcs/etc/hosts.lpd'] = self.FetchFile
-
- def buildNetgroups(self):
- """Makes the *-machine files"""
- header = """###################################################################
-# This file lists hosts in the '%s' machine netgroup, it is
-# automatically generated. DO NOT EDIT THIS FILE!
-#
-# Number of hosts in '%s' machine netgroup: %i
-#\n\n"""
-
- cursor = connection.cursor()
- # fetches all the hosts that with valid netgroup entries
- cursor.execute("""
- SELECT h.hostname, n.name, h.netgroup, n.only FROM ((hostbase_host h
- INNER JOIN hostbase_interface i ON h.id = i.host_id)
- INNER JOIN hostbase_ip p ON i.id = p.interface_id)
- INNER JOIN hostbase_name n ON p.id = n.ip_id
- WHERE h.netgroup <> '' AND h.netgroup <> 'none' AND h.status = 'active'
- ORDER BY h.netgroup, h.hostname
- """)
- nameslist = cursor.fetchall()
- # gets the first host and initializes the hash
- hostdata = nameslist[0]
- netgroups = {hostdata[2]: [hostdata[0]]}
- for row in nameslist:
- # if new netgroup, create it
- if row[2] not in netgroups:
- netgroups.update({row[2]: []})
- # if it belongs in the netgroup and has multiple interfaces, put them in
- if hostdata[0] == row[0] and row[3]:
- netgroups[row[2]].append(row[1])
- hostdata = row
- # if its a new host, write the old one to the hash
- elif hostdata[0] != row[0]:
- netgroups[row[2]].append(row[0])
- hostdata = row
-
- for netgroup in netgroups:
- fileoutput = StringIO()
- fileoutput.write(header % (netgroup, netgroup, len(netgroups[netgroup])))
- for each in netgroups[netgroup]:
- fileoutput.write(each + "\n")
- self.filedata['%s-machines' % netgroup] = fileoutput.getvalue()
- fileoutput.close()
- self.Entries['ConfigFile']['/my/adm/hostbase/makenets/machines/%s-machines' % netgroup] = self.FetchFile
-
- cursor.execute("""
- UPDATE hostbase_host SET dirty=0
- """)
diff --git a/src/lib/Bcfg2/Server/Plugins/Ldap.py b/src/lib/Bcfg2/Server/Plugins/Ldap.py
index f724402d0..8e8b078d9 100644
--- a/src/lib/Bcfg2/Server/Plugins/Ldap.py
+++ b/src/lib/Bcfg2/Server/Plugins/Ldap.py
@@ -44,10 +44,10 @@ class ConfigFile(Bcfg2.Server.Plugin.FileBacked):
The approach implemented here is having the user call a registering
decorator that updates a global variable in this module.
"""
- def __init__(self, filename, fam):
+ def __init__(self, filename):
self.filename = filename
Bcfg2.Server.Plugin.FileBacked.__init__(self, self.filename)
- fam.AddMonitor(self.filename, self)
+ self.fam.AddMonitor(self.filename, self)
def Index(self):
"""
@@ -72,7 +72,7 @@ class Ldap(Bcfg2.Server.Plugin.Plugin, Bcfg2.Server.Plugin.Connector):
def __init__(self, core, datastore):
Bcfg2.Server.Plugin.Plugin.__init__(self, core, datastore)
Bcfg2.Server.Plugin.Connector.__init__(self)
- self.config = ConfigFile(self.data + "/config.py", core.fam)
+ self.config = ConfigFile(self.data + "/config.py")
def debug_log(self, message, flag = None):
if (flag is None) and self.debug_flag or flag:
diff --git a/src/lib/Bcfg2/Server/Plugins/Metadata.py b/src/lib/Bcfg2/Server/Plugins/Metadata.py
index 3b8361c76..507973fa6 100644
--- a/src/lib/Bcfg2/Server/Plugins/Metadata.py
+++ b/src/lib/Bcfg2/Server/Plugins/Metadata.py
@@ -97,7 +97,6 @@ class XMLMetadataConfig(Bcfg2.Server.Plugin.XMLFileBacked):
fpath = os.path.join(metadata.data, basefile)
toptag = os.path.splitext(basefile)[0].title()
Bcfg2.Server.Plugin.XMLFileBacked.__init__(self, fpath,
- fam=metadata.core.fam,
should_monitor=False,
create=toptag)
self.should_monitor = watch_clients
@@ -107,7 +106,7 @@ class XMLMetadataConfig(Bcfg2.Server.Plugin.XMLFileBacked):
self.basedata = None
self.basedir = metadata.data
self.logger = metadata.logger
- self.pseudo_monitor = isinstance(metadata.core.fam,
+ self.pseudo_monitor = isinstance(Bcfg2.Server.FileMonitor.get_fam(),
Bcfg2.Server.FileMonitor.Pseudo)
def _get_xdata(self):
@@ -245,7 +244,7 @@ class XMLMetadataConfig(Bcfg2.Server.Plugin.XMLFileBacked):
def add_monitor(self, fpath):
self.extras.append(fpath)
- if self.fam and self.should_monitor:
+ if self.should_monitor:
self.fam.AddMonitor(fpath, self.metadata)
def HandleEvent(self, event=None):
@@ -568,7 +567,8 @@ class Metadata(Bcfg2.Server.Plugin.Metadata,
(clients.xml or groups.xml, e.g.) """
if self.watch_clients:
try:
- self.core.fam.AddMonitor(os.path.join(self.data, fname), self)
+ Bcfg2.Server.FileMonitor.get_fam().AddMonitor(
+ os.path.join(self.data, fname), self)
except:
err = sys.exc_info()[1]
msg = "Unable to add file monitor for %s: %s" % (fname, err)
diff --git a/src/lib/Bcfg2/Server/Plugins/NagiosGen.py b/src/lib/Bcfg2/Server/Plugins/NagiosGen.py
index 466665382..9603cd518 100644
--- a/src/lib/Bcfg2/Server/Plugins/NagiosGen.py
+++ b/src/lib/Bcfg2/Server/Plugins/NagiosGen.py
@@ -5,25 +5,22 @@ import re
import sys
import glob
import socket
-import Bcfg2.Server
-import Bcfg2.Server.Plugin
+from Bcfg2.Server.Plugin import Plugin, Generator, StructFile, \
+ PluginExecutionError
-class NagiosGen(Bcfg2.Server.Plugin.Plugin,
- Bcfg2.Server.Plugin.Generator):
+class NagiosGen(Plugin, Generator):
""" NagiosGen is a Bcfg2 plugin that dynamically generates Nagios
configuration file based on Bcfg2 data. """
__author__ = 'bcfg-dev@mcs.anl.gov'
line_fmt = '\t%-32s %s'
def __init__(self, core, datastore):
- Bcfg2.Server.Plugin.Plugin.__init__(self, core, datastore)
- Bcfg2.Server.Plugin.Generator.__init__(self)
+ Plugin.__init__(self, core, datastore)
+ Generator.__init__(self)
self.config = \
- Bcfg2.Server.Plugin.StructFile(os.path.join(self.data,
- 'config.xml'),
- core.fam, should_monitor=True,
- create=self.name)
+ StructFile(os.path.join(self.data, 'config.xml'),
+ should_monitor=True, create=self.name)
self.Entries = {'Path':
{'/etc/nagiosgen.status': self.createhostconfig,
'/etc/nagios/nagiosgen.cfg': self.createserverconfig}}
@@ -44,9 +41,8 @@ class NagiosGen(Bcfg2.Server.Plugin.Plugin,
try:
host_address = socket.gethostbyname(metadata.hostname)
except socket.gaierror:
- self.logger.error("Failed to find IP address for %s" %
- metadata.hostname)
- raise Bcfg2.Server.Plugin.PluginExecutionError
+ raise PluginExecutionError("Failed to find IP address for %s" %
+ metadata.hostname)
host_groups = [grp for grp in metadata.groups
if os.path.isfile('%s/%s-group.cfg' % (self.data, grp))]
host_config = ['define host {',
diff --git a/src/lib/Bcfg2/Server/Plugins/Ohai.py b/src/lib/Bcfg2/Server/Plugins/Ohai.py
index 1ec3cbd60..18261be10 100644
--- a/src/lib/Bcfg2/Server/Plugins/Ohai.py
+++ b/src/lib/Bcfg2/Server/Plugins/Ohai.py
@@ -78,8 +78,6 @@ class Ohai(Bcfg2.Server.Plugin.Plugin,
"""The Ohai plugin is used to detect information
about the client operating system.
"""
- name = 'Ohai'
- experimental = True
def __init__(self, core, datastore):
Bcfg2.Server.Plugin.Plugin.__init__(self, core, datastore)
diff --git a/src/lib/Bcfg2/Server/Plugins/Packages/Apt.py b/src/lib/Bcfg2/Server/Plugins/Packages/Apt.py
index a82a183d8..dba56eed2 100644
--- a/src/lib/Bcfg2/Server/Plugins/Packages/Apt.py
+++ b/src/lib/Bcfg2/Server/Plugins/Packages/Apt.py
@@ -13,8 +13,7 @@ class AptCollection(Collection):
overrides nothing, and defers all operations to :class:`PacSource`
"""
- def __init__(self, metadata, sources, cachepath, basepath, fam,
- debug=False):
+ def __init__(self, metadata, sources, cachepath, basepath, debug=False):
# we define an __init__ that just calls the parent __init__,
# so that we can set the docstring on __init__ to something
# different from the parent __init__ -- namely, the parent
@@ -22,7 +21,7 @@ class AptCollection(Collection):
# which we use to delineate the actual docs from the
# .. autoattribute hacks we have to do to get private
# attributes included in sphinx 1.0 """
- Collection.__init__(self, metadata, sources, cachepath, basepath, fam,
+ Collection.__init__(self, metadata, sources, cachepath, basepath,
debug=debug)
__init__.__doc__ = Collection.__init__.__doc__.split(".. -----")[0]
@@ -53,10 +52,6 @@ class AptCollection(Collection):
class AptSource(Source):
""" Handle APT sources """
- #: :ref:`server-plugins-generators-packages-magic-groups` for
- #: ``AptSource`` are "apt", "debian", "ubuntu", and "nexenta"
- basegroups = ['apt', 'debian', 'ubuntu', 'nexenta']
-
#: AptSource sets the ``type`` on Package entries to "deb"
ptype = 'deb'
diff --git a/src/lib/Bcfg2/Server/Plugins/Packages/Collection.py b/src/lib/Bcfg2/Server/Plugins/Packages/Collection.py
index b25cb0fc4..59eefe143 100644
--- a/src/lib/Bcfg2/Server/Plugins/Packages/Collection.py
+++ b/src/lib/Bcfg2/Server/Plugins/Packages/Collection.py
@@ -78,7 +78,10 @@ import copy
import logging
import lxml.etree
import Bcfg2.Server.Plugin
+from Bcfg2.Server.FileMonitor import get_fam
+from Bcfg2.Options import get_option_parser
from Bcfg2.Compat import any, md5 # pylint: disable=W0622
+from Bcfg2.Server.Statistics import track_statistics
LOGGER = logging.getLogger(__name__)
@@ -93,8 +96,7 @@ class Collection(list, Bcfg2.Server.Plugin.Debuggable):
#: Whether or not this Packages backend supports package groups
__package_groups__ = False
- def __init__(self, metadata, sources, cachepath, basepath, fam,
- debug=False):
+ def __init__(self, metadata, sources, cachepath, basepath, debug=False):
"""
:param metadata: The client metadata for this collection
:type metadata: Bcfg2.Server.Plugins.Metadata.ClientMetadata
@@ -111,9 +113,6 @@ class Collection(list, Bcfg2.Server.Plugin.Debuggable):
directory, where more permanent data can be
stored
:type basepath: string
- :param fam: A file monitor object to use if this Collection
- needs to monitor for file activity
- :type fam: Bcfg2.Server.FileMonitor.FileMonitor
:param debug: Enable debugging output
:type debug: bool
@@ -127,13 +126,12 @@ class Collection(list, Bcfg2.Server.Plugin.Debuggable):
self.basepath = basepath
self.cachepath = cachepath
self.virt_pkgs = dict()
- self.fam = fam
+ self.fam = get_fam()
+ self.setup = get_option_parser()
try:
- self.setup = sources[0].setup
self.ptype = sources[0].ptype
except IndexError:
- self.setup = None
self.ptype = "unknown"
@property
@@ -204,19 +202,6 @@ class Collection(list, Bcfg2.Server.Plugin.Debuggable):
return sorted(list(set(groups)))
@property
- def basegroups(self):
- """ Get a list of group names used by this Collection type in
- resolution of
- :ref:`server-plugins-generators-packages-magic-groups`.
-
- The base implementation simply aggregates the results of
- :attr:`Bcfg2.Server.Plugins.Packages.Source.Source.basegroups`."""
- groups = set()
- for source in self:
- groups.update(source.basegroups)
- return list(groups)
-
- @property
def cachefiles(self):
""" A list of the full path to all cachefiles used by this
collection.
@@ -229,7 +214,7 @@ class Collection(list, Bcfg2.Server.Plugin.Debuggable):
cachefiles.add(source.cachefile)
return list(cachefiles)
- @Bcfg2.Server.Plugin.track_statistics()
+ @track_statistics()
def get_groups(self, grouplist):
""" Given a list of package group names, return a dict of
``<group name>: <list of packages>``. This method is provided
@@ -250,7 +235,7 @@ class Collection(list, Bcfg2.Server.Plugin.Debuggable):
rv[group] = self.get_group(group, ptype)
return rv
- @Bcfg2.Server.Plugin.track_statistics()
+ @track_statistics()
def get_group(self, group, ptype=None):
""" Get the list of packages of the given type in a package
group.
@@ -386,20 +371,6 @@ class Collection(list, Bcfg2.Server.Plugin.Debuggable):
for source in self:
source.filter_unknown(unknown)
- def magic_groups_match(self):
- """ Returns True if the client's
- :ref:`server-plugins-generators-packages-magic-groups` match
- the magic groups for any of the sources contained in this
- Collection.
-
- The base implementation returns True if any source
- :func:`Bcfg2.Server.Plugins.Packages.Source.Source.magic_groups_match`
- returns True.
-
- :returns: bool
- """
- return any(s.magic_groups_match(self.metadata) for s in self)
-
def build_extra_structures(self, independent):
""" Add additional entries to the ``<Independent/>`` section
of the final configuration. This can be used to handle, e.g.,
@@ -499,7 +470,7 @@ class Collection(list, Bcfg2.Server.Plugin.Debuggable):
"""
return list(complete.difference(initial))
- @Bcfg2.Server.Plugin.track_statistics()
+ @track_statistics()
def complete(self, packagelist): # pylint: disable=R0912,R0914
""" Build a complete list of all packages and their dependencies.
diff --git a/src/lib/Bcfg2/Server/Plugins/Packages/Pac.py b/src/lib/Bcfg2/Server/Plugins/Packages/Pac.py
index 99aed5ce5..5f4d2ea41 100644
--- a/src/lib/Bcfg2/Server/Plugins/Packages/Pac.py
+++ b/src/lib/Bcfg2/Server/Plugins/Packages/Pac.py
@@ -12,8 +12,7 @@ class PacCollection(Collection):
overrides nothing, and defers all operations to :class:`PacSource`
"""
- def __init__(self, metadata, sources, cachepath, basepath, fam,
- debug=False):
+ def __init__(self, metadata, sources, cachepath, basepath, debug=False):
# we define an __init__ that just calls the parent __init__,
# so that we can set the docstring on __init__ to something
# different from the parent __init__ -- namely, the parent
@@ -21,7 +20,7 @@ class PacCollection(Collection):
# which we use to delineate the actual docs from the
# .. autoattribute hacks we have to do to get private
# attributes included in sphinx 1.0 """
- Collection.__init__(self, metadata, sources, cachepath, basepath, fam,
+ Collection.__init__(self, metadata, sources, cachepath, basepath,
debug=debug)
__init__.__doc__ = Collection.__init__.__doc__.split(".. -----")[0]
@@ -29,10 +28,6 @@ class PacCollection(Collection):
class PacSource(Source):
""" Handle Pacman sources """
- #: :ref:`server-plugins-generators-packages-magic-groups` for
- #: ``PacSource`` are "arch" and "parabola"
- basegroups = ['arch', 'parabola']
-
#: PacSource sets the ``type`` on Package entries to "pacman"
ptype = 'pacman'
diff --git a/src/lib/Bcfg2/Server/Plugins/Packages/PackagesSources.py b/src/lib/Bcfg2/Server/Plugins/Packages/PackagesSources.py
index 332f0bbab..aa6127f57 100644
--- a/src/lib/Bcfg2/Server/Plugins/Packages/PackagesSources.py
+++ b/src/lib/Bcfg2/Server/Plugins/Packages/PackagesSources.py
@@ -4,6 +4,8 @@
import os
import sys
import Bcfg2.Server.Plugin
+from Bcfg2.Options import get_option_parser
+from Bcfg2.Server.Statistics import track_statistics
from Bcfg2.Server.Plugins.Packages.Source import SourceInitError
@@ -19,7 +21,7 @@ class PackagesSources(Bcfg2.Server.Plugin.StructFile,
__identifier__ = None
create = "Sources"
- def __init__(self, filename, cachepath, fam, packages, setup):
+ def __init__(self, filename, cachepath, packages):
"""
:param filename: The full path to ``sources.xml``
:type filename: string
@@ -27,21 +29,16 @@ class PackagesSources(Bcfg2.Server.Plugin.StructFile,
:class:`Bcfg2.Server.Plugins.Packages.Source.Source`
data will be cached
:type cachepath: string
- :param fam: The file access monitor to use to create watches
- on ``sources.xml`` and any XIncluded files.
- :type fam: Bcfg2.Server.FileMonitor.FileMonitor
:param packages: The Packages plugin object ``sources.xml`` is
being parsed on behalf of (i.e., the calling
object)
:type packages: Bcfg2.Server.Plugins.Packages.Packages
- :param setup: A Bcfg2 options dict
- :type setup: dict
:raises: :class:`Bcfg2.Server.Plugin.exceptions.PluginInitError` -
If ``sources.xml`` cannot be read
"""
Bcfg2.Server.Plugin.Debuggable.__init__(self)
- Bcfg2.Server.Plugin.StructFile.__init__(self, filename, fam=fam,
+ Bcfg2.Server.Plugin.StructFile.__init__(self, filename,
should_monitor=True)
#: The full path to the directory where
@@ -58,7 +55,7 @@ class PackagesSources(Bcfg2.Server.Plugin.StructFile,
self.logger.error("Could not create Packages cache at %s: %s" %
(self.cachepath, err))
#: The Bcfg2 options dict
- self.setup = setup
+ self.setup = get_option_parser()
#: The :class:`Bcfg2.Server.Plugins.Packages.Packages` that
#: instantiated this ``PackagesSources`` object
@@ -107,7 +104,7 @@ class PackagesSources(Bcfg2.Server.Plugin.StructFile,
load its data. """
return sorted(list(self.parsed)) == sorted(self.extras)
- @Bcfg2.Server.Plugin.track_statistics()
+ @track_statistics()
def Index(self):
Bcfg2.Server.Plugin.StructFile.Index(self)
self.entries = []
@@ -120,7 +117,7 @@ class PackagesSources(Bcfg2.Server.Plugin.StructFile,
``Index`` is responsible for calling :func:`source_from_xml`
for each ``Source`` tag in each file. """
- @Bcfg2.Server.Plugin.track_statistics()
+ @track_statistics()
def source_from_xml(self, xsource):
""" Create a
:class:`Bcfg2.Server.Plugins.Packages.Source.Source` subclass
@@ -153,7 +150,7 @@ class PackagesSources(Bcfg2.Server.Plugin.StructFile,
return None
try:
- source = cls(self.cachepath, xsource, self.setup)
+ source = cls(self.cachepath, xsource)
except SourceInitError:
err = sys.exc_info()[1]
self.logger.error("Packages: %s" % err)
diff --git a/src/lib/Bcfg2/Server/Plugins/Packages/Source.py b/src/lib/Bcfg2/Server/Plugins/Packages/Source.py
index 22073493c..767ac13ac 100644
--- a/src/lib/Bcfg2/Server/Plugins/Packages/Source.py
+++ b/src/lib/Bcfg2/Server/Plugins/Packages/Source.py
@@ -27,7 +27,6 @@ in your ``Source`` subclass:
* :func:`Source.urls`
* :func:`Source.read_files`
-* :attr:`Source.basegroups`
Additionally, you may want to consider overriding the following
methods and attributes:
@@ -51,9 +50,11 @@ import os
import re
import sys
import Bcfg2.Server.Plugin
+from Bcfg2.Options import get_option_parser
from Bcfg2.Compat import HTTPError, HTTPBasicAuthHandler, \
HTTPPasswordMgrWithDefaultRealm, install_opener, build_opener, urlopen, \
cPickle, md5
+from Bcfg2.Server.Statistics import track_statistics
def fetch_url(url):
@@ -106,25 +107,18 @@ class Source(Bcfg2.Server.Plugin.Debuggable): # pylint: disable=R0902
those features.
"""
- #: The list of
- #: :ref:`server-plugins-generators-packages-magic-groups` that
- #: make sources of this type available to clients.
- basegroups = []
-
#: The Package type handled by this Source class. The ``type``
#: attribute of Package entries will be set to the value ``ptype``
#: when they are handled by :mod:`Bcfg2.Server.Plugins.Packages`.
ptype = None
- def __init__(self, basepath, xsource, setup): # pylint: disable=R0912
+ def __init__(self, basepath, xsource): # pylint: disable=R0912
"""
:param basepath: The base filesystem path under which cache
data for this source should be stored
:type basepath: string
:param xsource: The XML tag that describes this source
:type source: lxml.etree._Element
- :param setup: A Bcfg2 options dict
- :type setup: dict
:raises: :class:`Bcfg2.Server.Plugins.Packages.Source.SourceInitError`
"""
Bcfg2.Server.Plugin.Debuggable.__init__(self)
@@ -137,7 +131,7 @@ class Source(Bcfg2.Server.Plugin.Debuggable): # pylint: disable=R0902
self.xsource = xsource
#: A Bcfg2 options dict
- self.setup = setup
+ self.setup = get_option_parser()
#: A set of package names that are deemed "essential" by this
#: source
@@ -308,8 +302,7 @@ class Source(Bcfg2.Server.Plugin.Debuggable): # pylint: disable=R0902
:return: list of strings - group names
"""
return sorted(list(set([g for g in metadata.groups
- if (g in self.basegroups or
- g in self.groups or
+ if (g in self.groups or
g in self.arches)])))
def load_state(self):
@@ -332,7 +325,7 @@ class Source(Bcfg2.Server.Plugin.Debuggable): # pylint: disable=R0902
self.essentialpkgs), cache, 2)
cache.close()
- @Bcfg2.Server.Plugin.track_statistics()
+ @track_statistics()
def setup_data(self, force_update=False):
""" Perform all data fetching and setup tasks. For most
backends, this involves downloading all metadata from the
@@ -636,16 +629,15 @@ class Source(Bcfg2.Server.Plugin.Debuggable): # pylint: disable=R0902
def applies(self, metadata):
""" Return true if this source applies to the given client,
- i.e., the client is in all necessary groups and
- :ref:`server-plugins-generators-packages-magic-groups`.
+ i.e., the client is in all necessary groups.
:param metadata: The client metadata to check to see if this
source applies
:type metadata: Bcfg2.Server.Plugins.Metadata.ClientMetadata
:returns: bool
"""
- # check base groups
- if not self.magic_groups_match(metadata):
+ # check arch groups
+ if not self.arch_groups_match(metadata):
return False
# check Group/Client tags from sources.xml
@@ -716,29 +708,13 @@ class Source(Bcfg2.Server.Plugin.Debuggable): # pylint: disable=R0902
"""
return []
- def magic_groups_match(self, metadata):
- """ Returns True if the client's
- :ref:`server-plugins-generators-packages-magic-groups` match
- the magic groups this source. Also returns True if magic
- groups are off in the configuration and the client's
- architecture matches (i.e., architecture groups are *always*
- checked).
+ def arch_groups_match(self, metadata):
+ """ Returns True if the client is in an arch group that
+ matches the arch of this source.
:returns: bool
"""
- found_arch = False
for arch in self.arches:
if arch in metadata.groups:
- found_arch = True
- break
- if not found_arch:
- return False
-
- if not self.setup.cfp.getboolean("packages", "magic_groups",
- default=False):
- return True
- else:
- for group in self.basegroups:
- if group in metadata.groups:
- return True
- return False
+ return True
+ return False
diff --git a/src/lib/Bcfg2/Server/Plugins/Packages/Yum.py b/src/lib/Bcfg2/Server/Plugins/Packages/Yum.py
index bb7caab0d..a4b17f05a 100644
--- a/src/lib/Bcfg2/Server/Plugins/Packages/Yum.py
+++ b/src/lib/Bcfg2/Server/Plugins/Packages/Yum.py
@@ -58,8 +58,10 @@ import errno
import socket
import logging
import lxml.etree
-from subprocess import Popen, PIPE
+import Bcfg2.Server.FileMonitor
import Bcfg2.Server.Plugin
+from Bcfg2.Utils import Executor
+from Bcfg2.Options import get_option_parser
# pylint: disable=W0622
from Bcfg2.Compat import StringIO, cPickle, HTTPError, URLError, \
ConfigParser, any
@@ -67,6 +69,7 @@ from Bcfg2.Compat import StringIO, cPickle, HTTPError, URLError, \
from Bcfg2.Server.Plugins.Packages.Collection import Collection
from Bcfg2.Server.Plugins.Packages.Source import SourceInitError, Source, \
fetch_url
+from Bcfg2.Server.Statistics import track_statistics
LOGGER = logging.getLogger(__name__)
@@ -102,17 +105,12 @@ FL = '{http://linux.duke.edu/metadata/filelists}'
PULPSERVER = None
PULPCONFIG = None
-#: The path to bcfg2-yum-helper
-HELPER = None
-
-def _setup_pulp(setup):
+def _setup_pulp():
""" Connect to a Pulp server and pass authentication credentials.
This only needs to be called once, but multiple calls won't hurt
anything.
- :param setup: A Bcfg2 options dict
- :type setup: dict
:returns: :class:`pulp.client.api.server.PulpServer`
"""
global PULPSERVER, PULPCONFIG
@@ -123,6 +121,7 @@ def _setup_pulp(setup):
raise Bcfg2.Server.Plugin.PluginInitError(msg)
if PULPSERVER is None:
+ setup = get_option_parser()
try:
username = setup.cfp.get("packages:pulp", "username")
password = setup.cfp.get("packages:pulp", "password")
@@ -174,7 +173,7 @@ class PulpCertificateSet(Bcfg2.Server.Plugin.EntrySet):
#: The path to certificates on consumer machines
certpath = "/etc/pki/consumer/cert.pem"
- def __init__(self, path, fam):
+ def __init__(self, path):
"""
:param path: The path to the directory where Pulp consumer
certificates will be stored
@@ -192,7 +191,7 @@ class PulpCertificateSet(Bcfg2.Server.Plugin.EntrySet):
important='true',
sensitive='true',
paranoid=self.metadata['paranoid'])
- self.fam = fam
+ self.fam = Bcfg2.Server.FileMonitor.get_fam()
self.fam.AddMonitor(path, self)
def HandleEvent(self, event):
@@ -271,12 +270,12 @@ class YumCollection(Collection):
#: :class:`PulpCertificateSet` object used to handle Pulp certs
pulp_cert_set = None
- def __init__(self, metadata, sources, cachepath, basepath, fam,
- debug=False):
- Collection.__init__(self, metadata, sources, cachepath, basepath, fam,
+ def __init__(self, metadata, sources, cachepath, basepath, debug=False):
+ Collection.__init__(self, metadata, sources, cachepath, basepath,
debug=debug)
self.keypath = os.path.join(self.cachepath, "keys")
+ self._helper = None
if self.use_yum:
#: Define a unique cache file for this collection to use
#: for cached yum metadata
@@ -289,11 +288,13 @@ class YumCollection(Collection):
#: resolving packages with the Python yum libraries
self.cfgfile = os.path.join(self.cachefile, "yum.conf")
self.write_config()
+ self.cmd = Executor()
else:
self.cachefile = None
+ self.cmd = None
if HAS_PULP and self.has_pulp_sources:
- _setup_pulp(self.setup)
+ _setup_pulp()
if self.pulp_cert_set is None:
certdir = os.path.join(
self.basepath,
@@ -309,7 +310,7 @@ class YumCollection(Collection):
self.logger.error("Could not create Pulp consumer "
"cert directory at %s: %s" %
(certdir, err))
- self.pulp_cert_set = PulpCertificateSet(certdir, self.fam)
+ self.pulp_cert_set = PulpCertificateSet(certdir)
@property
def __package_groups__(self):
@@ -323,20 +324,18 @@ class YumCollection(Collection):
a call to it; I wish there was a way to do this without
forking, but apparently not); finally we check in /usr/sbin,
the default location. """
- global HELPER
- if not HELPER:
+ if not self._helper:
try:
- HELPER = self.setup.cfp.get("packages:yum", "helper")
+ self._helper = self.setup.cfp.get("packages:yum", "helper")
except (ConfigParser.NoOptionError, ConfigParser.NoSectionError):
# first see if bcfg2-yum-helper is in PATH
try:
self.debug_log("Checking for bcfg2-yum-helper in $PATH")
- Popen(['bcfg2-yum-helper'],
- stdin=PIPE, stdout=PIPE, stderr=PIPE).wait()
- HELPER = 'bcfg2-yum-helper'
+ self.cmd.run(['bcfg2-yum-helper'])
+ self._helper = 'bcfg2-yum-helper'
except OSError:
- HELPER = "/usr/sbin/bcfg2-yum-helper"
- return HELPER
+ self._helper = "/usr/sbin/bcfg2-yum-helper"
+ return self._helper
@property
def use_yum(self):
@@ -361,7 +360,7 @@ class YumCollection(Collection):
cachefiles.add(self.cachefile)
return list(cachefiles)
- @Bcfg2.Server.Plugin.track_statistics()
+ @track_statistics()
def write_config(self):
""" Write the server-side config file to :attr:`cfgfile` based
on the data from :func:`get_config`"""
@@ -463,7 +462,7 @@ class YumCollection(Collection):
return "# This config was generated automatically by the Bcfg2 " \
"Packages plugin\n\n" + buf.getvalue()
- @Bcfg2.Server.Plugin.track_statistics()
+ @track_statistics()
def build_extra_structures(self, independent):
""" Add additional entries to the ``<Independent/>`` section
of the final configuration. This adds several kinds of
@@ -570,7 +569,7 @@ class YumCollection(Collection):
name=self.pulp_cert_set.certpath)
self.pulp_cert_set.bind_entry(crt, self.metadata)
- @Bcfg2.Server.Plugin.track_statistics()
+ @track_statistics()
def _get_pulp_consumer(self, consumerapi=None):
""" Get a Pulp consumer object for the client.
@@ -599,7 +598,7 @@ class YumCollection(Collection):
"%s" % err)
return consumer
- @Bcfg2.Server.Plugin.track_statistics()
+ @track_statistics()
def _add_gpg_instances(self, keyentry, localkey, remotekey, keydata=None):
""" Add GPG keys instances to a ``Package`` entry. This is
called from :func:`build_extra_structures` to add GPG keys to
@@ -642,7 +641,7 @@ class YumCollection(Collection):
self.logger.error("Packages: Could not read GPG key %s: %s" %
(localkey, err))
- @Bcfg2.Server.Plugin.track_statistics()
+ @track_statistics()
def get_groups(self, grouplist):
""" If using the yum libraries, given a list of package group
names, return a dict of ``<group name>: <list of packages>``.
@@ -816,7 +815,7 @@ class YumCollection(Collection):
new.append(pkg)
return new
- @Bcfg2.Server.Plugin.track_statistics()
+ @track_statistics()
def complete(self, packagelist):
""" Build a complete list of all packages and their dependencies.
@@ -856,7 +855,7 @@ class YumCollection(Collection):
else:
return set(), set()
- @Bcfg2.Server.Plugin.track_statistics()
+ @track_statistics()
def call_helper(self, command, inputdata=None):
""" Make a call to :ref:`bcfg2-yum-helper`. The yum libs have
horrific memory leaks, so apparently the right way to get
@@ -879,28 +878,18 @@ class YumCollection(Collection):
cmd.append("-v")
cmd.append(command)
self.debug_log("Packages: running %s" % " ".join(cmd), flag=verbose)
- try:
- helper = Popen(cmd, stdin=PIPE, stdout=PIPE, stderr=PIPE)
- except OSError:
- err = sys.exc_info()[1]
- self.logger.error("Packages: Failed to execute %s: %s" %
- (" ".join(cmd), err))
- return None
-
if inputdata:
- idata = json.dumps(inputdata)
- (stdout, stderr) = helper.communicate(idata)
- else:
- (stdout, stderr) = helper.communicate()
- rv = helper.wait()
- if rv:
- self.logger.error("Packages: error running bcfg2-yum-helper "
- "(returned %d): %s" % (rv, stderr))
+ result = self.cmd.run(cmd, inputdata=json.dumps(inputdata))
else:
+ result = self.cmd.run(cmd)
+ if not result.success:
+ self.logger.error("Packages: error running bcfg2-yum-helper: %s" %
+ result.error)
+ elif result.stderr:
self.debug_log("Packages: debug info from bcfg2-yum-helper: %s" %
- stderr, flag=verbose)
+ result.stderr, flag=verbose)
try:
- return json.loads(stdout)
+ return json.loads(result.stdout)
except ValueError:
err = sys.exc_info()[1]
self.logger.error("Packages: error reading bcfg2-yum-helper "
@@ -943,20 +932,16 @@ class YumCollection(Collection):
class YumSource(Source):
""" Handle yum sources """
- #: :ref:`server-plugins-generators-packages-magic-groups` for
- #: ``YumSource`` are "yum", "redhat", "centos", and "fedora"
- basegroups = ['yum', 'redhat', 'centos', 'fedora']
-
#: YumSource sets the ``type`` on Package entries to "yum"
ptype = 'yum'
- def __init__(self, basepath, xsource, setup):
- Source.__init__(self, basepath, xsource, setup)
+ def __init__(self, basepath, xsource):
+ Source.__init__(self, basepath, xsource)
self.pulp_id = None
if HAS_PULP and xsource.get("pulp_id"):
self.pulp_id = xsource.get("pulp_id")
- _setup_pulp(self.setup)
+ _setup_pulp()
repoapi = RepositoryAPI()
try:
self.repo = repoapi.repository(self.pulp_id)
@@ -1083,7 +1068,7 @@ class YumSource(Source):
self.file_to_arch[self.escape_url(fullurl)] = arch
return urls
- @Bcfg2.Server.Plugin.track_statistics()
+ @track_statistics()
def read_files(self):
""" When using the builtin yum parser, read and parse locally
downloaded metadata files. This diverges from the stock
@@ -1131,7 +1116,7 @@ class YumSource(Source):
self.packages[key].difference(self.packages['global'])
self.save_state()
- @Bcfg2.Server.Plugin.track_statistics()
+ @track_statistics()
def parse_filelist(self, data, arch):
""" parse filelists.xml.gz data """
if arch not in self.filemap:
@@ -1145,7 +1130,7 @@ class YumSource(Source):
self.filemap[arch][fentry.text] = \
set([pkg.get('name')])
- @Bcfg2.Server.Plugin.track_statistics()
+ @track_statistics()
def parse_primary(self, data, arch):
""" parse primary.xml.gz data """
if arch not in self.packages:
diff --git a/src/lib/Bcfg2/Server/Plugins/Packages/__init__.py b/src/lib/Bcfg2/Server/Plugins/Packages/__init__.py
index f82b8a392..8c272cf53 100644
--- a/src/lib/Bcfg2/Server/Plugins/Packages/__init__.py
+++ b/src/lib/Bcfg2/Server/Plugins/Packages/__init__.py
@@ -13,6 +13,7 @@ from Bcfg2.Compat import ConfigParser, urlopen, HTTPError, URLError
from Bcfg2.Server.Plugins.Packages.Collection import Collection, \
get_collection_class
from Bcfg2.Server.Plugins.Packages.PackagesSources import PackagesSources
+from Bcfg2.Server.Statistics import track_statistics
#: The default path for generated yum configs
YUM_CONFIG_DEFAULT = "/etc/yum.repos.d/bcfg2.repo"
@@ -65,14 +66,6 @@ class Packages(Bcfg2.Server.Plugin.Plugin,
# create key directory if needed
os.makedirs(self.keypath)
- # warn about deprecated magic groups
- if self.core.setup.cfp.getboolean("packages", "magic_groups",
- default=False):
- self.logger.warning("Packages: Magic groups are deprecated and "
- "will be removed in a future release")
- self.logger.warning("You can disable magic groups by setting "
- "magic_groups=0 in [packages] in bcfg2.conf")
-
# pylint: disable=C0301
#: The
#: :class:`Bcfg2.Server.Plugins.Packages.PackagesSources.PackagesSources`
@@ -80,8 +73,7 @@ class Packages(Bcfg2.Server.Plugin.Plugin,
#: :class:`Bcfg2.Server.Plugins.Packages.Source.Source` objects for
#: this plugin.
self.sources = PackagesSources(os.path.join(self.data, "sources.xml"),
- self.cachepath, core.fam, self,
- self.core.setup)
+ self.cachepath, self)
#: We cache
#: :class:`Bcfg2.Server.Plugins.Packages.Collection.Collection`
@@ -239,13 +231,7 @@ class Packages(Bcfg2.Server.Plugin.Plugin,
:raises: :class:`Bcfg2.Server.Plugin.exceptions.PluginExecutionError`
"""
if entry.tag == 'Package':
- if self.core.setup.cfp.getboolean("packages", "magic_groups",
- default=False):
- collection = self.get_collection(metadata)
- if collection.magic_groups_match():
- return True
- else:
- return True
+ return True
elif entry.tag == 'Path':
# managed entries for yum/apt configs
if (entry.get("name") ==
@@ -259,7 +245,7 @@ class Packages(Bcfg2.Server.Plugin.Plugin,
return True
return False
- @Bcfg2.Server.Plugin.track_statistics()
+ @track_statistics()
def validate_structures(self, metadata, structures):
""" Do the real work of Packages. This does two things:
@@ -294,7 +280,7 @@ class Packages(Bcfg2.Server.Plugin.Plugin,
collection.build_extra_structures(indep)
structures.append(indep)
- @Bcfg2.Server.Plugin.track_statistics()
+ @track_statistics()
def _build_packages(self, metadata, independent, structures,
collection=None):
""" Perform dependency resolution and build the complete list
@@ -372,7 +358,7 @@ class Packages(Bcfg2.Server.Plugin.Plugin,
newpkgs.sort()
collection.packages_to_entry(newpkgs, independent)
- @Bcfg2.Server.Plugin.track_statistics()
+ @track_statistics()
def Refresh(self):
""" Packages.Refresh() => True|False
@@ -380,7 +366,7 @@ class Packages(Bcfg2.Server.Plugin.Plugin,
self._load_config(force_update=True)
return True
- @Bcfg2.Server.Plugin.track_statistics()
+ @track_statistics()
def Reload(self):
""" Packages.Refresh() => True|False
@@ -476,7 +462,7 @@ class Packages(Bcfg2.Server.Plugin.Plugin,
if kfile not in keyfiles:
os.unlink(kfile)
- @Bcfg2.Server.Plugin.track_statistics()
+ @track_statistics()
def get_collection(self, metadata):
""" Get a
:class:`Bcfg2.Server.Plugins.Packages.Collection.Collection`
@@ -493,8 +479,7 @@ class Packages(Bcfg2.Server.Plugin.Plugin,
if not self.sources.loaded:
# if sources.xml has not received a FAM event yet, defer;
# instantiate a dummy Collection object
- return Collection(metadata, [], self.cachepath, self.data,
- self.core.fam)
+ return Collection(metadata, [], self.cachepath, self.data)
if metadata.hostname in self.clients:
return self.collections[self.clients[metadata.hostname]]
@@ -525,7 +510,7 @@ class Packages(Bcfg2.Server.Plugin.Plugin,
"for %s" % (cclass.__name__, metadata.hostname))
collection = cclass(metadata, relevant, self.cachepath, self.data,
- self.core.fam, debug=self.debug_flag)
+ debug=self.debug_flag)
ckey = collection.cachekey
if cclass != Collection:
self.clients[metadata.hostname] = ckey
diff --git a/src/lib/Bcfg2/Server/Plugins/Pkgmgr.py b/src/lib/Bcfg2/Server/Plugins/Pkgmgr.py
index a1dcb575f..293ec8e1a 100644
--- a/src/lib/Bcfg2/Server/Plugins/Pkgmgr.py
+++ b/src/lib/Bcfg2/Server/Plugins/Pkgmgr.py
@@ -2,23 +2,20 @@
import os
import re
+import sys
import glob
import logging
import lxml.etree
-import Bcfg2.Server.Plugin
import Bcfg2.Server.Lint
+import Bcfg2.Server.Plugin
+from Bcfg2.Server.Plugin import PluginExecutionError
-try:
- set
-except NameError:
- # deprecated since python 2.6
- from sets import Set as set
logger = logging.getLogger('Bcfg2.Plugins.Pkgmgr')
class FuzzyDict(dict):
- fuzzy = re.compile('(?P<name>.*):(?P<alist>\S+(,\S+)*)')
+ fuzzy = re.compile(r'(?P<name>.*):(?P<alist>\S+(,\S+)*)')
def __getitem__(self, key):
if isinstance(key, str):
@@ -47,95 +44,217 @@ class FuzzyDict(dict):
raise
-class PNode(Bcfg2.Server.Plugin.INode):
+class PNode(object):
"""PNode has a list of packages available at a
particular group intersection.
"""
- splitters = {'rpm': re.compile('^(.*/)?(?P<name>[\w\+\d\.]+(-[\w\+\d\.]+)*)-' + \
- '(?P<version>[\w\d\.]+-([\w\d\.]+))\.(?P<arch>\S+)\.rpm$'),
- 'encap': re.compile('^(?P<name>[\w-]+)-(?P<version>[\w\d\.+-]+).encap.*$')}
+ splitters = dict(
+ rpm=re.compile(
+ r'^(.*/)?(?P<name>[\w\+\d\.]+(-[\w\+\d\.]+)*)-' +
+ r'(?P<version>[\w\d\.]+-([\w\d\.]+))\.(?P<arch>\S+)\.rpm$'),
+ encap=re.compile(
+ r'^(?P<name>[\w-]+)-(?P<version>[\w\d\.+-]+).encap.*$'))
+ raw = dict(
+ Client="lambda m, e:'%(name)s' == m.hostname and predicate(m, e)",
+ Group="lambda m, e:'%(name)s' in m.groups and predicate(m, e)")
+ nraw = dict(
+ Client="lambda m, e:'%(name)s' != m.hostname and predicate(m, e)",
+ Group="lambda m, e:'%(name)s' not in m.groups and predicate(m, e)")
+ containers = ['Group', 'Client']
ignore = ['Package']
- def Match(self, metadata, data, entry=lxml.etree.Element("None")):
- """Return a dictionary of package mappings."""
- if self.predicate(metadata, entry):
- for key in self.contents:
- try:
- data[key].update(self.contents[key])
- except:
- data[key] = FuzzyDict()
- data[key].update(self.contents[key])
- for child in self.children:
- child.Match(metadata, data)
-
def __init__(self, data, pdict, parent=None):
# copy local attributes to all child nodes if no local attribute exists
if 'Package' not in pdict:
pdict['Package'] = set()
for child in data.getchildren():
- attrs = set(data.attrib.keys()).difference(child.attrib.keys() + ['name'])
+ attrs = set(data.attrib.keys()).difference(
+ child.attrib.keys() + ['name'])
for attr in attrs:
try:
child.set(attr, data.get(attr))
except:
- # don't fail on things like comments and other immutable elements
+ # don't fail on things like comments and other
+ # immutable elements
pass
- Bcfg2.Server.Plugin.INode.__init__(self, data, pdict, parent)
+ self.data = data
+ self.contents = {}
+ if parent is None:
+ self.predicate = lambda m, e: True
+ else:
+ predicate = parent.predicate
+ if data.get('negate', 'false').lower() == 'true':
+ psrc = self.nraw
+ else:
+ psrc = self.raw
+ if data.tag in list(psrc.keys()):
+ self.predicate = eval(psrc[data.tag] %
+ {'name': data.get('name')},
+ {'predicate': predicate})
+ else:
+ raise PluginExecutionError("Unknown tag: %s" % data.tag)
+ self.children = []
+ self._load_children(data, pdict)
+
if 'Package' not in self.contents:
self.contents['Package'] = FuzzyDict()
for pkg in data.findall('./Package'):
- if 'name' in pkg.attrib and pkg.get('name') not in pdict['Package']:
+ if ('name' in pkg.attrib and
+ pkg.get('name') not in pdict['Package']):
pdict['Package'].add(pkg.get('name'))
- if pkg.get('name') != None:
+ if pkg.get('name') is not None:
self.contents['Package'][pkg.get('name')] = {}
if pkg.getchildren():
self.contents['Package'][pkg.get('name')]['__children__'] \
- = pkg.getchildren()
+ = pkg.getchildren()
if 'simplefile' in pkg.attrib:
- pkg.set('url', "%s/%s" % (pkg.get('uri'), pkg.get('simplefile')))
+ pkg.set('url',
+ "%s/%s" % (pkg.get('uri'), pkg.get('simplefile')))
self.contents['Package'][pkg.get('name')].update(pkg.attrib)
else:
if 'file' in pkg.attrib:
if 'multiarch' in pkg.attrib:
archs = pkg.get('multiarch').split()
srcs = pkg.get('srcs', pkg.get('multiarch')).split()
- url = ' '.join(["%s/%s" % (pkg.get('uri'),
- pkg.get('file') % {'src':srcs[idx],
- 'arch':archs[idx]})
- for idx in range(len(archs))])
+ url = ' '.join(
+ ["%s/%s" % (pkg.get('uri'),
+ pkg.get('file') % {'src': srcs[idx],
+ 'arch': archs[idx]})
+ for idx in range(len(archs))])
pkg.set('url', url)
else:
pkg.set('url', '%s/%s' % (pkg.get('uri'),
pkg.get('file')))
- if pkg.get('type') in self.splitters and pkg.get('file') != None:
- mdata = self.splitters[pkg.get('type')].match(pkg.get('file'))
+ if (pkg.get('type') in self.splitters and
+ pkg.get('file') is not None):
+ mdata = \
+ self.splitters[pkg.get('type')].match(pkg.get('file'))
if not mdata:
- logger.error("Failed to match pkg %s" % pkg.get('file'))
+ logger.error("Failed to match pkg %s" %
+ pkg.get('file'))
continue
pkgname = mdata.group('name')
self.contents['Package'][pkgname] = mdata.groupdict()
self.contents['Package'][pkgname].update(pkg.attrib)
if pkg.attrib.get('file'):
- self.contents['Package'][pkgname]['url'] = pkg.get('url')
- self.contents['Package'][pkgname]['type'] = pkg.get('type')
+ self.contents['Package'][pkgname]['url'] = \
+ pkg.get('url')
+ self.contents['Package'][pkgname]['type'] = \
+ pkg.get('type')
if pkg.get('verify'):
- self.contents['Package'][pkgname]['verify'] = pkg.get('verify')
+ self.contents['Package'][pkgname]['verify'] = \
+ pkg.get('verify')
if pkg.get('multiarch'):
- self.contents['Package'][pkgname]['multiarch'] = pkg.get('multiarch')
+ self.contents['Package'][pkgname]['multiarch'] = \
+ pkg.get('multiarch')
if pkgname not in pdict['Package']:
pdict['Package'].add(pkgname)
if pkg.getchildren():
- self.contents['Package'][pkgname]['__children__'] = pkg.getchildren()
+ self.contents['Package'][pkgname]['__children__'] = \
+ pkg.getchildren()
else:
- self.contents['Package'][pkg.get('name')].update(pkg.attrib)
+ self.contents['Package'][pkg.get('name')].update(
+ pkg.attrib)
+
+ def _load_children(self, data, idict):
+ """ load children """
+ for item in data.getchildren():
+ if item.tag in self.ignore:
+ continue
+ elif item.tag in self.containers:
+ self.children.append(self.__class__(item, idict, self))
+ else:
+ try:
+ self.contents[item.tag][item.get('name')] = \
+ dict(item.attrib)
+ except KeyError:
+ self.contents[item.tag] = \
+ {item.get('name'): dict(item.attrib)}
+ if item.text:
+ self.contents[item.tag][item.get('name')]['__text__'] = \
+ item.text
+ if item.getchildren():
+ self.contents[item.tag][item.get('name')]['__children__'] \
+ = item.getchildren()
+ try:
+ idict[item.tag].append(item.get('name'))
+ except KeyError:
+ idict[item.tag] = [item.get('name')]
+ def Match(self, metadata, data, entry=lxml.etree.Element("None")):
+ """Return a dictionary of package mappings."""
+ if self.predicate(metadata, entry):
+ for key in self.contents:
+ try:
+ data[key].update(self.contents[key])
+ except: # pylint: disable=W0702
+ data[key] = FuzzyDict()
+ data[key].update(self.contents[key])
+ for child in self.children:
+ child.Match(metadata, data)
-class PkgSrc(Bcfg2.Server.Plugin.XMLSrc):
- """PkgSrc files contain a PNode hierarchy that
- returns matching package entries.
- """
+
+class PkgSrc(Bcfg2.Server.Plugin.XMLFileBacked):
+ """ XMLSrc files contain a
+ :class:`Bcfg2.Server.Plugin.helpers.INode` hierarchy that returns
+ matching entries. XMLSrc objects are deprecated and
+ :class:`Bcfg2.Server.Plugin.helpers.StructFile` should be
+ preferred where possible."""
__node__ = PNode
__cacheobj__ = FuzzyDict
+ __priority_required__ = True
+
+ def __init__(self, filename, should_monitor=False):
+ Bcfg2.Server.Plugin.XMLFileBacked.__init__(self, filename,
+ should_monitor)
+ self.items = {}
+ self.cache = None
+ self.pnode = None
+ self.priority = -1
+
+ def HandleEvent(self, _=None):
+ """Read file upon update."""
+ try:
+ data = open(self.name).read()
+ except IOError:
+ msg = "Failed to read file %s: %s" % (self.name, sys.exc_info()[1])
+ logger.error(msg)
+ raise PluginExecutionError(msg)
+ self.items = {}
+ try:
+ xdata = lxml.etree.XML(data, parser=Bcfg2.Server.XMLParser)
+ except lxml.etree.XMLSyntaxError:
+ msg = "Failed to parse file %s: %s" % (self.name,
+ sys.exc_info()[1])
+ logger.error(msg)
+ raise PluginExecutionError(msg)
+ self.pnode = self.__node__(xdata, self.items)
+ self.cache = None
+ try:
+ self.priority = int(xdata.get('priority'))
+ except (ValueError, TypeError):
+ if self.__priority_required__:
+ msg = "Got bogus priority %s for file %s" % \
+ (xdata.get('priority'), self.name)
+ logger.error(msg)
+ raise PluginExecutionError(msg)
+
+ del xdata, data
+
+ def Cache(self, metadata):
+ """Build a package dict for a given host."""
+ if self.cache is None or self.cache[0] != metadata:
+ cache = (metadata, self.__cacheobj__())
+ if self.pnode is None:
+ logger.error("Cache method called early for %s; "
+ "forcing data load" % self.name)
+ self.HandleEvent()
+ return
+ self.pnode.Match(metadata, cache[1])
+ self.cache = cache
+
+ def __str__(self):
+ return str(self.items)
class Pkgmgr(Bcfg2.Server.Plugin.PrioDir):
@@ -165,12 +284,14 @@ class Pkgmgr(Bcfg2.Server.Plugin.PrioDir):
mdata = FuzzyDict.fuzzy.match(pname)
if mdata:
arches = mdata.group('alist').split(',')
- [entry.remove(inst) for inst in \
- entry.findall('Instance') \
- if inst.get('arch') not in arches]
+ for inst in entry.findall('Instance'):
+ if inst.get('arch') not in arches:
+ entry.remove(inst)
def HandlesEntry(self, entry, metadata):
- return entry.tag == 'Package' and entry.get('name').split(':')[0] in list(self.Entries['Package'].keys())
+ return (
+ entry.tag == 'Package' and
+ entry.get('name').split(':')[0] in self.Entries['Package'].keys())
def HandleEntry(self, entry, metadata):
self.BindEntry(entry, metadata)
diff --git a/src/lib/Bcfg2/Server/Plugins/Probes.py b/src/lib/Bcfg2/Server/Plugins/Probes.py
index f8baddb4b..e97607093 100644
--- a/src/lib/Bcfg2/Server/Plugins/Probes.py
+++ b/src/lib/Bcfg2/Server/Plugins/Probes.py
@@ -9,6 +9,8 @@ import operator
import lxml.etree
import Bcfg2.Server
import Bcfg2.Server.Plugin
+import Bcfg2.Server.FileMonitor
+from Bcfg2.Server.Statistics import track_statistics
try:
from django.db import models
@@ -117,12 +119,12 @@ class ProbeSet(Bcfg2.Server.Plugin.EntrySet):
bangline = re.compile(r'^#!\s*(?P<interpreter>.*)$')
basename_is_regex = True
- def __init__(self, path, fam, encoding, plugin_name):
+ def __init__(self, path, encoding, plugin_name):
self.plugin_name = plugin_name
Bcfg2.Server.Plugin.EntrySet.__init__(self, r'[0-9A-Za-z_\-]+', path,
Bcfg2.Server.Plugin.SpecificData,
encoding)
- fam.AddMonitor(path, self)
+ Bcfg2.Server.FileMonitor.get_fam().AddMonitor(path, self)
def HandleEvent(self, event):
""" handle events on everything but probed.xml """
@@ -191,7 +193,7 @@ class Probes(Bcfg2.Server.Plugin.Probing,
Bcfg2.Server.Plugin.DatabaseBacked.__init__(self, core, datastore)
try:
- self.probes = ProbeSet(self.data, core.fam, core.setup['encoding'],
+ self.probes = ProbeSet(self.data, core.setup['encoding'],
self.name)
except:
err = sys.exc_info()[1]
@@ -202,7 +204,7 @@ class Probes(Bcfg2.Server.Plugin.Probing,
self.load_data()
__init__.__doc__ = Bcfg2.Server.Plugin.DatabaseBacked.__init__.__doc__
- @Bcfg2.Server.Plugin.track_statistics()
+ @track_statistics()
def write_data(self, client):
""" Write probe data out for use with bcfg2-info """
if self._use_db:
@@ -306,12 +308,12 @@ class Probes(Bcfg2.Server.Plugin.Probing,
self.cgroups[pgroup.hostname] = []
self.cgroups[pgroup.hostname].append(pgroup.group)
- @Bcfg2.Server.Plugin.track_statistics()
+ @track_statistics()
def GetProbes(self, meta):
return self.probes.get_probe_data(meta)
GetProbes.__doc__ = Bcfg2.Server.Plugin.Probing.GetProbes.__doc__
- @Bcfg2.Server.Plugin.track_statistics()
+ @track_statistics()
def ReceiveData(self, client, datalist):
if self.core.metadata_cache_mode in ['cautious', 'aggressive']:
if client.hostname in self.cgroups:
diff --git a/src/lib/Bcfg2/Server/Plugins/Properties.py b/src/lib/Bcfg2/Server/Plugins/Properties.py
index e97f66675..8e54da19b 100644
--- a/src/lib/Bcfg2/Server/Plugins/Properties.py
+++ b/src/lib/Bcfg2/Server/Plugins/Properties.py
@@ -7,13 +7,9 @@ import sys
import copy
import logging
import lxml.etree
+from Bcfg2.Options import get_option_parser
import Bcfg2.Server.Plugin
from Bcfg2.Server.Plugin import PluginExecutionError
-try:
- import Bcfg2.Encryption
- HAS_CRYPTO = True
-except ImportError:
- HAS_CRYPTO = False
try:
import json
@@ -33,8 +29,6 @@ except ImportError:
LOGGER = logging.getLogger(__name__)
-SETUP = None
-
class PropertyFile(object):
""" Base Properties file handler """
@@ -46,13 +40,14 @@ class PropertyFile(object):
.. automethod:: _write
"""
self.name = name
+ self.setup = get_option_parser()
def write(self):
""" Write the data in this data structure back to the property
file. This public method performs checking to ensure that
writing is possible and then calls :func:`_write`. """
- if not SETUP.cfp.getboolean("properties", "writes_enabled",
- default=True):
+ if not self.setup.cfp.getboolean("properties", "writes_enabled",
+ default=True):
msg = "Properties files write-back is disabled in the " + \
"configuration"
LOGGER.error(msg)
@@ -88,8 +83,8 @@ class PropertyFile(object):
class JSONPropertyFile(Bcfg2.Server.Plugin.FileBacked, PropertyFile):
""" Handle JSON Properties files. """
- def __init__(self, name, fam=None):
- Bcfg2.Server.Plugin.FileBacked.__init__(self, name, fam=fam)
+ def __init__(self, name):
+ Bcfg2.Server.Plugin.FileBacked.__init__(self, name)
PropertyFile.__init__(self, name)
self.json = None
__init__.__doc__ = Bcfg2.Server.Plugin.FileBacked.__init__.__doc__
@@ -127,8 +122,8 @@ class JSONPropertyFile(Bcfg2.Server.Plugin.FileBacked, PropertyFile):
class YAMLPropertyFile(Bcfg2.Server.Plugin.FileBacked, PropertyFile):
""" Handle YAML Properties files. """
- def __init__(self, name, fam=None):
- Bcfg2.Server.Plugin.FileBacked.__init__(self, name, fam=fam)
+ def __init__(self, name):
+ Bcfg2.Server.Plugin.FileBacked.__init__(self, name)
PropertyFile.__init__(self, name)
self.yaml = None
__init__.__doc__ = Bcfg2.Server.Plugin.FileBacked.__init__.__doc__
@@ -166,8 +161,8 @@ class YAMLPropertyFile(Bcfg2.Server.Plugin.FileBacked, PropertyFile):
class XMLPropertyFile(Bcfg2.Server.Plugin.StructFile, PropertyFile):
""" Handle XML Properties files. """
- def __init__(self, name, fam=None, should_monitor=False):
- Bcfg2.Server.Plugin.StructFile.__init__(self, name, fam=fam,
+ def __init__(self, name, should_monitor=False):
+ Bcfg2.Server.Plugin.StructFile.__init__(self, name,
should_monitor=should_monitor)
PropertyFile.__init__(self, name)
__init__.__doc__ = Bcfg2.Server.Plugin.StructFile.__init__.__doc__
@@ -203,53 +198,8 @@ class XMLPropertyFile(Bcfg2.Server.Plugin.StructFile, PropertyFile):
return True
validate_data.__doc__ = PropertyFile.validate_data.__doc__
- def Index(self):
- Bcfg2.Server.Plugin.StructFile.Index(self)
- if HAS_CRYPTO:
- strict = self.xdata.get(
- "decrypt",
- SETUP.cfp.get(Bcfg2.Encryption.CFG_SECTION, "decrypt",
- default="strict")) == "strict"
- for el in self.xdata.xpath("//*[@encrypted]"):
- try:
- el.text = self._decrypt(el).encode('ascii',
- 'xmlcharrefreplace')
- except UnicodeDecodeError:
- LOGGER.info("Properties: Decrypted %s to gibberish, "
- "skipping" % el.tag)
- except Bcfg2.Encryption.EVPError:
- msg = "Properties: Failed to decrypt %s element in %s" % \
- (el.tag, self.name)
- if strict:
- raise PluginExecutionError(msg)
- else:
- LOGGER.warning(msg)
- Index.__doc__ = Bcfg2.Server.Plugin.StructFile.Index.__doc__
-
- def _decrypt(self, element):
- """ Decrypt a single encrypted properties file element """
- if not element.text or not element.text.strip():
- return
- passes = Bcfg2.Encryption.get_passphrases(SETUP)
- try:
- passphrase = passes[element.get("encrypted")]
- try:
- return Bcfg2.Encryption.ssl_decrypt(
- element.text, passphrase,
- algorithm=Bcfg2.Encryption.get_algorithm(SETUP))
- except Bcfg2.Encryption.EVPError:
- # error is raised below
- pass
- except KeyError:
- # bruteforce_decrypt raises an EVPError with a sensible
- # error message, so we just let it propagate up the stack
- return Bcfg2.Encryption.bruteforce_decrypt(
- element.text, passphrases=passes.values(),
- algorithm=Bcfg2.Encryption.get_algorithm(SETUP))
- raise Bcfg2.Encryption.EVPError("Failed to decrypt")
-
def get_additional_data(self, metadata):
- if SETUP.cfp.getboolean("properties", "automatch", default=False):
+ if self.setup.cfp.getboolean("properties", "automatch", default=False):
default_automatch = "true"
else:
default_automatch = "false"
@@ -290,11 +240,9 @@ class Properties(Bcfg2.Server.Plugin.Plugin,
ignore = re.compile(r'.*\.xsd$')
def __init__(self, core, datastore):
- global SETUP # pylint: disable=W0603
Bcfg2.Server.Plugin.Plugin.__init__(self, core, datastore)
Bcfg2.Server.Plugin.Connector.__init__(self)
- Bcfg2.Server.Plugin.DirectoryBacked.__init__(self, self.data, core.fam)
- SETUP = core.setup
+ Bcfg2.Server.Plugin.DirectoryBacked.__init__(self, self.data)
#: Instead of creating children of this object with a static
#: object, we use :func:`property_dispatcher` to create a
@@ -302,23 +250,21 @@ class Properties(Bcfg2.Server.Plugin.Plugin,
self.__child__ = self.property_dispatcher
__init__.__doc__ = Bcfg2.Server.Plugin.Plugin.__init__.__doc__
- def property_dispatcher(self, fname, fam):
+ def property_dispatcher(self, fname):
""" Dispatch an event on a Properties file to the
appropriate object.
:param fname: The name of the file that received the event
:type fname: string
- :param fam: The file monitor the event was received by
- :type fam: Bcfg2.Server.FileMonitor.FileMonitor
:returns: An object of the appropriate subclass of
:class:`PropertyFile`
"""
if fname.endswith(".xml"):
- return XMLPropertyFile(fname, fam)
+ return XMLPropertyFile(fname)
elif HAS_JSON and fname.endswith(".json"):
- return JSONPropertyFile(fname, fam)
+ return JSONPropertyFile(fname)
elif HAS_YAML and (fname.endswith(".yaml") or fname.endswith(".yml")):
- return YAMLPropertyFile(fname, fam)
+ return YAMLPropertyFile(fname)
else:
raise Bcfg2.Server.Plugin.PluginExecutionError(
"Properties: Unknown extension %s" % fname)
diff --git a/src/lib/Bcfg2/Server/Plugins/PuppetENC.py b/src/lib/Bcfg2/Server/Plugins/PuppetENC.py
index 801e7006d..3b367573b 100644
--- a/src/lib/Bcfg2/Server/Plugins/PuppetENC.py
+++ b/src/lib/Bcfg2/Server/Plugins/PuppetENC.py
@@ -4,7 +4,7 @@ import os
import sys
import Bcfg2.Server
import Bcfg2.Server.Plugin
-from subprocess import Popen, PIPE
+from Bcfg2.Utils import Executor
try:
from syck import load as yaml_load, error as yaml_error
@@ -28,16 +28,15 @@ class PuppetENC(Bcfg2.Server.Plugin.Plugin,
Bcfg2.Server.Plugin.DirectoryBacked):
""" A plugin to run Puppet external node classifiers
(http://docs.puppetlabs.com/guides/external_nodes.html) """
- experimental = True
__child__ = PuppetENCFile
def __init__(self, core, datastore):
Bcfg2.Server.Plugin.Plugin.__init__(self, core, datastore)
Bcfg2.Server.Plugin.Connector.__init__(self)
Bcfg2.Server.Plugin.ClientRunHooks.__init__(self)
- Bcfg2.Server.Plugin.DirectoryBacked.__init__(self, self.data,
- self.core.fam)
+ Bcfg2.Server.Plugin.DirectoryBacked.__init__(self, self.data)
self.cache = dict()
+ self.cmd = Executor()
def _run_encs(self, metadata):
""" Run all Puppet ENCs """
@@ -46,20 +45,17 @@ class PuppetENC(Bcfg2.Server.Plugin.Plugin,
epath = os.path.join(self.data, enc)
self.debug_log("PuppetENC: Running ENC %s for %s" %
(enc, metadata.hostname))
- proc = Popen([epath, metadata.hostname], stdin=PIPE, stdout=PIPE,
- stderr=PIPE)
- (out, err) = proc.communicate()
- rv = proc.wait()
- if rv != 0:
- msg = "PuppetENC: Error running ENC %s for %s (%s): %s" % \
- (enc, metadata.hostname, rv, err)
+ result = self.cmd.run([epath, metadata.hostname])
+ if not result.success:
+ msg = "PuppetENC: Error running ENC %s for %s: %s" % \
+ (enc, metadata.hostname, result.error)
self.logger.error(msg)
raise Bcfg2.Server.Plugin.PluginExecutionError(msg)
- if err:
- self.debug_log("ENC Error: %s" % err)
+ if result.stderr:
+ self.debug_log("ENC Error: %s" % result.stderr)
try:
- yaml = yaml_load(out)
+ yaml = yaml_load(result.stdout)
self.debug_log("Loaded data from %s for %s: %s" %
(enc, metadata.hostname, yaml))
except yaml_error:
@@ -69,13 +65,7 @@ class PuppetENC(Bcfg2.Server.Plugin.Plugin,
self.logger.error(msg)
raise Bcfg2.Server.Plugin.PluginExecutionError(msg)
- groups = dict()
- if "classes" in yaml:
- # stock Puppet ENC output format
- groups = yaml['classes']
- elif "groups" in yaml:
- # more Bcfg2-ish output format
- groups = yaml['groups']
+ groups = yaml.get("classes", yaml.get("groups", dict()))
if groups:
if isinstance(groups, list):
self.debug_log("ENC %s adding groups to %s: %s" %
diff --git a/src/lib/Bcfg2/Server/Plugins/Rules.py b/src/lib/Bcfg2/Server/Plugins/Rules.py
index 21862c5db..3d4e8671d 100644
--- a/src/lib/Bcfg2/Server/Plugins/Rules.py
+++ b/src/lib/Bcfg2/Server/Plugins/Rules.py
@@ -18,32 +18,25 @@ class Rules(Bcfg2.Server.Plugin.PrioDir):
self.Entries[entry.tag].keys())
return False
- def BindEntry(self, entry, metadata):
- attrs = self.get_attrs(entry, metadata)
- for key, val in list(attrs.items()):
- if key not in entry.attrib:
- entry.attrib[key] = val
+ HandleEntry = Bcfg2.Server.Plugin.PrioDir.BindEntry
- HandleEntry = BindEntry
-
- def _matches(self, entry, metadata, rules):
- if Bcfg2.Server.Plugin.PrioDir._matches(self, entry, metadata, rules):
+ def _matches(self, entry, metadata, candidate):
+ if Bcfg2.Server.Plugin.PrioDir._matches(self, entry, metadata,
+ candidate):
return True
elif (entry.tag == "Path" and
- ((entry.get('name').endswith("/") and
- entry.get('name').rstrip("/") in rules) or
- (not entry.get('name').endswith("/") and
- entry.get('name') + '/' in rules))):
+ entry.get('name').rstrip("/") ==
+ candidate.get("name").rstrip("/")):
# special case for Path tags:
# http://trac.mcs.anl.gov/projects/bcfg2/ticket/967
return True
elif self._regex_enabled:
# attempt regular expression matching
- for rule in rules:
- if rule not in self._regex_cache:
- self._regex_cache[rule] = re.compile("%s$" % rule)
- if self._regex_cache[rule].match(entry.get('name')):
- return True
+ rule = candidate.get("name")
+ if rule not in self._regex_cache:
+ self._regex_cache[rule] = re.compile("%s$" % rule)
+ if self._regex_cache[rule].match(entry.get('name')):
+ return True
return False
@property
diff --git a/src/lib/Bcfg2/Server/Plugins/SEModules.py b/src/lib/Bcfg2/Server/Plugins/SEModules.py
index fa47f9496..248b662f9 100644
--- a/src/lib/Bcfg2/Server/Plugins/SEModules.py
+++ b/src/lib/Bcfg2/Server/Plugins/SEModules.py
@@ -43,9 +43,6 @@ class SEModules(Bcfg2.Server.Plugin.GroupSpool):
#: SEModules manages ``SEModule`` entries
entry_type = 'SEModule'
- #: The SEModules plugin is experimental
- experimental = True
-
def _get_module_filename(self, entry):
""" GroupSpool stores entries as /foo.pp, but we want people
to be able to specify module entries as name='foo' or
diff --git a/src/lib/Bcfg2/Server/Plugins/SSHbase.py b/src/lib/Bcfg2/Server/Plugins/SSHbase.py
index d8b3104b7..84dcf2780 100644
--- a/src/lib/Bcfg2/Server/Plugins/SSHbase.py
+++ b/src/lib/Bcfg2/Server/Plugins/SSHbase.py
@@ -8,8 +8,8 @@ import shutil
import logging
import tempfile
from itertools import chain
-from subprocess import Popen, PIPE
import Bcfg2.Server.Plugin
+from Bcfg2.Utils import Executor
from Bcfg2.Server.Plugin import PluginExecutionError
from Bcfg2.Compat import any, u_str, b64encode # pylint: disable=W0622
@@ -20,9 +20,7 @@ class KeyData(Bcfg2.Server.Plugin.SpecificData):
""" class to handle key data for HostKeyEntrySet """
def __init__(self, name, specific, encoding):
- Bcfg2.Server.Plugin.SpecificData.__init__(self,
- name,
- specific,
+ Bcfg2.Server.Plugin.SpecificData.__init__(self, name, specific,
encoding)
self.encoding = encoding
@@ -135,7 +133,8 @@ class SSHbase(Bcfg2.Server.Plugin.Plugin,
# do so once
self.badnames = dict()
- core.fam.AddMonitor(self.data, self)
+ self.fam = Bcfg2.Server.FileMonitor.get_fam()
+ self.fam.AddMonitor(self.data, self)
self.static = dict()
self.entries = dict()
@@ -149,6 +148,8 @@ class SSHbase(Bcfg2.Server.Plugin.Plugin,
HostKeyEntrySet(keypattern, self.data)
self.Entries['Path']["/etc/ssh/" + keypattern] = self.build_hk
+ self.cmd = Executor()
+
def get_skn(self):
"""Build memory cache of the ssh known hosts file."""
if not self.__skn:
@@ -257,7 +258,7 @@ class SSHbase(Bcfg2.Server.Plugin.Plugin,
self.skn = False
return
- if event.filename in ['info', 'info.xml', ':info']:
+ if event.filename == 'info.xml':
for entry in list(self.entries.values()):
entry.handle_event(event)
return
@@ -279,12 +280,13 @@ class SSHbase(Bcfg2.Server.Plugin.Plugin,
(event.filename, action))
def get_ipcache_entry(self, client):
- """Build a cache of dns results."""
+ """ Build a cache of dns results. """
if client in self.ipcache:
if self.ipcache[client]:
return self.ipcache[client]
else:
- raise socket.gaierror
+ raise PluginExecutionError("No cached IP address for %s" %
+ client)
else:
# need to add entry
try:
@@ -293,14 +295,17 @@ class SSHbase(Bcfg2.Server.Plugin.Plugin,
self.ipcache[client] = (ipaddr, client)
return (ipaddr, client)
except socket.gaierror:
- ipaddr = Popen(["getent", "hosts", client],
- stdout=PIPE).stdout.read().strip().split()
- if ipaddr:
- self.ipcache[client] = (ipaddr, client)
- return (ipaddr, client)
+ result = self.cmd.run(["getent", "hosts", client])
+ if result.success:
+ ipaddr = result.stdout.strip().split()
+ if ipaddr:
+ self.ipcache[client] = (ipaddr, client)
+ return (ipaddr, client)
self.ipcache[client] = False
- self.logger.error("Failed to find IP address for %s" % client)
- raise socket.gaierror
+ msg = "Failed to find IP address for %s: %s" % (client,
+ result.error)
+ self.logger.error(msg)
+ raise PluginExecutionError(msg)
def get_namecache_entry(self, cip):
"""Build a cache of name lookups from client IP addresses."""
@@ -370,7 +375,7 @@ class SSHbase(Bcfg2.Server.Plugin.Plugin,
msg = "%s still not registered" % filename
self.logger.error(msg)
raise Bcfg2.Server.Plugin.PluginExecutionError(msg)
- self.core.fam.handle_events_in_interval(1)
+ self.fam.handle_events_in_interval(1)
tries += 1
try:
self.entries[entry.get('name')].bind_entry(entry, metadata)
@@ -399,11 +404,10 @@ class SSHbase(Bcfg2.Server.Plugin.Plugin,
cmd = ["ssh-keygen", "-q", "-f", temploc, "-N", "",
"-t", keytype, "-C", "root@%s" % client]
self.debug_log("SSHbase: Running: %s" % " ".join(cmd))
- proc = Popen(cmd, stdout=PIPE, stdin=PIPE)
- err = proc.communicate()[1]
- if proc.wait():
+ result = self.cmd.run(cmd)
+ if not result.success:
raise PluginExecutionError("SSHbase: Error running ssh-keygen: %s"
- % err)
+ % result.error)
try:
shutil.copy(temploc, fileloc)
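
The SSHbase hunks above swap the raw Popen calls for the shared Executor helper but keep the same fallback logic for IP lookups: try a normal DNS resolution first, then fall back to the hosts database via getent. A minimal standalone sketch of that fallback, using subprocess directly instead of Bcfg2's internal Executor and a hypothetical lookup_ip helper name:

    import socket
    import subprocess

    def lookup_ip(client):
        """Resolve a hostname, falling back to 'getent hosts' on DNS failure."""
        try:
            return socket.gethostbyname(client)
        except socket.gaierror:
            # getent consults all NSS sources (/etc/hosts, LDAP, ...), not just DNS
            proc = subprocess.Popen(["getent", "hosts", client],
                                    stdout=subprocess.PIPE)
            out = proc.communicate()[0].decode().strip().split()
            if proc.returncode == 0 and out:
                return out[0]          # first field is the address
            raise
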
diff --git a/src/lib/Bcfg2/Server/Plugins/SSLCA.py b/src/lib/Bcfg2/Server/Plugins/SSLCA.py
index f111ffc60..b21732666 100644
--- a/src/lib/Bcfg2/Server/Plugins/SSLCA.py
+++ b/src/lib/Bcfg2/Server/Plugins/SSLCA.py
@@ -6,9 +6,9 @@ import sys
import logging
import tempfile
import lxml.etree
-from subprocess import Popen, PIPE, STDOUT
import Bcfg2.Options
import Bcfg2.Server.Plugin
+from Bcfg2.Utils import Executor
from Bcfg2.Compat import ConfigParser
from Bcfg2.Server.Plugin import PluginExecutionError
@@ -17,6 +17,7 @@ LOGGER = logging.getLogger(__name__)
class SSLCAXMLSpec(Bcfg2.Server.Plugin.StructFile):
""" Base class to handle key.xml and cert.xml """
+ encryption = False
attrs = dict()
tag = None
@@ -89,6 +90,7 @@ class SSLCAEntrySet(Bcfg2.Server.Plugin.EntrySet):
self.parent = parent
self.key = None
self.cert = None
+ self.cmd = Executor(timeout=120)
def handle_event(self, event):
action = event.code2str()
@@ -122,14 +124,14 @@ class SSLCAEntrySet(Bcfg2.Server.Plugin.EntrySet):
elif ktype == 'dsa':
cmd = ["openssl", "dsaparam", "-noout", "-genkey", bits]
self.debug_log("SSLCA: Generating new key: %s" % " ".join(cmd))
- proc = Popen(cmd, stdout=PIPE, stderr=PIPE)
- key, err = proc.communicate()
- if proc.wait():
+ result = self.cmd.run(cmd)
+ if not result.success:
raise PluginExecutionError("SSLCA: Failed to generate key %s for "
"%s: %s" % (entry.get("name"),
- metadata.hostname, err))
- open(os.path.join(self.path, filename), 'w').write(key)
- return key
+ metadata.hostname,
+ result.error))
+ open(os.path.join(self.path, filename), 'w').write(result.stdout)
+ return result.stdout
def build_cert(self, entry, metadata, keyfile):
""" generate a new cert """
@@ -162,13 +164,10 @@ class SSLCAEntrySet(Bcfg2.Server.Plugin.EntrySet):
self.debug_log("SSLCA: Generating new certificate: %s" %
" ".join(_scrub_pass(a) for a in cmd))
- proc = Popen(cmd, stdin=PIPE, stdout=PIPE, stderr=PIPE)
- (cert, err) = proc.communicate()
- if proc.wait():
- # pylint: disable=E1103
+ result = self.cmd.run(cmd)
+ if not result.success:
raise PluginExecutionError("SSLCA: Failed to generate cert: %s"
- % err.splitlines()[-1])
- # pylint: enable=E1103
+ % result.error)
finally:
try:
if req_config and os.path.exists(req_config):
@@ -178,6 +177,7 @@ class SSLCAEntrySet(Bcfg2.Server.Plugin.EntrySet):
except OSError:
self.logger.error("SSLCA: Failed to unlink temporary files: %s"
% sys.exc_info()[1])
+ cert = result.stdout
if cert_spec['append_chain'] and 'chaincert' in ca:
cert += open(ca['chaincert']).read()
@@ -241,11 +241,10 @@ class SSLCAEntrySet(Bcfg2.Server.Plugin.EntrySet):
cmd = ["openssl", "req", "-new", "-config", req_config,
"-days", days, "-key", keyfile, "-text", "-out", req]
self.debug_log("SSLCA: Generating new CSR: %s" % " ".join(cmd))
- proc = Popen(cmd, stdout=PIPE, stderr=PIPE)
- err = proc.communicate()[1]
- if proc.wait():
+ result = self.cmd.run(cmd)
+ if not result.success:
raise PluginExecutionError("SSLCA: Failed to generate CSR: %s" %
- err)
+ result.error)
return req
def verify_cert(self, filename, keyfile, entry, metadata):
@@ -276,34 +275,34 @@ class SSLCAEntrySet(Bcfg2.Server.Plugin.EntrySet):
cmd.extend([chaincert, cert])
self.debug_log("SSLCA: Verifying %s against CA: %s" %
(entry.get("name"), " ".join(cmd)))
- res = Popen(cmd, stdout=PIPE, stderr=STDOUT).stdout.read()
- if res == cert + ": OK\n":
+ result = self.cmd.run(cmd)
+ if result.stdout == cert + ": OK\n":
self.debug_log("SSLCA: %s verified successfully against CA" %
entry.get("name"))
return True
self.logger.warning("SSLCA: %s failed verification against CA: %s" %
- (entry.get("name"), res))
+ (entry.get("name"), result.error))
return False
+ def _get_modulus(self, fname, ftype="x509"):
+ """ get the modulus from the given file """
+ cmd = ["openssl", ftype, "-noout", "-modulus", "-in", fname]
+ self.debug_log("SSLCA: Getting modulus of %s for verification: %s" %
+ (fname, " ".join(cmd)))
+ result = self.cmd.run(cmd)
+ if not result.success:
+ self.logger.warning("SSLCA: Failed to get modulus of %s: %s" %
+ (fname, result.error))
+ return result.stdout.strip()
+
def verify_cert_against_key(self, filename, keyfile):
"""
check that a certificate validates against its private key.
"""
- def _modulus(fname, ftype="x509"):
- """ get the modulus from the given file """
- cmd = ["openssl", ftype, "-noout", "-modulus", "-in", fname]
- self.debug_log("SSLCA: Getting modulus of %s for verification: %s"
- % (fname, " ".join(cmd)))
- proc = Popen(cmd, stdout=PIPE, stderr=PIPE)
- rv, err = proc.communicate()
- if proc.wait():
- self.logger.warning("SSLCA: Failed to get modulus of %s: %s" %
- (fname, err))
- return rv.strip() # pylint: disable=E1103
certfile = os.path.join(self.path, filename)
- cert = _modulus(certfile)
- key = _modulus(keyfile, ftype="rsa")
+ cert = self._get_modulus(certfile)
+ key = self._get_modulus(keyfile, ftype="rsa")
if cert == key:
self.debug_log("SSLCA: %s verified successfully against key %s" %
(filename, keyfile))
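
The refactor above only lifts the modulus helper out of verify_cert_against_key; the underlying check is unchanged: a certificate matches a private key when openssl reports the same modulus for both. A rough standalone equivalent, where cert_matches_key and _modulus are illustrative names rather than part of the plugin:

    import subprocess

    def _modulus(path, ftype):
        """Return the modulus openssl reports for a cert ('x509') or RSA key ('rsa')."""
        return subprocess.check_output(
            ["openssl", ftype, "-noout", "-modulus", "-in", path]).strip()

    def cert_matches_key(certfile, keyfile):
        """True if the certificate was issued for this private key."""
        return _modulus(certfile, "x509") == _modulus(keyfile, "rsa")
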
diff --git a/src/lib/Bcfg2/Server/Plugins/Snapshots.py b/src/lib/Bcfg2/Server/Plugins/Snapshots.py
deleted file mode 100644
index cc5946bb2..000000000
--- a/src/lib/Bcfg2/Server/Plugins/Snapshots.py
+++ /dev/null
@@ -1,129 +0,0 @@
-import logging
-import difflib
-import Bcfg2.Server.Plugin
-import Bcfg2.Server.Snapshots
-import Bcfg2.Logger
-from Bcfg2.Server.Snapshots.model import Snapshot
-import sys
-import time
-import threading
-
-# Compatibility import
-from Bcfg2.Compat import Queue, u_str, b64decode
-
-logger = logging.getLogger('Snapshots')
-
-ftypes = ['ConfigFile', 'SymLink', 'Directory']
-datafields = {
- 'Package': ['version'],
- 'Path': ['type'],
- 'Service': ['status'],
- 'ConfigFile': ['owner', 'group', 'mode'],
- 'Directory': ['owner', 'group', 'mode'],
- 'SymLink': ['to'],
- }
-
-
-def build_snap_ent(entry):
- basefields = []
- if entry.tag in ['Package', 'Service']:
- basefields += ['type']
- desired = dict([(key, u_str(entry.get(key))) for key in basefields])
- state = dict([(key, u_str(entry.get(key))) for key in basefields])
- desired.update([(key, u_str(entry.get(key))) for key in \
- datafields[entry.tag]])
- if entry.tag == 'ConfigFile' or \
- ((entry.tag == 'Path') and (entry.get('type') == 'file')):
- if entry.text == None:
- desired['contents'] = None
- else:
- if entry.get('encoding', 'ascii') == 'ascii':
- desired['contents'] = u_str(entry.text)
- else:
- desired['contents'] = u_str(b64decode(entry.text))
-
- if 'current_bfile' in entry.attrib:
- state['contents'] = u_str(b64decode(entry.get('current_bfile')))
- elif 'current_bdiff' in entry.attrib:
- diff = b64decode(entry.get('current_bdiff'))
- state['contents'] = u_str( \
- '\n'.join(difflib.restore(diff.split('\n'), 1)))
-
- state.update([(key, u_str(entry.get('current_' + key, entry.get(key)))) \
- for key in datafields[entry.tag]])
- if entry.tag in ['ConfigFile', 'Path'] and entry.get('exists', 'true') == 'false':
- state = None
- return [desired, state]
-
-
-class Snapshots(Bcfg2.Server.Plugin.Statistics):
- name = 'Snapshots'
- deprecated = True
-
- def __init__(self, core, datastore):
- Bcfg2.Server.Plugin.Statistics.__init__(self, core, datastore)
- self.session = Bcfg2.Server.Snapshots.setup_session(core.cfile)
- self.work_queue = Queue()
- self.loader = threading.Thread(target=self.load_snapshot)
-
- def start_threads(self):
- self.loader.start()
-
- def load_snapshot(self):
- while self.running:
- try:
- (metadata, data) = self.work_queue.get(block=True, timeout=5)
- except:
- continue
- self.statistics_from_old_stats(metadata, data)
-
- def process_statistics(self, metadata, data):
- return self.work_queue.put((metadata, data))
-
- def statistics_from_old_stats(self, metadata, xdata):
- # entries are name -> (modified, correct, start, desired, end)
- # not sure we can get all of this from old format stats
- t1 = time.time()
- entries = dict([('Package', dict()),
- ('Service', dict()), ('Path', dict())])
- extra = dict([('Package', dict()), ('Service', dict()),
- ('Path', dict())])
- bad = []
- state = xdata.find('.//Statistics')
- correct = state.get('state') == 'clean'
- revision = u_str(state.get('revision', '-1'))
- for entry in state.find('.//Bad'):
- data = [False, False, u_str(entry.get('name'))] \
- + build_snap_ent(entry)
- if entry.tag in ftypes:
- etag = 'Path'
- else:
- etag = entry.tag
- entries[etag][entry.get('name')] = data
- for entry in state.find('.//Modified'):
- if entry.tag in ftypes:
- etag = 'Path'
- else:
- etag = entry.tag
- if entry.get('name') in entries[etag]:
- data = [True, False, u_str(entry.get('name'))] + \
- build_snap_ent(entry)
- else:
- data = [True, False, u_str(entry.get('name'))] + \
- build_snap_ent(entry)
- for entry in state.find('.//Extra'):
- if entry.tag in datafields:
- data = build_snap_ent(entry)[1]
- ename = u_str(entry.get('name'))
- data['name'] = ename
- extra[entry.tag][ename] = data
- else:
- print("extra", entry.tag, entry.get('name'))
- t2 = time.time()
- snap = Snapshot.from_data(self.session, correct, revision,
- metadata, entries, extra)
- self.session.add(snap)
- self.session.commit()
- t3 = time.time()
- logger.info("Snapshot storage took %fs" % (t3 - t2))
- return True
diff --git a/src/lib/Bcfg2/Server/Plugins/Statistics.py b/src/lib/Bcfg2/Server/Plugins/Statistics.py
deleted file mode 100644
index 7fae445d0..000000000
--- a/src/lib/Bcfg2/Server/Plugins/Statistics.py
+++ /dev/null
@@ -1,160 +0,0 @@
-'''This file manages the statistics collected by the BCFG2 Server'''
-
-import copy
-import difflib
-import logging
-import lxml.etree
-import os
-import sys
-from time import asctime, localtime, time, strptime, mktime
-import threading
-from Bcfg2.Compat import b64decode
-import Bcfg2.Server.Plugin
-
-
-class StatisticsStore(object):
- """Manages the memory and file copy of statistics collected about client runs."""
- __min_write_delay__ = 0
-
- def __init__(self, filename):
- self.filename = filename
- self.element = lxml.etree.Element('Dummy')
- self.dirty = 0
- self.lastwrite = 0
- self.logger = logging.getLogger('Bcfg2.Server.Statistics')
- self.ReadFromFile()
-
- def WriteBack(self, force=0):
- """Write statistics changes back to persistent store."""
- if (self.dirty and (self.lastwrite + self.__min_write_delay__ <= time())) \
- or force:
- try:
- fout = open(self.filename + '.new', 'w')
- except IOError:
- ioerr = sys.exc_info()[1]
- self.logger.error("Failed to open %s for writing: %s" % (self.filename + '.new', ioerr))
- else:
- fout.write(lxml.etree.tostring(self.element,
- xml_declaration=False).decode('UTF-8'))
- fout.close()
- os.rename(self.filename + '.new', self.filename)
- self.dirty = 0
- self.lastwrite = time()
-
- def ReadFromFile(self):
- """Reads current state regarding statistics."""
- try:
- fin = open(self.filename, 'r')
- data = fin.read()
- fin.close()
- self.element = lxml.etree.XML(data)
- self.dirty = 0
- except (IOError, lxml.etree.XMLSyntaxError):
- self.logger.error("Creating new statistics file %s"%(self.filename))
- self.element = lxml.etree.Element('ConfigStatistics')
- self.WriteBack()
- self.dirty = 0
-
- def updateStats(self, xml, client):
- """Updates the statistics of a current node with new data."""
-
- # Current policy:
- # - Keep anything less than 24 hours old
- # - Keep latest clean run for clean nodes
- # - Keep latest clean and dirty run for dirty nodes
- newstat = xml.find('Statistics')
-
- if newstat.get('state') == 'clean':
- node_dirty = 0
- else:
- node_dirty = 1
-
- # Find correct node entry in stats data
- # The following list comprehension should be guarenteed to return at
- # most one result
- nodes = [elem for elem in self.element.findall('Node') \
- if elem.get('name') == client]
- nummatch = len(nodes)
- if nummatch == 0:
- # Create an entry for this node
- node = lxml.etree.SubElement(self.element, 'Node', name=client)
- elif nummatch == 1 and not node_dirty:
- # Delete old instance
- node = nodes[0]
- [node.remove(elem) for elem in node.findall('Statistics') \
- if self.isOlderThan24h(elem.get('time'))]
- elif nummatch == 1 and node_dirty:
- # Delete old dirty statistics entry
- node = nodes[0]
- [node.remove(elem) for elem in node.findall('Statistics') \
- if (elem.get('state') == 'dirty' \
- and self.isOlderThan24h(elem.get('time')))]
- else:
- # Shouldn't be reached
- self.logger.error("Duplicate node entry for %s"%(client))
-
- # Set current time for stats
- newstat.set('time', asctime(localtime()))
-
- # Add statistic
- node.append(copy.copy(newstat))
-
- # Set dirty
- self.dirty = 1
- self.WriteBack(force=1)
-
- def isOlderThan24h(self, testTime):
- """Helper function to determine if <time> string is older than 24 hours."""
- now = time()
- utime = mktime(strptime(testTime))
- secondsPerDay = 60*60*24
-
- return (now-utime) > secondsPerDay
-
-
-class Statistics(Bcfg2.Server.Plugin.ThreadedStatistics,
- Bcfg2.Server.Plugin.PullSource):
- name = 'Statistics'
- deprecated = True
-
- def __init__(self, core, datastore):
- Bcfg2.Server.Plugin.ThreadedStatistics.__init__(self, core, datastore)
- Bcfg2.Server.Plugin.PullSource.__init__(self)
- fpath = "%s/etc/statistics.xml" % datastore
- self.data_file = StatisticsStore(fpath)
-
- def handle_statistic(self, metadata, data):
- self.data_file.updateStats(data, metadata.hostname)
-
- def FindCurrent(self, client):
- rt = self.data_file.element.xpath('//Node[@name="%s"]' % client)[0]
- maxtime = max([strptime(stat.get('time')) for stat \
- in rt.findall('Statistics')])
- return [stat for stat in rt.findall('Statistics') \
- if strptime(stat.get('time')) == maxtime][0]
-
- def GetExtra(self, client):
- return [(entry.tag, entry.get('name')) for entry \
- in self.FindCurrent(client).xpath('.//Extra/*')]
-
- def GetCurrentEntry(self, client, e_type, e_name):
- curr = self.FindCurrent(client)
- entry = curr.xpath('.//Bad/%s[@name="%s"]' % (e_type, e_name))
- if not entry:
- raise Bcfg2.Server.Plugin.PluginExecutionError
- cfentry = entry[-1]
-
- owner = cfentry.get('current_owner', cfentry.get('owner'))
- group = cfentry.get('current_group', cfentry.get('group'))
- mode = cfentry.get('current_mode', cfentry.get('mode'))
- if cfentry.get('sensitive') in ['true', 'True']:
- raise Bcfg2.Server.Plugin.PluginExecutionError
- elif 'current_bfile' in cfentry.attrib:
- contents = b64decode(cfentry.get('current_bfile'))
- elif 'current_bdiff' in cfentry.attrib:
- diff = b64decode(cfentry.get('current_bdiff'))
- contents = '\n'.join(difflib.restore(diff.split('\n'), 1))
- else:
- contents = None
-
- return (owner, group, mode, contents)
diff --git a/src/lib/Bcfg2/Server/Plugins/Svn.py b/src/lib/Bcfg2/Server/Plugins/Svn.py
index 51f44c52d..34a6e89e0 100644
--- a/src/lib/Bcfg2/Server/Plugins/Svn.py
+++ b/src/lib/Bcfg2/Server/Plugins/Svn.py
@@ -10,8 +10,7 @@ try:
import pysvn
HAS_SVN = True
except ImportError:
- import pipes
- from subprocess import Popen, PIPE
+ from Bcfg2.Utils import Executor
HAS_SVN = False
@@ -29,10 +28,12 @@ class Svn(Bcfg2.Server.Plugin.Version):
self.revision = None
self.svn_root = None
+ self.client = None
+ self.cmd = None
if not HAS_SVN:
self.logger.debug("Svn: PySvn not found, using CLI interface to "
"SVN")
- self.client = None
+ self.cmd = Executor()
else:
self.client = pysvn.Client()
# pylint: disable=E1101
@@ -84,15 +85,16 @@ class Svn(Bcfg2.Server.Plugin.Version):
except pysvn.ClientError: # pylint: disable=E1101
msg = "Svn: Failed to get revision: %s" % sys.exc_info()[1]
else:
- try:
- data = Popen("env LC_ALL=C svn info %s" %
- pipes.quote(self.vcs_root), shell=True,
- stdout=PIPE).communicate()[0].split('\n')
- return [line.split(': ')[1] for line in data
- if line[:9] == 'Revision:'][-1]
- except IndexError:
- msg = "Failed to read svn info"
- self.logger.error('Ran command "svn info %s"' % self.vcs_root)
+ result = self.cmd.run(["env LC_ALL=C", "svn", "info",
+ self.vcs_root],
+ shell=True)
+ if result.success:
+ self.revision = [line.split(': ')[1]
+ for line in result.stdout.splitlines()
+ if line.startswith('Revision:')][-1]
+ return self.revision
+ else:
+ msg = "Failed to read svn info: %s" % result.error
self.revision = None
raise Bcfg2.Server.Plugin.PluginExecutionError(msg)
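
For reference, the CLI fallback in the Svn hunk above boils down to scraping the Revision line from svn info run with LC_ALL=C (so the label is not localized). A minimal sketch of that parsing, independent of the plugin; get_svn_revision is a made-up name:

    import os
    import subprocess

    def get_svn_revision(path):
        """Return the revision 'svn info' reports for a working copy."""
        out = subprocess.check_output(["svn", "info", path],
                                      env=dict(os.environ, LC_ALL="C"))
        for line in out.decode().splitlines():
            if line.startswith("Revision:"):
                return line.split(": ", 1)[1]
        raise RuntimeError("no Revision line in 'svn info' output")
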
diff --git a/src/lib/Bcfg2/Server/Plugins/TCheetah.py b/src/lib/Bcfg2/Server/Plugins/TCheetah.py
deleted file mode 100644
index f2c59ce29..000000000
--- a/src/lib/Bcfg2/Server/Plugins/TCheetah.py
+++ /dev/null
@@ -1,79 +0,0 @@
-'''This module implements a templating generator based on Cheetah'''
-
-import logging
-import sys
-import traceback
-import Bcfg2.Server.Plugin
-
-from Bcfg2.Compat import unicode, b64encode
-
-logger = logging.getLogger('Bcfg2.Plugins.TCheetah')
-
-try:
- import Cheetah.Template
- import Cheetah.Parser
-except:
- logger.error("TCheetah: Failed to import Cheetah. Is it installed?")
- raise
-
-
-class TemplateFile:
- """Template file creates Cheetah template structures for the loaded file."""
-
- def __init__(self, name, specific, encoding):
- self.name = name
- self.specific = specific
- self.encoding = encoding
- self.template = None
- self.searchlist = dict()
-
- def handle_event(self, event):
- """Handle all fs events for this template."""
- if event.code2str() == 'deleted':
- return
- try:
- s = {'useStackFrames': False}
- self.template = Cheetah.Template.Template(open(self.name).read(),
- compilerSettings=s,
- searchList=self.searchlist)
- except Cheetah.Parser.ParseError:
- perror = sys.exc_info()[1]
- logger.error("Cheetah parse error for file %s" % (self.name))
- logger.error(perror.report())
-
- def bind_entry(self, entry, metadata):
- """Build literal file information."""
- self.template.metadata = metadata
- self.searchlist['metadata'] = metadata
- self.template.path = entry.get('realname', entry.get('name'))
- self.searchlist['path'] = entry.get('realname', entry.get('name'))
- self.template.source_path = self.name
- self.searchlist['source_path'] = self.name
-
- if entry.tag == 'Path':
- entry.set('type', 'file')
- try:
- if type(self.template) == unicode:
- entry.text = self.template
- else:
- if entry.get('encoding') == 'base64':
- # take care of case where file needs base64 encoding
- entry.text = b64encode(self.template)
- else:
- entry.text = unicode(str(self.template), self.encoding)
- except:
- (a, b, c) = sys.exc_info()
- msg = traceback.format_exception(a, b, c, limit=2)[-1][:-1]
- logger.error(msg)
- logger.error("TCheetah template error for %s" % self.searchlist['path'])
- del a, b, c
- raise Bcfg2.Server.Plugin.PluginExecutionError
-
-
-class TCheetah(Bcfg2.Server.Plugin.GroupSpool):
- """The TCheetah generator implements a templating mechanism for configuration files."""
- name = 'TCheetah'
- __author__ = 'bcfg-dev@mcs.anl.gov'
- filename_pattern = 'template'
- es_child_cls = TemplateFile
- deprecated = True
diff --git a/src/lib/Bcfg2/Server/Plugins/TGenshi.py b/src/lib/Bcfg2/Server/Plugins/TGenshi.py
deleted file mode 100644
index 809587d91..000000000
--- a/src/lib/Bcfg2/Server/Plugins/TGenshi.py
+++ /dev/null
@@ -1,139 +0,0 @@
-"""This module implements a templating generator based on Genshi."""
-
-import logging
-import sys
-import Bcfg2.Server.Plugin
-
-from Bcfg2.Compat import unicode, b64encode
-
-logger = logging.getLogger('Bcfg2.Plugins.TGenshi')
-
-# try to import genshi stuff
-try:
- import genshi.core
- import genshi.input
- from genshi.template import TemplateLoader, \
- TextTemplate, MarkupTemplate, TemplateError
-except ImportError:
- logger.error("TGenshi: Failed to import Genshi. Is it installed?")
- raise
-try:
- from genshi.template import NewTextTemplate
- have_ntt = True
-except:
- have_ntt = False
-
-def removecomment(stream):
- """A genshi filter that removes comments from the stream."""
- for kind, data, pos in stream:
- if kind is genshi.core.COMMENT:
- continue
- yield kind, data, pos
-
-
-class TemplateFile(object):
- """Template file creates Genshi template structures for the loaded file."""
-
- def __init__(self, name, specific, encoding):
- self.name = name
- self.specific = specific
- self.encoding = encoding
- if self.specific.all:
- matchname = self.name
- elif self.specific.group:
- matchname = self.name[:self.name.find('.G')]
- else:
- matchname = self.name[:self.name.find('.H')]
- if matchname.endswith('.txt'):
- self.template_cls = TextTemplate
- elif matchname.endswith('.newtxt'):
- if not have_ntt:
- logger.error("Genshi NewTextTemplates not supported by this version of Genshi")
- else:
- self.template_cls = NewTextTemplate
- else:
- self.template_cls = MarkupTemplate
- self.HandleEvent = self.handle_event
-
- def handle_event(self, event=None):
- """Handle all fs events for this template."""
- if event and event.code2str() == 'deleted':
- return
- try:
- loader = TemplateLoader()
- try:
- self.template = loader.load(self.name, cls=self.template_cls,
- encoding=self.encoding)
- except LookupError:
- lerror = sys.exc_info()[1]
- logger.error('Genshi lookup error: %s' % lerror)
- except TemplateError:
- terror = sys.exc_info()[1]
- logger.error('Genshi template error: %s' % terror)
- except genshi.input.ParseError:
- perror = sys.exc_info()[1]
- logger.error('Genshi parse error: %s' % perror)
-
- def bind_entry(self, entry, metadata):
- """Build literal file information."""
- fname = entry.get('realname', entry.get('name'))
- if entry.tag == 'Path':
- entry.set('type', 'file')
- try:
- stream = self.template.generate( \
- name=fname, metadata=metadata,
- path=self.name).filter(removecomment)
- if have_ntt:
- ttypes = [TextTemplate, NewTextTemplate]
- else:
- ttypes = [TextTemplate]
- if True in [isinstance(self.template, t) for t in ttypes]:
- try:
- textdata = stream.render('text', strip_whitespace=False)
- except TypeError:
- textdata = stream.render('text')
- if type(textdata) == unicode:
- entry.text = textdata
- else:
- if entry.get('encoding') == 'base64':
- # take care of case where file needs base64 encoding
- entry.text = b64encode(textdata)
- else:
- entry.text = unicode(textdata, self.encoding)
- else:
- try:
- xmldata = stream.render('xml', strip_whitespace=False)
- except TypeError:
- xmldata = stream.render('xml')
- if type(xmldata) == unicode:
- entry.text = xmldata
- else:
- entry.text = unicode(xmldata, self.encoding)
- if entry.text == '':
- entry.set('empty', 'true')
- except TemplateError:
- err = sys.exc_info()[1]
- logger.exception('Genshi template error')
- raise Bcfg2.Server.Plugin.PluginExecutionError('Genshi template error: %s' % err)
- except AttributeError:
- err = sys.exc_info()[1]
- logger.exception('Genshi template loading error')
- raise Bcfg2.Server.Plugin.PluginExecutionError('Genshi template loading error: %s' % err)
-
-
-class TemplateEntrySet(Bcfg2.Server.Plugin.EntrySet):
- basename_is_regex = True
-
-
-class TGenshi(Bcfg2.Server.Plugin.GroupSpool):
- """
- The TGenshi generator implements a templating
- mechanism for configuration files.
-
- """
- name = 'TGenshi'
- __author__ = 'jeff@ocjtech.us'
- filename_pattern = 'template\.(txt|newtxt|xml)'
- es_cls = TemplateEntrySet
- es_child_cls = TemplateFile
- deprecated = True
diff --git a/src/lib/Bcfg2/Server/Plugins/TemplateHelper.py b/src/lib/Bcfg2/Server/Plugins/TemplateHelper.py
index fcd73bae2..050ba3b3e 100644
--- a/src/lib/Bcfg2/Server/Plugins/TemplateHelper.py
+++ b/src/lib/Bcfg2/Server/Plugins/TemplateHelper.py
@@ -23,9 +23,8 @@ def safe_module_name(module):
class HelperModule(object):
""" Representation of a TemplateHelper module """
- def __init__(self, name, fam=None):
+ def __init__(self, name):
self.name = name
- self.fam = fam
#: The name of the module as used by get_additional_data().
#: the name of the file with .py stripped off.
@@ -89,7 +88,7 @@ class TemplateHelper(Bcfg2.Server.Plugin.Plugin,
def __init__(self, core, datastore):
Bcfg2.Server.Plugin.Plugin.__init__(self, core, datastore)
Bcfg2.Server.Plugin.Connector.__init__(self)
- Bcfg2.Server.Plugin.DirectoryBacked.__init__(self, self.data, core.fam)
+ Bcfg2.Server.Plugin.DirectoryBacked.__init__(self, self.data)
def get_additional_data(self, _):
return dict([(h._module_name, h) # pylint: disable=W0212
diff --git a/src/lib/Bcfg2/Server/Plugins/Trigger.py b/src/lib/Bcfg2/Server/Plugins/Trigger.py
index f7c82fdb3..a1b79a8c5 100644
--- a/src/lib/Bcfg2/Server/Plugins/Trigger.py
+++ b/src/lib/Bcfg2/Server/Plugins/Trigger.py
@@ -3,18 +3,14 @@
import os
import pipes
import Bcfg2.Server.Plugin
-from subprocess import Popen, PIPE
+from Bcfg2.Utils import Executor
class TriggerFile(Bcfg2.Server.Plugin.FileBacked):
""" Representation of a trigger script file """
-
def HandleEvent(self, event=None):
return
- def __str__(self):
- return "%s: %s" % (self.__class__.__name__, self.name)
-
class Trigger(Bcfg2.Server.Plugin.Plugin,
Bcfg2.Server.Plugin.ClientRunHooks,
@@ -25,8 +21,8 @@ class Trigger(Bcfg2.Server.Plugin.Plugin,
def __init__(self, core, datastore):
Bcfg2.Server.Plugin.Plugin.__init__(self, core, datastore)
Bcfg2.Server.Plugin.ClientRunHooks.__init__(self)
- Bcfg2.Server.Plugin.DirectoryBacked.__init__(self, self.data,
- self.core.fam)
+ Bcfg2.Server.Plugin.DirectoryBacked.__init__(self, self.data)
+ self.cmd = Executor()
def async_run(self, args):
""" Run the trigger script asynchronously in a forked process
@@ -39,14 +35,12 @@ class Trigger(Bcfg2.Server.Plugin.Plugin,
if not dpid:
self.debug_log("Running %s" % " ".join(pipes.quote(a)
for a in args))
- proc = Popen(args, stdin=PIPE, stdout=PIPE, stderr=PIPE)
- err = proc.communicate()[1]
- rv = proc.wait()
- if rv != 0:
- self.logger.error("Trigger: Error running %s (%s): %s" %
- (args[0], rv, err))
- elif err:
- self.debug_log("Trigger: Error: %s" % err)
+ result = self.cmd.run(args)
+ if not result.success:
+ self.logger.error("Trigger: Error running %s: %s" %
+ (args[0], result.error))
+ elif result.stderr:
+ self.debug_log("Trigger: Error: %s" % result.stderr)
os._exit(0) # pylint: disable=W0212
def end_client_run(self, metadata):
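
The async_run change above only swaps the Popen call for the Executor; the forking that makes trigger execution asynchronous (the dpid seen above, apparently the result of a second fork used to detach fully) is untouched. A stripped-down sketch of the basic fork-run-exit idea, with run_detached as an illustrative name rather than anything in Bcfg2:

    import os
    import subprocess

    def run_detached(args):
        """Run a command in a forked child so the caller does not block on it."""
        pid = os.fork()
        if pid == 0:
            # child: run the script, then exit without normal interpreter cleanup
            rv = subprocess.call(args)
            os._exit(rv)
        return pid   # parent: reap later with os.waitpid(pid, 0)
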
diff --git a/src/lib/Bcfg2/Server/Plugins/__init__.py b/src/lib/Bcfg2/Server/Plugins/__init__.py
index ad51cf368..fdb20ed0a 100644
--- a/src/lib/Bcfg2/Server/Plugins/__init__.py
+++ b/src/lib/Bcfg2/Server/Plugins/__init__.py
@@ -1,5 +1 @@
"""Imports for Bcfg2.Server.Plugins."""
-
-from Bcfg2.Compat import walk_packages
-
-__all__ = [m[1] for m in walk_packages(path=__path__)]
diff --git a/src/lib/Bcfg2/SSLServer.py b/src/lib/Bcfg2/Server/SSLServer.py
index 316c2f86c..8bdcf0500 100644
--- a/src/lib/Bcfg2/SSLServer.py
+++ b/src/lib/Bcfg2/Server/SSLServer.py
@@ -18,8 +18,6 @@ from Bcfg2.Compat import xmlrpclib, SimpleXMLRPCServer, SocketServer, \
class XMLRPCDispatcher(SimpleXMLRPCServer.SimpleXMLRPCDispatcher):
""" An XML-RPC dispatcher. """
- logger = logging.getLogger("Bcfg2.SSLServer.XMLRPCDispatcher")
-
def __init__(self, allow_none, encoding):
try:
SimpleXMLRPCServer.SimpleXMLRPCDispatcher.__init__(self,
@@ -29,6 +27,7 @@ class XMLRPCDispatcher(SimpleXMLRPCServer.SimpleXMLRPCDispatcher):
# Python 2.4?
SimpleXMLRPCServer.SimpleXMLRPCDispatcher.__init__(self)
+ self.logger = logging.getLogger(self.__class__.__name__)
self.allow_none = allow_none
self.encoding = encoding
@@ -63,9 +62,7 @@ class XMLRPCDispatcher(SimpleXMLRPCServer.SimpleXMLRPCDispatcher):
class SSLServer(SocketServer.TCPServer, object):
""" TCP server supporting SSL encryption. """
-
allow_reuse_address = True
- logger = logging.getLogger("Bcfg2.SSLServer.SSLServer")
def __init__(self, listen_all, server_address, RequestHandlerClass,
keyfile=None, certfile=None, reqCert=False, ca=None,
@@ -98,6 +95,8 @@ class SSLServer(SocketServer.TCPServer, object):
if ':' in server_address[0]:
self.address_family = socket.AF_INET6
+ self.logger = logging.getLogger(self.__class__.__name__)
+
try:
SocketServer.TCPServer.__init__(self, listen_address,
RequestHandlerClass)
@@ -184,7 +183,10 @@ class XMLRPCRequestHandler(SimpleXMLRPCServer.SimpleXMLRPCRequestHandler):
Adds support for HTTP authentication.
"""
- logger = logging.getLogger("Bcfg2.SSLServer.XMLRPCRequestHandler")
+ def __init__(self, *args, **kwargs):
+ self.logger = logging.getLogger(self.__class__.__name__)
+ SimpleXMLRPCServer.SimpleXMLRPCRequestHandler.__init__(self, *args,
+ **kwargs)
def authenticate(self):
try:
@@ -207,8 +209,9 @@ class XMLRPCRequestHandler(SimpleXMLRPCServer.SimpleXMLRPCRequestHandler):
password = ""
cert = self.request.getpeercert()
client_address = self.request.getpeername()
- return self.server.instance.authenticate(cert, username,
- password, client_address)
+ return (self.server.instance.authenticate(cert, username,
+ password, client_address) and
+ self.server.instance.check_acls(client_address[0]))
def parse_request(self):
"""Extends parse_request.
@@ -259,6 +262,12 @@ class XMLRPCRequestHandler(SimpleXMLRPCServer.SimpleXMLRPCRequestHandler):
raise
else:
# got a valid XML RPC response
+ # first, check ACLs
+ client_address = self.request.getpeername()
+ method = xmlrpclib.loads(data)[1]
+ if not self.server.instance.check_acls(client_address, method):
+ self.send_error(401, self.responses[401][0])
+ self.end_headers()
try:
self.send_response(200)
self.send_header("Content-type", "text/xml")
diff --git a/src/lib/Bcfg2/Server/Snapshots/__init__.py b/src/lib/Bcfg2/Server/Snapshots/__init__.py
deleted file mode 100644
index d42aa0525..000000000
--- a/src/lib/Bcfg2/Server/Snapshots/__init__.py
+++ /dev/null
@@ -1,31 +0,0 @@
-__all__ = ['models', 'db_from_config', 'setup_session']
-
-import sqlalchemy
-import sqlalchemy.orm
-# Compatibility import
-from Bcfg2.Compat import ConfigParser
-
-
-def db_from_config(cfile):
- cp = ConfigParser.ConfigParser()
- cp.read([cfile])
- driver = cp.get('snapshots', 'driver')
- if driver == 'sqlite':
- path = cp.get('snapshots', 'database')
- return 'sqlite:///%s' % path
- elif driver in ['mysql', 'postgres']:
- user = cp.get('snapshots', 'user')
- password = cp.get('snapshots', 'password')
- host = cp.get('snapshots', 'host')
- db = cp.get('snapshots', 'database')
- return '%s://%s:%s@%s/%s' % (driver, user, password, host, db)
- else:
- raise Exception("unsupported db driver %s" % driver)
-
-
-def setup_session(cfile, debug=False):
- engine = sqlalchemy.create_engine(db_from_config(cfile),
- echo=debug)
- Session = sqlalchemy.orm.sessionmaker()
- Session.configure(bind=engine)
- return Session()
diff --git a/src/lib/Bcfg2/Server/Snapshots/model.py b/src/lib/Bcfg2/Server/Snapshots/model.py
deleted file mode 100644
index d578cd2c0..000000000
--- a/src/lib/Bcfg2/Server/Snapshots/model.py
+++ /dev/null
@@ -1,323 +0,0 @@
-import sys
-from sqlalchemy import Table, Column, Integer, Unicode, ForeignKey, Boolean, \
- DateTime, UnicodeText, desc
-import datetime
-import sqlalchemy.exceptions
-from sqlalchemy.orm import relation, backref
-from sqlalchemy.ext.declarative import declarative_base
-
-from Bcfg2.Compat import u_str
-
-
-class Uniquer(object):
- force_rt = True
-
- @classmethod
- def by_value(cls, session, **kwargs):
- if cls.force_rt:
- try:
- return session.query(cls).filter_by(**kwargs).one()
- except sqlalchemy.exceptions.InvalidRequestError:
- return cls(**kwargs)
- else:
- return cls(**kwargs)
-
- @classmethod
- def from_record(cls, session, data):
- return cls.by_value(session, **data)
-
-Base = declarative_base()
-
-
-class Administrator(Uniquer, Base):
- __tablename__ = 'administrator'
- id = Column(Integer, primary_key=True)
- name = Column(Unicode(20), unique=True)
- email = Column(Unicode(64))
-
-admin_client = Table('admin_client', Base.metadata,
- Column('admin_id',
- Integer,
- ForeignKey('administrator.id')),
- Column('client_id',
- Integer,
- ForeignKey('client.id')))
-
-admin_group = Table('admin_group', Base.metadata,
- Column('admin_id',
- Integer,
- ForeignKey('administrator.id')),
- Column('group_id',
- Integer,
- ForeignKey('group.id')))
-
-
-class Client(Uniquer, Base):
- __tablename__ = 'client'
- id = Column(Integer, primary_key=True)
- name = Column(Unicode(64), unique=True)
- admins = relation("Administrator", secondary=admin_client,
- backref='clients')
- active = Column(Boolean, default=True)
- online = Column(Boolean, default=True)
- online_ts = Column(DateTime)
-
-
-class Group(Uniquer, Base):
- __tablename__ = 'group'
- id = Column(Integer, primary_key=True)
- name = Column(Unicode(32), unique=True)
- admins = relation("Administrator", secondary=admin_group,
- backref='groups')
-
-
-class ConnectorKeyVal(Uniquer, Base):
- __tablename__ = 'connkeyval'
- id = Column(Integer, primary_key=True)
- connector = Column(Unicode(16))
- key = Column(Unicode(32))
- value = Column(UnicodeText)
-
-meta_group = Table('meta_group', Base.metadata,
- Column('metadata_id',
- Integer,
- ForeignKey('metadata.id')),
- Column('group_id',
- Integer,
- ForeignKey('group.id')))
-
-meta_conn = Table('meta_conn', Base.metadata,
- Column('metadata_id',
- Integer,
- ForeignKey('metadata.id')),
- Column('connkeyval_id',
- Integer,
- ForeignKey('connkeyval.id')))
-
-
-class Metadata(Base):
- __tablename__ = 'metadata'
- id = Column(Integer, primary_key=True)
- client_id = Column(Integer, ForeignKey('client.id'))
- client = relation(Client)
- groups = relation("Group", secondary=meta_group)
- keyvals = relation(ConnectorKeyVal, secondary=meta_conn)
- timestamp = Column(DateTime)
-
- @classmethod
- def from_metadata(cls, mysession, mymetadata):
- client = Client.by_value(mysession, name=u_str(mymetadata.hostname))
- m = cls(client=client)
- for group in mymetadata.groups:
- m.groups.append(Group.by_value(mysession, name=u_str(group)))
- for connector in mymetadata.connectors:
- data = getattr(mymetadata, connector)
- if not isinstance(data, dict):
- continue
- for key, value in list(data.items()):
- if not isinstance(value, str):
- continue
- m.keyvals.append(ConnectorKeyVal.by_value(mysession,
- connector=u_str(connector),
- key=u_str(key),
- value=u_str(value)))
- return m
-
-
-class Package(Base, Uniquer):
- __tablename__ = 'package'
- id = Column(Integer, primary_key=True)
- name = Column(Unicode(24))
- type = Column(Unicode(16))
- version = Column(Unicode(16))
- verification_status = Column(Boolean)
-
-
-class CorrespondenceType(object):
- mtype = Package
-
- @classmethod
- def from_record(cls, mysession, record):
- (mod, corr, name, s_dict, e_dict) = record
- if not s_dict:
- start = None
- else:
- start = cls.mtype.by_value(mysession, name=name, **s_dict)
- if s_dict != e_dict:
- end = cls.mtype.by_value(mysession, name=name, **e_dict)
- else:
- end = start
- return cls(start=start, end=end, modified=mod, correct=corr)
-
-
-class PackageCorrespondence(Base, CorrespondenceType):
- mtype = Package
- __tablename__ = 'package_pair'
- id = Column(Integer, primary_key=True)
- start_id = Column(Integer, ForeignKey('package.id'))
- start = relation(Package, primaryjoin=start_id == Package.id)
- end_id = Column(Integer, ForeignKey('package.id'), nullable=True)
- end = relation(Package, primaryjoin=end_id == Package.id)
- modified = Column(Boolean)
- correct = Column(Boolean)
-
-package_snap = Table('package_snap', Base.metadata,
- Column('ppair_id',
- Integer,
- ForeignKey('package_pair.id')),
- Column('snapshot_id',
- Integer,
- ForeignKey('snapshot.id')))
-
-
-class Service(Base, Uniquer):
- __tablename__ = 'service'
- id = Column(Integer, primary_key=True)
- name = Column(Unicode(16))
- type = Column(Unicode(12))
- status = Column(Boolean)
-
-
-class ServiceCorrespondence(Base, CorrespondenceType):
- mtype = Service
- __tablename__ = 'service_pair'
- id = Column(Integer, primary_key=True)
- start_id = Column(Integer, ForeignKey('service.id'))
- start = relation(Service, primaryjoin=start_id == Service.id)
- end_id = Column(Integer, ForeignKey('service.id'), nullable=True)
- end = relation(Service, primaryjoin=end_id == Service.id)
- modified = Column(Boolean)
- correct = Column(Boolean)
-
-service_snap = Table('service_snap', Base.metadata,
- Column('spair_id',
- Integer,
- ForeignKey('service_pair.id')),
- Column('snapshot_id',
- Integer,
- ForeignKey('snapshot.id')))
-
-
-class File(Base, Uniquer):
- __tablename__ = 'file'
- id = Column(Integer, primary_key=True)
- name = Column(UnicodeText)
- type = Column(Unicode(12))
- owner = Column(Unicode(12))
- group = Column(Unicode(16))
- perms = Column(Integer)
- contents = Column(UnicodeText)
-
-
-class FileCorrespondence(Base, CorrespondenceType):
- mtype = File
- __tablename__ = 'file_pair'
- id = Column(Integer, primary_key=True)
- start_id = Column(Integer, ForeignKey('file.id'))
- start = relation(File, primaryjoin=start_id == File.id)
- end_id = Column(Integer, ForeignKey('file.id'), nullable=True)
- end = relation(File, primaryjoin=end_id == File.id)
- modified = Column(Boolean)
- correct = Column(Boolean)
-
-file_snap = Table('file_snap', Base.metadata,
- Column('fpair_id',
- Integer,
- ForeignKey('file_pair.id')),
- Column('snapshot_id',
- Integer,
- ForeignKey('snapshot.id')))
-
-extra_pkg_snap = Table('extra_pkg_snap', Base.metadata,
- Column('package_id',
- Integer,
- ForeignKey('package.id')),
- Column('snapshot_id',
- Integer,
- ForeignKey('snapshot.id')))
-
-extra_file_snap = Table('extra_file_snap', Base.metadata,
- Column('file_id',
- Integer,
- ForeignKey('file.id')),
- Column('snapshot_id',
- Integer,
- ForeignKey('snapshot.id')))
-
-extra_service_snap = Table('extra_service_snap', Base.metadata,
- Column('service_id',
- Integer,
- ForeignKey('service.id')),
- Column('snapshot_id',
- Integer,
- ForeignKey('snapshot.id')))
-
-
-class Action(Base):
- __tablename__ = 'action'
- id = Column(Integer, primary_key=True)
- command = Column(UnicodeText)
- return_code = Column(Integer)
- output = Column(UnicodeText)
-
-action_snap = Table('action_snap', Base.metadata,
- Column('action_id', Integer, ForeignKey('action.id')),
- Column('snapshot_id', Integer, ForeignKey('snapshot.id')))
-
-
-class Snapshot(Base):
- __tablename__ = 'snapshot'
- id = Column(Integer, primary_key=True)
- correct = Column(Boolean)
- revision = Column(Unicode(36))
- metadata_id = Column(Integer, ForeignKey('metadata.id'))
- client_metadata = relation(Metadata, primaryjoin=metadata_id == Metadata.id)
- timestamp = Column(DateTime, default=datetime.datetime.now)
- client_id = Column(Integer, ForeignKey('client.id'))
- client = relation(Client, backref=backref('snapshots'))
- packages = relation(PackageCorrespondence, secondary=package_snap)
- services = relation(ServiceCorrespondence, secondary=service_snap)
- files = relation(FileCorrespondence, secondary=file_snap)
- actions = relation(Action, secondary=action_snap)
- extra_packages = relation(Package, secondary=extra_pkg_snap)
- extra_services = relation(Service, secondary=extra_service_snap)
- extra_files = relation(File, secondary=extra_file_snap)
-
- c_dispatch = dict([('Package', ('packages', PackageCorrespondence)),
- ('Service', ('services', ServiceCorrespondence)),
- ('Path', ('files', FileCorrespondence))])
- e_dispatch = dict([('Package', ('extra_packages', Package)),
- ('Service', ('extra_services', Service)),
- ('Path', ('extra_files', File))])
-
- @classmethod
- def from_data(cls, session, correct, revision, metadata, entries, extra):
- dbm = Metadata.from_metadata(session, metadata)
- snap = cls(correct=correct, client_metadata=dbm, revision=revision,
- timestamp=datetime.datetime.now(), client=dbm.client)
- for (dispatch, data) in [(cls.c_dispatch, entries),
- (cls.e_dispatch, extra)]:
- for key in dispatch:
- dest, ecls = dispatch[key]
- for edata in list(data[key].values()):
- getattr(snap, dest).append(ecls.from_record(session, edata))
- return snap
-
- @classmethod
- def by_client(cls, session, clientname):
- return session.query(cls).join(cls.client_metadata,
- Metadata.client).filter(Client.name == clientname)
-
- @classmethod
- def get_current(cls, session, clientname):
- return session.query(Snapshot).join(Snapshot.client_metadata,
- Metadata.client).filter(Client.name == clientname).order_by(desc(Snapshot.timestamp)).first()
-
- @classmethod
- def get_by_date(cls, session, clientname, timestamp):
- return session.query(Snapshot)\
- .join(Snapshot.client_metadata, Metadata.client)\
- .filter(Snapshot.timestamp < timestamp)\
- .filter(Client.name == clientname)\
- .order_by(desc(Snapshot.timestamp))\
- .first()
diff --git a/src/lib/Bcfg2/Statistics.py b/src/lib/Bcfg2/Server/Statistics.py
index 3825941af..e34135d4b 100644
--- a/src/lib/Bcfg2/Statistics.py
+++ b/src/lib/Bcfg2/Server/Statistics.py
@@ -2,6 +2,9 @@
server core. This data is exposed by
:func:`Bcfg2.Server.Core.BaseCore.get_statistics`."""
+import time
+from Bcfg2.Compat import wraps
+
class Statistic(object):
""" A single named statistic, tracking minimum, maximum, and
@@ -85,3 +88,37 @@ class Statistics(object):
#: A module-level :class:`Statistics` objects used to track all
#: execution time metrics for the server.
stats = Statistics() # pylint: disable=C0103
+
+
+class track_statistics(object): # pylint: disable=C0103
+ """ Decorator that tracks execution time for the given method with
+ :mod:`Bcfg2.Server.Statistics` for reporting via ``bcfg2-admin
+ perf`` """
+
+ def __init__(self, name=None):
+ """
+ :param name: The name under which statistics for this function
+ will be tracked. By default, the name will be
+ the name of the function concatenated with the
+ name of the class the function is a member of.
+ :type name: string
+ """
+ # if this is None, it will be set later during __call__()
+ self.name = name
+
+ def __call__(self, func):
+ if self.name is None:
+ self.name = func.__name__
+
+ @wraps(func)
+ def inner(obj, *args, **kwargs):
+ """ The decorated function """
+ name = "%s:%s" % (obj.__class__.__name__, self.name)
+
+ start = time.time()
+ try:
+ return func(obj, *args, **kwargs)
+ finally:
+ stats.add_value(name, time.time() - start)
+
+ return inner
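
The new track_statistics decorator wraps a method, times every call, and records it under "ClassName:name" in the module-level stats object so it shows up in bcfg2-admin perf. A usage sketch, assuming the module path introduced by this patch; MyPlugin and build_thing are made-up names:

    import time
    from Bcfg2.Server.Statistics import track_statistics

    class MyPlugin(object):
        @track_statistics()              # reported as "MyPlugin:build_thing"
        def build_thing(self, metadata):
            time.sleep(0.1)              # stand-in for real work
            return metadata
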
diff --git a/src/lib/Bcfg2/Server/__init__.py b/src/lib/Bcfg2/Server/__init__.py
index 0678e4579..bf8d0191d 100644
--- a/src/lib/Bcfg2/Server/__init__.py
+++ b/src/lib/Bcfg2/Server/__init__.py
@@ -1,9 +1,6 @@
"""This is the set of modules for Bcfg2.Server."""
import lxml.etree
-from Bcfg2.Compat import walk_packages
-
-__all__ = [m[1] for m in walk_packages(path=__path__)]
XI = 'http://www.w3.org/2001/XInclude'
XI_NAMESPACE = '{%s}' % XI
diff --git a/src/lib/Bcfg2/Server/models.py b/src/lib/Bcfg2/Server/models.py
index 1f64111e7..370854881 100644
--- a/src/lib/Bcfg2/Server/models.py
+++ b/src/lib/Bcfg2/Server/models.py
@@ -5,6 +5,7 @@ import copy
import logging
import Bcfg2.Options
import Bcfg2.Server.Plugins
+from Bcfg2.Compat import walk_packages
from django.db import models
LOGGER = logging.getLogger('Bcfg2.Server.models')
@@ -21,13 +22,22 @@ def load_models(plugins=None, cfile='/etc/bcfg2.conf', quiet=True):
# namely, _all_ plugins, so that the database is guaranteed to
# work, even if /etc/bcfg2.conf isn't set up properly
plugin_opt = copy.deepcopy(Bcfg2.Options.SERVER_PLUGINS)
- plugin_opt.default = Bcfg2.Server.Plugins.__all__
+ all_plugins = []
+ for submodule in walk_packages(path=Bcfg2.Server.Plugins.__path__,
+ prefix="Bcfg2.Server.Plugins."):
+ module = submodule[1].rsplit('.', 1)[-1]
+ if submodule[1] == "Bcfg2.Server.Plugins.%s" % module:
+ # we only include direct children of
+ # Bcfg2.Server.Plugins -- e.g., all_plugins should
+ # include Bcfg2.Server.Plugins.Cfg, but not
+ # Bcfg2.Server.Plugins.Cfg.CfgInfoXML
+ all_plugins.append(module)
+ plugin_opt.default = all_plugins
- setup = \
- Bcfg2.Options.OptionParser(dict(plugins=plugin_opt,
- configfile=Bcfg2.Options.CFILE),
- quiet=quiet)
- setup.parse([Bcfg2.Options.CFILE.cmd, cfile])
+ setup = Bcfg2.Options.get_option_parser()
+ setup.add_option("plugins", plugin_opt)
+ setup.add_option("configfile", Bcfg2.Options.CFILE)
+ setup.reparse(argv=[Bcfg2.Options.CFILE.cmd, cfile])
plugins = setup['plugins']
if MODELS:
@@ -48,7 +58,7 @@ def load_models(plugins=None, cfile='/etc/bcfg2.conf', quiet=True):
err = sys.exc_info()[1]
mod = __import__(plugin)
except: # pylint: disable=W0702
- if plugins != Bcfg2.Server.Plugins.__all__:
+ if plugins != plugin_opt.default:
# only produce errors if the default plugin list
# was not used -- i.e., if the config file was set
# up. don't produce errors when trying to load
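
The models change replaces the removed Bcfg2.Server.Plugins.__all__ list with an explicit walk_packages() scan that keeps only direct children of the Plugins package. The same filtering can be sketched in isolation with the standard-library pkgutil, assuming Bcfg2.Compat.walk_packages behaves like pkgutil.walk_packages; direct_plugin_names is an illustrative name:

    import pkgutil
    import Bcfg2.Server.Plugins

    def direct_plugin_names():
        """Direct children of Bcfg2.Server.Plugins (e.g. Cfg, but not Cfg.CfgInfoXML)."""
        prefix = "Bcfg2.Server.Plugins."
        names = []
        for _, modname, _ in pkgutil.walk_packages(
                path=Bcfg2.Server.Plugins.__path__, prefix=prefix):
            short = modname.rsplit('.', 1)[-1]
            if modname == prefix + short:   # keep only depth-one modules
                names.append(short)
        return names
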
diff --git a/src/lib/Bcfg2/Utils.py b/src/lib/Bcfg2/Utils.py
index 1c2dceed2..69c3264f9 100644
--- a/src/lib/Bcfg2/Utils.py
+++ b/src/lib/Bcfg2/Utils.py
@@ -199,9 +199,10 @@ class Executor(object):
except OSError:
pass
- def run(self, command, inputdata=None, shell=False, timeout=None):
+ def run(self, command, inputdata=None, timeout=None, **kwargs):
""" Run a command, given as a list, optionally giving it the
- specified input data.
+ specified input data. All additional keyword arguments are
+ passed through to :class:`subprocess.Popen`.
:param command: The command to run, as a list (preferred) or
as a string. See :class:`subprocess.Popen` for
@@ -209,8 +210,6 @@ class Executor(object):
:type command: list or string
:param inputdata: Data to pass to the command on stdin
:type inputdata: string
- :param shell: Run the given command in a shell (not recommended)
- :type shell: bool
:param timeout: Kill the command if it runs longer than this
many seconds. Set to 0 or -1 to explicitly
override a default timeout.
@@ -223,15 +222,11 @@ class Executor(object):
else:
cmdstr = " ".join(command)
self.logger.debug("Running: %s" % cmdstr)
- try:
- proc = subprocess.Popen(command, shell=shell, bufsize=16384,
- close_fds=True,
- stdin=subprocess.PIPE,
- stdout=subprocess.PIPE,
- stderr=subprocess.PIPE)
- except OSError:
- return ExecutorResult('', 'No such command: %s' % cmdstr,
- 127)
+ args = dict(shell=False, bufsize=16384, close_fds=True)
+ args.update(kwargs)
+ args.update(stdin=subprocess.PIPE, stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE)
+ proc = subprocess.Popen(command, **args)
if timeout is None:
timeout = self.timeout
if timeout is not None:
@@ -253,7 +248,8 @@ class Executor(object):
self.logger.debug('< %s' % line)
for line in stderr.splitlines(): # pylint: disable=E1103
self.logger.info(line)
- return ExecutorResult(stdout, stderr, proc.wait())
+ return ExecutorResult(stdout, stderr,
+ proc.wait()) # pylint: disable=E1101
finally:
if timeout is not None:
timer.cancel()
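
With this change Executor.run() drops its dedicated shell argument; any extra Popen keyword (shell, cwd, env, ...) is passed straight through, while stdin/stdout/stderr stay pipes. A hedged usage sketch, relying only on attributes seen elsewhere in this patch (the timeout constructor argument and the success, stdout, and error fields of the returned result):

    from Bcfg2.Utils import Executor

    cmd = Executor(timeout=30)                   # default timeout for every run
    result = cmd.run(["ls", "-l"], cwd="/tmp")   # extra kwargs go straight to Popen
    if result.success:
        print(result.stdout)
    else:
        print("command failed: %s" % result.error)
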
diff --git a/src/lib/Bcfg2/settings.py b/src/lib/Bcfg2/settings.py
index 9393830a8..f3697d66f 100644
--- a/src/lib/Bcfg2/settings.py
+++ b/src/lib/Bcfg2/settings.py
@@ -39,20 +39,20 @@ def _default_config():
""" get the default config file. returns /etc/bcfg2-web.conf,
UNLESS /etc/bcfg2.conf exists AND /etc/bcfg2-web.conf does not
exist. """
- optinfo = dict(cfile=Bcfg2.Options.CFILE,
- web_cfile=Bcfg2.Options.WEB_CFILE)
- setup = Bcfg2.Options.OptionParser(optinfo, quiet=True)
- setup.parse(sys.argv[1:], do_getopt=False)
- if (not os.path.exists(setup['web_cfile']) and
- os.path.exists(setup['cfile'])):
- return setup['cfile']
+ setup = Bcfg2.Options.get_option_parser()
+ setup.add_option("configfile", Bcfg2.Options.CFILE)
+ setup.add_option("web_configfile", Bcfg2.Options.WEB_CFILE)
+ setup.reparse(argv=sys.argv[1:], do_getopt=False)
+ if (not os.path.exists(setup['web_configfile']) and
+ os.path.exists(setup['configfile'])):
+ return setup['configfile']
else:
- return setup['web_cfile']
+ return setup['web_configfile']
DEFAULT_CONFIG = _default_config()
-def read_config(cfile=DEFAULT_CONFIG, repo=None, quiet=False):
+def read_config(cfile=DEFAULT_CONFIG, repo=None):
""" read the config file and set django settings based on it """
# pylint: disable=W0602,W0603
global DATABASE_ENGINE, DATABASE_NAME, DATABASE_USER, DATABASE_PASSWORD, \
@@ -65,15 +65,15 @@ def read_config(cfile=DEFAULT_CONFIG, repo=None, quiet=False):
(cfile, DEFAULT_CONFIG))
cfile = DEFAULT_CONFIG
- optinfo = Bcfg2.Options.DATABASE_COMMON_OPTIONS
- optinfo['repo'] = Bcfg2.Options.SERVER_REPOSITORY
# when setting a different config file, it has to be set in either
# sys.argv or in the OptionSet() constructor AS WELL AS the argv
# that's passed to setup.parse()
argv = [Bcfg2.Options.CFILE.cmd, cfile,
Bcfg2.Options.WEB_CFILE.cmd, cfile]
- setup = Bcfg2.Options.OptionParser(optinfo, argv=argv, quiet=quiet)
- setup.parse(argv)
+ setup = Bcfg2.Options.get_option_parser()
+ setup.add_options(Bcfg2.Options.DATABASE_COMMON_OPTIONS)
+ setup.add_option("repo", Bcfg2.Options.SERVER_REPOSITORY)
+ setup.reparse(argv=argv)
if repo is None:
repo = setup['repo']
@@ -86,14 +86,6 @@ def read_config(cfile=DEFAULT_CONFIG, repo=None, quiet=False):
HOST=setup['db_host'],
PORT=setup['db_port'])
- if HAS_DJANGO and django.VERSION[0] == 1 and django.VERSION[1] < 2:
- DATABASE_ENGINE = setup['db_engine']
- DATABASE_NAME = DATABASES['default']['NAME']
- DATABASE_USER = DATABASES['default']['USER']
- DATABASE_PASSWORD = DATABASES['default']['PASSWORD']
- DATABASE_HOST = DATABASES['default']['HOST']
- DATABASE_PORT = DATABASES['default']['PORT']
-
# dropping the version check. This was added in 1.1.2
TIME_ZONE = setup['time_zone']
@@ -110,7 +102,7 @@ def read_config(cfile=DEFAULT_CONFIG, repo=None, quiet=False):
# initialize settings from /etc/bcfg2-web.conf or /etc/bcfg2.conf, or
# set up basic defaults. this lets manage.py work in all cases
-read_config(quiet=True)
+read_config()
ADMINS = (('Root', 'root'))
MANAGERS = ADMINS
@@ -159,16 +151,10 @@ else:
}
}
-if HAS_DJANGO and django.VERSION[0] == 1 and django.VERSION[1] < 2:
- TEMPLATE_LOADERS = (
- 'django.template.loaders.filesystem.load_template_source',
- 'django.template.loaders.app_directories.load_template_source',
- )
-else:
- TEMPLATE_LOADERS = (
- 'django.template.loaders.filesystem.Loader',
- 'django.template.loaders.app_directories.Loader',
- )
+TEMPLATE_LOADERS = (
+ 'django.template.loaders.filesystem.Loader',
+ 'django.template.loaders.app_directories.Loader',
+)
#TODO - review these. auth and sessions aren't really used
MIDDLEWARE_CLASSES = (
@@ -194,20 +180,10 @@ TEMPLATE_DIRS = (
'/usr/share/python-support/python-django/django/contrib/admin/templates/',
)
-# TODO - sanitize this
-if HAS_DJANGO and django.VERSION[0] == 1 and django.VERSION[1] < 2:
- TEMPLATE_CONTEXT_PROCESSORS = (
- 'django.core.context_processors.auth',
- 'django.core.context_processors.debug',
- 'django.core.context_processors.i18n',
- 'django.core.context_processors.media',
- 'django.core.context_processors.request'
- )
-else:
- TEMPLATE_CONTEXT_PROCESSORS = (
- 'django.contrib.auth.context_processors.auth',
- 'django.core.context_processors.debug',
- 'django.core.context_processors.i18n',
- 'django.core.context_processors.media',
- 'django.core.context_processors.request'
- )
+TEMPLATE_CONTEXT_PROCESSORS = (
+ 'django.contrib.auth.context_processors.auth',
+ 'django.core.context_processors.debug',
+ 'django.core.context_processors.i18n',
+ 'django.core.context_processors.media',
+ 'django.core.context_processors.request'
+)
diff --git a/src/sbin/bcfg2 b/src/sbin/bcfg2
index 444e86a7c..62f749b80 100755
--- a/src/sbin/bcfg2
+++ b/src/sbin/bcfg2
@@ -3,8 +3,8 @@
import sys
import signal
-import Bcfg2.Options
from Bcfg2.Client.Client import Client
+from Bcfg2.Options import load_option_parser, CLIENT_COMMON_OPTIONS
def cb_sigint_handler(signum, frame):
@@ -13,8 +13,7 @@ def cb_sigint_handler(signum, frame):
def main():
- optinfo = Bcfg2.Options.CLIENT_COMMON_OPTIONS
- setup = Bcfg2.Options.OptionParser(optinfo)
+ setup = load_option_parser(CLIENT_COMMON_OPTIONS)
setup.parse(sys.argv[1:])
if setup['args']:
@@ -23,7 +22,7 @@ def main():
raise SystemExit(1)
signal.signal(signal.SIGINT, cb_sigint_handler)
- return Client(setup).run()
+ return Client().run()
if __name__ == '__main__':
sys.exit(main())
diff --git a/src/sbin/bcfg2-admin b/src/sbin/bcfg2-admin
index 31e49c00b..3bce7fdab 100755
--- a/src/sbin/bcfg2-admin
+++ b/src/sbin/bcfg2-admin
@@ -42,7 +42,7 @@ def main():
optinfo = dict()
optinfo.update(Bcfg2.Options.CLI_COMMON_OPTIONS)
optinfo.update(Bcfg2.Options.SERVER_COMMON_OPTIONS)
- setup = Bcfg2.Options.OptionParser(optinfo)
+ setup = Bcfg2.Options.load_option_parser(optinfo)
# override default help message to include description of all modes
setup.hm = "Usage:\n\n%s\n%s" % (setup.buildHelpMessage(),
create_description())
@@ -81,7 +81,7 @@ def main():
err = sys.exc_info()[1]
log.error("Failed to load admin mode %s: %s" % (modname, err))
raise SystemExit(1)
- mode = mode_cls(setup)
+ mode = mode_cls()
try:
mode(setup['args'][1:])
finally:
diff --git a/src/sbin/bcfg2-build-reports b/src/sbin/bcfg2-build-reports
deleted file mode 100755
index 1c9e9ad97..000000000
--- a/src/sbin/bcfg2-build-reports
+++ /dev/null
@@ -1,306 +0,0 @@
-#!/usr/bin/env python
-
-"""
-bcfg2-build-reports generates & distributes reports of statistic
-information for Bcfg2."""
-
-import copy
-import getopt
-import re
-import os
-import socket
-import sys
-from time import asctime, strptime
-from lxml.etree import XML, XSLT, parse, Element, ElementTree, SubElement, tostring, XMLSyntaxError
-# Compatibility imports
-from Bcfg2.Compat import ConfigParser, cmp
-
-def generatereport(rspec, nrpt):
- """
- generatereport creates and returns an ElementTree representation
- of a report adhering to the XML spec for intermediate reports.
- """
- reportspec = copy.deepcopy(rspec)
- nodereprt = copy.deepcopy(nrpt)
-
- reportgood = reportspec.get("good", default = 'Y')
- reportmodified = reportspec.get("modified", default = 'Y')
- current_date = asctime()[:10]
-
- """Build regex of all the nodes we are reporting about."""
- pattern = re.compile( '|'.join([item.get("name") for item in reportspec.findall('Machine')]))
-
- for node in nodereprt.findall('Node'):
- if not (node.findall("Statistics") and pattern.match(node.get('name'))):
- # Don't know enough about node.
- nodereprt.remove(node)
- continue
-
- # Reduce to most recent Statistics entry.
- statisticslist = node.findall('Statistics')
- # This line actually sorts from most recent to oldest.
- statisticslist.sort(lambda y, x: cmp(strptime(x.get("time")), strptime(y.get("time"))))
- stats = statisticslist[0]
-
- [node.remove(item) for item in node.findall('Statistics')]
-
- # Add a good tag if node is good and we wnat to report such.
- if reportgood == 'Y' and stats.get('state') == 'clean':
- SubElement(stats,"Good")
-
- [stats.remove(item) for item in stats.findall("Bad") + stats.findall("Modified") if \
- item.getchildren() == []]
- [stats.remove(item) for item in stats.findall("Modified") if reportmodified == 'N']
-
- # Test for staleness -if stale add Stale tag.
- if stats.get("time").find(current_date) == -1:
- SubElement(stats,"Stale")
- node.append(stats)
- return nodereprt
-
-def mail(mailbody, confi):
- """mail mails a previously generated report."""
-
- try:
- mailer = confi.get('statistics', 'sendmailpath')
- except (ConfigParser.NoSectionError, ConfigParser.NoOptionError):
- mailer = "/usr/sbin/sendmail"
- # Open a pipe to the mail program and
- # write the data to the pipe.
- pipe = os.popen("%s -t" % mailer, 'w')
- pipe.write(mailbody)
- exitcode = pipe.close()
- if exitcode:
- print("Exit code: %s" % exitcode)
-
-def rss(reportxml, delivery, report):
- """rss appends a new report to the specified rss file
- keeping the last 9 articles.
- """
- # Check and see if rss file exists.
- for destination in delivery.findall('Destination'):
- try:
- fil = open(destination.attrib['address'], 'r')
- olddoc = XML(fil.read())
-
- # Defines the number of recent articles to keep.
- items = olddoc.find("channel").findall("item")[0:9]
- fil.close()
- fil = open(destination.attrib['address'], 'w')
- except (IOError, XMLSyntaxError):
- fil = open(destination.attrib['address'], 'w')
- items = []
-
- rssdata = Element("rss")
- channel = SubElement(rssdata, "channel")
- rssdata.set("version", "2.0")
- chantitle = SubElement(channel, "title")
- chantitle.text = report.attrib['name']
- chanlink = SubElement(channel, "link")
-
- # This can later link to WWW report if one gets published
- # simultaneously?
- chanlink.text = "http://www.mcs.anl.gov/cobalt/bcfg2"
- chandesc = SubElement(channel, "description")
- chandesc.text = "Information regarding the 10 most recent bcfg2 runs."
-
- channel.append(XML(reportxml))
-
- if items != []:
- for item in items:
- channel.append(item)
-
- tree = tostring(rssdata, xml_declaration=False).decode('UTF-8')
- fil.write(tree)
- fil.close()
-
-def www(reportxml, delivery):
-    """www writes the report out to each configured destination."""
-
- # This can later link to WWW report if one gets published
- # simultaneously?
- for destination in delivery.findall('Destination'):
- fil = open(destination.attrib['address'], 'w')
-
- fil.write(reportxml)
- fil.close()
-
-def fileout(reportxml, delivery):
- """Outputs to plain text file."""
- for destination in delivery.findall('Destination'):
- fil = open(destination.attrib['address'], 'w')
-
- fil.write(reportxml)
- fil.close()
-
-def pretty_print(element, level=0):
- """Produce a pretty-printed text representation of element."""
- if element.text:
- fmt = "%s<%%s %%s>%%s</%%s>" % (level*" ")
- data = (element.tag, (" ".join(["%s='%s'" % keyval for keyval in list(element.attrib.items())])),
- element.text, element.tag)
- if element._children:
- fmt = "%s<%%s %%s>\n" % (level*" ",) + (len(element._children) * "%s") + "%s</%%s>\n" % (level*" ")
- data = (element.tag, ) + (" ".join(["%s='%s'" % keyval for keyval in list(element.attrib.items())]),)
- data += tuple([pretty_print(entry, level+2) for entry in element._children]) + (element.tag, )
- else:
- fmt = "%s<%%s %%s/>\n" % (level * " ")
- data = (element.tag, " ".join(["%s='%s'" % keyval for keyval in list(element.attrib.items())]))
- return fmt % data
-
-
-if __name__ == '__main__':
- all=False
- if '-C' in sys.argv:
- cfpath = sys.argv[sys.argv.index('-C') + 1]
- else:
- cfpath = '/etc/bcfg2.conf'
- c = ConfigParser.ConfigParser()
- c.read([cfpath])
- configpath = "%s/etc/report-configuration.xml" % c.get('server', 'repository')
- statpath = "%s/etc/statistics.xml" % c.get('server', 'repository')
- clientsdatapath = "%s/Metadata/clients.xml" % c.get('server', 'repository')
- try:
- prefix = c.get('server', 'prefix')
- except (ConfigParser.NoSectionError, ConfigParser.NoOptionError):
- prefix = '/usr'
-
- transformpath = "/%s/share/bcfg2/xsl-transforms/" % (prefix)
- #websrcspath = "/usr/share/bcfg2/web-rprt-srcs/"
-
- try:
- opts, args = getopt.getopt(sys.argv[1:], "C:hAc:Ns:", ["help", "all", "config=", "stats="])
- except getopt.GetoptError:
- mesg = sys.exc_info()[1]
- # Print help information and exit:
- print("%s\nUsage:\nbcfg2-build-reports [-h][-A (include ALL clients)] [-c <configuration-file>] [-s <statistics-file>]" % (mesg))
- raise SystemExit(2)
- for o, a in opts:
- if o in ("-h", "--help"):
- print("Usage:\nbcfg2-build-reports [-h] [-c <configuration-file>] [-s <statistics-file>]")
- raise SystemExit
- if o in ("-A", "--all"):
- all=True
- if o in ("-c", "--config"):
- configpath = a
- if o in ("-s", "--stats"):
- statpath = a
-
-
- """Reads data & config files."""
- try:
- statsdata = XML(open(statpath).read())
- except (IOError, XMLSyntaxError):
- print("bcfg2-build-reports: Failed to parse %s"%(statpath))
- raise SystemExit(1)
- try:
- configdata = XML(open(configpath).read())
- except (IOError, XMLSyntaxError):
- print("bcfg2-build-reports: Failed to parse %s"%(configpath))
- raise SystemExit(1)
- try:
- clientsdata = XML(open(clientsdatapath).read())
- except (IOError, XMLSyntaxError):
- print("bcfg2-build-reports: Failed to parse %s"%(clientsdatapath))
- raise SystemExit(1)
-
- # Merge data from three sources.
- nodereport = Element("Report", attrib={"time" : asctime()})
- # Should all of the other info in Metadata be appended?
- # What about all of the package stuff for other types of reports?
- for client in clientsdata.findall("Client"):
- nodel = Element("Node", attrib={"name" : client.get("name")})
- nodel.append(client)
- for nod in statsdata.findall("Node"):
- if client.get('name').find(nod.get('name')) == 0:
- for statel in nod.findall("Statistics"):
- nodel.append(statel)
- nodereport.append(nodel)
-
- if all:
- for nod in statsdata.findall("Node"):
- for client in clientsdata.findall("Client"):
- if client.get('name').find(nod.get('name')) == 0:
- break
- else:
- nodel = Element("Node", attrib={"name" : nod.get("name")})
- client = Element("Client", attrib={"name" : nod.get("name"), "profile" : "default"})
- nodel.append(client)
- for statel in nod.findall("Statistics"):
- nodel.append(statel)
- nodereport.append(nodel)
-
-
- for reprt in configdata.findall('Report'):
- nodereport.set("name", reprt.get("name", default="BCFG Report"))
-
- if reprt.get('refresh-time') != None:
- nodereport.set("refresh-time", reprt.get("refresh-time", default="600"))
-
- procnodereport = generatereport(reprt, nodereport)
-
- for deliv in reprt.findall('Delivery'):
- # Is a deepcopy of procnodereport necessary?
-
- delivtype = deliv.get('type', default='nodes-digest')
- deliverymechanism = deliv.get('mechanism', default='www')
-
- # Apply XSLT, different ones based on report type, and options
- if deliverymechanism == 'null-operator': # Special Cases
- fileout(tostring(ElementTree(procnodereport).getroot(), xml_declaration=False).decode('UTF-8'), deliv)
- break
- transform = delivtype + '-' + deliverymechanism + '.xsl'
-
- try: # Make sure valid stylesheet is selected.
- os.stat(transformpath + transform)
- except:
- print("bcfg2-build-reports: Invalid report type or delivery mechanism.\n Can't find: "\
- + transformpath + transform)
- raise SystemExit(1)
-
- try: # Try to parse stylesheet.
- stylesheet = XSLT(parse(transformpath + transform))
- except:
- print("bcfg2-build-reports: invalid XSLT transform file.")
- raise SystemExit(1)
-
- if deliverymechanism == 'mail':
- if delivtype == 'nodes-individual':
- reportdata = copy.deepcopy(procnodereport)
- for noden in reportdata.findall("Node"):
- [reportdata.remove(y) for y in reportdata.findall("Node")]
- reportdata.append(noden)
- result = stylesheet.apply(ElementTree(reportdata))
- outputstring = stylesheet.tostring(result)
-
- if not outputstring == None:
- toastring = ''
- for desti in deliv.findall("Destination"):
- toastring = "%s%s " % \
- (toastring, desti.get('address'))
- # Prepend To: and From:
- outputstring = "To: %s\nFrom: root@%s\n%s"% \
- (toastring, socket.getfqdn(), outputstring)
- mail(outputstring, c) #call function to send
-
- else:
- reportdata = copy.deepcopy(procnodereport)
-
- result = stylesheet.apply(ElementTree(reportdata))
- outputstring = stylesheet.tostring(result)
-
- if not outputstring == None:
- toastring = ''
- for desti in deliv.findall("Destination"):
- toastring = "%s%s " % \
- (toastring, desti.get('address'))
- # Prepend To: and From:
- outputstring = "To: %s\nFrom: root@%s\n%s"% \
- (toastring, socket.getfqdn(), outputstring)
- mail(outputstring, c) #call function to send
- else:
- outputstring = tostring(stylesheet.apply(ElementTree(procnodereport)).getroot(), xml_declaration=False).decode('UTF-8')
- if deliverymechanism == 'rss':
- rss(outputstring, deliv, reprt)
- else: # Must be deliverymechanism == 'www':
- www(outputstring, deliv)
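
The delivery loop removed above reduces to one pattern: merge the statistics into an intermediate Report tree, pick an XSLT stylesheet named <type>-<mechanism>.xsl, and apply it. A minimal sketch of that pattern with lxml, using a toy report tree and a hypothetical stylesheet path (current lxml calls the transform object directly rather than the .apply()/.tostring() helpers used in the deleted script):

    from lxml.etree import Element, ElementTree, SubElement, XSLT, parse

    # Toy intermediate report; the real tree is merged from statistics.xml
    # and Metadata/clients.xml as in the deleted script.
    report = Element("Report", name="BCFG Report")
    SubElement(report, "Node", name="example.host")

    # Hypothetical stylesheet path; the real ones live under
    # <prefix>/share/bcfg2/xsl-transforms/.
    stylesheet = XSLT(parse("/usr/share/bcfg2/xsl-transforms/nodes-digest-www.xsl"))
    result = stylesheet(ElementTree(report))  # apply the transform
    output = str(result)                      # serialized report for www()/mail()/rss()
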
diff --git a/src/sbin/bcfg2-crypt b/src/sbin/bcfg2-crypt
index aad89882f..a75c0da9d 100755
--- a/src/sbin/bcfg2-crypt
+++ b/src/sbin/bcfg2-crypt
@@ -12,7 +12,7 @@ import Bcfg2.Options
from Bcfg2.Server import XMLParser
from Bcfg2.Compat import input # pylint: disable=W0622
try:
- import Bcfg2.Encryption
+ import Bcfg2.Server.Encryption
except ImportError:
print("Could not import %s. Is M2Crypto installed?" % sys.exc_info()[1])
raise SystemExit(1)
@@ -27,8 +27,8 @@ class EncryptionChunkingError(Exception):
class Encryptor(object):
""" Generic encryptor for all files """
- def __init__(self, setup):
- self.setup = setup
+ def __init__(self):
+ self.setup = Bcfg2.Options.get_option_parser()
self.passphrase = None
self.pname = None
self.logger = logging.getLogger(self.__class__.__name__)
@@ -55,8 +55,8 @@ class Encryptor(object):
def set_passphrase(self):
""" set the passphrase for the current file """
- if (not self.setup.cfp.has_section(Bcfg2.Encryption.CFG_SECTION) or
- len(Bcfg2.Encryption.get_passphrases(self.setup)) == 0):
+ if (not self.setup.cfp.has_section(Bcfg2.Server.Encryption.CFG_SECTION)
+ or len(Bcfg2.Server.Encryption.get_passphrases()) == 0):
self.logger.error("No passphrases available in %s" %
self.setup['configfile'])
return False
@@ -70,10 +70,10 @@ class Encryptor(object):
self.pname = self.setup['passphrase']
if self.pname:
- if self.setup.cfp.has_option(Bcfg2.Encryption.CFG_SECTION,
+ if self.setup.cfp.has_option(Bcfg2.Server.Encryption.CFG_SECTION,
self.pname):
self.passphrase = \
- self.setup.cfp.get(Bcfg2.Encryption.CFG_SECTION,
+ self.setup.cfp.get(Bcfg2.Server.Encryption.CFG_SECTION,
self.pname)
self.logger.debug("Using passphrase %s specified on command "
"line" % self.pname)
@@ -83,7 +83,7 @@ class Encryptor(object):
(self.pname, self.setup['configfile']))
return False
else:
- pnames = Bcfg2.Encryption.get_passphrases(self.setup)
+ pnames = Bcfg2.Server.Encryption.get_passphrases()
if len(pnames) == 1:
self.pname = pnames.keys()[0]
self.passphrase = pnames[self.pname]
@@ -127,9 +127,7 @@ class Encryptor(object):
# pylint: disable=W0613
def _encrypt(self, plaintext, passphrase, name=None):
""" encrypt a single chunk of a file """
- return Bcfg2.Encryption.ssl_encrypt(
- plaintext, passphrase,
- Bcfg2.Encryption.get_algorithm(self.setup))
+ return Bcfg2.Server.Encryption.ssl_encrypt(plaintext, passphrase)
# pylint: enable=W0613
def decrypt(self, fname):
@@ -150,7 +148,7 @@ class Encryptor(object):
passphrase, pname = self.get_passphrase(chunk)
try:
plaintext.append(self._decrypt(chunk, passphrase))
- except Bcfg2.Encryption.EVPError:
+ except Bcfg2.Server.Encryption.EVPError:
self.logger.info("Could not decrypt %s with the "
"specified passphrase" % fname)
continue
@@ -161,13 +159,13 @@ class Encryptor(object):
continue
except TypeError:
pchunk = None
- passphrases = Bcfg2.Encryption.get_passphrases(self.setup)
- for pname, passphrase in passphrases.items():
+ for pname, passphrase in \
+ Bcfg2.Server.Encryption.get_passphrases().items():
self.logger.debug("Trying passphrase %s" % pname)
try:
pchunk = self._decrypt(chunk, passphrase)
break
- except Bcfg2.Encryption.EVPError:
+ except Bcfg2.Server.Encryption.EVPError:
pass
except:
err = sys.exc_info()[1]
@@ -196,9 +194,7 @@ class Encryptor(object):
def _decrypt(self, crypted, passphrase):
""" decrypt a single chunk """
- return Bcfg2.Encryption.ssl_decrypt(
- crypted, passphrase,
- Bcfg2.Encryption.get_algorithm(self.setup))
+ return Bcfg2.Server.Encryption.ssl_decrypt(crypted, passphrase)
def write_encrypted(self, fname, data=None):
""" write encrypted data to disk """
@@ -243,10 +239,11 @@ class Encryptor(object):
self.logger.info("No passphrase given on command line or "
"found in file")
return False
- elif self.setup.cfp.has_option(Bcfg2.Encryption.CFG_SECTION,
+ elif self.setup.cfp.has_option(Bcfg2.Server.Encryption.CFG_SECTION,
pname):
- passphrase = self.setup.cfp.get(Bcfg2.Encryption.CFG_SECTION,
- pname)
+ passphrase = self.setup.cfp.get(
+ Bcfg2.Server.Encryption.CFG_SECTION,
+ pname)
else:
self.logger.error("Could not find passphrase %s in %s" %
(pname, self.setup['configfile']))
@@ -287,10 +284,9 @@ class PropertiesEncryptor(Encryptor):
if name is None:
name = "true"
if plaintext.text and plaintext.text.strip():
- plaintext.text = Bcfg2.Encryption.ssl_encrypt(
- plaintext.text,
- passphrase,
- Bcfg2.Encryption.get_algorithm(self.setup)).strip()
+ plaintext.text = \
+ Bcfg2.Server.Encryption.ssl_encrypt(plaintext.text,
+ passphrase).strip()
plaintext.set("encrypted", name)
return plaintext
@@ -358,10 +354,8 @@ class PropertiesEncryptor(Encryptor):
if not crypted.text or not crypted.text.strip():
self.logger.warning("Skipping empty element %s" % crypted.tag)
return crypted
- decrypted = Bcfg2.Encryption.ssl_decrypt(
- crypted.text,
- passphrase,
- Bcfg2.Encryption.get_algorithm(self.setup)).strip()
+ decrypted = Bcfg2.Server.Encryption.ssl_decrypt(crypted.text,
+ passphrase).strip()
try:
crypted.text = decrypted.encode('ascii', 'xmlcharrefreplace')
except UnicodeDecodeError:
@@ -379,10 +373,10 @@ def main(): # pylint: disable=R0912,R0915
optinfo = dict(interactive=Bcfg2.Options.INTERACTIVE)
optinfo.update(Bcfg2.Options.CRYPT_OPTIONS)
optinfo.update(Bcfg2.Options.CLI_COMMON_OPTIONS)
- setup = Bcfg2.Options.OptionParser(optinfo)
+ setup = Bcfg2.Options.load_option_parser(optinfo)
setup.hm = " bcfg2-crypt [options] <filename>\nOptions:\n%s" % \
setup.buildHelpMessage()
- setup.parse(sys.argv[1:])
+ setup.parse()
if not setup['args']:
print(setup.hm)
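
The hunks above all follow one migration: the helpers move from Bcfg2.Encryption to Bcfg2.Server.Encryption, the algorithm choice is resolved inside that module, and the shared option parser is fetched with Bcfg2.Options.get_option_parser() instead of being passed around as setup. A rough sketch of the new call shape, inferred from these hunks alone (option registration in main() is assumed to have happened already):

    import Bcfg2.Options
    import Bcfg2.Server.Encryption

    # Fetch the parser registered by main(); no setup argument is threaded through.
    setup = Bcfg2.Options.get_option_parser()

    # Callers now pass only data and passphrase; the cipher selection is
    # handled inside Bcfg2.Server.Encryption.
    crypted = Bcfg2.Server.Encryption.ssl_encrypt("plaintext chunk", "s3cret")
    plain = Bcfg2.Server.Encryption.ssl_decrypt(crypted, "s3cret")

    # Configured passphrases are likewise read without an explicit setup handle.
    passphrases = Bcfg2.Server.Encryption.get_passphrases()
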
diff --git a/src/sbin/bcfg2-info b/src/sbin/bcfg2-info
index 4e71ba35a..853c98845 100755
--- a/src/sbin/bcfg2-info
+++ b/src/sbin/bcfg2-info
@@ -115,9 +115,9 @@ def load_interpreters():
class InfoCore(cmd.Cmd, Bcfg2.Server.Core.BaseCore):
"""Main class for bcfg2-info."""
- def __init__(self, setup):
+ def __init__(self):
cmd.Cmd.__init__(self)
- Bcfg2.Server.Core.BaseCore.__init__(self, setup=setup)
+ Bcfg2.Server.Core.BaseCore.__init__(self)
self.prompt = '> '
self.cont = True
@@ -284,9 +284,8 @@ Bcfg2 client itself.""")
posix = Bcfg2.Client.Tools.POSIX.POSIX(MockLog(),
self.setup,
client_config)
- states = dict()
- posix.Inventory(states)
- posix.Install(list(states.keys()), states)
+ states = posix.Inventory()
+ posix.Install(list(states.keys()))
else:
print('Error: Incorrect number of parameters.')
self.help_builddir()
@@ -782,12 +781,12 @@ def main():
sys.exit(0)
elif setup['profile'] and HAS_PROFILE:
prof = profile.Profile()
- loop = prof.runcall(InfoCore, setup)
+ loop = prof.runcall(InfoCore)
display_trace(prof)
else:
if setup['profile']:
print("Profiling functionality not available.")
- loop = InfoCore(setup)
+ loop = InfoCore()
loop.run(setup['args'])
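
The do_builddir change reflects the reworked client-tool interface: Inventory() now returns the state mapping instead of filling a caller-supplied dict, and Install() takes only the list of entries. A toy illustration of that shape (not the real Bcfg2 tool classes, which need a logger, setup, and config document as in the code above):

    class ToyTool(object):
        """Illustrative stand-in for a Bcfg2 client tool."""

        def __init__(self, entries):
            self.entries = entries

        def Inventory(self):
            # build and return the state map rather than mutating an argument
            return dict((entry, False) for entry in self.entries)

        def Install(self, entries):
            # receives only the entries to fix; no states dict is passed in
            for entry in entries:
                print("installing %s" % entry)

    tool = ToyTool(["/etc/motd", "/etc/hosts"])
    states = tool.Inventory()
    tool.Install([entry for entry, good in states.items() if not good])
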
diff --git a/src/sbin/bcfg2-lint b/src/sbin/bcfg2-lint
index 9a98eaaaa..2ae5e02d5 100755
--- a/src/sbin/bcfg2-lint
+++ b/src/sbin/bcfg2-lint
@@ -13,37 +13,36 @@ import Bcfg2.Server.Lint
LOGGER = logging.getLogger('bcfg2-lint')
-def run_serverless_plugins(plugins, setup=None, errorhandler=None, files=None):
+def run_serverless_plugins(plugins, errorhandler=None, files=None):
""" Run serverless plugins """
LOGGER.debug("Running serverless plugins")
for plugin_name, plugin in list(plugins.items()):
- run_plugin(plugin, plugin_name, errorhandler=errorhandler,
- setup=setup, files=files)
+ run_plugin(plugin, plugin_name, errorhandler=errorhandler, files=files)
-def run_server_plugins(plugins, setup=None, errorhandler=None, files=None):
+def run_server_plugins(plugins, errorhandler=None, files=None):
""" run plugins that require a running server to run """
- core = load_server(setup)
+ core = load_server()
try:
LOGGER.debug("Running server plugins")
for plugin_name, plugin in list(plugins.items()):
run_plugin(plugin, plugin_name, args=[core],
- errorhandler=errorhandler, setup=setup, files=files)
+ errorhandler=errorhandler, files=files)
finally:
core.shutdown()
-def run_plugin(plugin, plugin_name, setup=None, errorhandler=None,
- args=None, files=None):
+def run_plugin(plugin, plugin_name, errorhandler=None, args=None, files=None):
""" run a single plugin, server-ful or serverless. """
LOGGER.debug(" Running %s" % plugin_name)
if args is None:
args = []
if errorhandler is None:
- errorhandler = get_errorhandler(setup)
+ errorhandler = get_errorhandler()
- if setup is not None and setup.cfp.has_section(plugin_name):
+ setup = Bcfg2.Options.get_option_parser()
+ if setup.cfp.has_section(plugin_name):
arg = setup
for key, val in setup.cfp.items(plugin_name):
arg[key] = val
@@ -55,8 +54,9 @@ def run_plugin(plugin, plugin_name, setup=None, errorhandler=None,
return plugin(*args, **dict(files=files, errorhandler=errorhandler)).Run()
-def get_errorhandler(setup):
+def get_errorhandler():
""" get a Bcfg2.Server.Lint.ErrorHandler object """
+ setup = Bcfg2.Options.get_option_parser()
if setup.cfp.has_section("errors"):
errors = dict(setup.cfp.items("errors"))
else:
@@ -64,9 +64,9 @@ def get_errorhandler(setup):
return Bcfg2.Server.Lint.ErrorHandler(errors=errors)
-def load_server(setup):
+def load_server():
""" load server """
- core = Bcfg2.Server.Core.BaseCore(setup)
+ core = Bcfg2.Server.Core.BaseCore()
core.load_plugins()
core.fam.handle_events_in_interval(0.1)
return core
@@ -84,8 +84,9 @@ def load_plugin(module, obj_name=None):
return getattr(mod, obj_name)
-def load_plugins(setup):
+def load_plugins():
""" get list of plugins to run """
+ setup = Bcfg2.Options.get_option_parser()
if setup['args']:
plugin_list = setup['args']
elif "bcfg2-repo-validate" in sys.argv[0]:
@@ -147,7 +148,7 @@ def main():
lint_plugins=Bcfg2.Options.LINT_PLUGINS)
optinfo.update(Bcfg2.Options.CLI_COMMON_OPTIONS)
optinfo.update(Bcfg2.Options.SERVER_COMMON_OPTIONS)
- setup = Bcfg2.Options.OptionParser(optinfo)
+ setup = Bcfg2.Options.load_option_parser(optinfo)
setup.parse(sys.argv[1:])
log_args = dict(to_syslog=setup['syslog'], to_console=logging.WARNING)
@@ -163,9 +164,8 @@ def main():
else:
files = None
- (serverlessplugins, serverplugins) = load_plugins(setup)
-
- errorhandler = get_errorhandler(setup)
+ (serverlessplugins, serverplugins) = load_plugins()
+ errorhandler = get_errorhandler()
if setup['showerrors']:
for plugin in serverplugins.values() + serverlessplugins.values():
@@ -177,7 +177,7 @@ def main():
raise SystemExit(0)
run_serverless_plugins(serverlessplugins, errorhandler=errorhandler,
- setup=setup, files=files)
+ files=files)
if serverplugins:
if errorhandler.errors:
@@ -193,7 +193,7 @@ def main():
"plugins")
else:
run_server_plugins(serverplugins, errorhandler=errorhandler,
- setup=setup, files=files)
+ files=files)
if errorhandler.errors or errorhandler.warnings or setup['verbose']:
print("%d errors" % errorhandler.errors)
diff --git a/src/sbin/bcfg2-report-collector b/src/sbin/bcfg2-report-collector
index 594be13bf..ae6d3b167 100755
--- a/src/sbin/bcfg2-report-collector
+++ b/src/sbin/bcfg2-report-collector
@@ -18,8 +18,8 @@ def main():
web_configfile=Bcfg2.Options.WEB_CFILE)
optinfo.update(Bcfg2.Options.CLI_COMMON_OPTIONS)
optinfo.update(Bcfg2.Options.REPORTING_COMMON_OPTIONS)
- setup = Bcfg2.Options.OptionParser(optinfo)
- setup.parse(sys.argv[1:])
+ setup = Bcfg2.Options.load_option_parser(optinfo)
+ setup.parse()
# run collector
try:
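
bcfg2-report-collector, bcfg2-server, and bcfg2-test all make the same entry-point change: the parser is built with Bcfg2.Options.load_option_parser(), which (judging by the rest of this patch) also registers it for later get_option_parser() lookups; bcfg2-crypt and bcfg2-report-collector additionally drop the explicit argv from parse(). A sketch of the new startup sequence, with the option dictionary trimmed to the pieces visible in these hunks:

    import Bcfg2.Options

    optinfo = dict(web_configfile=Bcfg2.Options.WEB_CFILE)
    optinfo.update(Bcfg2.Options.CLI_COMMON_OPTIONS)
    optinfo.update(Bcfg2.Options.REPORTING_COMMON_OPTIONS)

    # Build and register the shared parser, then parse the process arguments
    # (no sys.argv[1:] is passed here).
    setup = Bcfg2.Options.load_option_parser(optinfo)
    setup.parse()
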
diff --git a/src/sbin/bcfg2-server b/src/sbin/bcfg2-server
index 4c4a71fa7..beb19cef6 100755
--- a/src/sbin/bcfg2-server
+++ b/src/sbin/bcfg2-server
@@ -17,7 +17,7 @@ def main():
optinfo.update(Bcfg2.Options.CLI_COMMON_OPTIONS)
optinfo.update(Bcfg2.Options.SERVER_COMMON_OPTIONS)
optinfo.update(Bcfg2.Options.DAEMON_COMMON_OPTIONS)
- setup = Bcfg2.Options.OptionParser(optinfo)
+ setup = Bcfg2.Options.load_option_parser(optinfo)
setup.parse(sys.argv[1:])
# check whether the specified bcfg2.conf exists
if not os.path.exists(setup['configfile']):
diff --git a/src/sbin/bcfg2-test b/src/sbin/bcfg2-test
index c33143a04..f13240879 100755
--- a/src/sbin/bcfg2-test
+++ b/src/sbin/bcfg2-test
@@ -195,7 +195,7 @@ def parse_args():
optinfo = dict(Bcfg2.Options.TEST_COMMON_OPTIONS)
optinfo.update(Bcfg2.Options.CLI_COMMON_OPTIONS)
optinfo.update(Bcfg2.Options.SERVER_COMMON_OPTIONS)
- setup = Bcfg2.Options.OptionParser(optinfo)
+ setup = Bcfg2.Options.load_option_parser(optinfo)
setup.hm = \
"bcfg2-test [options] [client] [client] [...]\nOptions:\n %s" % \
setup.buildHelpMessage()