summaryrefslogtreecommitdiffstats
path: root/src/lib/Bcfg2/Server
diff options
context:
space:
mode:
Diffstat (limited to 'src/lib/Bcfg2/Server')
-rw-r--r--src/lib/Bcfg2/Server/Admin/Bundle.py20
-rw-r--r--src/lib/Bcfg2/Server/Admin/Client.py42
-rw-r--r--src/lib/Bcfg2/Server/Admin/Compare.py3
-rw-r--r--src/lib/Bcfg2/Server/Admin/Group.py63
-rw-r--r--src/lib/Bcfg2/Server/Admin/Init.py79
-rw-r--r--src/lib/Bcfg2/Server/Admin/Pull.py11
-rw-r--r--src/lib/Bcfg2/Server/Admin/Query.py11
-rw-r--r--src/lib/Bcfg2/Server/Admin/Reports.py81
-rw-r--r--src/lib/Bcfg2/Server/Admin/Syncdb.py36
-rw-r--r--src/lib/Bcfg2/Server/Admin/Tidy.py7
-rw-r--r--src/lib/Bcfg2/Server/Admin/Viz.py2
-rw-r--r--src/lib/Bcfg2/Server/Admin/__init__.py11
-rw-r--r--src/lib/Bcfg2/Server/BuiltinCore.py103
-rw-r--r--src/lib/Bcfg2/Server/CherryPyCore.py131
-rw-r--r--src/lib/Bcfg2/Server/Core.py458
-rw-r--r--src/lib/Bcfg2/Server/FileMonitor.py315
-rw-r--r--src/lib/Bcfg2/Server/FileMonitor/Fam.py82
-rw-r--r--src/lib/Bcfg2/Server/FileMonitor/Gamin.py64
-rw-r--r--src/lib/Bcfg2/Server/FileMonitor/Inotify.py126
-rw-r--r--src/lib/Bcfg2/Server/FileMonitor/Pseudo.py25
-rw-r--r--src/lib/Bcfg2/Server/FileMonitor/__init__.py143
-rw-r--r--src/lib/Bcfg2/Server/Hostbase/backends.py5
-rw-r--r--src/lib/Bcfg2/Server/Hostbase/ldapauth.py1
-rw-r--r--src/lib/Bcfg2/Server/Lint/Bundles.py54
-rw-r--r--src/lib/Bcfg2/Server/Lint/Comments.py5
-rw-r--r--src/lib/Bcfg2/Server/Lint/Deltas.py25
-rw-r--r--src/lib/Bcfg2/Server/Lint/Duplicates.py5
-rwxr-xr-xsrc/lib/Bcfg2/Server/Lint/Genshi.py1
-rw-r--r--src/lib/Bcfg2/Server/Lint/GroupNames.py78
-rw-r--r--src/lib/Bcfg2/Server/Lint/GroupPatterns.py35
-rw-r--r--src/lib/Bcfg2/Server/Lint/InfoXML.py41
-rw-r--r--src/lib/Bcfg2/Server/Lint/Pkgmgr.py38
-rw-r--r--src/lib/Bcfg2/Server/Lint/RequiredAttrs.py163
-rw-r--r--src/lib/Bcfg2/Server/Lint/TemplateHelper.py64
-rw-r--r--src/lib/Bcfg2/Server/Lint/Validate.py62
-rw-r--r--src/lib/Bcfg2/Server/Lint/__init__.py10
-rw-r--r--src/lib/Bcfg2/Server/Plugin.py688
-rw-r--r--src/lib/Bcfg2/Server/Plugins/BB.py83
-rw-r--r--src/lib/Bcfg2/Server/Plugins/Base.py5
-rw-r--r--src/lib/Bcfg2/Server/Plugins/Bundler.py138
-rw-r--r--src/lib/Bcfg2/Server/Plugins/Cfg/CfgCheetahGenerator.py9
-rw-r--r--src/lib/Bcfg2/Server/Plugins/Cfg/CfgEncryptedCheetahGenerator.py14
-rw-r--r--src/lib/Bcfg2/Server/Plugins/Cfg/CfgEncryptedGenerator.py63
-rw-r--r--src/lib/Bcfg2/Server/Plugins/Cfg/CfgEncryptedGenshiGenerator.py26
-rw-r--r--src/lib/Bcfg2/Server/Plugins/Cfg/CfgGenshiGenerator.py72
-rw-r--r--src/lib/Bcfg2/Server/Plugins/Cfg/CfgInfoXML.py8
-rw-r--r--src/lib/Bcfg2/Server/Plugins/Cfg/CfgLegacyInfo.py4
-rw-r--r--src/lib/Bcfg2/Server/Plugins/Cfg/__init__.py100
-rw-r--r--src/lib/Bcfg2/Server/Plugins/DBStats.py52
-rw-r--r--src/lib/Bcfg2/Server/Plugins/Decisions.py5
-rw-r--r--src/lib/Bcfg2/Server/Plugins/Deps.py21
-rw-r--r--src/lib/Bcfg2/Server/Plugins/FileProbes.py62
-rw-r--r--src/lib/Bcfg2/Server/Plugins/GroupPatterns.py53
-rw-r--r--src/lib/Bcfg2/Server/Plugins/Hostbase.py23
-rw-r--r--src/lib/Bcfg2/Server/Plugins/Ldap.py2
-rw-r--r--src/lib/Bcfg2/Server/Plugins/Metadata.py957
-rw-r--r--src/lib/Bcfg2/Server/Plugins/NagiosGen.py25
-rw-r--r--src/lib/Bcfg2/Server/Plugins/Ohai.py2
-rw-r--r--src/lib/Bcfg2/Server/Plugins/Packages/Apt.py17
-rw-r--r--src/lib/Bcfg2/Server/Plugins/Packages/Collection.py75
-rw-r--r--src/lib/Bcfg2/Server/Plugins/Packages/Pac.py7
-rw-r--r--src/lib/Bcfg2/Server/Plugins/Packages/PackagesSources.py32
-rw-r--r--src/lib/Bcfg2/Server/Plugins/Packages/Source.py45
-rw-r--r--src/lib/Bcfg2/Server/Plugins/Packages/Yum.py211
-rw-r--r--src/lib/Bcfg2/Server/Plugins/Packages/__init__.py99
-rw-r--r--src/lib/Bcfg2/Server/Plugins/Pkgmgr.py56
-rw-r--r--src/lib/Bcfg2/Server/Plugins/Probes.py247
-rw-r--r--src/lib/Bcfg2/Server/Plugins/Properties.py111
-rw-r--r--src/lib/Bcfg2/Server/Plugins/PuppetENC.py117
-rw-r--r--src/lib/Bcfg2/Server/Plugins/SEModules.py45
-rw-r--r--src/lib/Bcfg2/Server/Plugins/SGenshi.py97
-rw-r--r--src/lib/Bcfg2/Server/Plugins/SSHbase.py12
-rw-r--r--src/lib/Bcfg2/Server/Plugins/SSLCA.py110
-rw-r--r--src/lib/Bcfg2/Server/Plugins/ServiceCompat.py32
-rw-r--r--src/lib/Bcfg2/Server/Plugins/Snapshots.py27
-rw-r--r--src/lib/Bcfg2/Server/Plugins/Statistics.py26
-rw-r--r--src/lib/Bcfg2/Server/Plugins/Svcmgr.py10
-rw-r--r--src/lib/Bcfg2/Server/Plugins/TCheetah.py9
-rw-r--r--src/lib/Bcfg2/Server/Plugins/TGenshi.py18
-rw-r--r--src/lib/Bcfg2/Server/Plugins/TemplateHelper.py120
-rw-r--r--src/lib/Bcfg2/Server/Plugins/Trigger.py65
-rw-r--r--src/lib/Bcfg2/Server/Plugins/__init__.py2
-rwxr-xr-xsrc/lib/Bcfg2/Server/Reports/importscript.py307
-rw-r--r--src/lib/Bcfg2/Server/Reports/reports/fixtures/initial_version.xml43
-rw-r--r--src/lib/Bcfg2/Server/Reports/reports/models.py139
-rw-r--r--src/lib/Bcfg2/Server/Reports/reports/sql/client.sql7
-rw-r--r--src/lib/Bcfg2/Server/Reports/reports/templates/base-timeview.html5
-rw-r--r--src/lib/Bcfg2/Server/Reports/reports/templates/base.html3
-rw-r--r--src/lib/Bcfg2/Server/Reports/reports/templates/clients/detail.html56
-rw-r--r--src/lib/Bcfg2/Server/Reports/reports/templates/clients/detailed-list.html18
-rw-r--r--src/lib/Bcfg2/Server/Reports/reports/templates/clients/index.html1
-rw-r--r--src/lib/Bcfg2/Server/Reports/reports/templates/clients/manage.html2
-rw-r--r--src/lib/Bcfg2/Server/Reports/reports/templates/config_items/common.html42
-rw-r--r--src/lib/Bcfg2/Server/Reports/reports/templates/config_items/entry_status.html30
-rw-r--r--src/lib/Bcfg2/Server/Reports/reports/templates/config_items/listing.html22
-rw-r--r--src/lib/Bcfg2/Server/Reports/reports/templates/widgets/filter_bar.html16
-rw-r--r--src/lib/Bcfg2/Server/Reports/reports/templatetags/bcfg2_tags.py130
-rw-r--r--src/lib/Bcfg2/Server/Reports/reports/templatetags/syntax_coloring.py10
-rw-r--r--src/lib/Bcfg2/Server/Reports/reports/urls.py9
-rw-r--r--src/lib/Bcfg2/Server/Reports/reports/views.py266
-rw-r--r--src/lib/Bcfg2/Server/Reports/settings.py161
-rw-r--r--src/lib/Bcfg2/Server/Reports/updatefix.py281
-rwxr-xr-xsrc/lib/Bcfg2/Server/Reports/utils.py4
-rw-r--r--src/lib/Bcfg2/Server/SchemaUpdater/Changes/1_0_x.py11
-rw-r--r--src/lib/Bcfg2/Server/SchemaUpdater/Changes/1_1_x.py59
-rw-r--r--src/lib/Bcfg2/Server/SchemaUpdater/Changes/1_2_x.py15
-rw-r--r--src/lib/Bcfg2/Server/SchemaUpdater/Changes/1_3_0.py27
-rw-r--r--src/lib/Bcfg2/Server/SchemaUpdater/Changes/__init__.py0
-rw-r--r--src/lib/Bcfg2/Server/SchemaUpdater/Routines.py279
-rw-r--r--src/lib/Bcfg2/Server/SchemaUpdater/__init__.py257
-rw-r--r--src/lib/Bcfg2/Server/Snapshots/model.py8
-rw-r--r--src/lib/Bcfg2/Server/__init__.py11
-rw-r--r--src/lib/Bcfg2/Server/models.py77
113 files changed, 5472 insertions, 3226 deletions
diff --git a/src/lib/Bcfg2/Server/Admin/Bundle.py b/src/lib/Bcfg2/Server/Admin/Bundle.py
index 89c099602..e5e4eadf3 100644
--- a/src/lib/Bcfg2/Server/Admin/Bundle.py
+++ b/src/lib/Bcfg2/Server/Admin/Bundle.py
@@ -8,12 +8,11 @@ from Bcfg2.Server.Plugins.Metadata import MetadataConsistencyError
class Bundle(Bcfg2.Server.Admin.MetadataCore):
- __shorthelp__ = "Create or delete bundle entries"
- # TODO: add/del functions
+ __shorthelp__ = "List and view bundle entries"
__longhelp__ = (__shorthelp__ + "\n\nbcfg2-admin bundle list-xml"
"\nbcfg2-admin bundle list-genshi"
"\nbcfg2-admin bundle show\n")
- __usage__ = ("bcfg2-admin bundle [options] [add|del] [group]")
+ __usage__ = ("bcfg2-admin bundle [options] [list-xml|list-genshi|show]")
def __call__(self, args):
Bcfg2.Server.Admin.MetadataCore.__call__(self, args)
@@ -28,18 +27,6 @@ class Bundle(Bcfg2.Server.Admin.MetadataCore):
if len(args) == 0:
self.errExit("No argument specified.\n"
"Please see bcfg2-admin bundle help for usage.")
-# if args[0] == 'add':
-# try:
-# self.metadata.add_bundle(args[1])
-# except MetadataConsistencyError:
-# print("Error in adding bundle.")
-# raise SystemExit(1)
-# elif args[0] in ['delete', 'remove', 'del', 'rm']:
-# try:
-# self.metadata.remove_bundle(args[1])
-# except MetadataConsistencyError:
-# print("Error in deleting bundle.")
-# raise SystemExit(1)
# Lists all available xml bundles
elif args[0] in ['list-xml', 'ls-xml']:
bundle_name = []
@@ -63,7 +50,6 @@ class Bundle(Bcfg2.Server.Admin.MetadataCore):
bundle_name = []
bundle_list = xml_list + genshi_list
for bundle_path in bundle_list:
- print "matching %s" % bundle_path
bundle_name.append(rg.search(bundle_path).group(1))
text = "Available bundles (Number of bundles: %s)" % \
(len(bundle_list))
@@ -85,8 +71,6 @@ class Bundle(Bcfg2.Server.Admin.MetadataCore):
print('Details for the "%s" bundle:' % \
(bundle_name[int(lineno)].split('.')[0]))
tree = lxml.etree.parse(bundle_list[int(lineno)])
- #Prints bundle content
- #print(lxml.etree.tostring(tree))
names = ['Action', 'Package', 'Path', 'Service']
for name in names:
for node in tree.findall("//" + name):
diff --git a/src/lib/Bcfg2/Server/Admin/Client.py b/src/lib/Bcfg2/Server/Admin/Client.py
index 4d580c54c..34dfd7550 100644
--- a/src/lib/Bcfg2/Server/Admin/Client.py
+++ b/src/lib/Bcfg2/Server/Admin/Client.py
@@ -4,50 +4,23 @@ from Bcfg2.Server.Plugins.Metadata import MetadataConsistencyError
class Client(Bcfg2.Server.Admin.MetadataCore):
- __shorthelp__ = "Create, delete, or modify client entries"
+ __shorthelp__ = "Create, delete, or list client entries"
__longhelp__ = (__shorthelp__ + "\n\nbcfg2-admin client add <client> "
- "attr1=val1 attr2=val2"
- "\nbcfg2-admin client update <client> "
- "attr1=val1 attr2=val2"
"\nbcfg2-admin client list"
"\nbcfg2-admin client del <client>\n")
- __usage__ = ("bcfg2-admin client [options] [add|del|update|list] [attr=val]")
+ __usage__ = ("bcfg2-admin client [options] [add|del|list] [attr=val]")
def __call__(self, args):
Bcfg2.Server.Admin.MetadataCore.__call__(self, args)
if len(args) == 0:
self.errExit("No argument specified.\n"
- "Please see bcfg2-admin client help for usage.")
+ "Usage: %s" % self.usage)
if args[0] == 'add':
- attr_d = {}
- for i in args[2:]:
- attr, val = i.split('=', 1)
- if attr not in ['profile', 'uuid', 'password',
- 'location', 'secure', 'address',
- 'auth']:
- print("Attribute %s unknown" % attr)
- raise SystemExit(1)
- attr_d[attr] = val
try:
- self.metadata.add_client(args[1], attr_d)
+ self.metadata.add_client(args[1])
except MetadataConsistencyError:
print("Error in adding client")
raise SystemExit(1)
- elif args[0] in ['update', 'up']:
- attr_d = {}
- for i in args[2:]:
- attr, val = i.split('=', 1)
- if attr not in ['profile', 'uuid', 'password',
- 'location', 'secure', 'address',
- 'auth']:
- print("Attribute %s unknown" % attr)
- raise SystemExit(1)
- attr_d[attr] = val
- try:
- self.metadata.update_client(args[1], attr_d)
- except MetadataConsistencyError:
- print("Error in updating client")
- raise SystemExit(1)
elif args[0] in ['delete', 'remove', 'del', 'rm']:
try:
self.metadata.remove_client(args[1])
@@ -55,10 +28,9 @@ class Client(Bcfg2.Server.Admin.MetadataCore):
print("Error in deleting client")
raise SystemExit(1)
elif args[0] in ['list', 'ls']:
- tree = lxml.etree.parse(self.metadata.data + "/clients.xml")
- tree.xinclude()
- for node in tree.findall("//Client"):
- print(node.attrib["name"])
+ for client in self.metadata.list_clients():
+ print(client.hostname)
else:
print("No command specified")
raise SystemExit(1)
+
diff --git a/src/lib/Bcfg2/Server/Admin/Compare.py b/src/lib/Bcfg2/Server/Admin/Compare.py
index 050dd69f8..78b30120a 100644
--- a/src/lib/Bcfg2/Server/Admin/Compare.py
+++ b/src/lib/Bcfg2/Server/Admin/Compare.py
@@ -18,7 +18,8 @@ class Compare(Bcfg2.Server.Admin.Mode):
'important', 'paranoid', 'sensitive',
'dev_type', 'major', 'minor', 'prune',
'encoding', 'empty', 'to', 'recursive',
- 'vcstype', 'sourceurl', 'revision'],
+ 'vcstype', 'sourceurl', 'revision',
+ 'secontext'],
'Package': ['name', 'type', 'version', 'simplefile',
'verify'],
'Service': ['name', 'type', 'status', 'mode',
diff --git a/src/lib/Bcfg2/Server/Admin/Group.py b/src/lib/Bcfg2/Server/Admin/Group.py
deleted file mode 100644
index 16a773d6f..000000000
--- a/src/lib/Bcfg2/Server/Admin/Group.py
+++ /dev/null
@@ -1,63 +0,0 @@
-import lxml.etree
-import Bcfg2.Server.Admin
-from Bcfg2.Server.Plugins.Metadata import MetadataConsistencyError
-
-
-class Group(Bcfg2.Server.Admin.MetadataCore):
- __shorthelp__ = "Create, delete, or modify group entries"
- __longhelp__ = (__shorthelp__ + "\n\nbcfg2-admin group add <group> "
- "attr1=val1 attr2=val2"
- "\nbcfg2-admin group update <group> "
- "attr1=val1 attr2=val2"
- "\nbcfg2-admin group list"
- "\nbcfg2-admin group del <group>\n")
- __usage__ = ("bcfg2-admin group [options] [add|del|update|list] [attr=val]")
-
- def __call__(self, args):
- Bcfg2.Server.Admin.MetadataCore.__call__(self, args)
- if len(args) == 0:
- self.errExit("No argument specified.\n"
- "Please see bcfg2-admin group help for usage.")
- if args[0] == 'add':
- attr_d = {}
- for i in args[2:]:
- attr, val = i.split('=', 1)
- if attr not in ['profile', 'public', 'default',
- 'name', 'auth', 'toolset', 'category',
- 'comment']:
- print("Attribute %s unknown" % attr)
- raise SystemExit(1)
- attr_d[attr] = val
- try:
- self.metadata.add_group(args[1], attr_d)
- except MetadataConsistencyError:
- print("Error in adding group")
- raise SystemExit(1)
- elif args[0] in ['update', 'up']:
- attr_d = {}
- for i in args[2:]:
- attr, val = i.split('=', 1)
- if attr not in ['profile', 'public', 'default',
- 'name', 'auth', 'toolset', 'category',
- 'comment']:
- print("Attribute %s unknown" % attr)
- raise SystemExit(1)
- attr_d[attr] = val
- try:
- self.metadata.update_group(args[1], attr_d)
- except MetadataConsistencyError:
- print("Error in updating group")
- raise SystemExit(1)
- elif args[0] in ['delete', 'remove', 'del', 'rm']:
- try:
- self.metadata.remove_group(args[1])
- except MetadataConsistencyError:
- print("Error in deleting group")
- raise SystemExit(1)
- elif args[0] in ['list', 'ls']:
- tree = lxml.etree.parse(self.metadata.data + "/groups.xml")
- for node in tree.findall("//Group"):
- print(node.attrib["name"])
- else:
- print("No command specified")
- raise SystemExit(1)
diff --git a/src/lib/Bcfg2/Server/Admin/Init.py b/src/lib/Bcfg2/Server/Admin/Init.py
index c1f9ed484..fefd17d6a 100644
--- a/src/lib/Bcfg2/Server/Admin/Init.py
+++ b/src/lib/Bcfg2/Server/Admin/Init.py
@@ -6,9 +6,11 @@ import stat
import string
import sys
import subprocess
+
import Bcfg2.Server.Admin
import Bcfg2.Server.Plugin
import Bcfg2.Options
+from Bcfg2.Bcfg2Py3k import input
# default config file
config = '''
@@ -18,18 +20,22 @@ plugins = %s
[statistics]
sendmailpath = %s
-database_engine = sqlite3
+#web_debug = False
+#time_zone =
+
+[database]
+#engine = sqlite3
# 'postgresql', 'mysql', 'mysql_old', 'sqlite3' or 'ado_mssql'.
-database_name =
+#name =
# Or path to database file if using sqlite3.
-#<repository>/etc/brpt.sqlite is default path if left empty
-database_user =
+#<repository>/bcfg2.sqlite is default path if left empty
+#user =
# Not used with sqlite3.
-database_password =
+#password =
# Not used with sqlite3.
-database_host =
+#host =
# Not used with sqlite3.
-database_port =
+#port =
[communication]
protocol = %s
@@ -61,7 +67,7 @@ groups = '''<Groups version='3.0'>
# Default contents of clients.xml
clients = '''<Clients version="3.0">
- <Client profile="basic" pingable="Y" pingtime="0" name="%s"/>
+ <Client profile="basic" name="%s"/>
</Clients>
'''
@@ -98,7 +104,6 @@ plugin_list = ['Account',
'SSHbase',
'SSLCA',
'Statistics',
- 'Svcmgr',
'TCheetah',
'TGenshi']
@@ -106,14 +111,6 @@ plugin_list = ['Account',
default_plugins = Bcfg2.Options.SERVER_PLUGINS.default
-def get_input(prompt):
- """py3k compatible function to get input"""
- try:
- return raw_input(prompt)
- except NameError:
- return input(prompt)
-
-
def gen_password(length):
"""Generates a random alphanumeric password with length characters."""
chars = string.letters + string.digits
@@ -147,8 +144,8 @@ def create_key(hostname, keypath, certpath, country, state, location):
def create_conf(confpath, confdata, keypath):
# Don't overwrite existing bcfg2.conf file
if os.path.exists(confpath):
- result = get_input("\nWarning: %s already exists. "
- "Overwrite? [y/N]: " % confpath)
+ result = input("\nWarning: %s already exists. "
+ "Overwrite? [y/N]: " % confpath)
if result not in ['Y', 'y']:
print("Leaving %s unchanged" % confpath)
return
@@ -206,8 +203,8 @@ class Init(Bcfg2.Server.Admin.Mode):
def _prompt_hostname(self):
"""Ask for the server hostname."""
- data = get_input("What is the server's hostname [%s]: " %
- socket.getfqdn())
+ data = input("What is the server's hostname [%s]: " %
+ socket.getfqdn())
if data != '':
self.shostname = data
else:
@@ -215,21 +212,21 @@ class Init(Bcfg2.Server.Admin.Mode):
def _prompt_config(self):
"""Ask for the configuration file path."""
- newconfig = get_input("Store Bcfg2 configuration in [%s]: " %
- self.configfile)
+ newconfig = input("Store Bcfg2 configuration in [%s]: " %
+ self.configfile)
if newconfig != '':
self.configfile = os.path.abspath(newconfig)
def _prompt_repopath(self):
"""Ask for the repository path."""
while True:
- newrepo = get_input("Location of Bcfg2 repository [%s]: " %
- self.repopath)
+ newrepo = input("Location of Bcfg2 repository [%s]: " %
+ self.repopath)
if newrepo != '':
self.repopath = os.path.abspath(newrepo)
if os.path.isdir(self.repopath):
- response = get_input("Directory %s exists. Overwrite? [y/N]:" \
- % self.repopath)
+ response = input("Directory %s exists. Overwrite? [y/N]:" \
+ % self.repopath)
if response.lower().strip() == 'y':
break
else:
@@ -245,8 +242,8 @@ class Init(Bcfg2.Server.Admin.Mode):
def _prompt_server(self):
"""Ask for the server name."""
- newserver = get_input("Input the server location [%s]: " %
- self.server_uri)
+ newserver = input("Input the server location [%s]: " %
+ self.server_uri)
if newserver != '':
self.server_uri = newserver
@@ -258,19 +255,19 @@ class Init(Bcfg2.Server.Admin.Mode):
prompt += ': '
while True:
try:
- osidx = int(get_input(prompt))
+ osidx = int(input(prompt))
self.os_sel = os_list[osidx - 1][1]
break
except ValueError:
continue
def _prompt_plugins(self):
- default = get_input("Use default plugins? (%s) [Y/n]: " %
- ''.join(default_plugins)).lower()
+ default = input("Use default plugins? (%s) [Y/n]: " %
+ ''.join(default_plugins)).lower()
if default != 'y' or default != '':
while True:
plugins_are_valid = True
- plug_str = get_input("Specify plugins: ")
+ plug_str = input("Specify plugins: ")
plugins = plug_str.split(',')
for plugin in plugins:
plugin = plugin.strip()
@@ -284,26 +281,26 @@ class Init(Bcfg2.Server.Admin.Mode):
"""Ask for the key details (country, state, and location)."""
print("The following questions affect SSL certificate generation.")
print("If no data is provided, the default values are used.")
- newcountry = get_input("Country name (2 letter code) for certificate: ")
+ newcountry = input("Country name (2 letter code) for certificate: ")
if newcountry != '':
if len(newcountry) == 2:
self.country = newcountry
else:
while len(newcountry) != 2:
- newcountry = get_input("2 letter country code (eg. US): ")
+ newcountry = input("2 letter country code (eg. US): ")
if len(newcountry) == 2:
self.country = newcountry
break
else:
self.country = 'US'
- newstate = get_input("State or Province Name (full name) for certificate: ")
+ newstate = input("State or Province Name (full name) for certificate: ")
if newstate != '':
self.state = newstate
else:
self.state = 'Illinois'
- newlocation = get_input("Locality Name (eg, city) for certificate: ")
+ newlocation = input("Locality Name (eg, city) for certificate: ")
if newlocation != '':
self.location = newlocation
else:
@@ -313,10 +310,10 @@ class Init(Bcfg2.Server.Admin.Mode):
"""Initialize each plugin-specific portion of the repository."""
for plugin in self.plugins:
if plugin == 'Metadata':
- Bcfg2.Server.Plugins.Metadata.Metadata.init_repo(self.repopath,
- groups,
- self.os_sel,
- clients)
+ Bcfg2.Server.Plugins.Metadata.Metadata.init_repo(
+ self.repopath,
+ groups_xml=groups % self.os_sel,
+ clients_xml=clients % socket.getfqdn())
else:
try:
module = __import__("Bcfg2.Server.Plugins.%s" % plugin, '',
diff --git a/src/lib/Bcfg2/Server/Admin/Pull.py b/src/lib/Bcfg2/Server/Admin/Pull.py
index daf353107..64327e018 100644
--- a/src/lib/Bcfg2/Server/Admin/Pull.py
+++ b/src/lib/Bcfg2/Server/Admin/Pull.py
@@ -2,6 +2,7 @@ import getopt
import sys
import Bcfg2.Server.Admin
+from Bcfg2.Bcfg2Py3k import input
class Pull(Bcfg2.Server.Admin.MetadataCore):
@@ -26,7 +27,7 @@ class Pull(Bcfg2.Server.Admin.MetadataCore):
"interactive",
"-s",
"stdin"))
- allowed = ['Metadata', 'BB', "DBStats", "Statistics", "Cfg", "SSHbase"]
+ allowed = ['Metadata', "DBStats", "Statistics", "Cfg", "SSHbase"]
def __init__(self, setup):
Bcfg2.Server.Admin.MetadataCore.__init__(self, setup)
@@ -92,7 +93,6 @@ class Pull(Bcfg2.Server.Admin.MetadataCore):
for k, v in list(data.items()):
if v:
new_entry[k] = v
- #print new_entry
return new_entry
def Choose(self, choices):
@@ -109,11 +109,8 @@ class Pull(Bcfg2.Server.Admin.MetadataCore):
(choice.group, choice.prio))
else:
print(" => host entry: %s" % (choice.hostname))
- # py3k compatibility
- try:
- ans = raw_input("Use this entry? [yN]: ") in ['y', 'Y']
- except NameError:
- ans = input("Use this entry? [yN]: ") in ['y', 'Y']
+
+ ans = input("Use this entry? [yN]: ") in ['y', 'Y']
if ans:
return choice
return False
diff --git a/src/lib/Bcfg2/Server/Admin/Query.py b/src/lib/Bcfg2/Server/Admin/Query.py
index 3dd326645..f81ec41d2 100644
--- a/src/lib/Bcfg2/Server/Admin/Query.py
+++ b/src/lib/Bcfg2/Server/Admin/Query.py
@@ -7,8 +7,8 @@ import Bcfg2.Server.Admin
class Query(Bcfg2.Server.Admin.MetadataCore):
__shorthelp__ = "Query clients"
__longhelp__ = (__shorthelp__ + "\n\nbcfg2-admin query [-n] [-c] "
- "[-f filename] g=group p=profile")
- __usage__ = ("bcfg2-admin query [options] <g=group> <p=profile>\n\n"
+ "[-f filename] g=group p=profile b=bundle")
+ __usage__ = ("bcfg2-admin query [options] <g=group> <p=profile> <b=bundle>\n\n"
" %-25s%s\n"
" %-25s%s\n"
" %-25s%s\n" %
@@ -22,7 +22,8 @@ class Query(Bcfg2.Server.Admin.MetadataCore):
def __init__(self, setup):
Bcfg2.Server.Admin.MetadataCore.__init__(self, setup)
logging.root.setLevel(100)
- Bcfg2.Logger.setup_logging(100, to_console=False, to_syslog=False)
+ Bcfg2.Logger.setup_logging(100, to_console=False,
+ to_syslog=setup['syslog'])
def __call__(self, args):
Bcfg2.Server.Admin.MetadataCore.__call__(self, args)
@@ -55,8 +56,10 @@ class Query(Bcfg2.Server.Admin.MetadataCore):
for g in glist:
if g in v.split(','):
nc.append(c)
+ elif k == 'b':
+ nc = self.metadata.get_client_names_by_bundles(v.split(','))
else:
- print("One of g= or p= must be specified")
+ print("One of g=, p= or b= must be specified")
raise SystemExit(1)
clients = [c for c in clients if c in nc]
if '-n' in args:
diff --git a/src/lib/Bcfg2/Server/Admin/Reports.py b/src/lib/Bcfg2/Server/Admin/Reports.py
index 974cdff9d..335d6a1e7 100644
--- a/src/lib/Bcfg2/Server/Admin/Reports.py
+++ b/src/lib/Bcfg2/Server/Admin/Reports.py
@@ -19,17 +19,15 @@ if sys.version_info >= (2, 5):
else:
from md5 import md5
-# Prereq issues can be signaled with ImportError, so no try needed
-# FIXME - settings file uses a hardcoded path for /etc/bcfg2.conf
-import Bcfg2.Server.Reports.settings
+import Bcfg2.settings
# Load django and reports stuff _after_ we know we can load settings
import django.core.management
from Bcfg2.Server.Reports.importscript import load_stats
-from Bcfg2.Server.Reports.updatefix import update_database
+from Bcfg2.Server.SchemaUpdater import update_database, UpdaterError
from Bcfg2.Server.Reports.utils import *
-project_directory = os.path.dirname(Bcfg2.Server.Reports.settings.__file__)
+project_directory = os.path.dirname(Bcfg2.settings.__file__)
project_name = os.path.basename(project_directory)
sys.path.append(os.path.join(project_directory, '..'))
project_module = __import__(project_name, '', '', [''])
@@ -41,7 +39,7 @@ from django.db import connection, transaction
from Bcfg2.Server.Reports.reports.models import Client, Interaction, Entries, \
Entries_interactions, Performance, \
- Reason, Ping
+ Reason
def printStats(fn):
@@ -55,7 +53,6 @@ def printStats(fn):
start_i = Interaction.objects.count()
start_ei = Entries_interactions.objects.count()
start_perf = Performance.objects.count()
- start_ping = Ping.objects.count()
fn(self, *data)
@@ -67,8 +64,6 @@ def printStats(fn):
(start_ei - Entries_interactions.objects.count()))
self.log.info("Metrics removed: %s" %
(start_perf - Performance.objects.count()))
- self.log.info("Ping metrics removed: %s" %
- (start_ping - Ping.objects.count()))
return print_stats
@@ -77,16 +72,13 @@ class Reports(Bcfg2.Server.Admin.Mode):
'''Admin interface for dynamic reports'''
__shorthelp__ = "Manage dynamic reports"
__longhelp__ = (__shorthelp__)
- django_commands = ['syncdb', 'sqlall', 'validate']
+ django_commands = ['dbshell', 'shell', 'syncdb', 'sqlall', 'validate']
__usage__ = ("bcfg2-admin reports [command] [options]\n"
- " -v|--verbose Be verbose\n"
- " -q|--quiet Print only errors\n"
"\n"
" Commands:\n"
" init Initialize the database\n"
" load_stats Load statistics data\n"
" -s|--stats Path to statistics.xml file\n"
- " -c|--clients-file Path to clients.xml file\n"
" -O3 Fast mode. Duplicates data!\n"
" purge Purge records\n"
" --client [n] Client to operate on\n"
@@ -95,12 +87,11 @@ class Reports(Bcfg2.Server.Admin.Mode):
" scrub Scrub the database for duplicate reasons and orphaned entries\n"
" update Apply any updates to the reporting database\n"
"\n"
- " Django commands:\n "
- "\n ".join(django_commands))
+ " Django commands:\n " \
+ + "\n ".join(django_commands))
def __init__(self, setup):
Bcfg2.Server.Admin.Mode.__init__(self, setup)
- self.log.setLevel(logging.INFO)
def __call__(self, args):
Bcfg2.Server.Admin.Mode.__call__(self, args)
@@ -108,28 +99,21 @@ class Reports(Bcfg2.Server.Admin.Mode):
print(self.__usage__)
raise SystemExit(0)
- verb = 0
-
- if '-v' in args or '--verbose' in args:
- self.log.setLevel(logging.DEBUG)
- verb = 1
- if '-q' in args or '--quiet' in args:
- self.log.setLevel(logging.WARNING)
-
# FIXME - dry run
if args[0] in self.django_commands:
self.django_command_proxy(args[0])
elif args[0] == 'scrub':
self.scrub()
- elif args[0] == 'init':
- update_database()
- elif args[0] == 'update':
- update_database()
+ elif args[0] in ['init', 'update']:
+ try:
+ update_database()
+ except UpdaterError:
+ print("Update failed")
+ raise SystemExit(-1)
elif args[0] == 'load_stats':
quick = '-O3' in args
stats_file = None
- clients_file = None
i = 1
while i < len(args):
if args[i] == '-s' or args[i] == '--stats':
@@ -137,11 +121,9 @@ class Reports(Bcfg2.Server.Admin.Mode):
if stats_file[0] == '-':
self.errExit("Invalid statistics file: %s" % stats_file)
elif args[i] == '-c' or args[i] == '--clients-file':
- clients_file = args[i + 1]
- if clients_file[0] == '-':
- self.errExit("Invalid clients file: %s" % clients_file)
+ print("DeprecationWarning: %s is no longer used" % args[i])
i = i + 1
- self.load_stats(stats_file, clients_file, verb, quick)
+ self.load_stats(stats_file, self.log.getEffectiveLevel() > logging.WARNING, quick)
elif args[0] == 'purge':
expired = False
client = None
@@ -239,7 +221,7 @@ class Reports(Bcfg2.Server.Admin.Mode):
else:
django.core.management.call_command(command)
- def load_stats(self, stats_file=None, clientspath=None, verb=0, quick=False):
+ def load_stats(self, stats_file=None, verb=0, quick=False):
'''Load statistics data into the database'''
location = ''
@@ -258,27 +240,18 @@ class Reports(Bcfg2.Server.Admin.Mode):
except:
encoding = 'UTF-8'
- if not clientspath:
- try:
- clientspath = "%s/Metadata/clients.xml" % \
- self.cfp.get('server', 'repository')
- except (ConfigParser.NoSectionError, ConfigParser.NoOptionError):
- self.errExit("Could not read bcfg2.conf; exiting")
- try:
- clientsdata = XML(open(clientspath).read())
- except (IOError, XMLSyntaxError):
- self.errExit("StatReports: Failed to parse %s" % (clientspath))
-
try:
- load_stats(clientsdata,
- statsdata,
+ load_stats(statsdata,
encoding,
verb,
self.log,
quick=quick,
location=platform.node())
+ except UpdaterError:
+ self.errExit("StatReports: Database updater failed")
except:
- pass
+ self.errExit("failed to import stats: %s"
+ % traceback.format_exc().splitlines()[-1])
@printStats
def purge(self, client=None, maxdate=None, state=None):
@@ -306,12 +279,10 @@ class Reports(Bcfg2.Server.Admin.Mode):
self.log.debug("Filtering by maxdate: %s" % maxdate)
ipurge = ipurge.filter(timestamp__lt=maxdate)
- # Handle ping data as well
- ping = Ping.objects.filter(endtime__lt=maxdate)
- if client:
- ping = ping.filter(client=cobj)
- ping.delete()
-
+ if Bcfg2.settings.DATABASES['default']['ENGINE'] == 'django.db.backends.sqlite3':
+ grp_limit = 100
+ else:
+ grp_limit = 1000
if state:
filtered = True
if state not in ('dirty', 'clean', 'modified'):
@@ -324,7 +295,7 @@ class Reports(Bcfg2.Server.Admin.Mode):
rnum = 0
try:
while rnum < count:
- grp = list(ipurge[:1000].values("id"))
+ grp = list(ipurge[:grp_limit].values("id"))
# just in case...
if not grp:
break
diff --git a/src/lib/Bcfg2/Server/Admin/Syncdb.py b/src/lib/Bcfg2/Server/Admin/Syncdb.py
new file mode 100644
index 000000000..bff232b05
--- /dev/null
+++ b/src/lib/Bcfg2/Server/Admin/Syncdb.py
@@ -0,0 +1,36 @@
+import Bcfg2.settings
+import Bcfg2.Options
+import Bcfg2.Server.Admin
+from Bcfg2.Server.SchemaUpdater import update_database, UpdaterError
+from django.core.management import setup_environ
+
+class Syncdb(Bcfg2.Server.Admin.Mode):
+ __shorthelp__ = ("Sync the Django ORM with the configured database")
+ __longhelp__ = __shorthelp__ + "\n\nbcfg2-admin syncdb"
+ __usage__ = "bcfg2-admin syncdb"
+ options = {'web_configfile': Bcfg2.Options.WEB_CFILE,
+ 'repo': Bcfg2.Options.SERVER_REPOSITORY}
+
+ def __call__(self, args):
+ import Bcfg2.Server.Admin
+ Bcfg2.Server.Admin.Mode.__call__(self, args)
+
+ # Parse options
+ self.opts = Bcfg2.Options.OptionParser(self.options)
+ self.opts.parse(args)
+
+ # we have to set up the django environment before we import
+ # the syncdb command, but we have to wait to set up the
+ # environment until we've read the config, which has to wait
+ # until we've parsed options. it's a windy, twisting road.
+ Bcfg2.settings.read_config(cfile=self.opts['web_configfile'],
+ repo=self.opts['repo'])
+ setup_environ(Bcfg2.settings)
+ import Bcfg2.Server.models
+ Bcfg2.Server.models.load_models(cfile=self.opts['configfile'])
+
+ try:
+ update_database()
+ except UpdaterError:
+ print("Update failed")
+ raise SystemExit(-1)
diff --git a/src/lib/Bcfg2/Server/Admin/Tidy.py b/src/lib/Bcfg2/Server/Admin/Tidy.py
index 82319b93e..65aa955b4 100644
--- a/src/lib/Bcfg2/Server/Admin/Tidy.py
+++ b/src/lib/Bcfg2/Server/Admin/Tidy.py
@@ -3,6 +3,7 @@ import re
import socket
import Bcfg2.Server.Admin
+from Bcfg2.Bcfg2Py3k import input
class Tidy(Bcfg2.Server.Admin.Mode):
@@ -22,11 +23,7 @@ class Tidy(Bcfg2.Server.Admin.Mode):
if '-f' in args or '-I' in args:
if '-I' in args:
for name in badfiles[:]:
- # py3k compatibility
- try:
- answer = raw_input("Unlink file %s? [yN] " % name)
- except NameError:
- answer = input("Unlink file %s? [yN] " % name)
+ answer = input("Unlink file %s? [yN] " % name)
if answer not in ['y', 'Y']:
badfiles.remove(name)
for name in badfiles:
diff --git a/src/lib/Bcfg2/Server/Admin/Viz.py b/src/lib/Bcfg2/Server/Admin/Viz.py
index 2faa423c1..b190dd62a 100644
--- a/src/lib/Bcfg2/Server/Admin/Viz.py
+++ b/src/lib/Bcfg2/Server/Admin/Viz.py
@@ -35,7 +35,7 @@ class Viz(Bcfg2.Server.Admin.MetadataCore):
__plugin_blacklist__ = ['DBStats', 'Snapshots', 'Cfg', 'Pkgmgr', 'Packages',
'Rules', 'Account', 'Decisions', 'Deps', 'Git',
'Svn', 'Fossil', 'Bzr', 'Bundler', 'TGenshi',
- 'SGenshi', 'Base']
+ 'Base']
def __call__(self, args):
Bcfg2.Server.Admin.MetadataCore.__call__(self, args)
diff --git a/src/lib/Bcfg2/Server/Admin/__init__.py b/src/lib/Bcfg2/Server/Admin/__init__.py
index 618fa450e..3a7ba45cf 100644
--- a/src/lib/Bcfg2/Server/Admin/__init__.py
+++ b/src/lib/Bcfg2/Server/Admin/__init__.py
@@ -11,6 +11,7 @@ __all__ = [
'Query',
'Reports',
'Snapshots',
+ 'Syncdb',
'Tidy',
'Viz',
'Xcmd'
@@ -117,15 +118,7 @@ class MetadataCore(Mode):
if p not in self.__plugin_blacklist__]
try:
- self.bcore = \
- Bcfg2.Server.Core.Core(setup['repo'],
- setup['plugins'],
- setup['password'],
- setup['encoding'],
- filemonitor=setup['filemonitor'],
- setup=setup)
- if setup['event debug']:
- self.bcore.fam.debug = True
+ self.bcore = Bcfg2.Server.Core.BaseCore(setup)
except Bcfg2.Server.Core.CoreInitError:
msg = sys.exc_info()[1]
self.errExit("Core load failed: %s" % msg)
diff --git a/src/lib/Bcfg2/Server/BuiltinCore.py b/src/lib/Bcfg2/Server/BuiltinCore.py
new file mode 100644
index 000000000..c52c49931
--- /dev/null
+++ b/src/lib/Bcfg2/Server/BuiltinCore.py
@@ -0,0 +1,103 @@
+""" the core of the builtin bcfg2 server """
+
+import os
+import sys
+import time
+import socket
+import logging
+from Bcfg2.Server.Core import BaseCore
+from Bcfg2.Bcfg2Py3k import xmlrpclib, urlparse
+from Bcfg2.SSLServer import XMLRPCServer
+
+logger = logging.getLogger()
+
+class NoExposedMethod (Exception):
+ """There is no method exposed with the given name."""
+
+
+class Core(BaseCore):
+ name = 'bcfg2-server'
+
+ def _resolve_exposed_method(self, method_name):
+ """Resolve an exposed method.
+
+ Arguments:
+ method_name -- name of the method to resolve
+
+ """
+ try:
+ func = getattr(self, method_name)
+ except AttributeError:
+ raise NoExposedMethod(method_name)
+ if not getattr(func, "exposed", False):
+ raise NoExposedMethod(method_name)
+ return func
+
+ def _dispatch(self, method, args, dispatch_dict):
+ """Custom XML-RPC dispatcher for components.
+
+ method -- XML-RPC method name
+ args -- tuple of paramaters to method
+
+ """
+ if method in dispatch_dict:
+ method_func = dispatch_dict[method]
+ else:
+ try:
+ method_func = self._resolve_exposed_method(method)
+ except NoExposedMethod:
+ self.logger.error("Unknown method %s" % (method))
+ raise xmlrpclib.Fault(xmlrpclib.METHOD_NOT_FOUND,
+ "Unknown method %s" % method)
+
+ try:
+ method_start = time.time()
+ try:
+ result = method_func(*args)
+ finally:
+ method_done = time.time()
+ except xmlrpclib.Fault:
+ raise
+ except Exception:
+ e = sys.exc_info()[1]
+ if getattr(e, "log", True):
+ self.logger.error(e, exc_info=True)
+ raise xmlrpclib.Fault(getattr(e, "fault_code", 1), str(e))
+ return result
+
+ def run(self):
+ if self.setup['daemon']:
+ self._daemonize()
+
+ hostname, port = urlparse(self.setup['location'])[1].split(':')
+ server_address = socket.getaddrinfo(hostname,
+ port,
+ socket.AF_UNSPEC,
+ socket.SOCK_STREAM)[0][4]
+ try:
+ server = XMLRPCServer(self.setup['listen_all'],
+ server_address,
+ keyfile=self.setup['key'],
+ certfile=self.setup['cert'],
+ register=False,
+ timeout=1,
+ ca=self.setup['ca'],
+ protocol=self.setup['protocol'])
+ except:
+ err = sys.exc_info()[1]
+ self.logger.error("Server startup failed: %s" % err)
+ os._exit(1)
+ server.register_instance(self)
+
+ try:
+ server.serve_forever()
+ finally:
+ server.server_close()
+ self.shutdown()
+
+ def methodHelp(self, method_name):
+ try:
+ func = self._resolve_exposed_method(method_name)
+ except NoExposedMethod:
+ return ""
+ return func.__doc__
diff --git a/src/lib/Bcfg2/Server/CherryPyCore.py b/src/lib/Bcfg2/Server/CherryPyCore.py
new file mode 100644
index 000000000..91e7f89bd
--- /dev/null
+++ b/src/lib/Bcfg2/Server/CherryPyCore.py
@@ -0,0 +1,131 @@
+""" the core of the CherryPy-powered server """
+
+import sys
+import base64
+import atexit
+import cherrypy
+import Bcfg2.Options
+from Bcfg2.Bcfg2Py3k import urlparse, xmlrpclib
+from Bcfg2.Server.Core import BaseCore
+from cherrypy.lib import xmlrpcutil
+from cherrypy._cptools import ErrorTool
+
+if cherrypy.engine.state == 0:
+ cherrypy.engine.start(blocking=False)
+ atexit.register(cherrypy.engine.stop)
+
+# define our own error handler that handles xmlrpclib.Fault objects
+# and so allows for the possibility of returning proper error
+# codes. this obviates the need to use the builtin CherryPy xmlrpc
+# tool
+def on_error(*args, **kwargs):
+ err = sys.exc_info()[1]
+ if not isinstance(err, xmlrpclib.Fault):
+ err = xmlrpclib.Fault(xmlrpclib.INTERNAL_ERROR, str(err))
+ xmlrpcutil._set_response(xmlrpclib.dumps(err))
+cherrypy.tools.xmlrpc_error = ErrorTool(on_error)
+
+
+class Core(BaseCore):
+ _cp_config = {'tools.xmlrpc_error.on': True,
+ 'tools.bcfg2_authn.on': True}
+
+ def __init__(self, *args, **kwargs):
+ BaseCore.__init__(self, *args, **kwargs)
+
+ cherrypy.tools.bcfg2_authn = cherrypy.Tool('on_start_resource',
+ self.do_authn)
+
+ self.rmi = self._get_rmi()
+
+ def do_authn(self):
+ try:
+ header = cherrypy.request.headers['Authorization']
+ except KeyError:
+ self.critical_error("No authentication data presented")
+ auth_type, auth_content = header.split()
+ try:
+ # py3k compatibility
+ auth_content = base64.standard_b64decode(auth_content)
+ except TypeError:
+ auth_content = \
+ base64.standard_b64decode(bytes(auth_content.encode('ascii')))
+ try:
+ # py3k compatibility
+ try:
+ username, password = auth_content.split(":")
+ except TypeError:
+ username, pw = auth_content.split(bytes(":", encoding='utf-8'))
+ password = pw.decode('utf-8')
+ except ValueError:
+ username = auth_content
+ password = ""
+
+ # FIXME: Get client cert
+ cert = None
+ address = (cherrypy.request.remote.ip, cherrypy.request.remote.name)
+ return self.authenticate(cert, username, password, address)
+
+ @cherrypy.expose
+ def default(self, *vpath, **params):
+ # needed to make enough changes to the stock XMLRPCController
+ # to support plugin.__rmi__ and prepending client address that
+ # we just rewrote. it clearly wasn't written with inheritance
+ # in mind :(
+ rpcparams, rpcmethod = xmlrpcutil.process_body()
+ if "." not in rpcmethod:
+ address = (cherrypy.request.remote.ip, cherrypy.request.remote.name)
+ rpcparams = (address, ) + rpcparams
+
+ handler = getattr(self, rpcmethod)
+ if not handler or not getattr(handler, "exposed", False):
+ raise Exception('method "%s" is not supported' % attr)
+ else:
+ try:
+ handler = self.rmi[rpcmethod]
+ except:
+ raise Exception('method "%s" is not supported' % rpcmethod)
+
+ body = handler(*rpcparams, **params)
+
+ xmlrpcutil.respond(body, 'utf-8', True)
+ return cherrypy.serving.response.body
+
+ def run(self):
+ hostname, port = urlparse(self.setup['location'])[1].split(':')
+ if self.setup['listen_all']:
+ hostname = '0.0.0.0'
+
+ config = {'engine.autoreload.on': False,
+ 'server.socket_port': int(port)}
+ if self.setup['cert'] and self.setup['key']:
+ config.update({'server.ssl_module': 'pyopenssl',
+ 'server.ssl_certificate': self.setup['cert'],
+ 'server.ssl_private_key': self.setup['key']})
+ if self.setup['debug']:
+ config['log.screen'] = True
+ cherrypy.config.update(config)
+ cherrypy.quickstart(self, config={'/': self.setup})
+
+
+def parse_opts(argv=None):
+ if argv is None:
+ argv = sys.argv[1:]
+ optinfo = dict()
+ optinfo.update(Bcfg2.Options.CLI_COMMON_OPTIONS)
+ optinfo.update(Bcfg2.Options.SERVER_COMMON_OPTIONS)
+ optinfo.update(Bcfg2.Options.DAEMON_COMMON_OPTIONS)
+ setup = Bcfg2.Options.OptionParser(optinfo, argv=argv)
+ setup.parse(argv)
+ return setup
+
+def application(environ, start_response):
+ """ running behind Apache as a WSGI app is not currently
+ supported, but I'm keeping this code here because I hope for it to
+ be supported some day. we'll need to set up an AMQP task queue
+ and related magic for that to happen, though. """
+ cherrypy.config.update({'environment': 'embedded'})
+ setup = parse_opts(argv=['-C', environ['config']])
+ root = Core(setup, start_fam_thread=True)
+ cherrypy.tree.mount(root)
+ return cherrypy.tree(environ, start_response)
diff --git a/src/lib/Bcfg2/Server/Core.py b/src/lib/Bcfg2/Server/Core.py
index 8482925b7..f39453edd 100644
--- a/src/lib/Bcfg2/Server/Core.py
+++ b/src/lib/Bcfg2/Server/Core.py
@@ -1,35 +1,21 @@
"""Bcfg2.Server.Core provides the runtime support for Bcfg2 modules."""
+import os
import atexit
import logging
import select
import sys
import threading
import time
+import inspect
+import lxml.etree
from traceback import format_exc
-
-try:
- import lxml.etree
-except ImportError:
- print("Failed to import lxml dependency. Shutting down server.")
- raise SystemExit(1)
-
-from Bcfg2.Component import Component, exposed
-from Bcfg2.Server.Plugin import PluginInitError, PluginExecutionError
+import Bcfg2.settings
+import Bcfg2.Server
+import Bcfg2.Logger
import Bcfg2.Server.FileMonitor
-import Bcfg2.Server.Plugins.Metadata
-# Compatibility imports
-from Bcfg2.Bcfg2Py3k import xmlrpclib
-if sys.hexversion >= 0x03000000:
- from functools import reduce
-
-logger = logging.getLogger('Bcfg2.Server.Core')
-
-
-def critical_error(operation):
- """Log and err, traceback and return an xmlrpc fault to client."""
- logger.error(operation, exc_info=1)
- raise xmlrpclib.Fault(7, "Critical unexpected failure: %s" % (operation))
+from Bcfg2.Bcfg2Py3k import xmlrpclib, reduce
+from Bcfg2.Server.Plugin import PluginInitError, PluginExecutionError
try:
import psyco
@@ -37,6 +23,11 @@ try:
except:
pass
+os.environ['DJANGO_SETTINGS_MODULE'] = 'Bcfg2.settings'
+
+def exposed(func):
+ func.exposed = True
+ return func
def sort_xml(node, key=None):
for child in node:
@@ -54,88 +45,134 @@ class CoreInitError(Exception):
pass
-class Core(Component):
+class BaseCore(object):
"""The Core object is the container for all
Bcfg2 Server logic and modules.
"""
- name = 'bcfg2-server'
- implementation = 'bcfg2-server'
-
- def __init__(self, repo, plugins, password, encoding,
- cfile='/etc/bcfg2.conf', ca=None, setup=None,
- filemonitor='default', start_fam_thread=False):
- Component.__init__(self)
- self.datastore = repo
- if filemonitor not in Bcfg2.Server.FileMonitor.available:
- logger.error("File monitor driver %s not available; "
- "forcing to default" % filemonitor)
- filemonitor = 'default'
+
+ def __init__(self, setup, start_fam_thread=False):
+ self.datastore = setup['repo']
+
+ if setup['debug']:
+ level = logging.DEBUG
+ elif setup['verbose']:
+ level = logging.INFO
+ else:
+ level = logging.WARNING
+ # we set a higher log level for the console by default. we
+ # assume that if someone is running bcfg2-server in such a way
+ # that it _can_ log to console, they want more output. if
+ # level is set to DEBUG, that will get handled by
+ # setup_logging and the console will get DEBUG output.
+ Bcfg2.Logger.setup_logging('bcfg2-server',
+ to_console=logging.INFO,
+ to_syslog=setup['syslog'],
+ to_file=setup['logging'],
+ level=level)
+ self.logger = logging.getLogger('bcfg2-server')
+
try:
- self.fam = Bcfg2.Server.FileMonitor.available[filemonitor]()
+ fm = Bcfg2.Server.FileMonitor.available[setup['filemonitor']]
+ except KeyError:
+ self.logger.error("File monitor driver %s not available; "
+ "forcing to default" % filemonitor)
+ fm = Bcfg2.Server.FileMonitor.available['default']
+ famargs = dict(ignore=[], debug=False)
+ if 'ignore' in setup:
+ famargs['ignore'] = setup['ignore']
+ if 'debug' in setup:
+ famargs['debug'] = setup['debug']
+ try:
+ self.fam = fm(**famargs)
except IOError:
- logger.error("Failed to instantiate fam driver %s" % filemonitor,
- exc_info=1)
- raise CoreInitError("failed to instantiate fam driver (used %s)" % \
- filemonitor)
+ msg = "Failed to instantiate fam driver %s" % setup['filemonitor']
+ self.logger.error(msg, exc_info=1)
+ raise CoreInitError(msg)
self.pubspace = {}
- self.cfile = cfile
+ self.cfile = setup['configfile']
self.cron = {}
self.plugins = {}
self.plugin_blacklist = {}
self.revision = '-1'
- self.password = password
- self.encoding = encoding
+ self.password = setup['password']
+ self.encoding = setup['encoding']
self.setup = setup
atexit.register(self.shutdown)
# Create an event to signal worker threads to shutdown
self.terminate = threading.Event()
- if '' in plugins:
- plugins.remove('')
+ # generate Django ORM settings. this must be done _before_ we
+ # load plugins
+ Bcfg2.settings.read_config(cfile=self.setup['web_configfile'],
+ repo=self.datastore)
- for plugin in plugins:
+ self._database_available = False
+ # verify our database schema
+ try:
+ from Bcfg2.Server.SchemaUpdater import update_database, UpdaterError
+ try:
+ update_database()
+ self._database_available = True
+ except UpdaterError:
+ err = sys.exc_info()[1]
+ self.logger.error("Failed to update database schema: %s" % err)
+ except ImportError:
+ # assume django is not installed
+ pass
+ except Exception:
+ inst = sys.exc_info()[1]
+ self.logger.error("Failed to update database schema")
+ self.logger.error(str(inst))
+ self.logger.error(str(type(inst)))
+ raise CoreInitError
+
+ if '' in setup['plugins']:
+ setup['plugins'].remove('')
+
+ for plugin in setup['plugins']:
if not plugin in self.plugins:
self.init_plugins(plugin)
# Remove blacklisted plugins
for p, bl in list(self.plugin_blacklist.items()):
if len(bl) > 0:
- logger.error("The following plugins conflict with %s;"
- "Unloading %s" % (p, bl))
+ self.logger.error("The following plugins conflict with %s;"
+ "Unloading %s" % (p, bl))
for plug in bl:
del self.plugins[plug]
# This section logs the experimental plugins
expl = [plug for (name, plug) in list(self.plugins.items())
if plug.experimental]
if expl:
- logger.info("Loading experimental plugin(s): %s" % \
- (" ".join([x.name for x in expl])))
- logger.info("NOTE: Interfaces subject to change")
+ self.logger.info("Loading experimental plugin(s): %s" %
+ (" ".join([x.name for x in expl])))
+ self.logger.info("NOTE: Interfaces subject to change")
# This section logs the deprecated plugins
depr = [plug for (name, plug) in list(self.plugins.items())
if plug.deprecated]
if depr:
- logger.info("Loading deprecated plugin(s): %s" % \
- (" ".join([x.name for x in depr])))
+ self.logger.info("Loading deprecated plugin(s): %s" %
+ (" ".join([x.name for x in depr])))
mlist = self.plugins_by_type(Bcfg2.Server.Plugin.Metadata)
if len(mlist) == 1:
self.metadata = mlist[0]
else:
- logger.error("No Metadata Plugin loaded; failed to instantiate Core")
+ self.logger.error("No Metadata Plugin loaded; "
+ "failed to instantiate Core")
raise CoreInitError("No Metadata Plugin")
self.statistics = self.plugins_by_type(Bcfg2.Server.Plugin.Statistics)
self.pull_sources = self.plugins_by_type(Bcfg2.Server.Plugin.PullSource)
self.generators = self.plugins_by_type(Bcfg2.Server.Plugin.Generator)
self.structures = self.plugins_by_type(Bcfg2.Server.Plugin.Structure)
self.connectors = self.plugins_by_type(Bcfg2.Server.Plugin.Connector)
- self.ca = ca
- self.fam_thread = threading.Thread(target=self._file_monitor_thread)
+ self.ca = setup['ca']
+ self.fam_thread = \
+ threading.Thread(name="%sFAMThread" % setup['filemonitor'],
+ target=self._file_monitor_thread)
+ self.lock = threading.Lock()
+
if start_fam_thread:
self.fam_thread.start()
- self.monitor_cfile()
-
- def monitor_cfile(self):
- if self.setup:
self.fam.AddMonitor(self.cfile, self.setup)
def plugins_by_type(self, base_cls):
@@ -171,16 +208,21 @@ class Core(Component):
def init_plugins(self, plugin):
"""Handling for the plugins."""
+ self.logger.debug("Loading plugin %s" % plugin)
try:
mod = getattr(__import__("Bcfg2.Server.Plugins.%s" %
(plugin)).Server.Plugins, plugin)
except ImportError:
try:
- mod = __import__(plugin)
+ mod = __import__(plugin, globals(), locals(), [plugin.split('.')[-1]])
except:
- logger.error("Failed to load plugin %s" % (plugin))
+ self.logger.error("Failed to load plugin %s" % plugin)
return
- plug = getattr(mod, plugin)
+ try:
+ plug = getattr(mod, plugin.split('.')[-1])
+ except AttributeError:
+ self.logger.error("Failed to load plugin %s (AttributeError)" % plugin)
+ return
# Blacklist conflicting plugins
cplugs = [conflict for conflict in plug.conflicts
if conflict in self.plugins]
@@ -188,18 +230,35 @@ class Core(Component):
try:
self.plugins[plugin] = plug(self, self.datastore)
except PluginInitError:
- logger.error("Failed to instantiate plugin %s" % (plugin))
+ self.logger.error("Failed to instantiate plugin %s" % plugin,
+ exc_info=1)
except:
- logger.error("Unexpected instantiation failure for plugin %s" %
- (plugin), exc_info=1)
+ self.logger.error("Unexpected instantiation failure for plugin %s" %
+ plugin, exc_info=1)
def shutdown(self):
"""Shutting down the plugins."""
if not self.terminate.isSet():
self.terminate.set()
+ self.fam.shutdown()
for plugin in list(self.plugins.values()):
plugin.shutdown()
+ def client_run_hook(self, hook, metadata):
+ """Checks the data structure."""
+ for plugin in self.plugins_by_type(Bcfg2.Server.Plugin.ClientRunHooks):
+ try:
+ getattr(plugin, hook)(metadata)
+ except AttributeError:
+ err = sys.exc_info()[1]
+ self.logger.error("Unknown attribute: %s" % err)
+ raise
+ except:
+ err = sys.exc_info()[1]
+ self.logger.error("%s: Error invoking hook %s: %s" % (plugin,
+ hook,
+ err))
+
def validate_structures(self, metadata, data):
"""Checks the data structure."""
for plugin in self.plugins_by_type(Bcfg2.Server.Plugin.StructureValidator):
@@ -207,12 +266,12 @@ class Core(Component):
plugin.validate_structures(metadata, data)
except Bcfg2.Server.Plugin.ValidationError:
err = sys.exc_info()[1]
- logger.error("Plugin %s structure validation failed: %s" \
- % (plugin.name, err.message))
+ self.logger.error("Plugin %s structure validation failed: %s" %
+ (plugin.name, err))
raise
except:
- logger.error("Plugin %s: unexpected structure validation failure" \
- % (plugin.name), exc_info=1)
+ self.logger.error("Plugin %s: unexpected structure validation "
+ "failure" % plugin.name, exc_info=1)
def validate_goals(self, metadata, data):
"""Checks that the config matches the goals enforced by the plugins."""
@@ -221,23 +280,23 @@ class Core(Component):
plugin.validate_goals(metadata, data)
except Bcfg2.Server.Plugin.ValidationError:
err = sys.exc_info()[1]
- logger.error("Plugin %s goal validation failed: %s" \
- % (plugin.name, err.message))
+ self.logger.error("Plugin %s goal validation failed: %s" %
+ (plugin.name, err.message))
raise
except:
- logger.error("Plugin %s: unexpected goal validation failure" \
- % (plugin.name), exc_info=1)
+ self.logger.error("Plugin %s: unexpected goal validation "
+ "failure" % plugin.name, exc_info=1)
def GetStructures(self, metadata):
"""Get all structures for client specified by metadata."""
structures = reduce(lambda x, y: x + y,
- [struct.BuildStructures(metadata) for struct \
- in self.structures], [])
+ [struct.BuildStructures(metadata)
+ for struct in self.structures], [])
sbundles = [b.get('name') for b in structures if b.tag == 'Bundle']
missing = [b for b in metadata.bundles if b not in sbundles]
if missing:
- logger.error("Client %s configuration missing bundles: %s" \
- % (metadata.hostname, ':'.join(missing)))
+ self.logger.error("Client %s configuration missing bundles: %s" %
+ (metadata.hostname, ':'.join(missing)))
return structures
def BindStructure(self, structure, metadata):
@@ -252,14 +311,14 @@ class Core(Component):
exc = sys.exc_info()[1]
if 'failure' not in entry.attrib:
entry.set('failure', 'bind error: %s' % format_exc())
- logger.error("Failed to bind entry %s:%s: %s" %
- (entry.tag, entry.get('name'), exc))
+ self.logger.error("Failed to bind entry %s:%s: %s" %
+ (entry.tag, entry.get('name'), exc))
except Exception:
exc = sys.exc_info()[1]
if 'failure' not in entry.attrib:
entry.set('failure', 'bind error: %s' % format_exc())
- logger.error("Unexpected failure in BindStructure: %s %s" \
- % (entry.tag, entry.get('name')), exc_info=1)
+ self.logger.error("Unexpected failure in BindStructure: %s %s" %
+ (entry.tag, entry.get('name')), exc_info=1)
def Bind(self, entry, metadata):
"""Bind an entry using the appropriate generator."""
@@ -275,11 +334,11 @@ class Core(Component):
return ret
except:
entry.set('name', oldname)
- logger.error("Failed binding entry %s:%s with altsrc %s" \
- % (entry.tag, entry.get('name'),
- entry.get('altsrc')))
- logger.error("Falling back to %s:%s" % (entry.tag,
- entry.get('name')))
+ self.logger.error("Failed binding entry %s:%s with altsrc %s" %
+ (entry.tag, entry.get('name'),
+ entry.get('altsrc')))
+ self.logger.error("Falling back to %s:%s" % (entry.tag,
+ entry.get('name')))
glist = [gen for gen in self.generators if
entry.get('name') in gen.Entries.get(entry.tag, {})]
@@ -288,8 +347,8 @@ class Core(Component):
metadata)
elif len(glist) > 1:
generators = ", ".join([gen.name for gen in glist])
- logger.error("%s %s served by multiple generators: %s" % \
- (entry.tag, entry.get('name'), generators))
+ self.logger.error("%s %s served by multiple generators: %s" %
+ (entry.tag, entry.get('name'), generators))
g2list = [gen for gen in self.generators if
gen.HandlesEntry(entry, metadata)]
if len(g2list) == 1:
@@ -301,18 +360,21 @@ class Core(Component):
def BuildConfiguration(self, client):
"""Build configuration for clients."""
start = time.time()
- config = lxml.etree.Element("Configuration", version='2.0', \
+ config = lxml.etree.Element("Configuration", version='2.0',
revision=self.revision)
try:
meta = self.build_metadata(client)
- except Bcfg2.Server.Plugins.Metadata.MetadataConsistencyError:
- logger.error("Metadata consistency error for client %s" % client)
+ except Bcfg2.Server.Plugin.MetadataConsistencyError:
+ self.logger.error("Metadata consistency error for client %s" %
+ client)
return lxml.etree.Element("error", type='metadata error')
+ self.client_run_hook("start_client_run", meta)
+
try:
structures = self.GetStructures(meta)
except:
- logger.error("error in GetStructures", exc_info=1)
+ self.logger.error("error in GetStructures", exc_info=1)
return lxml.etree.Element("error", type='structure error')
self.validate_structures(meta, structures)
@@ -324,7 +386,8 @@ class Core(Component):
key = (entry.tag, entry.get('name'))
if key in esrcs:
if esrcs[key] != entry.get('altsrc'):
- logger.error("Found inconsistent altsrc mapping for entry %s:%s" % key)
+ self.logger.error("Found inconsistent altsrc mapping "
+ "for entry %s:%s" % key)
else:
esrcs[key] = entry.get('altsrc', None)
del esrcs
@@ -334,15 +397,49 @@ class Core(Component):
self.BindStructure(astruct, meta)
config.append(astruct)
except:
- logger.error("error in BindStructure", exc_info=1)
+ self.logger.error("error in BindStructure", exc_info=1)
self.validate_goals(meta, config)
+ self.client_run_hook("end_client_run", meta)
+
sort_xml(config, key=lambda e: e.get('name'))
- logger.info("Generated config for %s in %.03f seconds" % \
- (client, time.time() - start))
+ self.logger.info("Generated config for %s in %.03f seconds" %
+ (client, time.time() - start))
return config
+ def run(self, **kwargs):
+ """ run the server core """
+ raise NotImplementedError
+
+ def _daemonize(self):
+ child_pid = os.fork()
+ if child_pid != 0:
+ return
+
+ os.setsid()
+
+ child_pid = os.fork()
+ if child_pid != 0:
+ os._exit(0)
+
+ redirect_file = open("/dev/null", "w+")
+ os.dup2(redirect_file.fileno(), sys.__stdin__.fileno())
+ os.dup2(redirect_file.fileno(), sys.__stdout__.fileno())
+ os.dup2(redirect_file.fileno(), sys.__stderr__.fileno())
+
+ os.chdir(os.sep)
+
+ pidfile = open(self.setup['daemon'] or "/dev/null", "w")
+ pidfile.write("%s\n" % os.getpid())
+ pidfile.close()
+
+ return os.getpid()
+
+ def critical_error(self, operation):
+ """ this should be overridden by child classes """
+ self.logger.fatal(operation, exc_info=1)
+
def GetDecisions(self, metadata, mode):
"""Get data for the decision list."""
result = []
@@ -350,15 +447,15 @@ class Core(Component):
try:
result += plugin.GetDecisions(metadata, mode)
except:
- logger.error("Plugin: %s failed to generate decision list" \
- % plugin.name, exc_info=1)
+ self.logger.error("Plugin: %s failed to generate decision list"
+ % plugin.name, exc_info=1)
return result
def build_metadata(self, client_name):
"""Build the metadata structure."""
if not hasattr(self, 'metadata'):
# some threads start before metadata is even loaded
- raise Bcfg2.Server.Plugins.Metadata.MetadataRuntimeError
+ raise Bcfg2.Server.Plugin.MetadataRuntimeError
imd = self.metadata.get_initial_metadata(client_name)
for conn in self.connectors:
grps = conn.get_additional_groups(imd)
@@ -378,102 +475,147 @@ class Core(Component):
try:
plugin.process_statistics(meta, statistics)
except:
- logger.error("Plugin %s failed to process stats from %s" \
- % (plugin.name, meta.hostname),
- exc_info=1)
+ self.logger.error("Plugin %s failed to process stats from "
+ "%s" % (plugin.name, meta.hostname),
+ exc_info=1)
+
+ self.logger.info("Client %s reported state %s" % (client_name,
+ state.get('state')))
+ self.client_run_hook("end_statistics", meta)
+
+ def resolve_client(self, address, cleanup_cache=False, metadata=True):
+ try:
+ client = self.metadata.resolve_client(address,
+ cleanup_cache=cleanup_cache)
+ if metadata:
+ meta = self.build_metadata(client)
+ else:
+ meta = None
+ except Bcfg2.Server.Plugin.MetadataConsistencyError:
+ err = sys.exc_info()[1]
+ self.critical_error("Client metadata resolution error for %s: %s" %
+ (address[0], err))
+ except Bcfg2.Server.Plugin.MetadataRuntimeError:
+ err = sys.exc_info()[1]
+ self.critical_error('Metadata system runtime failure for %s: %s' %
+ (address[0], err))
+ return (client, meta)
+
+ def critical_error(self, operation):
+ """Log and err, traceback and return an xmlrpc fault to client."""
+ self.logger.error(operation, exc_info=1)
+ raise xmlrpclib.Fault(xmlrpclib.APPLICATION_ERROR,
+ "Critical failure: %s" % operation)
+
+ def _get_rmi(self):
+ rmi = dict()
+ if self.plugins:
+ for pname, pinst in list(self.plugins.items()):
+ for mname in pinst.__rmi__:
+ rmi["%s.%s" % (pname, mname)] = getattr(pinst, mname)
+ return rmi
- logger.info("Client %s reported state %s" % (client_name,
- state.get('state')))
# XMLRPC handlers start here
+ @exposed
+ def listMethods(self, address):
+ methods = [name
+ for name, func in inspect.getmembers(self, callable)
+ if getattr(func, "exposed", False)]
+ methods.extend(self._get_rmi().keys())
+ return methods
+
+ @exposed
+ def methodHelp(self, address, method_name):
+ raise NotImplementedError
+
+ @exposed
+ def DeclareVersion(self, address, version):
+ """ declare the client version """
+ client, metadata = self.resolve_client(address)
+ try:
+ self.metadata.set_version(client, version)
+ except (Bcfg2.Server.Plugin.MetadataConsistencyError,
+ Bcfg2.Server.Plugin.MetadataRuntimeError):
+ err = sys.exc_info()[1]
+ self.critical_error("Unable to set version for %s: %s" %
+ (client, err))
+ return True
@exposed
def GetProbes(self, address):
"""Fetch probes for a particular client."""
resp = lxml.etree.Element('probes')
+ client, metadata = self.resolve_client(address, cleanup_cache=True)
try:
- name = self.metadata.resolve_client(address, cleanup_cache=True)
- meta = self.build_metadata(name)
-
for plugin in self.plugins_by_type(Bcfg2.Server.Plugin.Probing):
- for probe in plugin.GetProbes(meta):
+ for probe in plugin.GetProbes(metadata):
resp.append(probe)
- return lxml.etree.tostring(resp, encoding='UTF-8',
- xml_declaration=True)
- except Bcfg2.Server.Plugins.Metadata.MetadataConsistencyError:
- warning = 'Client metadata resolution error for %s' % address[0]
- self.logger.warning(warning)
- raise xmlrpclib.Fault(6, warning + "; check server log")
- except Bcfg2.Server.Plugins.Metadata.MetadataRuntimeError:
- err_msg = 'Metadata system runtime failure'
- self.logger.error(err_msg)
- raise xmlrpclib.Fault(6, err_msg)
+ return lxml.etree.tostring(resp,
+ xml_declaration=False).decode('UTF-8')
except:
- critical_error("Error determining client probes")
+ err = sys.exc_info()[1]
+ self.critical_error("Error determining probes for %s: %s" %
+ (client, err))
@exposed
def RecvProbeData(self, address, probedata):
"""Receive probe data from clients."""
+ client, metadata = self.resolve_client(address)
try:
- name = self.metadata.resolve_client(address)
- meta = self.build_metadata(name)
- except Bcfg2.Server.Plugins.Metadata.MetadataConsistencyError:
- warning = 'Metadata consistency error'
- self.logger.warning(warning)
- raise xmlrpclib.Fault(6, warning)
- # clear dynamic groups
- self.metadata.cgroups[meta.hostname] = []
- try:
- xpdata = lxml.etree.XML(probedata.encode('utf-8'))
+ xpdata = lxml.etree.XML(probedata.encode('utf-8'),
+ parser=Bcfg2.Server.XMLParser)
except:
- self.logger.error("Failed to parse probe data from client %s" % \
- (address[0]))
- return False
+ err = sys.exc_info()[1]
+ self.critical_error("Failed to parse probe data from client %s: %s"
+ % (client, err))
sources = []
[sources.append(data.get('source')) for data in xpdata
if data.get('source') not in sources]
for source in sources:
if source not in self.plugins:
- self.logger.warning("Failed to locate plugin %s" % (source))
+ self.logger.warning("Failed to locate plugin %s" % source)
continue
dl = [data for data in xpdata if data.get('source') == source]
try:
- self.plugins[source].ReceiveData(meta, dl)
+ self.plugins[source].ReceiveData(metadata, dl)
except:
- logger.error("Failed to process probe data from client %s" % \
- (address[0]), exc_info=1)
+ err = sys.exc_info()[1]
+ self.critical_error("Failed to process probe data from client "
+ "%s: %s" %
+ (client, err))
return True
@exposed
def AssertProfile(self, address, profile):
"""Set profile for a client."""
+ client = self.resolve_client(address, metadata=False)[0]
try:
- client = self.metadata.resolve_client(address)
self.metadata.set_profile(client, profile, address)
- except (Bcfg2.Server.Plugins.Metadata.MetadataConsistencyError,
- Bcfg2.Server.Plugins.Metadata.MetadataRuntimeError):
- warning = 'Metadata consistency error'
- self.logger.warning(warning)
- raise xmlrpclib.Fault(6, warning)
+ except (Bcfg2.Server.Plugin.MetadataConsistencyError,
+ Bcfg2.Server.Plugin.MetadataRuntimeError):
+ err = sys.exc_info()[1]
+ self.critical_error("Unable to assert profile for %s: %s" %
+ (client, err))
return True
@exposed
def GetConfig(self, address, checksum=False):
"""Build config for a client."""
+ client = self.resolve_client(address)[0]
try:
- client = self.metadata.resolve_client(address)
config = self.BuildConfiguration(client)
- return lxml.etree.tostring(config, encoding='UTF-8',
- xml_declaration=True)
- except Bcfg2.Server.Plugins.Metadata.MetadataConsistencyError:
- self.logger.warning("Metadata consistency failure for %s" % (address))
- raise xmlrpclib.Fault(6, "Metadata consistency failure")
+ return lxml.etree.tostring(config,
+ xml_declaration=False).decode('UTF-8')
+ except Bcfg2.Server.Plugin.MetadataConsistencyError:
+ self.critical_error("Metadata consistency failure for %s" % client)
@exposed
def RecvStats(self, address, stats):
"""Act on statistics upload."""
- sdata = lxml.etree.XML(stats.encode('utf-8'))
- client = self.metadata.resolve_client(address)
+ client = self.resolve_client(address)[0]
+ sdata = lxml.etree.XML(stats.encode('utf-8'),
+ parser=Bcfg2.Server.XMLParser)
self.process_statistics(client, sdata)
return "<ok/>"
@@ -483,11 +625,17 @@ class Core(Component):
else:
# No ca, so no cert validation can be done
acert = None
- return self.metadata.AuthenticateConnection(acert, user, password, address)
+ return self.metadata.AuthenticateConnection(acert, user, password,
+ address)
@exposed
def GetDecisionList(self, address, mode):
"""Get the data of the decision list."""
- client = self.metadata.resolve_client(address)
- meta = self.build_metadata(client)
- return self.GetDecisions(meta, mode)
+ client, metadata = self.resolve_client(address)
+ return self.GetDecisions(metadata, mode)
+
+ @property
+ def database_available(self):
+ """Is the database configured and available"""
+ return self._database_available
+
diff --git a/src/lib/Bcfg2/Server/FileMonitor.py b/src/lib/Bcfg2/Server/FileMonitor.py
deleted file mode 100644
index d6b313e6b..000000000
--- a/src/lib/Bcfg2/Server/FileMonitor.py
+++ /dev/null
@@ -1,315 +0,0 @@
-"""Bcfg2.Server.FileMonitor provides the support for monitorung files."""
-
-import logging
-import os
-import stat
-from time import sleep, time
-
-logger = logging.getLogger('Bcfg2.Server.FileMonitor')
-
-
-def ShouldIgnore(event):
- """Test if the event should be suppresed."""
- # FIXME should move event suppression out of the core
- if event.filename.split('/')[-1] == '.svn':
- return True
- if event.filename.endswith('~') or \
- event.filename.startswith('#') or event.filename.startswith('.#'):
- #logger.error("Suppressing event for file %s" % (event.filename))
- return True
- return False
-
-
-class Event(object):
- def __init__(self, request_id, filename, code):
- self.requestID = request_id
- self.filename = filename
- self.action = code
-
- def code2str(self):
- """return static code for event"""
- return self.action
-
-available = {}
-
-
-class FileMonitor(object):
- """File Monitor baseclass."""
- def __init__(self, debug=False):
- object.__init__(self)
- self.debug = debug
- self.handles = dict()
-
- def get_event(self):
- return None
-
- def pending(self):
- return False
-
- def fileno(self):
- return 0
-
- def handle_one_event(self, event):
- if ShouldIgnore(event):
- return
- if event.requestID not in self.handles:
- logger.info("Got event for unexpected id %s, file %s" %
- (event.requestID, event.filename))
- return
- if self.debug:
- logger.info("Dispatching event %s %s to obj %s" \
- % (event.code2str(), event.filename,
- self.handles[event.requestID]))
- try:
- self.handles[event.requestID].HandleEvent(event)
- except:
- logger.error("error in handling of gamin event for %s" % \
- (event.filename), exc_info=1)
-
- def handle_event_set(self, lock=None):
- count = 1
- event = self.get_event()
- start = time()
- if lock:
- lock.acquire()
- try:
- self.handle_one_event(event)
- while self.pending():
- self.handle_one_event(self.get_event())
- count += 1
- except:
- pass
- if lock:
- lock.release()
- end = time()
- logger.info("Handled %d events in %.03fs" % (count, (end - start)))
-
- def handle_events_in_interval(self, interval):
- end = time() + interval
- while time() < end:
- if self.pending():
- self.handle_event_set()
- end = time() + interval
- else:
- sleep(0.5)
-
-
-class FamFam(object):
- """The fam object is a set of callbacks for
- file alteration events (FAM support).
- """
-
- def __init__(self):
- object.__init__(self)
- self.fm = _fam.open()
- self.users = {}
- self.handles = {}
- self.debug = False
-
- def fileno(self):
- """Return fam file handle number."""
- return self.fm.fileno()
-
- def handle_event_set(self, _):
- self.Service()
-
- def handle_events_in_interval(self, interval):
- now = time()
- while (time() - now) < interval:
- if self.Service():
- now = time()
-
- def AddMonitor(self, path, obj):
- """Add a monitor to path, installing a callback to obj.HandleEvent."""
- mode = os.stat(path)[stat.ST_MODE]
- if stat.S_ISDIR(mode):
- handle = self.fm.monitorDirectory(path, None)
- else:
- handle = self.fm.monitorFile(path, None)
- self.handles[handle.requestID()] = handle
- if obj != None:
- self.users[handle.requestID()] = obj
- return handle.requestID()
-
- def Service(self, interval=0.50):
- """Handle all fam work."""
- count = 0
- collapsed = 0
- rawevents = []
- start = time()
- now = time()
- while (time() - now) < interval:
- if self.fm.pending():
- while self.fm.pending():
- count += 1
- rawevents.append(self.fm.nextEvent())
- now = time()
- unique = []
- bookkeeping = []
- for event in rawevents:
- if ShouldIgnore(event):
- continue
- if event.code2str() != 'changed':
- # process all non-change events
- unique.append(event)
- else:
- if (event.filename, event.requestID) not in bookkeeping:
- bookkeeping.append((event.filename, event.requestID))
- unique.append(event)
- else:
- collapsed += 1
- for event in unique:
- if event.requestID in self.users:
- try:
- self.users[event.requestID].HandleEvent(event)
- except:
- logger.error("handling event for file %s" % (event.filename), exc_info=1)
- end = time()
- logger.info("Processed %s fam events in %03.03f seconds. %s coalesced" %
- (count, (end - start), collapsed))
- return count
-
-
-class Fam(FileMonitor):
- """
- The fam object is a set of callbacks for
- file alteration events (FAM support).
- """
-
- def __init__(self, debug=False):
- FileMonitor.__init__(self, debug)
- self.fm = _fam.open()
-
- def fileno(self):
- return self.fm.fileno()
-
- def AddMonitor(self, path, obj):
- """Add a monitor to path, installing a callback to obj.HandleEvent."""
- mode = os.stat(path)[stat.ST_MODE]
- if stat.S_ISDIR(mode):
- handle = self.fm.monitorDirectory(path, None)
- else:
- handle = self.fm.monitorFile(path, None)
- if obj != None:
- self.handles[handle.requestID()] = obj
- return handle.requestID()
-
- def pending(self):
- return self.fm.pending()
-
- def get_event(self):
- return self.fm.nextEvent()
-
-
-class Pseudo(FileMonitor):
- """
- The fam object is a set of callbacks for
- file alteration events (static monitor support).
- """
-
- def __init__(self, debug=False):
- FileMonitor.__init__(self, debug=False)
- self.pending_events = []
-
- def pending(self):
- return len(self.pending_events) != 0
-
- def get_event(self):
- return self.pending_events.pop()
-
- def AddMonitor(self, path, obj):
- """add a monitor to path, installing a callback to obj.HandleEvent"""
- handleID = len(list(self.handles.keys()))
- mode = os.stat(path)[stat.ST_MODE]
- handle = Event(handleID, path, 'exists')
- if stat.S_ISDIR(mode):
- dirList = os.listdir(path)
- self.pending_events.append(handle)
- for includedFile in dirList:
- self.pending_events.append(Event(handleID,
- includedFile,
- 'exists'))
- self.pending_events.append(Event(handleID, path, 'endExist'))
- else:
- self.pending_events.append(Event(handleID, path, 'exists'))
- if obj != None:
- self.handles[handleID] = obj
- return handleID
-
-
-try:
- from gamin import WatchMonitor, GAMCreated, GAMExists, GAMEndExist, \
- GAMChanged, GAMDeleted, GAMMoved
-
- class GaminEvent(Event):
- """
- This class provides an event analogous to
- python-fam events based on gamin sources.
- """
- def __init__(self, request_id, filename, code):
- Event.__init__(self, request_id, filename, code)
- action_map = {GAMCreated: 'created', GAMExists: 'exists',
- GAMChanged: 'changed', GAMDeleted: 'deleted',
- GAMEndExist: 'endExist', GAMMoved: 'moved'}
- if code in action_map:
- self.action = action_map[code]
-
- class Gamin(FileMonitor):
- """
- The fam object is a set of callbacks for
- file alteration events (Gamin support)
- """
- def __init__(self, debug=False):
- FileMonitor.__init__(self, debug)
- self.mon = WatchMonitor()
- self.counter = 0
- self.events = []
-
- def fileno(self):
- return self.mon.get_fd()
-
- def queue(self, path, action, request_id):
- """queue up the event for later handling"""
- self.events.append(GaminEvent(request_id, path, action))
-
- def AddMonitor(self, path, obj):
- """Add a monitor to path, installing a callback to obj.HandleEvent."""
- handle = self.counter
- self.counter += 1
- mode = os.stat(path)[stat.ST_MODE]
-
- # Flush queued gamin events
- while self.mon.event_pending():
- self.mon.handle_one_event()
-
- if stat.S_ISDIR(mode):
- self.mon.watch_directory(path, self.queue, handle)
- else:
- self.mon.watch_file(path, self.queue, handle)
- self.handles[handle] = obj
- return handle
-
- def pending(self):
- return len(self.events) > 0 or self.mon.event_pending()
-
- def get_event(self):
- if self.mon.event_pending():
- self.mon.handle_one_event()
- return self.events.pop(0)
-
- available['gamin'] = Gamin
-except ImportError:
- # fall back to _fam
- pass
-
-try:
- import _fam
- available['fam'] = FamFam
-except ImportError:
- pass
-available['pseudo'] = Pseudo
-
-for fdrv in ['gamin', 'fam', 'pseudo']:
- if fdrv in available:
- available['default'] = available[fdrv]
- break
diff --git a/src/lib/Bcfg2/Server/FileMonitor/Fam.py b/src/lib/Bcfg2/Server/FileMonitor/Fam.py
new file mode 100644
index 000000000..1a00fffa0
--- /dev/null
+++ b/src/lib/Bcfg2/Server/FileMonitor/Fam.py
@@ -0,0 +1,82 @@
+""" Fam provides FAM support for file alteration events """
+
+import os
+import _fam
+import stat
+import logging
+from time import time
+from Bcfg2.Server.FileMonitor import FileMonitor
+
+logger = logging.getLogger(__name__)
+
+class Fam(FileMonitor):
+ __priority__ = 90
+
+ def __init__(self, ignore=None, debug=False):
+ FileMonitor.__init__(self, ignore=ignore, debug=debug)
+ self.fm = _fam.open()
+ self.users = {}
+
+ def fileno(self):
+ """Return fam file handle number."""
+ return self.fm.fileno()
+
+ def handle_event_set(self, _):
+ self.Service()
+
+ def handle_events_in_interval(self, interval):
+ now = time()
+ while (time() - now) < interval:
+ if self.Service():
+ now = time()
+
+ def AddMonitor(self, path, obj):
+ """Add a monitor to path, installing a callback to obj.HandleEvent."""
+ mode = os.stat(path)[stat.ST_MODE]
+ if stat.S_ISDIR(mode):
+ handle = self.fm.monitorDirectory(path, None)
+ else:
+ handle = self.fm.monitorFile(path, None)
+ self.handles[handle.requestID()] = handle
+ if obj != None:
+ self.users[handle.requestID()] = obj
+ return handle.requestID()
+
+ def Service(self, interval=0.50):
+ """Handle all fam work."""
+ count = 0
+ collapsed = 0
+ rawevents = []
+ start = time()
+ now = time()
+ while (time() - now) < interval:
+ if self.fm.pending():
+ while self.fm.pending():
+ count += 1
+ rawevents.append(self.fm.nextEvent())
+ now = time()
+ unique = []
+ bookkeeping = []
+ for event in rawevents:
+ if self.should_ignore(event):
+ continue
+ if event.code2str() != 'changed':
+ # process all non-change events
+ unique.append(event)
+ else:
+ if (event.filename, event.requestID) not in bookkeeping:
+ bookkeeping.append((event.filename, event.requestID))
+ unique.append(event)
+ else:
+ collapsed += 1
+ for event in unique:
+ if event.requestID in self.users:
+ try:
+ self.users[event.requestID].HandleEvent(event)
+ except:
+ logger.error("Handling event for file %s" % event.filename,
+ exc_info=1)
+ end = time()
+ logger.info("Processed %s fam events in %03.03f seconds. %s coalesced" %
+ (count, (end - start), collapsed))
+ return count
diff --git a/src/lib/Bcfg2/Server/FileMonitor/Gamin.py b/src/lib/Bcfg2/Server/FileMonitor/Gamin.py
new file mode 100644
index 000000000..60f80c9c3
--- /dev/null
+++ b/src/lib/Bcfg2/Server/FileMonitor/Gamin.py
@@ -0,0 +1,64 @@
+""" Gamin driver for file alteration events """
+
+import os
+import stat
+import logging
+from gamin import WatchMonitor, GAMCreated, GAMExists, GAMEndExist, \
+ GAMChanged, GAMDeleted
+from Bcfg2.Server.FileMonitor import Event, FileMonitor
+
+logger = logging.getLogger(__name__)
+
+class GaminEvent(Event):
+ """
+ This class provides an event analogous to
+ python-fam events based on gamin sources.
+ """
+ action_map = {GAMCreated: 'created', GAMExists: 'exists',
+ GAMChanged: 'changed', GAMDeleted: 'deleted',
+ GAMEndExist: 'endExist'}
+
+ def __init__(self, request_id, filename, code):
+ Event.__init__(self, request_id, filename, code)
+ if code in self.action_map:
+ self.action = self.action_map[code]
+
+class Gamin(FileMonitor):
+ __priority__ = 10
+
+ def __init__(self, ignore=None, debug=False):
+ FileMonitor.__init__(self, ignore=ignore, debug=debug)
+ self.mon = WatchMonitor()
+ self.counter = 0
+
+ def fileno(self):
+ return self.mon.get_fd()
+
+ def queue(self, path, action, request_id):
+ """queue up the event for later handling"""
+ self.events.append(GaminEvent(request_id, path, action))
+
+ def AddMonitor(self, path, obj):
+ """Add a monitor to path, installing a callback to obj.HandleEvent."""
+ handle = self.counter
+ self.counter += 1
+ mode = os.stat(path)[stat.ST_MODE]
+
+ # Flush queued gamin events
+ while self.mon.event_pending():
+ self.mon.handle_one_event()
+
+ if stat.S_ISDIR(mode):
+ self.mon.watch_directory(path, self.queue, handle)
+ else:
+ self.mon.watch_file(path, self.queue, handle)
+ self.handles[handle] = obj
+ return handle
+
+ def pending(self):
+ return FileMonitor.pending(self) or self.mon.event_pending()
+
+ def get_event(self):
+ if self.mon.event_pending():
+ self.mon.handle_one_event()
+ return FileMonitor.get_event(self)
diff --git a/src/lib/Bcfg2/Server/FileMonitor/Inotify.py b/src/lib/Bcfg2/Server/FileMonitor/Inotify.py
new file mode 100644
index 000000000..880ac7e8d
--- /dev/null
+++ b/src/lib/Bcfg2/Server/FileMonitor/Inotify.py
@@ -0,0 +1,126 @@
+""" Inotify driver for file alteration events """
+
+import logging
+import operator
+import os
+import pyinotify
+import sys
+from Bcfg2.Bcfg2Py3k import reduce
+from Bcfg2.Server.FileMonitor import Event
+from Bcfg2.Server.FileMonitor.Pseudo import Pseudo
+
+logger = logging.getLogger(__name__)
+
+class Inotify(Pseudo, pyinotify.ProcessEvent):
+ __priority__ = 1
+ action_map = {pyinotify.IN_CREATE: 'created',
+ pyinotify.IN_DELETE: 'deleted',
+ pyinotify.IN_MODIFY: 'changed',
+ pyinotify.IN_MOVED_FROM: 'deleted',
+ pyinotify.IN_MOVED_TO: 'created'}
+ mask = reduce(lambda x, y: x | y, action_map.keys())
+
+ def __init__(self, ignore=None, debug=False):
+ Pseudo.__init__(self, ignore=ignore, debug=debug)
+ self.wm = pyinotify.WatchManager()
+ self.notifier = pyinotify.ThreadedNotifier(self.wm, self)
+ self.notifier.start()
+ self.event_filter = dict()
+ self.watches_by_path = dict()
+
+ def fileno(self):
+ return self.wm.get_fd()
+
+ def process_default(self, ievent):
+ action = ievent.maskname
+ for amask, aname in self.action_map.items():
+ if ievent.mask & amask:
+ action = aname
+ break
+ try:
+ watch = self.wm.watches[ievent.wd]
+ except KeyError:
+ err = sys.exc_info()[1]
+ logger.error("Error handling event for %s: Watch %s not found" %
+ (ievent.pathname, ievent.wd))
+ return
+ # FAM-style file monitors return the full path to the parent
+ # directory that is being watched, relative paths to anything
+ # contained within the directory. since we can't use inotify
+ # to watch files directly, we have to sort of guess at whether
+ # this watch was actually added on a file (and thus is in
+ # self.event_filter because we're filtering out other events
+ # on the directory) or was added directly on a directory.
+ if (watch.path == ievent.pathname or ievent.wd in self.event_filter):
+ path = ievent.pathname
+ else:
+ # relative path
+ path = os.path.basename(ievent.pathname)
+ # figure out the handleID. start with the path of the event;
+ # that should catch events on files that are watched directly.
+ # (we have to watch the directory that a file is in, so this
+ # lets us handle events on different files in the same
+ # directory -- and thus under the same watch -- with different
+ # objects.) If the path to the event doesn't have a handler,
+ # use the path of the watch itself.
+ handleID = ievent.pathname
+ if handleID not in self.handles:
+ handleID = watch.path
+ evt = Event(handleID, path, action)
+
+ if (ievent.wd not in self.event_filter or
+ ievent.pathname in self.event_filter[ievent.wd]):
+ self.events.append(evt)
+
+ def AddMonitor(self, path, obj):
+ # strip trailing slashes
+ path = path.rstrip("/")
+ if not os.path.isdir(path):
+ # inotify is a little wonky about watching files. for
+ # instance, if you watch /tmp/foo, and then do 'mv
+ # /tmp/bar /tmp/foo', it processes that as a deletion of
+ # /tmp/foo (which it technically _is_, but that's rather
+ # useless -- we care that /tmp/foo changed, not that it
+ # was first deleted and then created). In order to
+ # effectively watch a file, we have to watch the directory
+ # it's in, and filter out events for other files in the
+ # same directory that are not similarly watched.
+ # watch_transient_file requires a Processor _class_, not
+ # an object, so we can't have this object handle events,
+ # which is Wrong, so we can't use that function.
+ watch_path = os.path.dirname(path)
+ is_dir = False
+ else:
+ watch_path = path
+ is_dir = True
+
+ # see if this path is already being watched
+ try:
+ wd = self.watches_by_path[watch_path]
+ except KeyError:
+ wd = self.wm.add_watch(watch_path, self.mask,
+ quiet=False)[watch_path]
+ self.watches_by_path[watch_path] = wd
+
+ produce_exists = True
+ if not is_dir:
+ if wd not in self.event_filter:
+ self.event_filter[wd] = [path]
+ elif path not in self.event_filter[wd]:
+ self.event_filter[wd].append(path)
+ else:
+ # we've been asked to watch a file that we're already
+ # watching, so we don't need to produce 'exists'
+ # events
+ produce_exists = False
+
+ # inotify doesn't produce initial 'exists' events, so we
+ # inherit from Pseudo to produce those
+ if produce_exists:
+ return Pseudo.AddMonitor(self, path, obj, handleID=path)
+ else:
+ self.handles[path] = obj
+ return path
+
+ def shutdown(self):
+ self.notifier.stop()
diff --git a/src/lib/Bcfg2/Server/FileMonitor/Pseudo.py b/src/lib/Bcfg2/Server/FileMonitor/Pseudo.py
new file mode 100644
index 000000000..089d4cf0f
--- /dev/null
+++ b/src/lib/Bcfg2/Server/FileMonitor/Pseudo.py
@@ -0,0 +1,25 @@
+""" Pseudo provides static monitor support for file alteration events """
+
+import os
+import logging
+from Bcfg2.Server.FileMonitor import FileMonitor, Event
+
+logger = logging.getLogger(__name__)
+
+class Pseudo(FileMonitor):
+ __priority__ = 99
+
+ def AddMonitor(self, path, obj, handleID=None):
+ """add a monitor to path, installing a callback to obj.HandleEvent"""
+ if handleID is None:
+ handleID = len(list(self.handles.keys()))
+ self.events.append(Event(handleID, path, 'exists'))
+ if os.path.isdir(path):
+ dirList = os.listdir(path)
+ for includedFile in dirList:
+ self.events.append(Event(handleID, includedFile, 'exists'))
+ self.events.append(Event(handleID, path, 'endExist'))
+
+ if obj != None:
+ self.handles[handleID] = obj
+ return handleID
diff --git a/src/lib/Bcfg2/Server/FileMonitor/__init__.py b/src/lib/Bcfg2/Server/FileMonitor/__init__.py
new file mode 100644
index 000000000..c490acc81
--- /dev/null
+++ b/src/lib/Bcfg2/Server/FileMonitor/__init__.py
@@ -0,0 +1,143 @@
+"""Bcfg2.Server.FileMonitor provides the support for monitoring files."""
+
+import os
+import sys
+import fnmatch
+import logging
+import pkgutil
+from time import sleep, time
+
+logger = logging.getLogger(__name__)
+
+class Event(object):
+ def __init__(self, request_id, filename, code):
+ self.requestID = request_id
+ self.filename = filename
+ self.action = code
+
+ def code2str(self):
+ """return static code for event"""
+ return self.action
+
+ def __str__(self):
+ return "%s: %s %s" % (self.__class__.__name__,
+ self.filename, self.action)
+
+ def __repr__(self):
+ return "%s (request ID %s)" % (str(self), self.requestID)
+
+
+class FileMonitor(object):
+ """File Monitor baseclass."""
+ def __init__(self, ignore=None, debug=False):
+ object.__init__(self)
+ self.debug = debug
+ self.handles = dict()
+ self.events = []
+ if ignore is None:
+ ignore = []
+ self.ignore = ignore
+
+ def __str__(self):
+ return "%s: %s" % (__name__, self.__class__.__name__)
+
+ def __repr__(self):
+ return "%s (%s events, fd %s)" % (str(self), len(self.events), self.fileno)
+
+ def debug_log(self, msg):
+ if self.debug:
+ logger.info(msg)
+
+ def should_ignore(self, event):
+ for pattern in self.ignore:
+ if (fnmatch.fnmatch(event.filename, pattern) or
+ fnmatch.fnmatch(os.path.split(event.filename)[-1], pattern)):
+ self.debug_log("Ignoring %s" % event)
+ return True
+ return False
+
+ def pending(self):
+ return bool(self.events)
+
+ def get_event(self):
+ return self.events.pop(0)
+
+ def fileno(self):
+ return 0
+
+ def handle_one_event(self, event):
+ if self.should_ignore(event):
+ return
+ if event.requestID not in self.handles:
+ logger.info("Got event for unexpected id %s, file %s" %
+ (event.requestID, event.filename))
+ return
+ self.debug_log("Dispatching event %s %s to obj %s" %
+ (event.code2str(), event.filename,
+ self.handles[event.requestID]))
+ try:
+ self.handles[event.requestID].HandleEvent(event)
+ except:
+ err = sys.exc_info()[1]
+ logger.error("Error in handling of event %s for %s: %s" %
+ (event.code2str(), event.filename, err))
+
+ def handle_event_set(self, lock=None):
+ count = 1
+ event = self.get_event()
+ start = time()
+ if lock:
+ lock.acquire()
+ try:
+ self.handle_one_event(event)
+ while self.pending():
+ self.handle_one_event(self.get_event())
+ count += 1
+ except:
+ pass
+ if lock:
+ lock.release()
+ end = time()
+ logger.info("Handled %d events in %.03fs" % (count, (end - start)))
+
+ def handle_events_in_interval(self, interval):
+ end = time() + interval
+ while time() < end:
+ if self.pending():
+ self.handle_event_set()
+ end = time() + interval
+ else:
+ sleep(0.5)
+
+ def shutdown(self):
+ pass
+
+
+available = dict()
+
+# TODO: loading the monitor drivers should be automatic
+from Bcfg2.Server.FileMonitor.Pseudo import Pseudo
+available['pseudo'] = Pseudo
+
+try:
+ from Bcfg2.Server.FileMonitor.Fam import Fam
+ available['fam'] = Fam
+except ImportError:
+ pass
+
+try:
+ from Bcfg2.Server.FileMonitor.Gamin import Gamin
+ available['gamin'] = Gamin
+except ImportError:
+ pass
+
+try:
+ from Bcfg2.Server.FileMonitor.Inotify import Inotify
+ available['inotify'] = Inotify
+except ImportError:
+ pass
+
+for fdrv in sorted(available.keys(), key=lambda k: available[k].__priority__):
+ if fdrv in available:
+ available['default'] = available[fdrv]
+ break
diff --git a/src/lib/Bcfg2/Server/Hostbase/backends.py b/src/lib/Bcfg2/Server/Hostbase/backends.py
index ecaf3c109..cfa9e1e16 100644
--- a/src/lib/Bcfg2/Server/Hostbase/backends.py
+++ b/src/lib/Bcfg2/Server/Hostbase/backends.py
@@ -18,21 +18,16 @@ from nisauth import *
## uid=l.badge_no
## )
## #fixme: need to add this user session obj to session
-## #print str(ldap_user)
## user,created = User.objects.get_or_create(username=username)
-## #print user
-## #print "created " + str(created)
## return user
## except LDAPAUTHError,e:
-## #print str(e)
## return None
## def get_user(self,user_id):
## try:
## return User.objects.get(pk=user_id)
## except User.DoesNotExist, e:
-## print str(e)
## return None
diff --git a/src/lib/Bcfg2/Server/Hostbase/ldapauth.py b/src/lib/Bcfg2/Server/Hostbase/ldapauth.py
index f3db26f67..fc2ca1bf1 100644
--- a/src/lib/Bcfg2/Server/Hostbase/ldapauth.py
+++ b/src/lib/Bcfg2/Server/Hostbase/ldapauth.py
@@ -144,7 +144,6 @@ class ldapauth(object):
def member_of(self):
"""See if this user is in our group that is allowed to login"""
m = [g for g in self.memberOf if g == self.check_member_of]
- #print m
if len(m) == 1:
return True
else:
diff --git a/src/lib/Bcfg2/Server/Lint/Bundles.py b/src/lib/Bcfg2/Server/Lint/Bundles.py
deleted file mode 100644
index e6b6307f2..000000000
--- a/src/lib/Bcfg2/Server/Lint/Bundles.py
+++ /dev/null
@@ -1,54 +0,0 @@
-import lxml.etree
-import Bcfg2.Server.Lint
-
-class Bundles(Bcfg2.Server.Lint.ServerPlugin):
- """ Perform various bundle checks """
- def Run(self):
- """ run plugin """
- if 'Bundler' in self.core.plugins:
- self.missing_bundles()
- for bundle in self.core.plugins['Bundler'].entries.values():
- if self.HandlesFile(bundle.name):
- if (not Bcfg2.Server.Plugins.Bundler.have_genshi or
- type(bundle) is not
- Bcfg2.Server.Plugins.SGenshi.SGenshiTemplateFile):
- self.bundle_names(bundle)
-
- @classmethod
- def Errors(cls):
- return {"bundle-not-found":"error",
- "inconsistent-bundle-name":"warning"}
-
- def missing_bundles(self):
- """ find bundles listed in Metadata but not implemented in Bundler """
- if self.files is None:
- # when given a list of files on stdin, this check is
- # useless, so skip it
- groupdata = self.metadata.groups_xml.xdata
- ref_bundles = set([b.get("name")
- for b in groupdata.findall("//Bundle")])
-
- allbundles = self.core.plugins['Bundler'].entries.keys()
- for bundle in ref_bundles:
- xmlbundle = "%s.xml" % bundle
- genshibundle = "%s.genshi" % bundle
- if (xmlbundle not in allbundles and
- genshibundle not in allbundles):
- self.LintError("bundle-not-found",
- "Bundle %s referenced, but does not exist" %
- bundle)
-
- def bundle_names(self, bundle):
- """ verify bundle name attribute matches filename """
- try:
- xdata = lxml.etree.XML(bundle.data)
- except AttributeError:
- # genshi template
- xdata = lxml.etree.parse(bundle.template.filepath).getroot()
-
- fname = bundle.name.split('Bundler/')[1].split('.')[0]
- bname = xdata.get('name')
- if fname != bname:
- self.LintError("inconsistent-bundle-name",
- "Inconsistent bundle name: filename is %s, bundle name is %s" %
- (fname, bname))
diff --git a/src/lib/Bcfg2/Server/Lint/Comments.py b/src/lib/Bcfg2/Server/Lint/Comments.py
index f5d0e265f..59d18fc57 100644
--- a/src/lib/Bcfg2/Server/Lint/Comments.py
+++ b/src/lib/Bcfg2/Server/Lint/Comments.py
@@ -1,6 +1,7 @@
-import os.path
+import os
import lxml.etree
import Bcfg2.Server.Lint
+from Bcfg2.Server import XI, XI_NAMESPACE
from Bcfg2.Server.Plugins.Cfg.CfgPlaintextGenerator import CfgPlaintextGenerator
from Bcfg2.Server.Plugins.Cfg.CfgGenshiGenerator import CfgGenshiGenerator
from Bcfg2.Server.Plugins.Cfg.CfgCheetahGenerator import CfgCheetahGenerator
@@ -186,7 +187,7 @@ class Comments(Bcfg2.Server.Lint.ServerPlugin):
path = os.path.join(self.metadata.data, mfile)
if path in self.files:
xdata = lxml.etree.parse(path)
- for el in xdata.findall('./{http://www.w3.org/2001/XInclude}include'):
+ for el in xdata.findall('./%sinclude' % XI_NAMESPACE):
if not self.has_all_xincludes(el.get('href')):
self.LintError("broken-xinclude-chain",
"Broken XInclude chain: could not include %s" % path)
diff --git a/src/lib/Bcfg2/Server/Lint/Deltas.py b/src/lib/Bcfg2/Server/Lint/Deltas.py
deleted file mode 100644
index 114f2e348..000000000
--- a/src/lib/Bcfg2/Server/Lint/Deltas.py
+++ /dev/null
@@ -1,25 +0,0 @@
-import Bcfg2.Server.Lint
-from Bcfg2.Server.Plugins.Cfg import CfgFilter
-
-class Deltas(Bcfg2.Server.Lint.ServerPlugin):
- """ Warn about usage of .cat and .diff files """
-
- def Run(self):
- """ run plugin """
- if 'Cfg' in self.core.plugins:
- cfg = self.core.plugins['Cfg']
- for basename, entry in list(cfg.entries.items()):
- self.check_entry(basename, entry)
-
- @classmethod
- def Errors(cls):
- return {"cat-file-used":"warning",
- "diff-file-used":"warning"}
-
- def check_entry(self, basename, entry):
- for fname, processor in entry.entries.items():
- if self.HandlesFile(fname) and isinstance(processor, CfgFilter):
- extension = fname.split(".")[-1]
- self.LintError("%s-file-used" % extension,
- "%s file used on %s: %s" %
- (extension, basename, fname))
diff --git a/src/lib/Bcfg2/Server/Lint/Duplicates.py b/src/lib/Bcfg2/Server/Lint/Duplicates.py
index ee6b7a2e6..60a02ffb9 100644
--- a/src/lib/Bcfg2/Server/Lint/Duplicates.py
+++ b/src/lib/Bcfg2/Server/Lint/Duplicates.py
@@ -1,6 +1,7 @@
-import os.path
+import os
import lxml.etree
import Bcfg2.Server.Lint
+from Bcfg2.Server import XI, XI_NAMESPACE
class Duplicates(Bcfg2.Server.Lint.ServerPlugin):
""" Find duplicate clients, groups, etc. """
@@ -80,7 +81,7 @@ class Duplicates(Bcfg2.Server.Lint.ServerPlugin):
path = os.path.join(self.metadata.data, mfile)
if path in self.files:
xdata = lxml.etree.parse(path)
- for el in xdata.findall('./{http://www.w3.org/2001/XInclude}include'):
+ for el in xdata.findall('./%sinclude' % XI_NAMESPACE):
if not self.has_all_xincludes(el.get('href')):
self.LintError("broken-xinclude-chain",
"Broken XInclude chain: could not include %s" % path)
diff --git a/src/lib/Bcfg2/Server/Lint/Genshi.py b/src/lib/Bcfg2/Server/Lint/Genshi.py
index b6007161e..74142b446 100755
--- a/src/lib/Bcfg2/Server/Lint/Genshi.py
+++ b/src/lib/Bcfg2/Server/Lint/Genshi.py
@@ -1,3 +1,4 @@
+import sys
import genshi.template
import Bcfg2.Server.Lint
diff --git a/src/lib/Bcfg2/Server/Lint/GroupNames.py b/src/lib/Bcfg2/Server/Lint/GroupNames.py
new file mode 100644
index 000000000..5df98a30e
--- /dev/null
+++ b/src/lib/Bcfg2/Server/Lint/GroupNames.py
@@ -0,0 +1,78 @@
+import os
+import re
+import Bcfg2.Server.Lint
+try:
+ from Bcfg2.Server.Plugins.Bundler import BundleTemplateFile
+ has_genshi = True
+except ImportError:
+ has_genshi = False
+
+class GroupNames(Bcfg2.Server.Lint.ServerPlugin):
+ """ ensure that all named groups are valid group names """
+ pattern = r'\S+$'
+ valid = re.compile(r'^' + pattern)
+
+ def Run(self):
+ self.check_metadata()
+ if 'Rules' in self.core.plugins:
+ self.check_rules()
+ if 'Bundler' in self.core.plugins:
+ self.check_bundles()
+ if 'GroupPatterns' in self.core.plugins:
+ self.check_grouppatterns()
+ if 'Cfg' in self.core.plugins:
+ self.check_cfg()
+
+ @classmethod
+ def Errors(cls):
+ return {"invalid-group-name": "error"}
+
+ def check_rules(self):
+ for rules in self.core.plugins['Rules'].entries.values():
+ if not self.HandlesFile(rules.name):
+ continue
+ xdata = rules.pnode.data
+ self.check_entries(xdata.xpath("//Group"),
+ os.path.join(self.config['repo'], rules.name))
+
+ def check_bundles(self):
+ """ check bundles for invalid group names """
+ for bundle in self.core.plugins['Bundler'].entries.values():
+ if (self.HandlesFile(bundle.name) and
+ (not has_genshi or
+ not isinstance(bundle, BundleTemplateFile))):
+ self.check_entries(bundle.xdata.xpath("//Group"),
+ bundle.name)
+
+ def check_metadata(self):
+ self.check_entries(self.metadata.groups_xml.xdata.xpath("//Group"),
+ os.path.join(self.config['repo'],
+ self.metadata.groups_xml.name))
+
+ def check_grouppatterns(self):
+ cfg = self.core.plugins['GroupPatterns'].config
+ if not self.HandlesFile(cfg.name):
+ return
+ for grp in cfg.xdata.xpath('//GroupPattern/Group'):
+ if not self.valid.search(grp.text):
+ self.LintError("invalid-group-name",
+ "Invalid group name in %s: %s" %
+ (cfg.name, self.RenderXML(grp, keep_text=True)))
+
+ def check_cfg(self):
+ for root, dirs, files in os.walk(self.core.plugins['Cfg'].data):
+ for fname in files:
+ basename = os.path.basename(root)
+ if (re.search(r'^%s\.G\d\d_' % basename, fname) and
+ not re.search(r'^%s\.G\d\d_' % basename + self.pattern,
+ fname)):
+ self.LintError("invalid-group-name",
+ "Invalid group name referenced in %s" %
+ os.path.join(root, fname))
+
+ def check_entries(self, entries, fname):
+ for grp in entries:
+ if not self.valid.search(grp.get("name")):
+ self.LintError("invalid-group-name",
+ "Invalid group name in %s: %s" %
+ (fname, self.RenderXML(grp)))
diff --git a/src/lib/Bcfg2/Server/Lint/GroupPatterns.py b/src/lib/Bcfg2/Server/Lint/GroupPatterns.py
deleted file mode 100644
index 431ba4056..000000000
--- a/src/lib/Bcfg2/Server/Lint/GroupPatterns.py
+++ /dev/null
@@ -1,35 +0,0 @@
-import sys
-import Bcfg2.Server.Lint
-from Bcfg2.Server.Plugins.GroupPatterns import PatternMap
-
-class GroupPatterns(Bcfg2.Server.Lint.ServerPlugin):
- """ Check Genshi templates for syntax errors """
-
- def Run(self):
- """ run plugin """
- if 'GroupPatterns' in self.core.plugins:
- cfg = self.core.plugins['GroupPatterns'].config
- for entry in cfg.xdata.xpath('//GroupPattern'):
- groups = [g.text for g in entry.findall('Group')]
- self.check(entry, groups, ptype='NamePattern')
- self.check(entry, groups, ptype='NameRange')
-
- @classmethod
- def Errors(cls):
- return {"pattern-fails-to-initialize":"error"}
-
- def check(self, entry, groups, ptype="NamePattern"):
- if ptype == "NamePattern":
- pmap = lambda p: PatternMap(p, None, groups)
- else:
- pmap = lambda p: PatternMap(None, p, groups)
-
- for el in entry.findall(ptype):
- pat = el.text
- try:
- pmap(pat)
- except:
- err = sys.exc_info()[1]
- self.LintError("pattern-fails-to-initialize",
- "Failed to initialize %s %s for %s: %s" %
- (ptype, pat, entry.get('pattern'), err))
diff --git a/src/lib/Bcfg2/Server/Lint/InfoXML.py b/src/lib/Bcfg2/Server/Lint/InfoXML.py
index db6aeea73..5e4e21e18 100644
--- a/src/lib/Bcfg2/Server/Lint/InfoXML.py
+++ b/src/lib/Bcfg2/Server/Lint/InfoXML.py
@@ -1,28 +1,41 @@
-import os.path
+import os
import Bcfg2.Options
import Bcfg2.Server.Lint
from Bcfg2.Server.Plugins.Cfg.CfgInfoXML import CfgInfoXML
+from Bcfg2.Server.Plugins.Cfg.CfgLegacyInfo import CfgLegacyInfo
class InfoXML(Bcfg2.Server.Lint.ServerPlugin):
""" ensure that all config files have an info.xml file"""
def Run(self):
- if 'Cfg' in self.core.plugins:
- for filename, entryset in self.core.plugins['Cfg'].entries.items():
- infoxml_fname = os.path.join(entryset.path, "info.xml")
- if self.HandlesFile(infoxml_fname):
- found = False
- for entry in entryset.entries.values():
- if isinstance(entry, CfgInfoXML):
- self.check_infoxml(infoxml_fname,
- entry.infoxml.pnode.data)
- found = True
- if not found:
- self.LintError("no-infoxml",
- "No info.xml found for %s" % filename)
+ if 'Cfg' not in self.core.plugins:
+ return
+
+ for filename, entryset in self.core.plugins['Cfg'].entries.items():
+ infoxml_fname = os.path.join(entryset.path, "info.xml")
+ if self.HandlesFile(infoxml_fname):
+ found = False
+ for entry in entryset.entries.values():
+ if isinstance(entry, CfgInfoXML):
+ self.check_infoxml(infoxml_fname,
+ entry.infoxml.pnode.data)
+ found = True
+ if not found:
+ self.LintError("no-infoxml",
+ "No info.xml found for %s" % filename)
+
+ for entry in entryset.entries.values():
+ if isinstance(entry, CfgLegacyInfo):
+ if not self.HandlesFile(entry.path):
+ continue
+ self.LintError("deprecated-info-file",
+ "Deprecated %s file found at %s" %
+ (os.path.basename(entry.name),
+ entry.path))
@classmethod
def Errors(cls):
return {"no-infoxml":"warning",
+ "deprecated-info-file":"warning",
"paranoid-false":"warning",
"broken-xinclude-chain":"warning",
"required-infoxml-attrs-missing":"error"}
diff --git a/src/lib/Bcfg2/Server/Lint/Pkgmgr.py b/src/lib/Bcfg2/Server/Lint/Pkgmgr.py
deleted file mode 100644
index ceb46238a..000000000
--- a/src/lib/Bcfg2/Server/Lint/Pkgmgr.py
+++ /dev/null
@@ -1,38 +0,0 @@
-import glob
-import lxml.etree
-import Bcfg2.Server.Lint
-
-class Pkgmgr(Bcfg2.Server.Lint.ServerlessPlugin):
- """ find duplicate Pkgmgr entries with the same priority """
- def Run(self):
- pset = set()
- for pfile in glob.glob("%s/Pkgmgr/*.xml" % self.config['repo']):
- if self.HandlesFile(pfile):
- xdata = lxml.etree.parse(pfile).getroot()
- # get priority, type, group
- priority = xdata.get('priority')
- ptype = xdata.get('type')
- for pkg in xdata.xpath("//Package"):
- if pkg.getparent().tag == 'Group':
- grp = pkg.getparent().get('name')
- if (type(grp) is not str and
- grp.getparent().tag == 'Group'):
- pgrp = grp.getparent().get('name')
- else:
- pgrp = 'none'
- else:
- grp = 'none'
- pgrp = 'none'
- ptuple = (pkg.get('name'), priority, ptype, grp, pgrp)
- # check if package is already listed with same
- # priority, type, grp
- if ptuple in pset:
- self.LintError("duplicate-package",
- "Duplicate Package %s, priority:%s, type:%s" %
- (pkg.get('name'), priority, ptype))
- else:
- pset.add(ptuple)
-
- @classmethod
- def Errors(cls):
- return {"duplicate-packages":"error"}
diff --git a/src/lib/Bcfg2/Server/Lint/RequiredAttrs.py b/src/lib/Bcfg2/Server/Lint/RequiredAttrs.py
index 6f76cf2db..fcb7c6c28 100644
--- a/src/lib/Bcfg2/Server/Lint/RequiredAttrs.py
+++ b/src/lib/Bcfg2/Server/Lint/RequiredAttrs.py
@@ -1,32 +1,105 @@
-import os.path
+import os
+import re
import lxml.etree
import Bcfg2.Server.Lint
+import Bcfg2.Client.Tools.POSIX
+import Bcfg2.Client.Tools.VCS
from Bcfg2.Server.Plugins.Packages import Apt, Yum
+try:
+ from Bcfg2.Server.Plugins.Bundler import BundleTemplateFile
+ has_genshi = True
+except ImportError:
+ has_genshi = False
+
+# format verifying functions
+def is_filename(val):
+ return val.startswith("/") and len(val) > 1
+
+def is_selinux_type(val):
+ return re.match(r'^[a-z_]+_t', val)
+
+def is_selinux_user(val):
+ return re.match(r'^[a-z_]+_u', val)
+
+def is_octal_mode(val):
+ return re.match(r'[0-7]{3,4}', val)
+
+def is_username(val):
+ return re.match(r'^([a-z]\w{0,30}|\d+)$', val)
+
+def is_device_mode(val):
+ try:
+ # checking upper bound seems like a good way to discover some
+ # obscure OS with >8-bit device numbers
+ return int(val) > 0
+    except (TypeError, ValueError):
+ return False
class RequiredAttrs(Bcfg2.Server.Lint.ServerPlugin):
""" verify attributes for configuration entries (as defined in
doc/server/configurationentries) """
def __init__(self, *args, **kwargs):
Bcfg2.Server.Lint.ServerPlugin.__init__(self, *args, **kwargs)
- self.required_attrs = {
- 'Path': {
- 'device': ['name', 'owner', 'group', 'dev_type'],
- 'directory': ['name', 'owner', 'group', 'perms'],
- 'file': ['name', 'owner', 'group', 'perms', '__text__'],
- 'hardlink': ['name', 'to'],
- 'symlink': ['name', 'to'],
- 'ignore': ['name'],
- 'nonexistent': ['name'],
- 'permissions': ['name', 'owner', 'group', 'perms'],
- 'vcs': ['vcstype', 'revision', 'sourceurl']},
- 'Service': {
- 'chkconfig': ['name'],
- 'deb': ['name'],
- 'rc-update': ['name'],
- 'smf': ['name', 'FMRI'],
- 'upstart': ['name']},
- 'Action': ['name', 'timing', 'when', 'status', 'command'],
- 'Package': ['name']}
+ self.required_attrs = dict(
+ Path=dict(
+ device=dict(name=is_filename, owner=is_username,
+ group=is_username,
+ dev_type=lambda v: \
+ v in Bcfg2.Client.Tools.POSIX.device_map),
+ directory=dict(name=is_filename, owner=is_username,
+ group=is_username, perms=is_octal_mode),
+ file=dict(name=is_filename, owner=is_username,
+ group=is_username, perms=is_octal_mode,
+ __text__=None),
+ hardlink=dict(name=is_filename, to=is_filename),
+ symlink=dict(name=is_filename, to=is_filename),
+ ignore=dict(name=is_filename),
+ nonexistent=dict(name=is_filename),
+ permissions=dict(name=is_filename, owner=is_username,
+ group=is_username, perms=is_octal_mode),
+ vcs=dict(vcstype=lambda v: (v != 'Path' and
+ hasattr(Bcfg2.Client.Tools.VCS,
+ "Install%s" % v)),
+ revision=None, sourceurl=None)),
+ Service={
+ "chkconfig": dict(name=None),
+ "deb": dict(name=None),
+ "rc-update": dict(name=None),
+ "smf": dict(name=None, FMRI=None),
+ "upstart": dict(name=None)},
+ Action={None: dict(name=None,
+ timing=lambda v: v in ['pre', 'post', 'both'],
+ when=lambda v: v in ['modified', 'always'],
+ status=lambda v: v in ['ignore', 'check'],
+ command=None)},
+ ACL=dict(
+ default=dict(scope=lambda v: v in ['user', 'group'],
+                             perms=lambda v: re.match('^([0-7]|[rwx\-]{0,3})',
+ v)),
+ access=dict(scope=lambda v: v in ['user', 'group'],
+                            perms=lambda v: re.match('^([0-7]|[rwx\-]{0,3})',
+ v)),
+                mask=dict(perms=lambda v: re.match('^([0-7]|[rwx\-]{0,3})', v))),
+ Package={None: dict(name=None)},
+ SELinux=dict(
+ boolean=dict(name=None,
+ value=lambda v: v in ['on', 'off']),
+ module=dict(name=None, __text__=None),
+ port=dict(name=lambda v: re.match(r'^\d+(-\d+)?/(tcp|udp)', v),
+ selinuxtype=is_selinux_type),
+ fcontext=dict(name=None, selinuxtype=is_selinux_type),
+ node=dict(name=lambda v: "/" in v,
+ selinuxtype=is_selinux_type,
+ proto=lambda v: v in ['ipv6', 'ipv4']),
+ login=dict(name=is_username,
+ selinuxuser=is_selinux_user),
+ user=dict(name=is_selinux_user,
+ roles=lambda v: all(is_selinux_user(u)
+                                              for u in v.split()),
+ prefix=None),
+ interface=dict(name=None, selinuxtype=is_selinux_type),
+ permissive=dict(name=is_selinux_type))
+ )
def Run(self):
self.check_packages()
@@ -42,9 +115,9 @@ class RequiredAttrs(Bcfg2.Server.Lint.ServerPlugin):
return {"unknown-entry-type":"error",
"unknown-entry-tag":"error",
"required-attrs-missing":"error",
+ "required-attr-format":"error",
"extra-attrs":"warning"}
-
def check_packages(self):
""" check package sources for Source entries with missing attrs """
if 'Packages' in self.core.plugins:
@@ -85,13 +158,17 @@ class RequiredAttrs(Bcfg2.Server.Lint.ServerPlugin):
""" check bundles for BoundPath entries with missing attrs """
if 'Bundler' in self.core.plugins:
for bundle in self.core.plugins['Bundler'].entries.values():
- try:
- xdata = lxml.etree.XML(bundle.data)
- except (lxml.etree.XMLSyntaxError, AttributeError):
- xdata = lxml.etree.parse(bundle.template.filepath).getroot()
+ if (self.HandlesFile(bundle.name) and
+ (not has_genshi or
+ not isinstance(bundle, BundleTemplateFile))):
+ try:
+ xdata = lxml.etree.XML(bundle.data)
+ except (lxml.etree.XMLSyntaxError, AttributeError):
+ xdata = \
+ lxml.etree.parse(bundle.template.filepath).getroot()
- for path in xdata.xpath("//*[substring(name(), 1, 5) = 'Bound']"):
- self.check_entry(path, bundle.name)
+ for path in xdata.xpath("//*[substring(name(), 1, 5) = 'Bound']"):
+ self.check_entry(path, bundle.name)
def check_entry(self, entry, filename):
""" generic entry check """
@@ -103,43 +180,55 @@ class RequiredAttrs(Bcfg2.Server.Lint.ServerPlugin):
if tag not in self.required_attrs:
self.LintError("unknown-entry-tag",
"Unknown entry tag '%s': %s" %
- (entry.tag, self.RenderXML(entry)))
+ (tag, self.RenderXML(entry)))
if isinstance(self.required_attrs[tag], dict):
etype = entry.get('type')
if etype in self.required_attrs[tag]:
- required_attrs = set(self.required_attrs[tag][etype] +
- ['type'])
+ required_attrs = self.required_attrs[tag][etype]
else:
self.LintError("unknown-entry-type",
"Unknown %s type %s: %s" %
(tag, etype, self.RenderXML(entry)))
return
else:
- required_attrs = set(self.required_attrs[tag])
+ required_attrs = self.required_attrs[tag]
attrs = set(entry.attrib.keys())
if 'dev_type' in required_attrs:
dev_type = entry.get('dev_type')
if dev_type in ['block', 'char']:
# check if major/minor are specified
- required_attrs |= set(['major', 'minor'])
+ required_attrs['major'] = is_device_mode
+ required_attrs['minor'] = is_device_mode
+
+ if tag == 'ACL' and 'scope' in required_attrs:
+ required_attrs[entry.get('scope')] = is_username
if '__text__' in required_attrs:
- required_attrs.remove('__text__')
+ del required_attrs['__text__']
if (not entry.text and
not entry.get('empty', 'false').lower() == 'true'):
self.LintError("required-attrs-missing",
"Text missing for %s %s in %s: %s" %
- (entry.tag, name, filename,
+ (tag, name, filename,
self.RenderXML(entry)))
- if not attrs.issuperset(required_attrs):
+ if not attrs.issuperset(required_attrs.keys()):
self.LintError("required-attrs-missing",
"The following required attribute(s) are "
"missing for %s %s in %s: %s\n%s" %
- (entry.tag, name, filename,
+ (tag, name, filename,
", ".join([attr
for attr in
- required_attrs.difference(attrs)]),
+ set(required_attrs.keys()).difference(attrs)]),
self.RenderXML(entry)))
+
+ for attr, fmt in required_attrs.items():
+ if fmt and attr in attrs and not fmt(entry.attrib[attr]):
+ self.LintError("required-attr-format",
+ "The %s attribute of %s %s in %s is "
+ "malformed\n%s" %
+ (attr, tag, name, filename,
+ self.RenderXML(entry)))
+
diff --git a/src/lib/Bcfg2/Server/Lint/TemplateHelper.py b/src/lib/Bcfg2/Server/Lint/TemplateHelper.py
deleted file mode 100644
index be270a59c..000000000
--- a/src/lib/Bcfg2/Server/Lint/TemplateHelper.py
+++ /dev/null
@@ -1,64 +0,0 @@
-import sys
-import imp
-import glob
-import Bcfg2.Server.Lint
-from Bcfg2.Server.Plugins.TemplateHelper import HelperModule
-
-class TemplateHelper(Bcfg2.Server.Lint.ServerlessPlugin):
- """ find duplicate Pkgmgr entries with the same priority """
- def __init__(self, *args, **kwargs):
- Bcfg2.Server.Lint.ServerlessPlugin.__init__(self, *args, **kwargs)
- hm = HelperModule("foo.py", None, None)
- self.reserved_keywords = dir(hm)
-
- def Run(self):
- for helper in glob.glob("%s/TemplateHelper/*.py" % self.config['repo']):
- if not self.HandlesFile(helper):
- continue
-
- match = HelperModule._module_name_re.search(helper)
- if match:
- module_name = match.group(1)
- else:
- module_name = helper
-
- try:
- module = imp.load_source(module_name, helper)
- except:
- err = sys.exc_info()[1]
- self.LintError("templatehelper-import-error",
- "Failed to import %s: %s" %
- (helper, err))
- continue
-
- if not hasattr(module, "__export__"):
- self.LintError("templatehelper-no-export",
- "%s has no __export__ list" % helper)
- continue
- elif not isinstance(module.__export__, list):
- self.LintError("templatehelper-nonlist-export",
- "__export__ is not a list in %s" % helper)
- continue
-
- for sym in module.__export__:
- if not hasattr(module, sym):
- self.LintError("templatehelper-nonexistent-export",
- "%s: exported symbol %s does not exist" %
- (helper, sym))
- elif sym in self.reserved_keywords:
- self.LintError("templatehelper-reserved-export",
- "%s: exported symbol %s is reserved" %
- (helper, sym))
- elif sym.startswith("_"):
- self.LintError("templatehelper-underscore-export",
- "%s: exported symbol %s starts with underscore" %
- (helper, sym))
-
- @classmethod
- def Errors(cls):
- return {"templatehelper-import-error":"error",
- "templatehelper-no-export":"error",
- "templatehelper-nonlist-export":"error",
- "templatehelper-nonexistent-export":"error",
- "templatehelper-reserved-export":"error",
- "templatehelper-underscore-export":"warning"}
diff --git a/src/lib/Bcfg2/Server/Lint/Validate.py b/src/lib/Bcfg2/Server/Lint/Validate.py
index 05fedc313..b8bdb4755 100644
--- a/src/lib/Bcfg2/Server/Lint/Validate.py
+++ b/src/lib/Bcfg2/Server/Lint/Validate.py
@@ -1,10 +1,10 @@
-import fnmatch
+import os
+import sys
import glob
+import fnmatch
import lxml.etree
-import os
from subprocess import Popen, PIPE, STDOUT
-import sys
-
+from Bcfg2.Server import XI, XI_NAMESPACE
import Bcfg2.Server.Lint
class Validate(Bcfg2.Server.Lint.ServerlessPlugin):
@@ -22,7 +22,6 @@ class Validate(Bcfg2.Server.Lint.ServerlessPlugin):
"%s/Rules/*.xml":"%s/rules.xsd",
"%s/Defaults/*.xml":"%s/defaults.xsd",
"%s/etc/report-configuration.xml":"%s/report-configuration.xsd",
- "%s/Svcmgr/*.xml":"%s/services.xsd",
"%s/Deps/*.xml":"%s/deps.xsd",
"%s/Decisions/*.xml":"%s/decisions.xsd",
"%s/Packages/sources.xml":"%s/packages.xsd",
@@ -46,20 +45,10 @@ class Validate(Bcfg2.Server.Lint.ServerlessPlugin):
if filelist:
# avoid loading schemas for empty file lists
schemafile = schemaname % schemadir
- try:
- schema = lxml.etree.XMLSchema(lxml.etree.parse(schemafile))
- except IOError:
- e = sys.exc_info()[1]
- self.LintError("input-output-error", str(e))
- continue
- except lxml.etree.XMLSchemaParseError:
- e = sys.exc_info()[1]
- self.LintError("schema-failed-to-parse",
- "Failed to process schema %s: %s" %
- (schemafile, e))
- continue
- for filename in filelist:
- self.validate(filename, schemafile, schema=schema)
+ schema = self._load_schema(schemafile)
+ if schema:
+ for filename in filelist:
+ self.validate(filename, schemafile, schema=schema)
self.check_properties()
@@ -88,11 +77,8 @@ class Validate(Bcfg2.Server.Lint.ServerlessPlugin):
return True on success, False on failure """
if schema is None:
# if no schema object was provided, instantiate one
- try:
- schema = lxml.etree.XMLSchema(lxml.etree.parse(schemafile))
- except:
- self.LintError("schema-failed-to-parse",
- "Failed to process schema %s" % schemafile)
+ schema = self._load_schema(schemafile)
+ if not schema:
return False
try:
@@ -187,24 +173,42 @@ class Validate(Bcfg2.Server.Lint.ServerlessPlugin):
def follow_xinclude(self, xfile):
""" follow xincludes in the given file """
xdata = lxml.etree.parse(xfile)
- included = set([ent.get('href') for ent in
- xdata.findall('./{http://www.w3.org/2001/XInclude}include')])
+ included = set([el
+ for el in xdata.findall('./%sinclude' % XI_NAMESPACE)])
rv = []
while included:
try:
- filename = included.pop()
+ el = included.pop()
except KeyError:
continue
+ filename = el.get("href")
path = os.path.join(os.path.dirname(xfile), filename)
- if self.HandlesFile(path):
+ if not os.path.exists(path):
+ if not el.findall('./%sfallback' % XI_NAMESPACE):
+ self.LintError("broken-xinclude-chain",
+ "XInclude %s does not exist in %s: %s" %
+ (filename, xfile, self.RenderXML(el)))
+ elif self.HandlesFile(path):
rv.append(path)
groupdata = lxml.etree.parse(path)
[included.add(el.get('href'))
for el in
- groupdata.findall('./{http://www.w3.org/2001/XInclude}include')]
+ groupdata.findall('./%sinclude' % XI_NAMESPACE)]
included.discard(filename)
return rv
+ def _load_schema(self, filename):
+ try:
+ return lxml.etree.XMLSchema(lxml.etree.parse(filename))
+ except IOError:
+ e = sys.exc_info()[1]
+ self.LintError("input-output-error", str(e))
+ except lxml.etree.XMLSchemaParseError:
+ e = sys.exc_info()[1]
+ self.LintError("schema-failed-to-parse",
+ "Failed to process schema %s: %s" %
+ (filename, e))
+ return None
diff --git a/src/lib/Bcfg2/Server/Lint/__init__.py b/src/lib/Bcfg2/Server/Lint/__init__.py
index 5d7dd707b..e3b4c8ea7 100644
--- a/src/lib/Bcfg2/Server/Lint/__init__.py
+++ b/src/lib/Bcfg2/Server/Lint/__init__.py
@@ -81,18 +81,20 @@ class Plugin (object):
def LintError(self, err, msg):
self.errorhandler.dispatch(err, msg)
- def RenderXML(self, element):
+ def RenderXML(self, element, keep_text=False):
"""render an XML element for error output -- line number
prefixed, no children"""
xml = None
if len(element) or element.text:
el = copy(element)
- if el.text:
+ if el.text and not keep_text:
el.text = '...'
[el.remove(c) for c in el.iterchildren()]
- xml = lxml.etree.tostring(el).strip()
+ xml = lxml.etree.tostring(el,
+ xml_declaration=False).decode("UTF-8").strip()
else:
- xml = lxml.etree.tostring(element).strip()
+ xml = lxml.etree.tostring(element,
+ xml_declaration=False).decode("UTF-8").strip()
return " line %s: %s" % (element.sourceline, xml)
diff --git a/src/lib/Bcfg2/Server/Plugin.py b/src/lib/Bcfg2/Server/Plugin.py
index ca37431a2..910bc0108 100644
--- a/src/lib/Bcfg2/Server/Plugin.py
+++ b/src/lib/Bcfg2/Server/Plugin.py
@@ -1,54 +1,60 @@
"""This module provides the baseclass for Bcfg2 Server Plugins."""
-import copy
-import logging
-import lxml.etree
import os
-import pickle
-import posixpath
import re
import sys
+import copy
+import logging
+import operator
import threading
-from Bcfg2.Bcfg2Py3k import ConfigParser
-
-from lxml.etree import XML, XMLSyntaxError
-
+import lxml.etree
+import Bcfg2.Server
import Bcfg2.Options
+from Bcfg2.Bcfg2Py3k import ConfigParser, CmpMixin, reduce, Queue, Empty, \
+ Full, cPickle
-# py3k compatibility
-if sys.hexversion >= 0x03000000:
- from functools import reduce
- from io import FileIO as BUILTIN_FILE_TYPE
-else:
- BUILTIN_FILE_TYPE = file
-from Bcfg2.Bcfg2Py3k import Queue
-from Bcfg2.Bcfg2Py3k import Empty
-from Bcfg2.Bcfg2Py3k import Full
+try:
+ import django
+ has_django = True
+except ImportError:
+ has_django = False
# grab default metadata info from bcfg2.conf
opts = {'owner': Bcfg2.Options.MDATA_OWNER,
'group': Bcfg2.Options.MDATA_GROUP,
- 'important': Bcfg2.Options.MDATA_IMPORTANT,
'perms': Bcfg2.Options.MDATA_PERMS,
+ 'secontext': Bcfg2.Options.MDATA_SECONTEXT,
+ 'important': Bcfg2.Options.MDATA_IMPORTANT,
'paranoid': Bcfg2.Options.MDATA_PARANOID,
'sensitive': Bcfg2.Options.MDATA_SENSITIVE}
-mdata_setup = Bcfg2.Options.OptionParser(opts)
-mdata_setup.parse([])
-del mdata_setup['args']
+default_file_metadata = Bcfg2.Options.OptionParser(opts)
+default_file_metadata.parse([])
+del default_file_metadata['args']
logger = logging.getLogger('Bcfg2.Server.Plugin')
-default_file_metadata = mdata_setup
-
-info_regex = re.compile( \
- 'encoding:(\s)*(?P<encoding>\w+)|' +
- 'group:(\s)*(?P<group>\S+)|' +
- 'important:(\s)*(?P<important>\S+)|' +
- 'mtime:(\s)*(?P<mtime>\w+)|' +
- 'owner:(\s)*(?P<owner>\S+)|' +
- 'paranoid:(\s)*(?P<paranoid>\S+)|' +
- 'perms:(\s)*(?P<perms>\w+)|' +
- 'sensitive:(\s)*(?P<sensitive>\S+)|')
+info_regex = re.compile('owner:(\s)*(?P<owner>\S+)|' +
+ 'group:(\s)*(?P<group>\S+)|' +
+ 'perms:(\s)*(?P<perms>\w+)|' +
+ 'secontext:(\s)*(?P<secontext>\S+)|' +
+ 'paranoid:(\s)*(?P<paranoid>\S+)|' +
+ 'sensitive:(\s)*(?P<sensitive>\S+)|' +
+ 'encoding:(\s)*(?P<encoding>\S+)|' +
+ 'important:(\s)*(?P<important>\S+)|' +
+ 'mtime:(\s)*(?P<mtime>\w+)|')
+
+def bind_info(entry, metadata, infoxml=None, default=default_file_metadata):
+ for attr, val in list(default.items()):
+ entry.set(attr, val)
+ if infoxml:
+ mdata = dict()
+ infoxml.pnode.Match(metadata, mdata, entry=entry)
+ if 'Info' not in mdata:
+ msg = "Failed to set metadata for file %s" % entry.get('name')
+ logger.error(msg)
+ raise PluginExecutionError(msg)
+ for attr, val in list(mdata['Info'][None].items()):
+ entry.set(attr, val)
class PluginInitError(Exception):
@@ -61,6 +67,18 @@ class PluginExecutionError(Exception):
pass
+class MetadataConsistencyError(Exception):
+ """This error gets raised when metadata is internally inconsistent."""
+ pass
+
+
+class MetadataRuntimeError(Exception):
+ """This error is raised when the metadata engine
+ is called prior to reading enough data.
+ """
+ pass
+
+
class Debuggable(object):
__rmi__ = ['toggle_debug']
@@ -73,6 +91,10 @@ class Debuggable(object):
def toggle_debug(self):
self.debug_flag = not self.debug_flag
+ self.debug_log("%s: debug_flag = %s" % (self.__class__.__name__,
+ self.debug_flag),
+ flag=True)
+ return self.debug_flag
def debug_log(self, message, flag=None):
if (flag is None and self.debug_flag) or flag:
@@ -116,8 +138,7 @@ class Plugin(Debuggable):
@classmethod
def init_repo(cls, repo):
- path = "%s/%s" % (repo, cls.name)
- os.makedirs(path)
+ os.makedirs(os.path.join(repo, cls.name))
def shutdown(self):
self.running = False
@@ -126,6 +147,26 @@ class Plugin(Debuggable):
return "%s Plugin" % self.__class__.__name__
+class DatabaseBacked(Plugin):
+ @property
+ def _use_db(self):
+ use_db = self.core.setup.cfp.getboolean(self.name.lower(),
+ "use_database",
+ default=False)
+ if use_db and has_django and self.core.database_available:
+ return True
+ elif not use_db:
+ return False
+ else:
+ self.logger.error("use_database is true but django not found")
+ return False
+
+
+class PluginDatabaseModel(object):
+ class Meta:
+ app_label = "Server"
+
+
class Generator(object):
"""Generator plugins contribute to literal client configurations."""
def HandlesEntry(self, entry, metadata):
@@ -134,19 +175,19 @@ class Generator(object):
def HandleEntry(self, entry, metadata):
"""This is the slow-path handler for configuration entry binding."""
- raise PluginExecutionError
+ return entry
class Structure(object):
"""Structure Plugins contribute to abstract client configurations."""
def BuildStructures(self, metadata):
"""Return a list of abstract goal structures for client."""
- raise PluginExecutionError
+ raise NotImplementedError
class Metadata(object):
"""Signal metadata capabilities for this plugin"""
- def add_client(self, client_name, attribs):
+ def add_client(self, client_name):
"""Add client."""
pass
@@ -158,11 +199,17 @@ class Metadata(object):
"""Create viz str for viz admin mode."""
pass
+ def _handle_default_event(self, event):
+ pass
+
def get_initial_metadata(self, client_name):
- raise PluginExecutionError
+ raise NotImplementedError
- def merge_additional_data(self, imd, source, groups, data):
- raise PluginExecutionError
+ def merge_additional_data(self, imd, source, data):
+ raise NotImplementedError
+
+ def merge_additional_groups(self, imd, groups):
+ raise NotImplementedError
class Connector(object):
@@ -187,23 +234,23 @@ class Probing(object):
pass
-class Statistics(object):
+class Statistics(Plugin):
"""Signal statistics handling capability."""
def process_statistics(self, client, xdata):
pass
-class ThreadedStatistics(Statistics,
- threading.Thread):
+class ThreadedStatistics(Statistics, threading.Thread):
"""Threaded statistics handling capability."""
def __init__(self, core, datastore):
- Statistics.__init__(self)
+ Statistics.__init__(self, core, datastore)
threading.Thread.__init__(self)
# Event from the core signaling an exit
self.terminate = core.terminate
self.work_queue = Queue(100000)
- self.pending_file = "%s/etc/%s.pending" % (datastore, self.__class__.__name__)
- self.daemon = True
+ self.pending_file = os.path.join(datastore, "etc",
+ "%s.pending" % self.name)
+ self.daemon = False
self.start()
def save(self):
@@ -213,32 +260,38 @@ class ThreadedStatistics(Statistics,
while not self.work_queue.empty():
(metadata, data) = self.work_queue.get_nowait()
try:
- pending_data.append((metadata.hostname, lxml.etree.tostring(data)))
+ pending_data.append((metadata.hostname,
+ lxml.etree.tostring(data,
+ xml_declaration=False).decode("UTF-8")))
except:
- self.logger.warning("Dropping interaction for %s" % metadata.hostname)
+ err = sys.exc_info()[1]
+ self.logger.warning("Dropping interaction for %s: %s" %
+ (metadata.hostname, err))
except Empty:
pass
try:
savefile = open(self.pending_file, 'w')
- pickle.dump(pending_data, savefile)
+ cPickle.dump(pending_data, savefile)
savefile.close()
- self.logger.info("Saved pending %s data" % self.__class__.__name__)
+ self.logger.info("Saved pending %s data" % self.name)
except:
- self.logger.warning("Failed to save pending data")
+ err = sys.exc_info()[1]
+ self.logger.warning("Failed to save pending data: %s" % err)
def load(self):
- """Load any pending data to a file."""
+ """Load any pending data from a file."""
if not os.path.exists(self.pending_file):
return True
pending_data = []
try:
savefile = open(self.pending_file, 'r')
- pending_data = pickle.load(savefile)
+ pending_data = cPickle.load(savefile)
savefile.close()
except Exception:
e = sys.exc_info()[1]
self.logger.warning("Failed to load pending data: %s" % e)
+ return False
for (pmetadata, pdata) in pending_data:
# check that shutdown wasnt called early
if self.terminate.isSet():
@@ -249,56 +302,58 @@ class ThreadedStatistics(Statistics,
try:
metadata = self.core.build_metadata(pmetadata)
break
- except Bcfg2.Server.Plugins.Metadata.MetadataRuntimeError:
+ except MetadataRuntimeError:
pass
self.terminate.wait(5)
if self.terminate.isSet():
return False
- self.work_queue.put_nowait((metadata, lxml.etree.fromstring(pdata)))
+ self.work_queue.put_nowait((metadata,
+ lxml.etree.XML(pdata,
+ parser=Bcfg2.Server.XMLParser)))
except Full:
self.logger.warning("Queue.Full: Failed to load queue data")
break
except lxml.etree.LxmlError:
lxml_error = sys.exc_info()[1]
- self.logger.error("Unable to load save interaction: %s" % lxml_error)
- except Bcfg2.Server.Plugins.Metadata.MetadataConsistencyError:
- self.logger.error("Unable to load metadata for save interaction: %s" % pmetadata)
+ self.logger.error("Unable to load saved interaction: %s" %
+ lxml_error)
+ except MetadataConsistencyError:
+ self.logger.error("Unable to load metadata for save "
+ "interaction: %s" % pmetadata)
try:
os.unlink(self.pending_file)
except:
- self.logger.error("Failed to unlink save file: %s" % self.pending_file)
- self.logger.info("Loaded pending %s data" % self.__class__.__name__)
+ self.logger.error("Failed to unlink save file: %s" %
+ self.pending_file)
+ self.logger.info("Loaded pending %s data" % self.name)
return True
def run(self):
if not self.load():
return
- while not self.terminate.isSet():
+ while not self.terminate.isSet() and self.work_queue != None:
try:
- (xdata, client) = self.work_queue.get(block=True, timeout=2)
+ (client, xdata) = self.work_queue.get(block=True, timeout=2)
except Empty:
continue
except Exception:
e = sys.exc_info()[1]
self.logger.error("ThreadedStatistics: %s" % e)
continue
- self.handle_statistic(xdata, client)
- if not self.work_queue.empty():
+ self.handle_statistic(client, xdata)
+ if self.work_queue != None and not self.work_queue.empty():
self.save()
def process_statistics(self, metadata, data):
- warned = False
try:
self.work_queue.put_nowait((metadata, copy.copy(data)))
- warned = False
except Full:
- if not warned:
- self.logger.warning("%s: Queue is full. Dropping interactions." % self.__class__.__name__)
- warned = True
+ self.logger.warning("%s: Queue is full. Dropping interactions." %
+ self.name)
- def handle_statistics(self, metadata, data):
+ def handle_statistic(self, metadata, data):
"""Handle stats here."""
pass
@@ -308,17 +363,17 @@ class PullSource(object):
return []
def GetCurrentEntry(self, client, e_type, e_name):
- raise PluginExecutionError
+ raise NotImplementedError
class PullTarget(object):
def AcceptChoices(self, entry, metadata):
- raise PluginExecutionError
+ raise NotImplementedError
def AcceptPullData(self, specific, new_entry, verbose):
"""This is the null per-plugin implementation
of bcfg2-admin pull."""
- raise PluginExecutionError
+ raise NotImplementedError
class Decision(object):
@@ -334,13 +389,13 @@ class ValidationError(Exception):
class StructureValidator(object):
"""Validate/modify goal structures."""
def validate_structures(self, metadata, structures):
- raise ValidationError("not implemented")
+ raise NotImplementedError
class GoalValidator(object):
"""Validate/modify configuration goals."""
def validate_goals(self, metadata, goals):
- raise ValidationError("not implemented")
+ raise NotImplementedError
class Version(object):
@@ -352,6 +407,17 @@ class Version(object):
pass
+class ClientRunHooks(object):
+ """ Provides hooks to interact with client runs """
+ def start_client_run(self, metadata):
+ pass
+
+ def end_client_run(self, metadata):
+ pass
+
+ def end_statistics(self, metadata):
+ pass
+
# the rest of the file contains classes for coherent file caching
class FileBacked(object):
@@ -361,17 +427,18 @@ class FileBacked(object):
This object is meant to be used as a part of DirectoryBacked.
"""
- def __init__(self, name):
+ def __init__(self, name, fam=None):
object.__init__(self)
self.data = ''
self.name = name
+ self.fam = fam
def HandleEvent(self, event=None):
"""Read file upon update."""
if event and event.code2str() not in ['exists', 'changed', 'created']:
return
try:
- self.data = BUILTIN_FILE_TYPE(self.name).read()
+ self.data = open(self.name).read()
self.Index()
except IOError:
err = sys.exc_info()[1]
@@ -382,16 +449,14 @@ class FileBacked(object):
pass
def __repr__(self):
- return "%s: %s" % (self.__class__.__name__, str(self))
-
- def __str__(self):
- return "%s: %s" % (self.name, self.data)
+ return "%s: %s" % (self.__class__.__name__, self.name)
class DirectoryBacked(object):
"""This object is a coherent cache for a filesystem hierarchy of files."""
__child__ = FileBacked
patterns = re.compile('.*')
+ ignore = None
def __init__(self, data, fam):
"""Initialize the DirectoryBacked object.
@@ -438,8 +503,8 @@ class DirectoryBacked(object):
"""
dirpathname = os.path.join(self.data, relative)
if relative not in self.handles.values():
- if not posixpath.isdir(dirpathname):
- logger.error("Failed to open directory %s" % (dirpathname))
+ if not os.path.isdir(dirpathname):
+ logger.error("%s is not a directory" % dirpathname)
return
reqid = self.fam.AddMonitor(dirpathname, self)
self.handles[reqid] = relative
@@ -453,7 +518,8 @@ class DirectoryBacked(object):
added.
"""
self.entries[relative] = self.__child__(os.path.join(self.data,
- relative))
+ relative),
+ self.fam)
self.entries[relative].HandleEvent(event)
def HandleEvent(self, event):
@@ -470,27 +536,33 @@ class DirectoryBacked(object):
"""
action = event.code2str()
- # Clean up the absolute path names passed in
- event.filename = os.path.normpath(event.filename)
- if event.filename.startswith(self.data):
- event.filename = event.filename[len(self.data)+1:]
-
# Exclude events for actions we don't care about
if action == 'endExist':
return
if event.requestID not in self.handles:
- logger.warn("Got %s event with unknown handle (%s) for %s"
- % (action, event.requestID, abspath))
+ logger.warn("Got %s event with unknown handle (%s) for %s" %
+ (action, event.requestID, event.filename))
+ return
+
+ # Clean up path names
+ event.filename = os.path.normpath(event.filename)
+ if event.filename.startswith(self.data):
+ # the first event we get is on the data directory itself
+ event.filename = event.filename[len(self.data) + 1:]
+
+ if self.ignore and self.ignore.search(event.filename):
+ logger.debug("Ignoring event %s" % event.filename)
return
# Calculate the absolute and relative paths this event refers to
abspath = os.path.join(self.data, self.handles[event.requestID],
event.filename)
- relpath = os.path.join(self.handles[event.requestID], event.filename)
+ relpath = os.path.join(self.handles[event.requestID],
+ event.filename).lstrip('/')
if action == 'deleted':
- for key in self.entries.keys():
+ for key in list(self.entries.keys()):
if key.startswith(relpath):
del self.entries[key]
# We remove values from self.entries, but not
@@ -498,7 +570,7 @@ class DirectoryBacked(object):
# watching a directory just because it gets deleted. If it
# is recreated, we will start getting notifications for it
# again without having to add a new monitor.
- elif posixpath.isdir(abspath):
+ elif os.path.isdir(abspath):
# Deal with events for directories
if action in ['exists', 'created']:
self.add_directory_monitor(relpath)
@@ -522,21 +594,13 @@ class DirectoryBacked(object):
# didn't know about. Go ahead and treat it like a
# "created" event, but log a warning, because this
# is unexpected.
- logger.warn("Got %s event for unexpected dir %s" % (action,
- abspath))
+ logger.warn("Got %s event for unexpected dir %s" %
+ (action, abspath))
self.add_directory_monitor(relpath)
else:
- logger.warn("Got unknown dir event %s %s %s" % (event.requestID,
- event.code2str(),
- abspath))
- else:
- # Deal with events for non-directories
- if ((event.filename[-1] == '~') or
- (event.filename[:2] == '.#') or
- (event.filename[-4:] == '.swp') or
- (event.filename in ['SCCS', '.svn', '4913']) or
- (not self.patterns.match(event.filename))):
- return
+ logger.warn("Got unknown dir event %s %s %s" %
+ (event.requestID, event.code2str(), abspath))
+ elif self.patterns.search(event.filename):
if action in ['exists', 'created']:
self.add_entry(relpath, event)
elif action == 'changed':
@@ -547,13 +611,16 @@ class DirectoryBacked(object):
# know about. Go ahead and treat it like a
# "created" event, but log a warning, because this
# is unexpected.
- logger.warn("Got %s event for unexpected file %s" % (action,
- abspath))
+ logger.warn("Got %s event for unexpected file %s" %
+ (action,
+ abspath))
self.add_entry(relpath, event)
else:
- logger.warn("Got unknown file event %s %s %s" % (event.requestID,
- event.code2str(),
- abspath))
+ logger.warn("Got unknown file event %s %s %s" %
+ (event.requestID, event.code2str(), abspath))
+ else:
+ logger.warn("Could not process filename %s; ignoring" %
+ event.filename)
class XMLFileBacked(FileBacked):
@@ -563,68 +630,55 @@ class XMLFileBacked(FileBacked):
"""
__identifier__ = 'name'
- def __init__(self, filename):
- self.label = "dummy"
- self.entries = []
+ def __init__(self, filename, fam=None, should_monitor=False):
FileBacked.__init__(self, filename)
-
- def Index(self):
- """Build local data structures."""
- try:
- self.xdata = XML(self.data)
- except XMLSyntaxError:
- logger.error("Failed to parse %s" % (self.name))
- return
- self.entries = self.xdata.getchildren()
- if self.__identifier__ is not None:
- self.label = self.xdata.attrib[self.__identifier__]
-
- def __iter__(self):
- return iter(self.entries)
-
- def __str__(self):
- return "%s: %s" % (self.name, lxml.etree.tostring(self.xdata))
-
-
-class SingleXMLFileBacked(XMLFileBacked):
- """This object is a coherent cache for an independent XML file."""
- def __init__(self, filename, fam):
- XMLFileBacked.__init__(self, filename)
+ self.label = ""
+ self.entries = []
self.extras = []
self.fam = fam
- self.fam.AddMonitor(filename, self)
+ self.should_monitor = should_monitor
+ if fam and should_monitor:
+ self.fam.AddMonitor(filename, self)
def _follow_xincludes(self, fname=None, xdata=None):
- ''' follow xincludes, adding included files to fam and to
- self.extras '''
+ ''' follow xincludes, adding included files to self.extras '''
if xdata is None:
if fname is None:
xdata = self.xdata.getroottree()
else:
xdata = lxml.etree.parse(fname)
- included = [ent.get('href')
- for ent in xdata.findall('//{http://www.w3.org/2001/XInclude}include')]
- for name in included:
- if name not in self.extras:
- if name.startswith("/"):
- fpath = name
+ included = [el for el in xdata.findall('//%sinclude' %
+ Bcfg2.Server.XI_NAMESPACE)]
+ for el in included:
+ name = el.get("href")
+ if name.startswith("/"):
+ fpath = name
+ else:
+ if fname:
+ rel = fname
else:
- fpath = os.path.join(os.path.dirname(self.name), name)
- self.add_monitor(fpath, name)
- self._follow_xincludes(fname=fpath)
-
- def add_monitor(self, fpath, fname):
- self.fam.AddMonitor(fpath, self)
- self.extras.append(fname)
+ rel = self.name
+ fpath = os.path.join(os.path.dirname(rel), name)
+ if fpath not in self.extras:
+ if os.path.exists(fpath):
+ self._follow_xincludes(fname=fpath)
+ self.add_monitor(fpath)
+ else:
+ msg = "%s: %s does not exist, skipping" % (self.name, name)
+ if el.findall('./%sfallback' % Bcfg2.Server.XI_NAMESPACE):
+ self.logger.debug(msg)
+ else:
+ self.logger.warning(msg)
def Index(self):
"""Build local data structures."""
try:
- self.xdata = lxml.etree.XML(self.data, base_url=self.name)
+ self.xdata = lxml.etree.XML(self.data, base_url=self.name,
+ parser=Bcfg2.Server.XMLParser)
except lxml.etree.XMLSyntaxError:
- err = sys.exc_info()[1]
- logger.error("Failed to parse %s: %s" % (self.name, err))
- raise Bcfg2.Server.Plugin.PluginInitError
+ msg = "Failed to parse %s: %s" % (self.name, sys.exc_info()[1])
+ logger.error(msg)
+ raise PluginInitError(msg)
self._follow_xincludes()
if self.extras:
@@ -638,43 +692,52 @@ class SingleXMLFileBacked(XMLFileBacked):
if self.__identifier__ is not None:
self.label = self.xdata.attrib[self.__identifier__]
+ def add_monitor(self, fpath):
+ self.extras.append(fpath)
+ if self.fam and self.should_monitor:
+ self.fam.AddMonitor(fpath, self)
+
+ def __iter__(self):
+ return iter(self.entries)
+
+ def __str__(self):
+ return "%s at %s" % (self.__class__.__name__, self.name)
+
class StructFile(XMLFileBacked):
"""This file contains a set of structure file formatting logic."""
__identifier__ = None
- def __init__(self, name):
- XMLFileBacked.__init__(self, name)
+ def _include_element(self, item, metadata):
+ """ determine if an XML element matches the metadata """
+ if isinstance(item, lxml.etree._Comment):
+ return False
+ negate = item.get('negate', 'false').lower() == 'true'
+ if item.tag == 'Group':
+ return negate == (item.get('name') not in metadata.groups)
+ elif item.tag == 'Client':
+ return negate == (item.get('name') != metadata.hostname)
+ else:
+ return True
def _match(self, item, metadata):
""" recursive helper for Match() """
- if isinstance(item, lxml.etree._Comment):
- return []
- elif item.tag == 'Group':
- rv = []
- if ((item.get('negate', 'false').lower() == 'true' and
- item.get('name') not in metadata.groups) or
- (item.get('negate', 'false').lower() == 'false' and
- item.get('name') in metadata.groups)):
- for child in item.iterchildren():
- rv.extend(self._match(child, metadata))
- return rv
- elif item.tag == 'Client':
- rv = []
- if ((item.get('negate', 'false').lower() == 'true' and
- item.get('name') != metadata.hostname) or
- (item.get('negate', 'false').lower() == 'false' and
- item.get('name') == metadata.hostname)):
+ if self._include_element(item, metadata):
+ if item.tag == 'Group' or item.tag == 'Client':
+ rv = []
+ if self._include_element(item, metadata):
+ for child in item.iterchildren():
+ rv.extend(self._match(child, metadata))
+ return rv
+ else:
+ rv = copy.deepcopy(item)
+ for child in rv.iterchildren():
+ rv.remove(child)
for child in item.iterchildren():
rv.extend(self._match(child, metadata))
- return rv
+ return [rv]
else:
- rv = copy.copy(item)
- for child in rv.iterchildren():
- rv.remove(child)
- for child in item.iterchildren():
- rv.extend(self._match(child, metadata))
- return [rv]
+ return []
def Match(self, metadata):
"""Return matching fragments of independent."""
@@ -683,27 +746,52 @@ class StructFile(XMLFileBacked):
rv.extend(self._match(child, metadata))
return rv
+ def _xml_match(self, item, metadata):
+ """ recursive helper for XMLMatch """
+ if self._include_element(item, metadata):
+ if item.tag == 'Group' or item.tag == 'Client':
+ for child in item.iterchildren():
+ item.remove(child)
+ item.getparent().append(child)
+ self._xml_match(child, metadata)
+ item.getparent().remove(item)
+ else:
+ for child in item.iterchildren():
+ self._xml_match(child, metadata)
+ else:
+ item.getparent().remove(item)
+
+ def XMLMatch(self, metadata):
+ """ Return a rebuilt XML document that only contains the
+ matching portions """
+ rv = copy.deepcopy(self.xdata)
+ for child in rv.iterchildren():
+ self._xml_match(child, metadata)
+ return rv
+
-class INode:
+class INode(object):
"""
LNodes provide lists of things available at a particular
group intersection.
"""
- raw = {'Client': "lambda m, e:'%(name)s' == m.hostname and predicate(m, e)",
- 'Group': "lambda m, e:'%(name)s' in m.groups and predicate(m, e)"}
- nraw = {'Client': "lambda m, e:'%(name)s' != m.hostname and predicate(m, e)",
- 'Group': "lambda m, e:'%(name)s' not in m.groups and predicate(m, e)"}
+ raw = dict(
+ Client="lambda m, e:'%(name)s' == m.hostname and predicate(m, e)",
+ Group="lambda m, e:'%(name)s' in m.groups and predicate(m, e)")
+ nraw = dict(
+ Client="lambda m, e:'%(name)s' != m.hostname and predicate(m, e)",
+ Group="lambda m, e:'%(name)s' not in m.groups and predicate(m, e)")
containers = ['Group', 'Client']
ignore = []
def __init__(self, data, idict, parent=None):
self.data = data
self.contents = {}
- if parent == None:
- self.predicate = lambda m, d: True
+ if parent is None:
+ self.predicate = lambda m, e: True
else:
predicate = parent.predicate
- if data.get('negate', 'false') in ['true', 'True']:
+ if data.get('negate', 'false').lower() == 'true':
psrc = self.nraw
else:
psrc = self.raw
@@ -712,21 +800,29 @@ class INode:
{'name': data.get('name')},
{'predicate': predicate})
else:
- raise Exception
- mytype = self.__class__
+ raise PluginExecutionError("Unknown tag: %s" % data.tag)
self.children = []
+ self._load_children(data, idict)
+
+ def _load_children(self, data, idict):
for item in data.getchildren():
if item.tag in self.ignore:
continue
elif item.tag in self.containers:
- self.children.append(mytype(item, idict, self))
+ self.children.append(self.__class__(item, idict, self))
else:
try:
- self.contents[item.tag][item.get('name')] = item.attrib
+ self.contents[item.tag][item.get('name')] = \
+ dict(item.attrib)
except KeyError:
- self.contents[item.tag] = {item.get('name'): item.attrib}
+ self.contents[item.tag] = \
+ {item.get('name'): dict(item.attrib)}
if item.text:
- self.contents[item.tag]['__text__'] = item.text
+ self.contents[item.tag][item.get('name')]['__text__'] = \
+ item.text
+ if item.getchildren():
+ self.contents[item.tag][item.get('name')]['__children__'] =\
+ item.getchildren()
try:
idict[item.tag].append(item.get('name'))
except KeyError:
@@ -760,43 +856,48 @@ class XMLSrc(XMLFileBacked):
"""XMLSrc files contain a LNode hierarchy that returns matching entries."""
__node__ = INode
__cacheobj__ = dict
+ __priority_required__ = True
- def __init__(self, filename, noprio=False):
- XMLFileBacked.__init__(self, filename)
+ def __init__(self, filename, fam=None, should_monitor=False):
+ XMLFileBacked.__init__(self, filename, fam, should_monitor)
self.items = {}
self.cache = None
self.pnode = None
self.priority = -1
- self.noprio = noprio
def HandleEvent(self, _=None):
"""Read file upon update."""
try:
- data = BUILTIN_FILE_TYPE(self.name).read()
+ data = open(self.name).read()
except IOError:
- logger.error("Failed to read file %s" % (self.name))
- return
+ msg = "Failed to read file %s: %s" % (self.name, sys.exc_info()[1])
+ logger.error(msg)
+ raise PluginExecutionError(msg)
self.items = {}
try:
- xdata = lxml.etree.XML(data)
+ xdata = lxml.etree.XML(data, parser=Bcfg2.Server.XMLParser)
except lxml.etree.XMLSyntaxError:
- logger.error("Failed to parse file %s" % (self.name))
- return
+ msg = "Failed to parse file %s" % (self.name, sys.exc_info()[1])
+ logger.error(msg)
+ raise PluginExecutionError(msg)
self.pnode = self.__node__(xdata, self.items)
self.cache = None
try:
self.priority = int(xdata.get('priority'))
except (ValueError, TypeError):
- if not self.noprio:
- logger.error("Got bogus priority %s for file %s" %
- (xdata.get('priority'), self.name))
+ if self.__priority_required__:
+ msg = "Got bogus priority %s for file %s" % \
+ (xdata.get('priority'), self.name)
+ logger.error(msg)
+ raise PluginExecutionError(msg)
+
del xdata, data
def Cache(self, metadata):
"""Build a package dict for a given host."""
- if self.cache == None or self.cache[0] != metadata:
+ if self.cache is None or self.cache[0] != metadata:
cache = (metadata, self.__cacheobj__())
- if self.pnode == None:
+ if self.pnode is None:
logger.error("Cache method called early for %s; forcing data load" % (self.name))
self.HandleEvent()
return
@@ -809,11 +910,13 @@ class XMLSrc(XMLFileBacked):
class InfoXML(XMLSrc):
__node__ = InfoNode
+ __priority_required__ = False
class XMLDirectoryBacked(DirectoryBacked):
"""Directorybacked for *.xml."""
- patterns = re.compile('.*\.xml')
+ patterns = re.compile('^.*\.xml$')
+ __child__ = XMLFileBacked
class PrioDir(Plugin, Generator, XMLDirectoryBacked):
@@ -824,11 +927,7 @@ class PrioDir(Plugin, Generator, XMLDirectoryBacked):
def __init__(self, core, datastore):
Plugin.__init__(self, core, datastore)
Generator.__init__(self)
- try:
- XMLDirectoryBacked.__init__(self, self.data, self.core.fam)
- except OSError:
- self.logger.error("Failed to load %s indices" % (self.name))
- raise PluginInitError
+ XMLDirectoryBacked.__init__(self, self.data, self.core.fam)
def HandleEvent(self, event):
"""Handle events and update dispatch table."""
@@ -867,19 +966,22 @@ class PrioDir(Plugin, Generator, XMLDirectoryBacked):
else:
prio = [int(src.priority) for src in matching]
if prio.count(max(prio)) > 1:
- self.logger.error("Found conflicting sources with "
- "same priority for %s, %s %s" %
- (metadata.hostname,
- entry.tag.lower(), entry.get('name')))
+ msg = "Found conflicting sources with same priority for " + \
+ "%s:%s for %s" % (entry.tag, entry.get("name"),
+ metadata.hostname)
+ self.logger.error(msg)
self.logger.error([item.name for item in matching])
self.logger.error("Priority was %s" % max(prio))
- raise PluginExecutionError
+ raise PluginExecutionError(msg)
index = prio.index(max(prio))
for rname in list(matching[index].cache[1][entry.tag].keys()):
if self._matches(entry, metadata, [rname]):
data = matching[index].cache[1][entry.tag][rname]
break
+ else:
+ # Fall back on __getitem__. Required if override used
+ data = matching[index].cache[1][entry.tag][entry.get('name')]
if '__text__' in data:
entry.text = data['__text__']
if '__children__' in data:
@@ -896,18 +998,16 @@ class SpecificityError(Exception):
pass
-class Specificity:
-
- def __init__(self, all=False, group=False, hostname=False, prio=0, delta=False):
+class Specificity(CmpMixin):
+ def __init__(self, all=False, group=False, hostname=False, prio=0,
+ delta=False):
+ CmpMixin.__init__(self)
self.hostname = hostname
self.all = all
self.group = group
self.prio = prio
self.delta = delta
- def __lt__(self, other):
- return self.__cmp__(other) < 0
-
def matches(self, metadata):
return self.all or \
self.hostname == metadata.hostname or \
@@ -916,26 +1016,36 @@ class Specificity:
def __cmp__(self, other):
"""Sort most to least specific."""
if self.all:
- return 1
- if self.group:
+ if other.all:
+ return 0
+ else:
+ return 1
+ elif other.all:
+ return -1
+ elif self.group:
if other.hostname:
return 1
if other.group and other.prio > self.prio:
return 1
if other.group and other.prio == self.prio:
return 0
+ elif other.group:
+ return -1
+ elif self.hostname and other.hostname:
+ return 0
return -1
- def more_specific(self, other):
- """Test if self is more specific than other."""
+ def __str__(self):
+ rv = [self.__class__.__name__, ': ']
if self.all:
- True
+ rv.append("all")
elif self.group:
- if other.hostname:
- return True
- elif other.group and other.prio > self.prio:
- return True
- return False
+ rv.append("Group %s, priority %s" % (self.group, self.prio))
+ elif self.hostname:
+ rv.append("Host %s" % self.hostname)
+ if self.delta:
+ rv.append(", delta=%s" % self.delta)
+ return "".join(rv)
class SpecificData(object):
@@ -957,6 +1067,7 @@ class SpecificData(object):
class EntrySet(Debuggable):
"""Entry sets deal with the host- and group-specific entries."""
ignore = re.compile("^(\.#.*|.*~|\\..*\\.(sw[px])|.*\\.genshi_include)$")
+ basename_is_regex=False
def __init__(self, basename, path, entry_type, encoding):
Debuggable.__init__(self, name=basename)
@@ -966,14 +1077,15 @@ class EntrySet(Debuggable):
self.metadata = default_file_metadata.copy()
self.infoxml = None
self.encoding = encoding
- pattern = '(.*/)?%s(\.((H_(?P<hostname>\S+))|' % basename
+
+ if self.basename_is_regex:
+ base_pat = basename
+ else:
+ base_pat = re.escape(basename)
+ pattern = '(.*/)?%s(\.((H_(?P<hostname>\S+))|' % base_pat
pattern += '(G(?P<prio>\d+)_(?P<group>\S+))))?$'
self.specific = re.compile(pattern)
- def debug_log(self, message, flag=None):
- if (flag is None and self.debug_flag) or flag:
- logger.error(message)
-
def sort_by_specific(self, one, other):
return cmp(one.specific, other.specific)
@@ -987,20 +1099,13 @@ class EntrySet(Debuggable):
if matching is None:
matching = self.get_matching(metadata)
- hspec = [ent for ent in matching if ent.specific.hostname]
- if hspec:
- return hspec[0]
-
- gspec = [ent for ent in matching if ent.specific.group]
- if gspec:
- gspec.sort(self.group_sortfunc)
- return gspec[-1]
-
- aspec = [ent for ent in matching if ent.specific.all]
- if aspec:
- return aspec[0]
-
- raise PluginExecutionError
+ if matching:
+ matching.sort(key=operator.attrgetter("specific"))
+ return matching[0]
+ else:
+ raise PluginExecutionError("No matching entries available for %s "
+ "for %s" % (self.path,
+ metadata.hostname))
def handle_event(self, event):
"""Handle FAM events for the TemplateSet."""
@@ -1074,7 +1179,7 @@ class EntrySet(Debuggable):
fpath = os.path.join(self.path, event.filename)
if event.filename == 'info.xml':
if not self.infoxml:
- self.infoxml = InfoXML(fpath, True)
+ self.infoxml = InfoXML(fpath)
self.infoxml.HandleEvent(event)
elif event.filename in [':info', 'info']:
for line in open(fpath).readlines():
@@ -1089,8 +1194,7 @@ class EntrySet(Debuggable):
if value:
self.metadata[key] = value
if len(self.metadata['perms']) == 3:
- self.metadata['perms'] = "0%s" % \
- (self.metadata['perms'])
+ self.metadata['perms'] = "0%s" % self.metadata['perms']
def reset_metadata(self, event):
"""Reset metadata to defaults if info or info.xml removed."""
@@ -1099,26 +1203,12 @@ class EntrySet(Debuggable):
elif event.filename in [':info', 'info']:
self.metadata = default_file_metadata.copy()
- def group_sortfunc(self, x, y):
- """sort groups by their priority"""
- return cmp(x.specific.prio, y.specific.prio)
-
def bind_info_to_entry(self, entry, metadata):
- # first set defaults from global metadata/:info
- for key in self.metadata:
- entry.set(key, self.metadata[key])
- if self.infoxml:
- mdata = {}
- self.infoxml.pnode.Match(metadata, mdata, entry=entry)
- if 'Info' not in mdata:
- logger.error("Failed to set metadata for file %s" % \
- (entry.get('name')))
- raise PluginExecutionError
- [entry.attrib.__setitem__(key, value) \
- for (key, value) in list(mdata['Info'][None].items())]
+ bind_info(entry, metadata, infoxml=self.infoxml, default=self.metadata)
def bind_entry(self, entry, metadata):
- """Return the appropriate interpreted template from the set of available templates."""
+ """Return the appropriate interpreted template from the set of
+ available templates."""
self.bind_info_to_entry(entry, metadata)
return self.best_matching(metadata).bind_entry(entry, metadata)
@@ -1130,13 +1220,14 @@ class GroupSpool(Plugin, Generator):
filename_pattern = ""
es_child_cls = object
es_cls = EntrySet
+ entry_type = 'Path'
def __init__(self, core, datastore):
Plugin.__init__(self, core, datastore)
Generator.__init__(self)
if self.data[-1] == '/':
self.data = self.data[:-1]
- self.Entries['Path'] = {}
+ self.Entries[self.entry_type] = {}
self.entries = {}
self.handles = {}
self.AddDirectoryMonitor('')
@@ -1145,29 +1236,38 @@ class GroupSpool(Plugin, Generator):
def add_entry(self, event):
epath = self.event_path(event)
ident = self.event_id(event)
- if posixpath.isdir(epath):
+ if os.path.isdir(epath):
self.AddDirectoryMonitor(epath[len(self.data):])
- if ident not in self.entries and posixpath.isfile(epath):
- dirpath = "".join([self.data, ident])
+ if ident not in self.entries and os.path.isfile(epath):
+ dirpath = self.data + ident
self.entries[ident] = self.es_cls(self.filename_pattern,
dirpath,
self.es_child_cls,
self.encoding)
- self.Entries['Path'][ident] = self.entries[ident].bind_entry
- if not posixpath.isdir(epath):
+ self.Entries[self.entry_type][ident] = \
+ self.entries[ident].bind_entry
+ if not os.path.isdir(epath):
# do not pass through directory events
self.entries[ident].handle_event(event)
def event_path(self, event):
- return "".join([self.data, self.handles[event.requestID],
- event.filename])
+ return os.path.join(self.data,
+ self.handles[event.requestID].lstrip("/"),
+ event.filename)
def event_id(self, event):
epath = self.event_path(event)
- if posixpath.isdir(epath):
- return self.handles[event.requestID] + event.filename
+ if os.path.isdir(epath):
+ return os.path.join(self.handles[event.requestID].lstrip("/"),
+ event.filename)
else:
- return self.handles[event.requestID][:-1]
+ return self.handles[event.requestID].rstrip("/")
+
+ def toggle_debug(self):
+ for entry in self.entries.values():
+ if hasattr(entry, "toggle_debug"):
+ entry.toggle_debug()
+ return Plugin.toggle_debug(self)
def HandleEvent(self, event):
"""Unified FAM event handler for GroupSpool."""
@@ -1178,7 +1278,7 @@ class GroupSpool(Plugin, Generator):
if action in ['exists', 'created']:
self.add_entry(event)
- if action == 'changed':
+ elif action == 'changed':
if ident in self.entries:
self.entries[ident].handle_event(event)
else:
@@ -1193,7 +1293,7 @@ class GroupSpool(Plugin, Generator):
if fbase in self.entries:
# a directory was deleted
del self.entries[fbase]
- del self.Entries['Path'][fbase]
+ del self.Entries[self.entry_type][fbase]
elif ident in self.entries:
self.entries[ident].handle_event(event)
elif ident not in self.entries:
@@ -1206,8 +1306,8 @@ class GroupSpool(Plugin, Generator):
relative += '/'
name = self.data + relative
if relative not in list(self.handles.values()):
- if not posixpath.isdir(name):
- print("Failed to open directory %s" % (name))
+ if not os.path.isdir(name):
+ self.logger.error("Failed to open directory %s" % name)
return
reqid = self.core.fam.AddMonitor(name, self)
self.handles[reqid] = relative
diff --git a/src/lib/Bcfg2/Server/Plugins/BB.py b/src/lib/Bcfg2/Server/Plugins/BB.py
deleted file mode 100644
index c015ec47c..000000000
--- a/src/lib/Bcfg2/Server/Plugins/BB.py
+++ /dev/null
@@ -1,83 +0,0 @@
-import lxml.etree
-import Bcfg2.Server.Plugin
-import glob
-import os
-import socket
-
-#manage boot symlinks
- #add statistics check to do build->boot mods
-
-#map profiles: first array is not empty we replace the -p with a determined profile.
-logger = Bcfg2.Server.Plugin.logger
-
-class BBfile(Bcfg2.Server.Plugin.XMLFileBacked):
- """Class for bb files."""
- def Index(self):
- """Build data into an xml object."""
-
- try:
- self.data = lxml.etree.XML(self.data)
- except lxml.etree.XMLSyntaxError:
- Bcfg2.Server.Plugin.logger.error("Failed to parse %s" % self.name)
- return
- self.tftppath = self.data.get('tftp', '/tftpboot')
- self.macs = {}
- self.users = {}
- self.actions = {}
- self.bootlinks = []
-
- for node in self.data.findall('Node'):
- iface = node.find('Interface')
- if iface != None:
- mac = "01-%s" % (iface.get('mac'.replace(':','-').lower()))
- self.actions[node.get('name')] = node.get('action')
- self.bootlinks.append((mac, node.get('action')))
- try:
- ip = socket.gethostbyname(node.get('name'))
- except:
- logger.error("failed host resolution for %s" % node.get('name'))
-
- self.macs[node.get('name')] = (iface.get('mac'), ip)
- else:
- logger.error("%s" % lxml.etree.tostring(node))
- self.users[node.get('name')] = node.get('user',"").split(':')
-
- def enforce_bootlinks(self):
- for mac, target in self.bootlinks:
- path = self.tftppath + '/' + mac
- if not os.path.islink(path):
- logger.error("Boot file %s not a link" % path)
- if target != os.readlink(path):
- try:
- os.unlink(path)
- os.symlink(target, path)
- except:
- logger.error("Failed to modify link %s" % path)
-
-class BBDirectoryBacked(Bcfg2.Server.Plugin.DirectoryBacked):
- __child__ = BBfile
-
-
-class BB(Bcfg2.Server.Plugin.Plugin,
- Bcfg2.Server.Plugin.Connector):
- """The BB plugin maps users to machines and metadata to machines."""
- name = 'BB'
- deprecated = True
-
- def __init__(self, core, datastore):
- Bcfg2.Server.Plugin.Plugin.__init__(self, core, datastore)
- Bcfg2.Server.Plugin.Connector.__init__(self)
- self.store = BBDirectoryBacked(self.data, core.fam)
-
- def get_additional_data(self, metadata):
-
- users = {}
- for user in self.store.entries['bb.xml'].users.get(metadata.hostname.split(".")[0], []):
- pubkeys = []
- for fname in glob.glob('/home/%s/.ssh/*.pub'%user):
- pubkeys.append(open(fname).read())
-
- users[user] = pubkeys
-
- return dict([('users', users),
- ('macs', self.store.entries['bb.xml'].macs)])
diff --git a/src/lib/Bcfg2/Server/Plugins/Base.py b/src/lib/Bcfg2/Server/Plugins/Base.py
index 389ca7a95..2020f7795 100644
--- a/src/lib/Bcfg2/Server/Plugins/Base.py
+++ b/src/lib/Bcfg2/Server/Plugins/Base.py
@@ -3,10 +3,7 @@
import copy
import lxml.etree
import sys
-# py3k compatibility
-if sys.hexversion >= 0x03000000:
- from functools import reduce
-
+from Bcfg2.Bcfg2Py3k import reduce
import Bcfg2.Server.Plugin
diff --git a/src/lib/Bcfg2/Server/Plugins/Bundler.py b/src/lib/Bcfg2/Server/Plugins/Bundler.py
index ccb99481e..65914c371 100644
--- a/src/lib/Bcfg2/Server/Plugins/Bundler.py
+++ b/src/lib/Bcfg2/Server/Plugins/Bundler.py
@@ -1,25 +1,25 @@
"""This provides bundle clauses with translation functionality."""
import copy
+import logging
import lxml.etree
import os
import os.path
import re
import sys
-
+import Bcfg2.Server
import Bcfg2.Server.Plugin
+import Bcfg2.Server.Lint
try:
- import genshi.template
import genshi.template.base
- import Bcfg2.Server.Plugins.SGenshi
+ import Bcfg2.Server.Plugins.TGenshi
have_genshi = True
except:
have_genshi = False
class BundleFile(Bcfg2.Server.Plugin.StructFile):
-
def get_xml_value(self, metadata):
bundlename = os.path.splitext(os.path.basename(self.name))[0]
bundle = lxml.etree.Element('Bundle', name=bundlename)
@@ -27,6 +27,58 @@ class BundleFile(Bcfg2.Server.Plugin.StructFile):
return bundle
+if have_genshi:
+ class BundleTemplateFile(Bcfg2.Server.Plugins.TGenshi.TemplateFile,
+ Bcfg2.Server.Plugin.StructFile):
+ def __init__(self, name, specific, encoding):
+ Bcfg2.Server.Plugins.TGenshi.TemplateFile.__init__(self, name,
+ specific,
+ encoding)
+ Bcfg2.Server.Plugin.StructFile.__init__(self, name)
+ self.logger = logging.getLogger(name)
+
+ def get_xml_value(self, metadata):
+ if not hasattr(self, 'template'):
+ self.logger.error("No parsed template information for %s" %
+ self.name)
+ raise Bcfg2.Server.Plugin.PluginExecutionError
+ try:
+ stream = self.template.generate(metadata=metadata).filter(
+ Bcfg2.Server.Plugins.TGenshi.removecomment)
+ data = lxml.etree.XML(stream.render('xml',
+ strip_whitespace=False),
+ parser=Bcfg2.Server.XMLParser)
+ bundlename = os.path.splitext(os.path.basename(self.name))[0]
+ bundle = lxml.etree.Element('Bundle', name=bundlename)
+ for item in self.Match(metadata, data):
+ bundle.append(copy.deepcopy(item))
+ return bundle
+ except LookupError:
+ lerror = sys.exc_info()[1]
+ self.logger.error('Genshi lookup error: %s' % lerror)
+ except genshi.template.TemplateError:
+ terror = sys.exc_info()[1]
+ self.logger.error('Genshi template error: %s' % terror)
+ raise
+ except genshi.input.ParseError:
+ perror = sys.exc_info()[1]
+ self.logger.error('Genshi parse error: %s' % perror)
+ raise
+
+ def Match(self, metadata, xdata):
+ """Return matching fragments of parsed template."""
+ rv = []
+ for child in xdata.getchildren():
+ rv.extend(self._match(child, metadata))
+ self.logger.debug("File %s got %d match(es)" % (self.name, len(rv)))
+ return rv
+
+
+ class SGenshiTemplateFile(BundleTemplateFile):
+ # provided for backwards compat
+ pass
+
+
class Bundler(Bcfg2.Server.Plugin.Plugin,
Bcfg2.Server.Plugin.Structure,
Bcfg2.Server.Plugin.XMLDirectoryBacked):
@@ -50,25 +102,20 @@ class Bundler(Bcfg2.Server.Plugin.Plugin,
self.logger.error("Failed to load Bundle repository")
raise Bcfg2.Server.Plugin.PluginInitError
- def template_dispatch(self, name):
- bundle = lxml.etree.parse(name)
+ def template_dispatch(self, name, _):
+ bundle = lxml.etree.parse(name,
+ parser=Bcfg2.Server.XMLParser)
nsmap = bundle.getroot().nsmap
- if name.endswith('.xml'):
- if have_genshi and \
- (nsmap == {'py': 'http://genshi.edgewall.org/'}):
- # allow for genshi bundles with .xml extensions
- spec = Bcfg2.Server.Plugin.Specificity()
- return Bcfg2.Server.Plugins.SGenshi.SGenshiTemplateFile(name,
- spec,
- self.encoding)
- else:
- return BundleFile(name)
- elif name.endswith('.genshi'):
+ if (name.endswith('.genshi') or
+ ('py' in nsmap and
+ nsmap['py'] == 'http://genshi.edgewall.org/')):
if have_genshi:
spec = Bcfg2.Server.Plugin.Specificity()
- return Bcfg2.Server.Plugins.SGenshi.SGenshiTemplateFile(name,
- spec,
- self.encoding)
+ return BundleTemplateFile(name, spec, self.encoding)
+ else:
+ raise Bcfg2.Server.Plugin.PluginExecutionError("Genshi not available: %s" % name)
+ else:
+ return BundleFile(name, self.fam)
def BuildStructures(self, metadata):
"""Build all structures for client (metadata)."""
@@ -97,3 +144,54 @@ class Bundler(Bcfg2.Server.Plugin.Plugin,
self.logger.error("Bundler: Unexpected bundler error for %s" %
bundlename, exc_info=1)
return bundleset
+
+
+class BundlerLint(Bcfg2.Server.Lint.ServerPlugin):
+ """ Perform various bundle checks """
+ def Run(self):
+ """ run plugin """
+ self.missing_bundles()
+ for bundle in self.core.plugins['Bundler'].entries.values():
+ if (self.HandlesFile(bundle.name) and
+ (not have_genshi or
+ not isinstance(bundle, BundleTemplateFile))):
+ self.bundle_names(bundle)
+
+ @classmethod
+ def Errors(cls):
+ return {"bundle-not-found":"error",
+ "inconsistent-bundle-name":"warning"}
+
+ def missing_bundles(self):
+ """ find bundles listed in Metadata but not implemented in Bundler """
+ if self.files is None:
+ # when given a list of files on stdin, this check is
+ # useless, so skip it
+ groupdata = self.metadata.groups_xml.xdata
+ ref_bundles = set([b.get("name")
+ for b in groupdata.findall("//Bundle")])
+
+ allbundles = self.core.plugins['Bundler'].entries.keys()
+ for bundle in ref_bundles:
+ xmlbundle = "%s.xml" % bundle
+ genshibundle = "%s.genshi" % bundle
+ if (xmlbundle not in allbundles and
+ genshibundle not in allbundles):
+ self.LintError("bundle-not-found",
+ "Bundle %s referenced, but does not exist" %
+ bundle)
+
+ def bundle_names(self, bundle):
+ """ verify bundle name attribute matches filename """
+ try:
+ xdata = lxml.etree.XML(bundle.data)
+ except AttributeError:
+ # genshi template
+ xdata = lxml.etree.parse(bundle.template.filepath).getroot()
+
+ fname = bundle.name.split('Bundler/')[1].split('.')[0]
+ bname = xdata.get('name')
+ if fname != bname:
+ self.LintError("inconsistent-bundle-name",
+ "Inconsistent bundle name: filename is %s, "
+ "bundle name is %s" % (fname, bname))
diff --git a/src/lib/Bcfg2/Server/Plugins/Cfg/CfgCheetahGenerator.py b/src/lib/Bcfg2/Server/Plugins/Cfg/CfgCheetahGenerator.py
index 3edd1d8cb..f02461673 100644
--- a/src/lib/Bcfg2/Server/Plugins/Cfg/CfgCheetahGenerator.py
+++ b/src/lib/Bcfg2/Server/Plugins/Cfg/CfgCheetahGenerator.py
@@ -6,8 +6,7 @@ from Bcfg2.Server.Plugins.Cfg import CfgGenerator
logger = logging.getLogger(__name__)
try:
- import Cheetah.Template
- import Cheetah.Parser
+ from Cheetah.Template import Template
have_cheetah = True
except ImportError:
have_cheetah = False
@@ -25,9 +24,9 @@ class CfgCheetahGenerator(CfgGenerator):
raise Bcfg2.Server.Plugin.PluginExecutionError(msg)
def get_data(self, entry, metadata):
- template = Cheetah.Template.Template(self.data,
- compilerSettings=self.settings)
+ template = Template(self.data.decode(self.encoding),
+ compilerSettings=self.settings)
template.metadata = metadata
template.path = entry.get('realname', entry.get('name'))
- template.source_path = self.path
+ template.source_path = self.name
return template.respond()
diff --git a/src/lib/Bcfg2/Server/Plugins/Cfg/CfgEncryptedCheetahGenerator.py b/src/lib/Bcfg2/Server/Plugins/Cfg/CfgEncryptedCheetahGenerator.py
new file mode 100644
index 000000000..a75329d2a
--- /dev/null
+++ b/src/lib/Bcfg2/Server/Plugins/Cfg/CfgEncryptedCheetahGenerator.py
@@ -0,0 +1,14 @@
+import logging
+from Bcfg2.Server.Plugins.Cfg.CfgCheetahGenerator import CfgCheetahGenerator
+from Bcfg2.Server.Plugins.Cfg.CfgEncryptedGenerator import CfgEncryptedGenerator
+
+logger = logging.getLogger(__name__)
+
+class CfgEncryptedCheetahGenerator(CfgCheetahGenerator, CfgEncryptedGenerator):
+ __extensions__ = ['cheetah.crypt', 'crypt.cheetah']
+
+ def handle_event(self, event):
+ CfgEncryptedGenerator.handle_event(self, event)
+
+ def get_data(self, entry, metadata):
+ return CfgCheetahGenerator.get_data(self, entry, metadata)
diff --git a/src/lib/Bcfg2/Server/Plugins/Cfg/CfgEncryptedGenerator.py b/src/lib/Bcfg2/Server/Plugins/Cfg/CfgEncryptedGenerator.py
new file mode 100644
index 000000000..2c926fae7
--- /dev/null
+++ b/src/lib/Bcfg2/Server/Plugins/Cfg/CfgEncryptedGenerator.py
@@ -0,0 +1,63 @@
+import logging
+import Bcfg2.Server.Plugin
+from Bcfg2.Server.Plugins.Cfg import CfgGenerator, SETUP
+try:
+ from Bcfg2.Encryption import ssl_decrypt, EVPError
+ have_crypto = True
+except ImportError:
+ have_crypto = False
+
+logger = logging.getLogger(__name__)
+
+def passphrases():
+ section = "encryption"
+ if SETUP.cfp.has_section(section):
+ return dict([(o, SETUP.cfp.get(section, o))
+ for o in SETUP.cfp.options(section)])
+ else:
+ return dict()
+
+def decrypt(crypted):
+ if not have_crypto:
+        msg = "Cfg: M2Crypto is not available"
+ logger.error(msg)
+ raise Bcfg2.Server.Plugin.PluginExecutionError(msg)
+ for passwd in passphrases().values():
+ try:
+ return ssl_decrypt(crypted, passwd)
+ except EVPError:
+ pass
+ raise EVPError("Failed to decrypt")
+
+class CfgEncryptedGenerator(CfgGenerator):
+ __extensions__ = ["crypt"]
+
+ def __init__(self, fname, spec, encoding):
+ CfgGenerator.__init__(self, fname, spec, encoding)
+ if not have_crypto:
+            msg = "Cfg: M2Crypto is not available: %s" % fname
+ logger.error(msg)
+ raise Bcfg2.Server.Plugin.PluginExecutionError(msg)
+
+ def handle_event(self, event):
+ if event.code2str() == 'deleted':
+ return
+ try:
+ crypted = open(self.name).read()
+ except UnicodeDecodeError:
+ crypted = open(self.name, mode='rb').read()
+ except:
+ logger.error("Failed to read %s" % self.name)
+ return
+ # todo: let the user specify a passphrase by name
+ try:
+ self.data = decrypt(crypted)
+ except EVPError:
+ msg = "Failed to decrypt %s" % self.name
+ logger.error(msg)
+ raise Bcfg2.Server.Plugin.PluginExecutionError(msg)
+
+ def get_data(self, entry, metadata):
+ if self.data is None:
+ raise Bcfg2.Server.Plugin.PluginExecutionError("Failed to decrypt %s" % self.name)
+ return CfgGenerator.get_data(self, entry, metadata)
diff --git a/src/lib/Bcfg2/Server/Plugins/Cfg/CfgEncryptedGenshiGenerator.py b/src/lib/Bcfg2/Server/Plugins/Cfg/CfgEncryptedGenshiGenerator.py
new file mode 100644
index 000000000..6605cca7c
--- /dev/null
+++ b/src/lib/Bcfg2/Server/Plugins/Cfg/CfgEncryptedGenshiGenerator.py
@@ -0,0 +1,26 @@
+import logging
+from Bcfg2.Bcfg2Py3k import StringIO
+from Bcfg2.Server.Plugins.Cfg.CfgGenshiGenerator import CfgGenshiGenerator
+from Bcfg2.Server.Plugins.Cfg.CfgEncryptedGenerator import decrypt, \
+ CfgEncryptedGenerator
+
+logger = logging.getLogger(__name__)
+
+try:
+ from genshi.template import TemplateLoader
+except ImportError:
+ # CfgGenshiGenerator will raise errors if genshi doesn't exist
+ TemplateLoader = object
+
+
+class EncryptedTemplateLoader(TemplateLoader):
+ def _instantiate(self, cls, fileobj, filepath, filename, encoding=None):
+ plaintext = StringIO(decrypt(fileobj.read()))
+ return TemplateLoader._instantiate(self, cls, plaintext, filepath,
+ filename, encoding=encoding)
+
+
+class CfgEncryptedGenshiGenerator(CfgGenshiGenerator):
+ __extensions__ = ['genshi.crypt', 'crypt.genshi']
+ __loader_cls__ = EncryptedTemplateLoader
+
diff --git a/src/lib/Bcfg2/Server/Plugins/Cfg/CfgGenshiGenerator.py b/src/lib/Bcfg2/Server/Plugins/Cfg/CfgGenshiGenerator.py
index 2c0a076d7..277a26f97 100644
--- a/src/lib/Bcfg2/Server/Plugins/Cfg/CfgGenshiGenerator.py
+++ b/src/lib/Bcfg2/Server/Plugins/Cfg/CfgGenshiGenerator.py
@@ -1,5 +1,7 @@
+import re
import sys
import logging
+import traceback
import Bcfg2.Server.Plugin
from Bcfg2.Server.Plugins.Cfg import CfgGenerator
@@ -8,8 +10,10 @@ logger = logging.getLogger(__name__)
try:
import genshi.core
from genshi.template import TemplateLoader, NewTextTemplate
+ from genshi.template.eval import UndefinedError
have_genshi = True
except ImportError:
+ TemplateLoader = None
have_genshi = False
# snipped from TGenshi
@@ -23,14 +27,17 @@ def removecomment(stream):
class CfgGenshiGenerator(CfgGenerator):
__extensions__ = ['genshi']
+ __loader_cls__ = TemplateLoader
+ pyerror_re = re.compile('<\w+ u?[\'"](.*?)\s*\.\.\.[\'"]>')
def __init__(self, fname, spec, encoding):
CfgGenerator.__init__(self, fname, spec, encoding)
- self.loader = TemplateLoader()
if not have_genshi:
- msg = "Cfg: Genshi is not available: %s" % entry.get("name")
+ msg = "Cfg: Genshi is not available: %s" % fname
logger.error(msg)
raise Bcfg2.Server.Plugin.PluginExecutionError(msg)
+ self.loader = self.__loader_cls__()
+ self.template = None
@classmethod
def ignore(cls, event, basename=None):
@@ -44,10 +51,63 @@ class CfgGenshiGenerator(CfgGenerator):
metadata=metadata,
path=self.name).filter(removecomment)
try:
- return stream.render('text', encoding=self.encoding,
- strip_whitespace=False)
- except TypeError:
- return stream.render('text', encoding=self.encoding)
+ try:
+ return stream.render('text', encoding=self.encoding,
+ strip_whitespace=False)
+ except TypeError:
+ return stream.render('text', encoding=self.encoding)
+ except UndefinedError:
+ # a failure in a genshi expression _other_ than %{ python ... %}
+ err = sys.exc_info()[1]
+ stack = traceback.extract_tb(sys.exc_info()[2])
+ for quad in stack:
+ if quad[0] == self.name:
+ logger.error("Cfg: Error rendering %s at %s: %s" %
+                                 (self.name, quad[2], err))
+ break
+ raise
+ except:
+ # a failure in a %{ python ... %} block -- the snippet in
+ # the traceback is just the beginning of the block.
+ err = sys.exc_info()[1]
+ stack = traceback.extract_tb(sys.exc_info()[2])
+ (filename, lineno, func, text) = stack[-1]
+ # this is horrible, and I deeply apologize to whoever gets
+ # to maintain this after I go to the Great Beer Garden in
+ # the Sky. genshi is incredibly opaque about what's being
+ # executed, so the only way I can find to determine which
+ # {% python %} block is being executed -- if there are
+ # multiples -- is to iterate through them and match the
+ # snippet of the first line that's in the traceback with
+ # the first non-empty line of the block.
+ execs = [contents
+ for etype, contents, loc in self.template.stream
+ if etype == self.template.EXEC]
+ contents = None
+ if len(execs) == 1:
+ contents = execs[0]
+ elif len(execs) > 1:
+                match = self.pyerror_re.match(func)
+                if match:
+                    firstline = match.group(1)
+ for pyblock in execs:
+ if pyblock.startswith(firstline):
+ contents = pyblock
+ break
+ # else, no EXEC blocks -- WTF?
+ if contents:
+ # we now have the bogus block, but we need to get the
+ # offending line. To get there, we do (line number
+ # given in the exception) - (firstlineno from the
+ # internal genshi code object of the snippet) + 1 =
+ # (line number of the line with an error within the
+ # block, with all multiple line breaks elided to a
+ # single line break)
+ real_lineno = lineno - contents.code.co_firstlineno
+ src = re.sub(r'\n\n+', '\n', contents.source).splitlines()
+ logger.error("Cfg: Error rendering %s at %s: %s" %
+                             (self.name, src[real_lineno], err))
+ raise
def handle_event(self, event):
if event.code2str() == 'deleted':
diff --git a/src/lib/Bcfg2/Server/Plugins/Cfg/CfgInfoXML.py b/src/lib/Bcfg2/Server/Plugins/Cfg/CfgInfoXML.py
index 8e962efb4..956ebfe17 100644
--- a/src/lib/Bcfg2/Server/Plugins/Cfg/CfgInfoXML.py
+++ b/src/lib/Bcfg2/Server/Plugins/Cfg/CfgInfoXML.py
@@ -9,7 +9,7 @@ class CfgInfoXML(CfgInfo):
def __init__(self, path):
CfgInfo.__init__(self, path)
- self.infoxml = Bcfg2.Server.Plugin.InfoXML(path, noprio=True)
+ self.infoxml = Bcfg2.Server.Plugin.InfoXML(path)
def bind_info_to_entry(self, entry, metadata):
mdata = dict()
@@ -22,3 +22,9 @@ class CfgInfoXML(CfgInfo):
def handle_event(self, event):
self.infoxml.HandleEvent()
+
+ def _set_info(self, entry, info):
+ CfgInfo._set_info(self, entry, info)
+ if '__children__' in info:
+ for child in info['__children__']:
+ entry.append(child)
diff --git a/src/lib/Bcfg2/Server/Plugins/Cfg/CfgLegacyInfo.py b/src/lib/Bcfg2/Server/Plugins/Cfg/CfgLegacyInfo.py
index 54c17c6c5..85c13c1ac 100644
--- a/src/lib/Bcfg2/Server/Plugins/Cfg/CfgLegacyInfo.py
+++ b/src/lib/Bcfg2/Server/Plugins/Cfg/CfgLegacyInfo.py
@@ -7,6 +7,10 @@ logger = logging.getLogger(__name__)
class CfgLegacyInfo(CfgInfo):
__basenames__ = ['info', ':info']
+ def __init__(self, path):
+ CfgInfo.__init__(self, path)
+ self.path = path
+
def bind_info_to_entry(self, entry, metadata):
self._set_info(entry, self.metadata)
diff --git a/src/lib/Bcfg2/Server/Plugins/Cfg/__init__.py b/src/lib/Bcfg2/Server/Plugins/Cfg/__init__.py
index 6c7585993..fe993ab54 100644
--- a/src/lib/Bcfg2/Server/Plugins/Cfg/__init__.py
+++ b/src/lib/Bcfg2/Server/Plugins/Cfg/__init__.py
@@ -6,11 +6,11 @@ import sys
import stat
import pkgutil
import logging
-import binascii
import lxml.etree
import Bcfg2.Options
import Bcfg2.Server.Plugin
-from Bcfg2.Bcfg2Py3k import u_str
+from Bcfg2.Bcfg2Py3k import u_str, unicode, b64encode
+import Bcfg2.Server.Lint
logger = logging.getLogger(__name__)
@@ -113,7 +113,8 @@ class CfgInfo(CfgBaseFileMatcher):
def _set_info(self, entry, info):
for key, value in list(info.items()):
- entry.attrib.__setitem__(key, value)
+ if not key.startswith("__"):
+ entry.attrib.__setitem__(key, value)
class CfgVerifier(CfgBaseFileMatcher):
@@ -152,7 +153,19 @@ class CfgEntrySet(Bcfg2.Server.Plugin.EntrySet):
global PROCESSORS
if PROCESSORS is None:
PROCESSORS = []
- for submodule in pkgutil.walk_packages(path=__path__):
+ if hasattr(pkgutil, 'walk_packages'):
+ submodules = pkgutil.walk_packages(path=__path__)
+ else:
+ #python 2.4
+ import glob
+ submodules = []
+ for path in __path__:
+ for submodule in glob.glob(os.path.join(path, "*.py")):
+ mod = '.'.join(submodule.split("/")[-1].split('.')[:-1])
+ if mod != '__init__':
+ submodules.append((None, mod, True))
+
+ for submodule in submodules:
module = getattr(__import__("%s.%s" %
(__name__,
submodule[1])).Server.Plugins.Cfg,
@@ -185,6 +198,7 @@ class CfgEntrySet(Bcfg2.Server.Plugin.EntrySet):
return
elif action == 'changed':
self.entries[event.filename].handle_event(event)
+ return
elif action == 'deleted':
del self.entries[event.filename]
return
@@ -192,6 +206,11 @@ class CfgEntrySet(Bcfg2.Server.Plugin.EntrySet):
logger.error("Could not process event %s for %s; ignoring" %
(action, event.filename))
+ def get_matching(self, metadata):
+ return [item for item in list(self.entries.values())
+ if (isinstance(item, CfgGenerator) and
+ item.specific.matches(metadata))]
+
def entry_init(self, event, proc):
if proc.__specific__:
Bcfg2.Server.Plugin.EntrySet.entry_init(
@@ -270,10 +289,11 @@ class CfgEntrySet(Bcfg2.Server.Plugin.EntrySet):
raise Bcfg2.Server.Plugin.PluginExecutionError(msg)
if entry.get('encoding') == 'base64':
- data = binascii.b2a_base64(data)
+ data = b64encode(data)
else:
try:
- data = u_str(data, self.encoding)
+ if not isinstance(data, unicode):
+ data = u_str(data, self.encoding)
except UnicodeDecodeError:
msg = "Failed to decode %s: %s" % (entry.get('name'),
sys.exc_info()[1])
@@ -287,6 +307,10 @@ class CfgEntrySet(Bcfg2.Server.Plugin.EntrySet):
logger.error("You need to specify base64 encoding for %s." %
entry.get('name'))
raise Bcfg2.Server.Plugin.PluginExecutionError(msg)
+ except TypeError:
+ # data is already unicode; newer versions of Cheetah
+ # seem to return unicode
+ pass
if data:
entry.text = data
@@ -298,7 +322,7 @@ class CfgEntrySet(Bcfg2.Server.Plugin.EntrySet):
generators = [ent for ent in list(self.entries.values())
if (isinstance(ent, CfgGenerator) and
ent.specific.matches(metadata))]
- if not matching:
+ if not generators:
msg = "No base file found for %s" % entry.get('name')
logger.error(msg)
raise Bcfg2.Server.Plugin.PluginExecutionError(msg)
@@ -347,26 +371,26 @@ class CfgEntrySet(Bcfg2.Server.Plugin.EntrySet):
if attr in new_entry]
if badattr:
# check for info files and inform user of their removal
- if os.path.exists(self.path + "/:info"):
- logger.info("Removing :info file and replacing with "
- "info.xml")
- os.remove(self.path + "/:info")
- if os.path.exists(self.path + "/info"):
- logger.info("Removing info file and replacing with "
- "info.xml")
- os.remove(self.path + "/info")
+ for ifile in ['info', ':info']:
+ info = os.path.join(self.path, ifile)
+ if os.path.exists(info):
+ logger.info("Removing %s and replacing with info.xml" %
+ info)
+ os.remove(info)
metadata_updates = {}
metadata_updates.update(self.metadata)
for attr in badattr:
metadata_updates[attr] = new_entry.get(attr)
infoxml = lxml.etree.Element('FileInfo')
infotag = lxml.etree.SubElement(infoxml, 'Info')
- [infotag.attrib.__setitem__(attr, metadata_updates[attr]) \
- for attr in metadata_updates]
+ [infotag.attrib.__setitem__(attr, metadata_updates[attr])
+ for attr in metadata_updates]
ofile = open(self.path + "/info.xml", "w")
- ofile.write(lxml.etree.tostring(infoxml, pretty_print=True))
+ ofile.write(lxml.etree.tostring(infoxml, xml_declaration=False,
+ pretty_print=True).decode('UTF-8'))
ofile.close()
- self.debug_log("Wrote file %s" % (self.path + "/info.xml"),
+ self.debug_log("Wrote file %s" % os.path.join(self.path,
+ "info.xml"),
flag=log)
@@ -385,9 +409,22 @@ class Cfg(Bcfg2.Server.Plugin.GroupSpool,
SETUP = core.setup
if 'validate' not in SETUP:
- SETUP['validate'] = Bcfg2.Options.CFG_VALIDATION
+ SETUP.add_option('validate', Bcfg2.Options.CFG_VALIDATION)
SETUP.reparse()
+ def has_generator(self, entry, metadata):
+ """ return True if the given entry can be generated for the
+ given metadata; False otherwise """
+ if entry.get('name') not in self.entries:
+ return False
+
+ for ent in self.entries[entry.get('name')].entries.values():
+ if ent.__specific__ and not ent.specific.matches(metadata):
+ continue
+ if isinstance(ent, CfgGenerator):
+ return True
+ return False
+
def AcceptChoices(self, entry, metadata):
return self.entries[entry.get('name')].list_accept_choices(entry,
metadata)
@@ -396,3 +433,26 @@ class Cfg(Bcfg2.Server.Plugin.GroupSpool,
return self.entries[new_entry.get('name')].write_update(specific,
new_entry,
log)
+
+class CfgLint(Bcfg2.Server.Lint.ServerPlugin):
+ """ warn about usage of .cat and .diff files """
+
+ def Run(self):
+ for basename, entry in list(self.core.plugins['Cfg'].entries.items()):
+ self.check_entry(basename, entry)
+
+
+ @classmethod
+ def Errors(cls):
+ return {"cat-file-used":"warning",
+ "diff-file-used":"warning"}
+
+    def check_entry(self, basename, entry):
+        # Run() already iterates over every Cfg entry; only examine
+        # the entry passed in, else each warning is emitted N times
+        for fname, processor in entry.entries.items():
+            if self.HandlesFile(fname) and isinstance(processor, CfgFilter):
+                extension = fname.split(".")[-1]
+                self.LintError("%s-file-used" % extension,
+                               "%s file used on %s: %s" %
+                               (extension, basename, fname))
diff --git a/src/lib/Bcfg2/Server/Plugins/DBStats.py b/src/lib/Bcfg2/Server/Plugins/DBStats.py
index 999e078b9..63c590f0f 100644
--- a/src/lib/Bcfg2/Server/Plugins/DBStats.py
+++ b/src/lib/Bcfg2/Server/Plugins/DBStats.py
@@ -1,8 +1,8 @@
-import binascii
import difflib
import logging
import lxml.etree
import platform
+import sys
import time
try:
@@ -11,61 +11,47 @@ except ImportError:
pass
import Bcfg2.Server.Plugin
-import Bcfg2.Server.Reports.importscript
+from Bcfg2.Server.Reports.importscript import load_stat
from Bcfg2.Server.Reports.reports.models import Client
-import Bcfg2.Server.Reports.settings
-from Bcfg2.Server.Reports.updatefix import update_database
+from Bcfg2.Bcfg2Py3k import b64decode
+
# for debugging output only
logger = logging.getLogger('Bcfg2.Plugins.DBStats')
-class DBStats(Bcfg2.Server.Plugin.Plugin,
- Bcfg2.Server.Plugin.ThreadedStatistics,
+
+class DBStats(Bcfg2.Server.Plugin.ThreadedStatistics,
Bcfg2.Server.Plugin.PullSource):
name = 'DBStats'
def __init__(self, core, datastore):
- Bcfg2.Server.Plugin.Plugin.__init__(self, core, datastore)
Bcfg2.Server.Plugin.ThreadedStatistics.__init__(self, core, datastore)
Bcfg2.Server.Plugin.PullSource.__init__(self)
self.cpath = "%s/Metadata/clients.xml" % datastore
self.core = core
- logger.debug("Searching for new models to add to the statistics database")
- try:
- update_database()
- except Exception:
- inst = sys.exc_info()[1]
- logger.debug(str(inst))
- logger.debug(str(type(inst)))
+ if not self.core.database_available:
+ raise Bcfg2.Server.Plugin.PluginInitError
def handle_statistic(self, metadata, data):
newstats = data.find("Statistics")
newstats.set('time', time.asctime(time.localtime()))
- # ick
- data = lxml.etree.tostring(newstats)
- ndx = lxml.etree.XML(data)
- e = lxml.etree.Element('Node', name=metadata.hostname)
- e.append(ndx)
- container = lxml.etree.Element("ConfigStatistics")
- container.append(e)
- # FIXME need to build a metadata interface to expose a list of clients
start = time.time()
for i in [1, 2, 3]:
try:
- Bcfg2.Server.Reports.importscript.load_stats(self.core.metadata.clients_xml.xdata,
- container,
- self.core.encoding,
- 0,
- logger,
- True,
- platform.node())
+ load_stat(metadata,
+ newstats,
+ self.core.encoding,
+ 0,
+ logger,
+ True,
+ platform.node())
logger.info("Imported data for %s in %s seconds" \
% (metadata.hostname, time.time() - start))
return
except MultipleObjectsReturned:
e = sys.exc_info()[1]
- logger.error("DBStats: MultipleObjectsReturned while handling %s: %s" % \
- (metadata.hostname, e))
+ logger.error("DBStats: MultipleObjectsReturned while "
+ "handling %s: %s" % (metadata.hostname, e))
logger.error("DBStats: Data is inconsistent")
break
except:
@@ -100,10 +86,10 @@ class DBStats(Bcfg2.Server.Plugin.Plugin,
if entry.reason.is_sensitive:
raise Bcfg2.Server.Plugin.PluginExecutionError
elif len(entry.reason.unpruned) != 0:
- ret.append('\n'.join(entry.reason.unpruned))
+ ret.append('\n'.join(entry.reason.unpruned))
elif entry.reason.current_diff != '':
if entry.reason.is_binary:
- ret.append(binascii.a2b_base64(entry.reason.current_diff))
+ ret.append(b64decode(entry.reason.current_diff))
else:
ret.append('\n'.join(difflib.restore(\
entry.reason.current_diff.split('\n'), 1)))
diff --git a/src/lib/Bcfg2/Server/Plugins/Decisions.py b/src/lib/Bcfg2/Server/Plugins/Decisions.py
index b432474f2..90d9ecbe3 100644
--- a/src/lib/Bcfg2/Server/Plugins/Decisions.py
+++ b/src/lib/Bcfg2/Server/Plugins/Decisions.py
@@ -14,6 +14,8 @@ class DecisionFile(Bcfg2.Server.Plugin.SpecificData):
return [(x.get('type'), x.get('name')) for x in self.contents.xpath('.//Decision')]
class DecisionSet(Bcfg2.Server.Plugin.EntrySet):
+ basename_is_regex = True
+
def __init__(self, path, fam, encoding):
"""Container for decision specification files.
@@ -23,8 +25,7 @@ class DecisionSet(Bcfg2.Server.Plugin.EntrySet):
- `encoding`: XML character encoding
"""
- pattern = '(white|black)list'
- Bcfg2.Server.Plugin.EntrySet.__init__(self, pattern, path, \
+ Bcfg2.Server.Plugin.EntrySet.__init__(self, '(white|black)list', path,
DecisionFile, encoding)
try:
fam.AddMonitor(path, self)
diff --git a/src/lib/Bcfg2/Server/Plugins/Deps.py b/src/lib/Bcfg2/Server/Plugins/Deps.py
index 9b848baae..d3a1ee871 100644
--- a/src/lib/Bcfg2/Server/Plugins/Deps.py
+++ b/src/lib/Bcfg2/Server/Plugins/Deps.py
@@ -7,27 +7,10 @@ import Bcfg2.Server.Plugin
class DNode(Bcfg2.Server.Plugin.INode):
"""DNode provides supports for single predicate types for dependencies."""
- raw = {'Group': "lambda m, e:'%(name)s' in m.groups and predicate(m, e)"}
- containers = ['Group']
-
- def __init__(self, data, idict, parent=None):
- self.data = data
- self.contents = {}
- if parent == None:
- self.predicate = lambda x, d: True
- else:
- predicate = parent.predicate
- if data.tag in list(self.raw.keys()):
- self.predicate = eval(self.raw[data.tag] %
- {'name': data.get('name')},
- {'predicate': predicate})
- else:
- raise Exception
- mytype = self.__class__
- self.children = []
+ def _load_children(self, data, idict):
for item in data.getchildren():
if item.tag in self.containers:
- self.children.append(mytype(item, idict, self))
+ self.children.append(self.__class__(item, idict, self))
else:
data = [(child.tag, child.get('name'))
for child in item.getchildren()]
diff --git a/src/lib/Bcfg2/Server/Plugins/FileProbes.py b/src/lib/Bcfg2/Server/Plugins/FileProbes.py
index 5beec7be0..632d586e8 100644
--- a/src/lib/Bcfg2/Server/Plugins/FileProbes.py
+++ b/src/lib/Bcfg2/Server/Plugins/FileProbes.py
@@ -7,23 +7,24 @@ the client """
import os
import sys
import errno
-import binascii
import lxml.etree
import Bcfg2.Options
+import Bcfg2.Server
import Bcfg2.Server.Plugin
+from Bcfg2.Bcfg2Py3k import b64decode
probecode = """#!/usr/bin/env python
import os
import pwd
import grp
-import binascii
import lxml.etree
+from Bcfg2.Bcfg2Py3k import b64encode
path = "%s"
if not os.path.exists(path):
- print "%%s does not exist" %% path
+ print("%%s does not exist" %% path)
raise SystemExit(1)
stat = os.stat(path)
@@ -32,18 +33,10 @@ data = lxml.etree.Element("ProbedFileData",
owner=pwd.getpwuid(stat[4])[0],
group=grp.getgrgid(stat[5])[0],
perms=oct(stat[0] & 07777))
-data.text = binascii.b2a_base64(open(path).read())
-print lxml.etree.tostring(data)
+data.text = b64encode(open(path).read())
+print(lxml.etree.tostring(data, xml_declaration=False).decode('UTF-8'))
"""
-class FileProbesConfig(Bcfg2.Server.Plugin.SingleXMLFileBacked,
- Bcfg2.Server.Plugin.StructFile):
- """ Config file handler for FileProbes """
- def __init__(self, filename, fam):
- Bcfg2.Server.Plugin.SingleXMLFileBacked.__init__(self, filename, fam)
- Bcfg2.Server.Plugin.StructFile.__init__(self, filename)
-
-
class FileProbes(Bcfg2.Server.Plugin.Plugin,
Bcfg2.Server.Plugin.Probing):
""" This module allows you to probe a client for a file, which is then
@@ -53,14 +46,15 @@ class FileProbes(Bcfg2.Server.Plugin.Plugin,
the client """
name = 'FileProbes'
- experimental = True
__author__ = 'chris.a.st.pierre@gmail.com'
def __init__(self, core, datastore):
Bcfg2.Server.Plugin.Plugin.__init__(self, core, datastore)
Bcfg2.Server.Plugin.Probing.__init__(self)
- self.config = FileProbesConfig(os.path.join(self.data, 'config.xml'),
- core.fam)
+ self.config = Bcfg2.Server.Plugin.StructFile(os.path.join(self.data,
+ 'config.xml'),
+ fam=core.fam,
+ should_monitor=True)
self.entries = dict()
self.probes = dict()
@@ -75,13 +69,9 @@ class FileProbes(Bcfg2.Server.Plugin.Plugin,
# do not probe for files that are already in Cfg and
# for which update is false; we can't possibly do
# anything with the data we get from such a probe
- try:
- if (entry.get('update', 'false').lower() == "false" and
- cfg.entries[path].get_pertinent_entries(entry,
- metadata)):
- continue
- except (KeyError, Bcfg2.Server.Plugin.PluginExecutionError):
- pass
+ if (entry.get('update', 'false').lower() == "false" and
+ not cfg.has_generator(entry, metadata)):
+ continue
self.entries[metadata.hostname][path] = entry
probe = lxml.etree.Element('probe', name=path,
source=self.name,
@@ -102,7 +92,9 @@ class FileProbes(Bcfg2.Server.Plugin.Plugin,
(data.get('name'), metadata.hostname))
else:
try:
- self.write_data(lxml.etree.XML(data.text), metadata)
+ self.write_data(lxml.etree.XML(data.text,
+ parser=Bcfg2.Server.XMLParser),
+ metadata)
except lxml.etree.XMLSyntaxError:
# if we didn't get XML back from the probe, assume
# it's an error message
@@ -111,23 +103,24 @@ class FileProbes(Bcfg2.Server.Plugin.Plugin,
def write_data(self, data, metadata):
"""Write the probed file data to the bcfg2 specification."""
filename = data.get("name")
- contents = binascii.a2b_base64(data.text)
+ contents = b64decode(data.text)
entry = self.entries[metadata.hostname][filename]
cfg = self.core.plugins['Cfg']
specific = "%s.H_%s" % (os.path.basename(filename), metadata.hostname)
# we can't use os.path.join() for this because specific
# already has a leading /, which confuses os.path.join()
- fileloc = "%s%s" % (cfg.data, os.path.join(filename, specific))
+ fileloc = os.path.join(cfg.data,
+ os.path.join(filename, specific).lstrip("/"))
create = False
try:
cfg.entries[filename].bind_entry(entry, metadata)
- except Bcfg2.Server.Plugin.PluginExecutionError:
+ except (KeyError, Bcfg2.Server.Plugin.PluginExecutionError):
create = True
# get current entry data
if entry.text and entry.get("encoding") == "base64":
- entrydata = binascii.a2b_base64(entry.text)
+ entrydata = b64decode(entry.text)
else:
entrydata = entry.text
@@ -135,7 +128,7 @@ class FileProbes(Bcfg2.Server.Plugin.Plugin,
self.logger.info("Writing new probed file %s" % fileloc)
self.write_file(fileloc, contents)
self.verify_file(filename, contents, metadata)
- infoxml = os.path.join("%s%s" % (cfg.data, filename), "info.xml")
+ infoxml = os.path.join(cfg.data, filename.lstrip("/"), "info.xml")
self.write_infoxml(infoxml, entry, data)
elif entrydata == contents:
self.debug_log("Existing %s contents match probed contents" %
@@ -194,7 +187,7 @@ class FileProbes(Bcfg2.Server.Plugin.Plugin,
# get current entry data
if entry.get("encoding") == "base64":
- entrydata = binascii.a2b_base64(entry.text)
+ entrydata = b64decode(entry.text)
else:
entrydata = entry.text
if entrydata == contents:
@@ -206,8 +199,7 @@ class FileProbes(Bcfg2.Server.Plugin.Plugin,
if os.path.exists(infoxml):
return
- self.logger.info("Writing info.xml at %s for %s" %
- (infoxml, data.get("name")))
+ self.logger.info("Writing %s for %s" % (infoxml, data.get("name")))
info = \
lxml.etree.Element("Info",
owner=data.get("owner",
@@ -222,8 +214,10 @@ class FileProbes(Bcfg2.Server.Plugin.Plugin,
root = lxml.etree.Element("FileInfo")
root.append(info)
try:
- open(infoxml, "w").write(lxml.etree.tostring(root,
- pretty_print=True))
+ open(infoxml,
+ "w").write(lxml.etree.tostring(root,
+ xml_declaration=False,
+ pretty_print=True).decode('UTF-8'))
except IOError:
err = sys.exc_info()[1]
self.logger.error("Could not write %s: %s" % (fileloc, err))
diff --git a/src/lib/Bcfg2/Server/Plugins/GroupPatterns.py b/src/lib/Bcfg2/Server/Plugins/GroupPatterns.py
index 58b4d4afb..837f47279 100644
--- a/src/lib/Bcfg2/Server/Plugins/GroupPatterns.py
+++ b/src/lib/Bcfg2/Server/Plugins/GroupPatterns.py
@@ -1,6 +1,9 @@
+import os
import re
+import sys
import logging
import lxml.etree
+import Bcfg2.Server.Lint
import Bcfg2.Server.Plugin
class PackedDigitRange(object):
@@ -58,7 +61,7 @@ class PatternMap(object):
return self.groups
def process_re(self, name):
- match = self.re.match(name)
+ match = self.re.search(name)
if not match:
return None
ret = list()
@@ -70,17 +73,22 @@ class PatternMap(object):
ret.append(newg)
return ret
+ def __str__(self):
+ return "%s: %s %s" % (self.__class__.__name__, self.pattern,
+ self.groups)
+
-class PatternFile(Bcfg2.Server.Plugin.SingleXMLFileBacked):
+class PatternFile(Bcfg2.Server.Plugin.XMLFileBacked):
__identifier__ = None
- def __init__(self, filename, fam):
- Bcfg2.Server.Plugin.SingleXMLFileBacked.__init__(self, filename, fam)
+ def __init__(self, filename, fam=None):
+ Bcfg2.Server.Plugin.XMLFileBacked.__init__(self, filename, fam=fam,
+ should_monitor=True)
self.patterns = []
self.logger = logging.getLogger(self.__class__.__name__)
def Index(self):
- Bcfg2.Server.Plugin.SingleXMLFileBacked.Index(self)
+ Bcfg2.Server.Plugin.XMLFileBacked.Index(self)
self.patterns = []
for entry in self.xdata.xpath('//GroupPattern'):
try:
@@ -112,13 +120,42 @@ class PatternFile(Bcfg2.Server.Plugin.SingleXMLFileBacked):
class GroupPatterns(Bcfg2.Server.Plugin.Plugin,
Bcfg2.Server.Plugin.Connector):
name = "GroupPatterns"
- experimental = True
def __init__(self, core, datastore):
Bcfg2.Server.Plugin.Plugin.__init__(self, core, datastore)
Bcfg2.Server.Plugin.Connector.__init__(self)
- self.config = PatternFile(self.data + '/config.xml',
- core.fam)
+ self.config = PatternFile(os.path.join(self.data, 'config.xml'),
+ fam=core.fam)
def get_additional_groups(self, metadata):
return self.config.process_patterns(metadata.hostname)
+
+
+class GroupPatternsLint(Bcfg2.Server.Lint.ServerPlugin):
+ def Run(self):
+ """ run plugin """
+ cfg = self.core.plugins['GroupPatterns'].config
+ for entry in cfg.xdata.xpath('//GroupPattern'):
+ groups = [g.text for g in entry.findall('Group')]
+ self.check(entry, groups, ptype='NamePattern')
+ self.check(entry, groups, ptype='NameRange')
+
+ @classmethod
+ def Errors(cls):
+ return {"pattern-fails-to-initialize":"error"}
+
+ def check(self, entry, groups, ptype="NamePattern"):
+ if ptype == "NamePattern":
+ pmap = lambda p: PatternMap(p, None, groups)
+ else:
+ pmap = lambda p: PatternMap(None, p, groups)
+
+ for el in entry.findall(ptype):
+ pat = el.text
+ try:
+ pmap(pat)
+ except:
+ err = sys.exc_info()[1]
+ self.LintError("pattern-fails-to-initialize",
+ "Failed to initialize %s %s for %s: %s" %
+ (ptype, pat, entry.get('pattern'), err))
diff --git a/src/lib/Bcfg2/Server/Plugins/Hostbase.py b/src/lib/Bcfg2/Server/Plugins/Hostbase.py
index e9c1c1cff..69b019160 100644
--- a/src/lib/Bcfg2/Server/Plugins/Hostbase.py
+++ b/src/lib/Bcfg2/Server/Plugins/Hostbase.py
@@ -3,19 +3,24 @@ This file provides the Hostbase plugin.
It manages dns/dhcp/nis host information
"""
+from lxml.etree import Element, SubElement
import os
+import re
+from time import strftime
os.environ['DJANGO_SETTINGS_MODULE'] = 'Bcfg2.Server.Hostbase.settings'
-from lxml.etree import Element, SubElement
import Bcfg2.Server.Plugin
from Bcfg2.Server.Plugin import PluginExecutionError, PluginInitError
-from time import strftime
-from sets import Set
from django.template import Context, loader
from django.db import connection
-import re
# Compatibility imports
from Bcfg2.Bcfg2Py3k import StringIO
+try:
+ set
+except NameError:
+ # deprecated since python 2.6
+ from sets import Set as set
+
class Hostbase(Bcfg2.Server.Plugin.Plugin,
Bcfg2.Server.Plugin.Structure,
@@ -383,7 +388,7 @@ class Hostbase(Bcfg2.Server.Plugin.Plugin,
""")
hostbase = cursor.fetchall()
domains = [host[0].split(".", 1)[1] for host in hostbase]
- domains_set = Set(domains)
+ domains_set = set(domains)
domain_data = [(domain, domains.count(domain)) for domain in domains_set]
domain_data.sort()
@@ -393,7 +398,7 @@ class Hostbase(Bcfg2.Server.Plugin.Plugin,
ips = cursor.fetchall()
three_octets = [ip[0].rstrip('0123456789').rstrip('.') \
for ip in ips]
- three_octets_set = Set(three_octets)
+ three_octets_set = set(three_octets)
three_octets_data = [(octet, three_octets.count(octet)) \
for octet in three_octets_set]
three_octets_data.sort()
@@ -412,7 +417,7 @@ class Hostbase(Bcfg2.Server.Plugin.Plugin,
append_data.append((three_octet, tuple(tosort)))
two_octets = [ip.rstrip('0123456789').rstrip('.') for ip in three_octets]
- two_octets_set = Set(two_octets)
+ two_octets_set = set(two_octets)
two_octets_data = [(octet, two_octets.count(octet))
for octet in two_octets_set]
two_octets_data.sort()
@@ -446,7 +451,7 @@ class Hostbase(Bcfg2.Server.Plugin.Plugin,
else:
if appenddata[0] == ip[0]:
simple = False
- ips.append((appenddata[2], appenddata[0], Set(namelist),
+ ips.append((appenddata[2], appenddata[0], set(namelist),
cnamelist, simple, appenddata[1]))
appenddata = ip
simple = True
@@ -455,7 +460,7 @@ class Hostbase(Bcfg2.Server.Plugin.Plugin,
if ip[4]:
cnamelist.append(ip[4].split('.', 1)[0])
simple = False
- ips.append((appenddata[2], appenddata[0], Set(namelist),
+ ips.append((appenddata[2], appenddata[0], set(namelist),
cnamelist, simple, appenddata[1]))
context = Context({
'subnet': subnet[0],
diff --git a/src/lib/Bcfg2/Server/Plugins/Ldap.py b/src/lib/Bcfg2/Server/Plugins/Ldap.py
index 29abf5b13..9883085db 100644
--- a/src/lib/Bcfg2/Server/Plugins/Ldap.py
+++ b/src/lib/Bcfg2/Server/Plugins/Ldap.py
@@ -139,7 +139,7 @@ class LdapConnection(object):
result = self.conn.search_s(
query.base,
SCOPE_MAP[query.scope],
- query.filter,
+ query.filter.replace("\\", "\\\\"),
query.attrs,
)
break
diff --git a/src/lib/Bcfg2/Server/Plugins/Metadata.py b/src/lib/Bcfg2/Server/Plugins/Metadata.py
index 970126b80..a5fa78088 100644
--- a/src/lib/Bcfg2/Server/Plugins/Metadata.py
+++ b/src/lib/Bcfg2/Server/Plugins/Metadata.py
@@ -2,17 +2,39 @@
This file stores persistent metadata for the Bcfg2 Configuration Repository.
"""
-import copy
-import fcntl
-import lxml.etree
+import re
import os
-import os.path
-import socket
import sys
import time
-
-import Bcfg2.Server.FileMonitor
+import copy
+import fcntl
+import socket
+import lxml.etree
+import Bcfg2.Server
+import Bcfg2.Server.Lint
import Bcfg2.Server.Plugin
+import Bcfg2.Server.FileMonitor
+from Bcfg2.Bcfg2Py3k import MutableMapping
+from Bcfg2.version import Bcfg2VersionInfo
+
+try:
+ from django.db import models
+ has_django = True
+except ImportError:
+ has_django = False
+
+
+try:
+ all
+except NameError:
+ # some of the crazy lexical closure stuff below works with all()
+ # but not with this loop inline. i really don't understand
+ # lexical closures some^Wmost days
+ def all(iterable):
+ for element in iterable:
+ if not element:
+ return False
+ return True
def locked(fd):
@@ -24,28 +46,68 @@ def locked(fd):
return False
-class MetadataConsistencyError(Exception):
- """This error gets raised when metadata is internally inconsistent."""
- pass
+if has_django:
+ class MetadataClientModel(models.Model,
+ Bcfg2.Server.Plugin.PluginDatabaseModel):
+ hostname = models.CharField(max_length=255, primary_key=True)
+ version = models.CharField(max_length=31, null=True)
+ class ClientVersions(MutableMapping):
+ def __getitem__(self, key):
+ try:
+ return MetadataClientModel.objects.get(hostname=key).version
+ except MetadataClientModel.DoesNotExist:
+ raise KeyError(key)
+
+ def __setitem__(self, key, value):
+ client = MetadataClientModel.objects.get_or_create(hostname=key)[0]
+ client.version = value
+ client.save()
+
+ def __delitem__(self, key):
+ # UserDict didn't require __delitem__, but MutableMapping
+ # does. we don't want deleting a client version record to
+ # delete the client, so we just set the version to None,
+ # which is kinda like deleting it, but not really.
+ try:
+ client = MetadataClientModel.objects.get(hostname=key)
+ except MetadataClientModel.DoesNotExist:
+ raise KeyError(key)
+ client.version = None
+ client.save()
-class MetadataRuntimeError(Exception):
- """This error is raised when the metadata engine
- is called prior to reading enough data.
- """
- pass
+ def __len__(self):
+ return MetadataClientModel.objects.count()
+ def __iter__(self):
+ for client in MetadataClientModel.objects.all():
+ yield client.hostname
-class XMLMetadataConfig(Bcfg2.Server.Plugin.SingleXMLFileBacked):
+ def keys(self):
+ return [c.hostname for c in MetadataClientModel.objects.all()]
+
+ def __contains__(self, key):
+ try:
+ client = MetadataClientModel.objects.get(hostname=key)
+ return True
+ except MetadataClientModel.DoesNotExist:
+ return False
+
+
+class XMLMetadataConfig(Bcfg2.Server.Plugin.XMLFileBacked):
"""Handles xml config files and all XInclude statements"""
def __init__(self, metadata, watch_clients, basefile):
- Bcfg2.Server.Plugin.SingleXMLFileBacked.__init__(self,
- os.path.join(metadata.data,
- basefile),
- metadata.core.fam)
+ # we tell XMLFileBacked _not_ to add a monitor for this file,
+ # because the main Metadata plugin has already added one.
+ # then we immediately set should_monitor to the proper value,
+ # so that XInclude'd files get properly watched
+ fpath = os.path.join(metadata.data, basefile)
+ Bcfg2.Server.Plugin.XMLFileBacked.__init__(self, fpath,
+ fam=metadata.core.fam,
+ should_monitor=False)
+ self.should_monitor = watch_clients
self.metadata = metadata
self.basefile = basefile
- self.should_monitor = watch_clients
self.data = None
self.basedata = None
self.basedir = metadata.data
@@ -56,25 +118,22 @@ class XMLMetadataConfig(Bcfg2.Server.Plugin.SingleXMLFileBacked):
@property
def xdata(self):
if not self.data:
- raise MetadataRuntimeError
+ raise Bcfg2.Server.Plugin.MetadataRuntimeError("%s has no data" %
+ self.basefile)
return self.data
@property
def base_xdata(self):
if not self.basedata:
- raise MetadataRuntimeError
+ raise Bcfg2.Server.Plugin.MetadataRuntimeError("%s has no data" %
+ self.basefile)
return self.basedata
- def add_monitor(self, fpath, fname):
- """Add a fam monitor for an included file"""
- if self.should_monitor:
- self.metadata.core.fam.AddMonitor(fpath, self.metadata)
- self.extras.append(fname)
-
def load_xml(self):
"""Load changes from XML"""
try:
- xdata = lxml.etree.parse(os.path.join(self.basedir, self.basefile))
+ xdata = lxml.etree.parse(os.path.join(self.basedir, self.basefile),
+ parser=Bcfg2.Server.XMLParser)
except lxml.etree.XMLSyntaxError:
self.logger.error('Failed to parse %s' % self.basefile)
return
@@ -100,12 +159,14 @@ class XMLMetadataConfig(Bcfg2.Server.Plugin.SingleXMLFileBacked):
try:
datafile = open(tmpfile, 'w')
except IOError:
- e = sys.exc_info()[1]
- self.logger.error("Failed to write %s: %s" % (tmpfile, e))
- raise MetadataRuntimeError
+ msg = "Failed to write %s: %s" % (tmpfile, sys.exc_info()[1])
+ self.logger.error(msg)
+ raise Bcfg2.Server.Plugin.MetadataRuntimeError(msg)
# prep data
dataroot = xmltree.getroot()
- newcontents = lxml.etree.tostring(dataroot, pretty_print=True)
+ newcontents = lxml.etree.tostring(dataroot, xml_declaration=False,
+ pretty_print=True).decode('UTF-8')
+
fd = datafile.fileno()
while locked(fd) == True:
@@ -114,21 +175,24 @@ class XMLMetadataConfig(Bcfg2.Server.Plugin.SingleXMLFileBacked):
datafile.write(newcontents)
except:
fcntl.lockf(fd, fcntl.LOCK_UN)
- self.logger.error("Metadata: Failed to write new xml data to %s" %
- tmpfile, exc_info=1)
+ msg = "Metadata: Failed to write new xml data to %s: %s" % \
+ (tmpfile, sys.exc_info()[1])
+ self.logger.error(msg, exc_info=1)
os.unlink(tmpfile)
- raise MetadataRuntimeError
+ raise Bcfg2.Server.Plugin.MetadataRuntimeError(msg)
datafile.close()
-
# check if clients.xml is a symlink
if os.path.islink(fname):
fname = os.readlink(fname)
try:
os.rename(tmpfile, fname)
+
except:
- self.logger.error("Metadata: Failed to rename %s" % tmpfile)
- raise MetadataRuntimeError
+ msg = "Metadata: Failed to rename %s: %s" % (tmpfile,
+ sys.exc_info()[1])
+ self.logger.error(msg)
+ raise Bcfg2.Server.Plugin.MetadataRuntimeError(msg)
def find_xml_for_xpath(self, xpath):
"""Find and load xml file containing the xpath query"""
@@ -144,22 +208,26 @@ class XMLMetadataConfig(Bcfg2.Server.Plugin.SingleXMLFileBacked):
"""Try to find the data in included files"""
for included in self.extras:
try:
- xdata = lxml.etree.parse(os.path.join(self.basedir,
- included))
+ xdata = lxml.etree.parse(included,
+ parser=Bcfg2.Server.XMLParser)
cli = xdata.xpath(xpath)
if len(cli) > 0:
- return {'filename': os.path.join(self.basedir,
- included),
+ return {'filename': included,
'xmltree': xdata,
'xquery': cli}
except lxml.etree.XMLSyntaxError:
- self.logger.error('Failed to parse %s' % (included))
+ self.logger.error('Failed to parse %s' % included)
return {}
+ def add_monitor(self, fpath):
+ self.extras.append(fpath)
+ if self.fam and self.should_monitor:
+ self.fam.AddMonitor(fpath, self.metadata)
+
def HandleEvent(self, event):
"""Handle fam events"""
- filename = event.filename.split('/')[-1]
- if filename in self.extras:
+ filename = os.path.basename(event.filename)
+ if event.filename in self.extras:
if event.code2str() == 'exists':
return False
elif filename != self.basefile:
@@ -172,8 +240,8 @@ class XMLMetadataConfig(Bcfg2.Server.Plugin.SingleXMLFileBacked):
class ClientMetadata(object):
"""This object contains client metadata."""
- def __init__(self, client, profile, groups, bundles,
- aliases, addresses, categories, uuid, password, query):
+ def __init__(self, client, profile, groups, bundles, aliases, addresses,
+ categories, uuid, password, version, query):
self.hostname = client
self.profile = profile
self.bundles = bundles
@@ -184,6 +252,11 @@ class ClientMetadata(object):
self.uuid = uuid
self.password = password
self.connectors = []
+ self.version = version
+ try:
+ self.version_info = Bcfg2VersionInfo(version)
+ except:
+ self.version_info = None
self.query = query
def inGroup(self, group):
@@ -198,7 +271,8 @@ class ClientMetadata(object):
class MetadataQuery(object):
- def __init__(self, by_name, get_clients, by_groups, by_profiles, all_groups, all_groups_in_category):
+ def __init__(self, by_name, get_clients, by_groups, by_profiles,
+ all_groups, all_groups_in_category):
# resolver is set later
self.by_name = by_name
self.names_by_groups = by_groups
@@ -217,74 +291,125 @@ class MetadataQuery(object):
return [self.by_name(name) for name in self.all_clients()]
-class Metadata(Bcfg2.Server.Plugin.Plugin,
- Bcfg2.Server.Plugin.Metadata,
- Bcfg2.Server.Plugin.Statistics):
+class MetadataGroup(tuple):
+ def __new__(cls, name, bundles=None, category=None,
+ is_profile=False, is_public=False, is_private=False):
+ if bundles is None:
+ bundles = set()
+ return tuple.__new__(cls, (bundles, category))
+
+ def __init__(self, name, bundles=None, category=None,
+ is_profile=False, is_public=False, is_private=False):
+ if bundles is None:
+ bundles = set()
+ tuple.__init__(self)
+ self.name = name
+ self.bundles = bundles
+ self.category = category
+ self.is_profile = is_profile
+ self.is_public = is_public
+ self.is_private = is_private
+
+ def __str__(self):
+ return repr(self)
+
+ def __repr__(self):
+ return "%s %s (bundles=%s, category=%s)" % \
+ (self.__class__.__name__, self.name, self.bundles,
+ self.category)
+
+ def __hash__(self):
+ return hash(self.name)
+
+class Metadata(Bcfg2.Server.Plugin.Metadata,
+ Bcfg2.Server.Plugin.Statistics,
+ Bcfg2.Server.Plugin.DatabaseBacked):
"""This class contains data for bcfg2 server metadata."""
__author__ = 'bcfg-dev@mcs.anl.gov'
name = "Metadata"
sort_order = 500
def __init__(self, core, datastore, watch_clients=True):
- Bcfg2.Server.Plugin.Plugin.__init__(self, core, datastore)
Bcfg2.Server.Plugin.Metadata.__init__(self)
- Bcfg2.Server.Plugin.Statistics.__init__(self)
- if watch_clients:
- try:
- core.fam.AddMonitor(os.path.join(self.data, "groups.xml"), self)
- core.fam.AddMonitor(os.path.join(self.data, "clients.xml"), self)
- except:
- print("Unable to add file monitor for groups.xml or clients.xml")
- raise Bcfg2.Server.Plugin.PluginInitError
-
- self.clients_xml = XMLMetadataConfig(self, watch_clients, 'clients.xml')
- self.groups_xml = XMLMetadataConfig(self, watch_clients, 'groups.xml')
- self.states = {}
- if watch_clients:
- self.states = {"groups.xml": False,
- "clients.xml": False}
- self.addresses = {}
+ Bcfg2.Server.Plugin.Statistics.__init__(self, core, datastore)
+ Bcfg2.Server.Plugin.DatabaseBacked.__init__(self, core, datastore)
+ self.watch_clients = watch_clients
+ self.states = dict()
+ self.extra = dict()
+ self.handlers = []
+ self._handle_file("groups.xml")
+ if (self._use_db and
+ os.path.exists(os.path.join(self.data, "clients.xml"))):
+ self.logger.warning("Metadata: database enabled but clients.xml"
+                                " found, parsing in compatibility mode")
+ self._handle_file("clients.xml")
+ elif not self._use_db:
+ self._handle_file("clients.xml")
+
+ # mapping of clientname -> authtype
self.auth = dict()
- self.clients = {}
- self.aliases = {}
- self.groups = {}
- self.cgroups = {}
- self.public = []
- self.private = []
- self.profiles = []
- self.categories = {}
- self.bad_clients = {}
- self.uuid = {}
+ # list of clients required to have non-global password
self.secure = []
+ # list of floating clients
self.floating = []
+ # mapping of clientname -> password
self.passwords = {}
+ self.addresses = {}
+ self.raddresses = {}
+ # mapping of clientname -> [groups]
+ self.clientgroups = {}
+ # list of clients
+ self.clients = []
+ self.aliases = {}
+ self.raliases = {}
+ # mapping of groupname -> MetadataGroup object
+ self.groups = {}
+ # mappings of predicate -> MetadataGroup object
+ self.group_membership = dict()
+ self.negated_groups = dict()
+ # mapping of hostname -> version string
+ if self._use_db:
+ self.versions = ClientVersions()
+ else:
+ self.versions = dict()
+ self.uuid = {}
self.session_cache = {}
self.default = None
self.pdirty = False
- self.extra = {'groups.xml': [],
- 'clients.xml': []}
self.password = core.password
self.query = MetadataQuery(core.build_metadata,
- lambda: list(self.clients.keys()),
+ lambda: list(self.clients),
self.get_client_names_by_groups,
self.get_client_names_by_profiles,
self.get_all_group_names,
self.get_all_groups_in_category)
@classmethod
- def init_repo(cls, repo, groups, os_selection, clients):
- path = os.path.join(repo, cls.name)
- os.makedirs(path)
- open(os.path.join(repo, "Metadata", "groups.xml"),
- "w").write(groups % os_selection)
- open(os.path.join(repo, "Metadata", "clients.xml"),
- "w").write(clients % socket.getfqdn())
-
- def get_groups(self):
- '''return groups xml tree'''
- groups_tree = lxml.etree.parse(os.path.join(self.data, "groups.xml"))
- root = groups_tree.getroot()
- return root
+ def init_repo(cls, repo, **kwargs):
+ # must use super here; inheritance works funny with class methods
+ super(Metadata, cls).init_repo(repo)
+
+ for fname in ["clients.xml", "groups.xml"]:
+ aname = re.sub(r'[^A-z0-9_]', '_', fname)
+ if aname in kwargs:
+ open(os.path.join(repo, cls.name, fname),
+ "w").write(kwargs[aname])
+
+ def _handle_file(self, fname):
+ if self.watch_clients:
+ try:
+ self.core.fam.AddMonitor(os.path.join(self.data, fname), self)
+ except:
+ err = sys.exc_info()[1]
+ msg = "Unable to add file monitor for %s: %s" % (fname, err)
+ self.logger.error(msg)
+ raise Bcfg2.Server.Plugin.PluginInitError(msg)
+ self.states[fname] = False
+ aname = re.sub(r'[^A-z0-9_]', '_', fname)
+ xmlcfg = XMLMetadataConfig(self, self.watch_clients, fname)
+ setattr(self, aname, xmlcfg)
+ self.handlers.append(xmlcfg.HandleEvent)
+ self.extra[fname] = []
def _search_xdata(self, tag, name, tree, alias=False):
for node in tree.findall("//%s" % tag):
@@ -312,7 +437,7 @@ class Metadata(Bcfg2.Server.Plugin.Plugin,
node = self._search_xdata(tag, name, config.xdata, alias=alias)
if node != None:
self.logger.error("%s \"%s\" already exists" % (tag, name))
- raise MetadataConsistencyError
+ raise Bcfg2.Server.Plugin.MetadataConsistencyError
element = lxml.etree.SubElement(config.base_xdata.getroot(),
tag, name=name)
if attribs:
@@ -322,70 +447,130 @@ class Metadata(Bcfg2.Server.Plugin.Plugin,
def add_group(self, group_name, attribs):
"""Add group to groups.xml."""
- return self._add_xdata(self.groups_xml, "Group", group_name,
- attribs=attribs)
+ if self._use_db:
+ msg = "Metadata does not support adding groups with use_database enabled"
+ self.logger.error(msg)
+ raise Bcfg2.Server.Plugin.PluginExecutionError(msg)
+ else:
+ return self._add_xdata(self.groups_xml, "Group", group_name,
+ attribs=attribs)
def add_bundle(self, bundle_name):
"""Add bundle to groups.xml."""
- return self._add_xdata(self.groups_xml, "Bundle", bundle_name)
+ if self._use_db:
+ msg = "Metadata does not support adding bundles with use_database enabled"
+ self.logger.error(msg)
+ raise Bcfg2.Server.Plugin.PluginExecutionError(msg)
+ else:
+ return self._add_xdata(self.groups_xml, "Bundle", bundle_name)
- def add_client(self, client_name, attribs):
+ def add_client(self, client_name, attribs=None):
"""Add client to clients.xml."""
- return self._add_xdata(self.clients_xml, "Client", client_name,
- attribs=attribs, alias=True)
+ if attribs is None:
+ attribs = dict()
+ if self._use_db:
+ client = MetadataClientModel(hostname=client_name)
+ client.save()
+ self.clients = self.list_clients()
+ return client
+ else:
+ return self._add_xdata(self.clients_xml, "Client", client_name,
+ attribs=attribs, alias=True)
def _update_xdata(self, config, tag, name, attribs, alias=False):
node = self._search_xdata(tag, name, config.xdata, alias=alias)
if node == None:
self.logger.error("%s \"%s\" does not exist" % (tag, name))
- raise MetadataConsistencyError
+ raise Bcfg2.Server.Plugin.MetadataConsistencyError
xdict = config.find_xml_for_xpath('.//%s[@name="%s"]' %
(tag, node.get('name')))
if not xdict:
self.logger.error("Unexpected error finding %s \"%s\"" %
(tag, name))
- raise MetadataConsistencyError
+ raise Bcfg2.Server.Plugin.MetadataConsistencyError
for key, val in list(attribs.items()):
xdict['xquery'][0].set(key, val)
config.write_xml(xdict['filename'], xdict['xmltree'])
def update_group(self, group_name, attribs):
"""Update a groups attributes."""
- return self._update_xdata(self.groups_xml, "Group", group_name, attribs)
+ if self._use_db:
+ msg = "Metadata does not support updating groups with use_database enabled"
+ self.logger.error(msg)
+ raise Bcfg2.Server.Plugin.PluginExecutionError(msg)
+ else:
+ return self._update_xdata(self.groups_xml, "Group", group_name,
+ attribs)
def update_client(self, client_name, attribs):
"""Update a clients attributes."""
- return self._update_xdata(self.clients_xml, "Client", client_name,
- attribs, alias=True)
+ if self._use_db:
+ msg = "Metadata does not support updating clients with use_database enabled"
+ self.logger.error(msg)
+ raise Bcfg2.Server.Plugin.PluginExecutionError(msg)
+ else:
+ return self._update_xdata(self.clients_xml, "Client", client_name,
+ attribs, alias=True)
+
+ def list_clients(self):
+ """ List all clients in client database """
+ if self._use_db:
+ return set([c.hostname for c in MetadataClientModel.objects.all()])
+ else:
+ return self.clients
def _remove_xdata(self, config, tag, name, alias=False):
node = self._search_xdata(tag, name, config.xdata)
if node == None:
self.logger.error("%s \"%s\" does not exist" % (tag, name))
- raise MetadataConsistencyError
+ raise Bcfg2.Server.Plugin.MetadataConsistencyError
xdict = config.find_xml_for_xpath('.//%s[@name="%s"]' %
(tag, node.get('name')))
if not xdict:
self.logger.error("Unexpected error finding %s \"%s\"" %
(tag, name))
- raise MetadataConsistencyError
+ raise Bcfg2.Server.Plugin.MetadataConsistencyError
xdict['xquery'][0].getparent().remove(xdict['xquery'][0])
- self.groups_xml.write_xml(xdict['filename'], xdict['xmltree'])
+ config.write_xml(xdict['filename'], xdict['xmltree'])
def remove_group(self, group_name):
"""Remove a group."""
- return self._remove_xdata(self.groups_xml, "Group", group_name)
+ if self._use_db:
+ msg = "Metadata does not support removing groups with use_database enabled"
+ self.logger.error(msg)
+ raise Bcfg2.Server.Plugin.PluginExecutionError(msg)
+ else:
+ return self._remove_xdata(self.groups_xml, "Group", group_name)
def remove_bundle(self, bundle_name):
"""Remove a bundle."""
- return self._remove_xdata(self.groups_xml, "Bundle", bundle_name)
+ if self._use_db:
+ msg = "Metadata does not support removing bundles with use_database enabled"
+ self.logger.error(msg)
+ raise Bcfg2.Server.Plugin.PluginExecutionError(msg)
+ else:
+ return self._remove_xdata(self.groups_xml, "Bundle", bundle_name)
+
+ def remove_client(self, client_name):
+        """Remove a client."""
+ if self._use_db:
+ try:
+ client = MetadataClientModel.objects.get(hostname=client_name)
+ except MetadataClientModel.DoesNotExist:
+ msg = "Client %s does not exist" % client_name
+ self.logger.warning(msg)
+ raise Bcfg2.Server.Plugin.MetadataConsistencyError(msg)
+ client.delete()
+ self.clients = self.list_clients()
+ else:
+ return self._remove_xdata(self.clients_xml, "Client", client_name)
def _handle_clients_xml_event(self, event):
xdata = self.clients_xml.xdata
- self.clients = {}
+ self.clients = []
+ self.clientgroups = {}
self.aliases = {}
self.raliases = {}
- self.bad_clients = {}
self.secure = []
self.floating = []
self.addresses = {}
@@ -406,12 +591,15 @@ class Metadata(Bcfg2.Server.Plugin.Plugin,
'cert+password')
if 'uuid' in client.attrib:
self.uuid[client.get('uuid')] = clname
- if client.get('secure', 'false') == 'true':
+ if client.get('secure', 'false').lower() == 'true':
self.secure.append(clname)
- if client.get('location', 'fixed') == 'floating':
+ if (client.get('location', 'fixed') == 'floating' or
+ client.get('floating', 'false').lower() == 'true'):
self.floating.append(clname)
if 'password' in client.attrib:
self.passwords[clname] = client.get('password')
+ if 'version' in client.attrib:
+ self.versions[clname] = client.get('version')
self.raliases[clname] = set()
for alias in client.findall('Alias'):
@@ -426,115 +614,199 @@ class Metadata(Bcfg2.Server.Plugin.Plugin,
if clname not in self.raddresses:
self.raddresses[clname] = set()
self.raddresses[clname].add(alias.get('address'))
- self.clients.update({clname: client.get('profile')})
+ self.clients.append(clname)
+ try:
+ self.clientgroups[clname].append(client.get('profile'))
+ except KeyError:
+ self.clientgroups[clname] = [client.get('profile')]
self.states['clients.xml'] = True
+ if self._use_db:
+ self.clients = self.list_clients()
def _handle_groups_xml_event(self, event):
- xdata = self.groups_xml.xdata
- self.public = []
- self.private = []
- self.profiles = []
self.groups = {}
- grouptmp = {}
- self.categories = {}
- groupseen = list()
- for group in xdata.xpath('//Groups/Group'):
- if group.get('name') not in groupseen:
- groupseen.append(group.get('name'))
+
+ # get_condition and aggregate_conditions must be separate
+ # functions in order to ensure that the scope is right for the
+ # closures they return
+ def get_condition(element):
+ negate = element.get('negate', 'false').lower() == 'true'
+ pname = element.get("name")
+ if element.tag == 'Group':
+ return lambda c, g, _: negate != (pname in g)
+ elif element.tag == 'Client':
+ return lambda c, g, _: negate != (pname == c)
+
+ def aggregate_conditions(conditions):
+ return lambda client, groups, cats: \
+ all(cond(client, groups, cats) for cond in conditions)
+
+ # first, we get a list of all of the groups declared in the
+ # file. we do this in two stages because the old way of
+ # parsing groups.xml didn't support nested groups; in the old
+ # way, only Group tags under a Groups tag counted as
+ # declarative. so we parse those first, and then parse the
+ # other Group tags if they haven't already been declared.
+ # this lets you set options on a group (e.g., public="false")
+ # at the top level and then just use the name elsewhere, which
+ # is the original behavior
+ for grp in self.groups_xml.xdata.xpath("//Groups/Group") + \
+ self.groups_xml.xdata.xpath("//Groups/Group//Group"):
+ if grp.get("name") in self.groups:
+ continue
+ self.groups[grp.get("name")] = \
+ MetadataGroup(grp.get("name"),
+ bundles=[b.get("name")
+ for b in grp.findall("Bundle")],
+ category=grp.get("category"),
+ is_profile=grp.get("profile", "false") == "true",
+ is_public=grp.get("public", "false") == "true",
+ is_private=grp.get("public", "true") == "false")
+ if grp.get('default', 'false') == 'true':
+ self.default = grp.get('name')
+
+ self.group_membership = dict()
+ self.negated_groups = dict()
+ self.options = dict()
+ # confusing loop condition; the XPath query asks for all
+ # elements under a Group tag under a Groups tag; that is
+ # infinitely recursive, so "all" elements really means _all_
+ # elements. We then manually filter out non-Group elements
+ # since there doesn't seem to be a way to get Group elements
+ # of arbitrary depth with particular ultimate ancestors in
+ # XPath. We do the same thing for Client tags.
+ for el in self.groups_xml.xdata.xpath("//Groups/Group//*") + \
+ self.groups_xml.xdata.xpath("//Groups/Client//*"):
+ if ((el.tag != 'Group' and el.tag != 'Client') or
+ el.getchildren()):
+ continue
+
+ conditions = []
+ for parent in el.iterancestors():
+ cond = get_condition(parent)
+ if cond:
+ conditions.append(cond)
+
+ gname = el.get("name")
+ if el.get("negate", "false").lower() == "true":
+ self.negated_groups[aggregate_conditions(conditions)] = \
+ self.groups[gname]
else:
- self.logger.error("Metadata: Group %s defined multiply" %
- group.get('name'))
- grouptmp[group.get('name')] = \
- ([item.get('name') for item in group.findall('./Bundle')],
- [item.get('name') for item in group.findall('./Group')])
- grouptmp[group.get('name')][1].append(group.get('name'))
- if group.get('default', 'false') == 'true':
- self.default = group.get('name')
- if group.get('profile', 'false') == 'true':
- self.profiles.append(group.get('name'))
- if group.get('public', 'false') == 'true':
- self.public.append(group.get('name'))
- elif group.get('public', 'true') == 'false':
- self.private.append(group.get('name'))
- if 'category' in group.attrib:
- self.categories[group.get('name')] = group.get('category')
-
- for group in grouptmp:
- # self.groups[group] => (bundles, groups, categories)
- self.groups[group] = (set(), set(), {})
- tocheck = [group]
- group_cat = self.groups[group][2]
- while tocheck:
- now = tocheck.pop()
- self.groups[group][1].add(now)
- if now in grouptmp:
- (bundles, groups) = grouptmp[now]
- for ggg in groups:
- if ggg in self.groups[group][1]:
- continue
- if (ggg not in self.categories or \
- self.categories[ggg] not in self.groups[group][2]):
- self.groups[group][1].add(ggg)
- tocheck.append(ggg)
- if ggg in self.categories:
- group_cat[self.categories[ggg]] = ggg
- elif ggg in self.categories:
- self.logger.info("Group %s: %s cat-suppressed %s" % \
- (group,
- group_cat[self.categories[ggg]],
- ggg))
- [self.groups[group][0].add(bund) for bund in bundles]
+ if self.groups[gname].category and gname in self.groups:
+ category = self.groups[gname].category
+
+ def in_cat(client, groups, categories):
+ if category in categories:
+ # this is debug, not warning, because it
+ # gets called a _lot_ -- every time a
+ # group in a category is processed for
+ # every creation of client metadata. this
+ # message is produced in two other places,
+ # so the user should get warned by one of
+ # those.
+ self.logger.debug("%s: Group %s suppressed by "
+ "category %s; %s already a "
+ "member of %s" %
+ (self.name, gname, category,
+ client, categories[category]))
+ return False
+ return True
+ conditions.append(in_cat)
+
+ self.group_membership[aggregate_conditions(conditions)] = \
+ self.groups[gname]
self.states['groups.xml'] = True
def HandleEvent(self, event):
"""Handle update events for data files."""
- if self.clients_xml.HandleEvent(event):
- self._handle_clients_xml_event(event)
- elif self.groups_xml.HandleEvent(event):
- self._handle_groups_xml_event(event)
-
- if False not in list(self.states.values()):
- # check that all client groups are real and complete
- real = list(self.groups.keys())
- for client in list(self.clients.keys()):
- if self.clients[client] not in self.profiles:
- self.logger.error("Client %s set as nonexistent or "
- "incomplete group %s" %
- (client, self.clients[client]))
- self.logger.error("Removing client mapping for %s" % client)
- self.bad_clients[client] = self.clients[client]
- del self.clients[client]
- for bclient in list(self.bad_clients.keys()):
- if self.bad_clients[bclient] in self.profiles:
- self.logger.info("Restored profile mapping for client %s" %
- bclient)
- self.clients[bclient] = self.bad_clients[bclient]
- del self.bad_clients[bclient]
-
- def set_profile(self, client, profile, addresspair):
+ for hdlr in self.handlers:
+ aname = re.sub(r'[^A-z0-9_]', '_', os.path.basename(event.filename))
+ if hdlr(event):
+ try:
+ proc = getattr(self, "_handle_%s_event" % aname)
+ except AttributeError:
+ proc = self._handle_default_event
+ proc(event)
+
+ if False not in list(self.states.values()) and self.debug_flag:
+ # check that all groups are real and complete. this is
+ # just logged at a debug level because many groups might
+ # be probed, and we don't want to warn about them.
+ for client, groups in list(self.clientgroups.items()):
+ for group in groups:
+ if group not in self.groups:
+ self.debug_log("Client %s set as nonexistent group %s" %
+ (client, group))
+ for gname, ginfo in list(self.groups.items()):
+ for group in ginfo.groups:
+ if group not in self.groups:
+ self.debug_log("Group %s set as nonexistent group %s" %
+ (gname, group))
+
+
+ def set_profile(self, client, profile, addresspair, force=False):
"""Set group parameter for provided client."""
- self.logger.info("Asserting client %s profile to %s" % (client, profile))
+ self.logger.info("Asserting client %s profile to %s" %
+ (client, profile))
if False in list(self.states.values()):
- raise MetadataRuntimeError
- if profile not in self.public:
- self.logger.error("Failed to set client %s to private group %s" %
- (client, profile))
- raise MetadataConsistencyError
+ raise Bcfg2.Server.Plugin.MetadataRuntimeError("Metadata has not been read yet")
+ if not force and profile not in self.groups:
+ msg = "Profile group %s does not exist" % profile
+ self.logger.error(msg)
+ raise Bcfg2.Server.Plugin.MetadataConsistencyError(msg)
+ group = self.groups[profile]
+ if not force and not group.is_public:
+ msg = "Cannot set client %s to private group %s" % (client, profile)
+ self.logger.error(msg)
+ raise Bcfg2.Server.Plugin.MetadataConsistencyError(msg)
+
if client in self.clients:
- self.logger.info("Changing %s group from %s to %s" %
- (client, self.clients[client], profile))
+ if self._use_db:
+ msg = "DBMetadata does not support asserting client profiles"
+ self.logger.error(msg)
+ raise Bcfg2.Server.Plugin.PluginExecutionError(msg)
+
+ profiles = [g for g in self.clientgroups[client]
+ if g in self.groups and self.groups[g].is_profile]
+ self.logger.info("Changing %s profile from %s to %s" %
+ (client, profiles, profile))
self.update_client(client, dict(profile=profile))
+ if client in self.clientgroups:
+ for p in profiles:
+ self.clientgroups[client].remove(p)
+ self.clientgroups[client].append(profile)
+ else:
+ self.clientgroups[client] = [profile]
else:
self.logger.info("Creating new client: %s, profile %s" %
(client, profile))
- if addresspair in self.session_cache:
- # we are working with a uuid'd client
- self.add_client(self.session_cache[addresspair][1],
- dict(uuid=client, profile=profile,
- address=addresspair[0]))
+ if self._use_db:
+ self.add_client(client)
else:
- self.add_client(client, dict(profile=profile))
- self.clients[client] = profile
+ if addresspair in self.session_cache:
+ # we are working with a uuid'd client
+ self.add_client(self.session_cache[addresspair][1],
+ dict(uuid=client, profile=profile,
+ address=addresspair[0]))
+ else:
+ self.add_client(client, dict(profile=profile))
+ self.clients.append(client)
+ self.clientgroups[client] = [profile]
+ if not self._use_db:
+ self.clients_xml.write()
+
+ def set_version(self, client, version):
+        """Set version for provided client."""
+ self.logger.info("Setting client %s version to %s" % (client, version))
+ if client in self.clients:
+ self.logger.info("Setting version on client %s to %s" %
+ (client, version))
+ self.update_client(client, dict(version=version))
+ else:
+ msg = "Cannot set version on non-existent client %s" % client
+ self.logger.error(msg)
+ raise Bcfg2.Server.Plugin.MetadataConsistencyError(msg)
+ self.versions[client] = version
self.clients_xml.write()
def resolve_client(self, addresspair, cleanup_cache=False):
@@ -549,7 +821,7 @@ class Metadata(Bcfg2.Server.Plugin.Plugin,
# _any_ port numbers - perhaps a priority queue could
# be faster?
curtime = time.time()
- for addrpair in self.session_cache.keys():
+ for addrpair in list(self.session_cache.keys()):
if addresspair[0] == addrpair[0]:
(stamp, _) = self.session_cache[addrpair]
if curtime - stamp > cache_ttl:
@@ -565,9 +837,9 @@ class Metadata(Bcfg2.Server.Plugin.Plugin,
address = addresspair[0]
if address in self.addresses:
if len(self.addresses[address]) != 1:
- self.logger.error("Address %s has multiple reverse assignments; "
- "a uuid must be used" % (address))
- raise MetadataConsistencyError
+ err = "Address %s has multiple reverse assignments; a uuid must be used" % address
+ self.logger.error(err)
+ raise Bcfg2.Server.Plugin.MetadataConsistencyError(err)
return self.addresses[address][0]
try:
cname = socket.gethostbyaddr(address)[0].lower()
@@ -575,34 +847,102 @@ class Metadata(Bcfg2.Server.Plugin.Plugin,
return self.aliases[cname]
return cname
except socket.herror:
- warning = "address resolution error for %s" % (address)
+ warning = "address resolution error for %s" % address
self.logger.warning(warning)
- raise MetadataConsistencyError
+ raise Bcfg2.Server.Plugin.MetadataConsistencyError(warning)
+
+ def _merge_groups(self, client, groups, categories=None):
+ """ set group membership based on the contents of groups.xml
+ and initial group membership of this client. Returns a tuple
+ of (allgroups, categories)"""
+ numgroups = -1 # force one initial pass
+ if categories is None:
+ categories = dict()
+ while numgroups != len(groups):
+ numgroups = len(groups)
+ for predicate, group in self.group_membership.items():
+ if group.name in groups:
+ continue
+ if predicate(client, groups, categories):
+ groups.add(group.name)
+ if group.category:
+ categories[group.category] = group.name
+ for predicate, group in self.negated_groups.items():
+ if group.name not in groups:
+ continue
+ if predicate(client, groups, categories):
+ groups.remove(group.name)
+ if group.category:
+ del categories[group.category]
+ return (groups, categories)
def get_initial_metadata(self, client):
"""Return the metadata for a given client."""
if False in list(self.states.values()):
- raise MetadataRuntimeError
+ raise Bcfg2.Server.Plugin.MetadataRuntimeError("Metadata has not been read yet")
client = client.lower()
if client in self.aliases:
client = self.aliases[client]
- if client in self.clients:
- profile = self.clients[client]
- (bundles, groups, categories) = self.groups[profile]
- else:
- if self.default == None:
- self.logger.error("Cannot set group for client %s; "
- "no default group set" % client)
- raise MetadataConsistencyError
- self.set_profile(client, self.default, (None, None))
- profile = self.default
- [bundles, groups, categories] = self.groups[self.default]
+
+ groups = set()
+ categories = dict()
+ profile = None
+
+ if client not in self.clients:
+ pgroup = None
+ if client in self.clientgroups:
+ pgroup = self.clientgroups[client][0]
+ elif self.default:
+ pgroup = self.default
+
+ if pgroup:
+ self.set_profile(client, pgroup, (None, None), force=True)
+ groups.add(pgroup)
+ category = self.groups[pgroup].category
+ if category:
+ categories[category] = pgroup
+ if (pgroup in self.groups and self.groups[pgroup].is_profile):
+ profile = pgroup
+ else:
+ msg = "Cannot add new client %s; no default group set" % client
+ self.logger.error(msg)
+ raise Bcfg2.Server.Plugin.MetadataConsistencyError(msg)
+
+ if client in self.clientgroups:
+ for cgroup in self.clientgroups[client]:
+ if cgroup in groups:
+ continue
+ if cgroup not in self.groups:
+ self.groups[cgroup] = MetadataGroup(cgroup)
+ category = self.groups[cgroup].category
+ if category and category in categories:
+ self.logger.warning("%s: Group %s suppressed by "
+ "category %s; %s already a member "
+ "of %s" %
+ (self.name, cgroup, category,
+ client, categories[category]))
+ continue
+ if category:
+ categories[category] = cgroup
+ groups.add(cgroup)
+ # favor client groups for setting profile
+ if not profile and self.groups[cgroup].is_profile:
+ profile = cgroup
+
+ groups, categories = self._merge_groups(client, groups,
+ categories=categories)
+
+ bundles = set()
+ for group in groups:
+ try:
+ bundles.update(self.groups[group].bundles)
+ except KeyError:
+ self.logger.warning("%s: %s is a member of undefined group %s" %
+ (self.name, client, group))
+
aliases = self.raliases.get(client, set())
addresses = self.raddresses.get(client, set())
- newgroups = set(groups)
- newbundles = set(bundles)
- newcategories = {}
- newcategories.update(categories)
+ version = self.versions.get(client, None)
if client in self.passwords:
password = self.passwords[client]
else:
@@ -613,61 +953,70 @@ class Metadata(Bcfg2.Server.Plugin.Plugin,
uuid = uuids[0]
else:
uuid = None
- for group in self.cgroups.get(client, []):
- if group in self.groups:
- nbundles, ngroups, ncategories = self.groups[group]
- else:
- nbundles, ngroups, ncategories = ([], [group], {})
- [newbundles.add(b) for b in nbundles if b not in newbundles]
- [newgroups.add(g) for g in ngroups if g not in newgroups]
- newcategories.update(ncategories)
- return ClientMetadata(client, profile, newgroups, newbundles, aliases,
- addresses, newcategories, uuid, password,
+ if not profile:
+ # one last ditch attempt at setting the profile
+ profiles = [g for g in groups
+ if g in self.groups and self.groups[g].is_profile]
+ if len(profiles) >= 1:
+ profile = profiles[0]
+
+ return ClientMetadata(client, profile, groups, bundles, aliases,
+ addresses, categories, uuid, password, version,
self.query)
def get_all_group_names(self):
all_groups = set()
- [all_groups.update(g[1]) for g in list(self.groups.values())]
+ all_groups.update(self.groups.keys())
+ all_groups.update([g.name for g in self.group_membership.values()])
+ all_groups.update([g.name for g in self.negated_groups.values()])
+ for grp in self.clientgroups.values():
+ all_groups.update(grp)
return all_groups
def get_all_groups_in_category(self, category):
- all_groups = set()
- [all_groups.add(g) for g in self.categories \
- if self.categories[g] == category]
- return all_groups
+ return set([g.name for g in self.groups.values()
+ if g.category == category])
def get_client_names_by_profiles(self, profiles):
- return [client for client, profile in list(self.clients.items()) \
- if profile in profiles]
+ rv = []
+ for client in list(self.clients):
+ mdata = self.get_initial_metadata(client)
+ if mdata.profile in profiles:
+ rv.append(client)
+ return rv
def get_client_names_by_groups(self, groups):
mdata = [self.core.build_metadata(client)
- for client in list(self.clients.keys())]
+ for client in list(self.clients)]
return [md.hostname for md in mdata if md.groups.issuperset(groups)]
+ def get_client_names_by_bundles(self, bundles):
+ mdata = [self.core.build_metadata(client)
+                 for client in list(self.clients)]
+ return [md.hostname for md in mdata if md.bundles.issuperset(bundles)]
+
def merge_additional_groups(self, imd, groups):
for group in groups:
- if (group in self.categories and
- self.categories[group] in imd.categories):
+ if group in imd.groups:
continue
- newbundles, newgroups, _ = self.groups.get(group,
- (list(),
- [group],
- dict()))
- for newbundle in newbundles:
- if newbundle not in imd.bundles:
- imd.bundles.add(newbundle)
- for newgroup in newgroups:
- if newgroup not in imd.groups:
- if (newgroup in self.categories and
- self.categories[newgroup] in imd.categories):
- continue
- if newgroup in self.private:
- self.logger.error("Refusing to add dynamic membership "
- "in private group %s for client %s" %
- (newgroup, imd.hostname))
- continue
- imd.groups.add(newgroup)
+ if group in self.groups and self.groups[group].category:
+ category = self.groups[group].category
+ if self.groups[group].category in imd.categories:
+ self.logger.warning("%s: Group %s suppressed by category "
+ "%s; %s already a member of %s" %
+ (self.name, group, category,
+ imd.hostname,
+ imd.categories[category]))
+ continue
+                imd.categories[category] = group
+ imd.groups.add(group)
+
+ self._merge_groups(imd.hostname, imd.groups,
+ categories=imd.categories)
+
+ for group in imd.groups:
+ if group in self.groups:
+ imd.bundles.update(self.groups[group].bundles)
def merge_additional_data(self, imd, source, data):
if not hasattr(imd, source):
@@ -686,8 +1035,8 @@ class Metadata(Bcfg2.Server.Plugin.Plugin,
(client, address))
return True
else:
- self.logger.error("Got request for non-float client %s from %s" %
- (client, address))
+ self.logger.error("Got request for non-float client %s from %s"
+ % (client, address))
return False
resolved = self.resolve_client(addresspair)
if resolved.lower() == client.lower():
@@ -711,9 +1060,10 @@ class Metadata(Bcfg2.Server.Plugin.Plugin,
id_method = 'address'
try:
client = self.resolve_client(address)
- except MetadataConsistencyError:
- self.logger.error("Client %s failed to resolve; metadata problem"
- % address[0])
+ except Bcfg2.Server.Plugin.MetadataConsistencyError:
+ err = sys.exc_info()[1]
+ self.logger.error("Client %s failed to resolve: %s" %
+ (address[0], err))
return False
else:
id_method = 'uuid'
@@ -768,7 +1118,7 @@ class Metadata(Bcfg2.Server.Plugin.Plugin,
"secure mode" % address[0])
return False
# populate the session cache
- if user.decode('utf-8') != 'root':
+ if user != 'root':
self.session_cache[address] = (time.time(), client)
return True
@@ -792,7 +1142,8 @@ class Metadata(Bcfg2.Server.Plugin.Plugin,
def include_group(group):
return not only_client or group in clientmeta.groups
- groups_tree = lxml.etree.parse(os.path.join(self.data, "groups.xml"))
+ groups_tree = lxml.etree.parse(os.path.join(self.data, "groups.xml"),
+ parser=Bcfg2.Server.XMLParser)
try:
groups_tree.xinclude()
except lxml.etree.XIncludeError:
@@ -810,20 +1161,26 @@ class Metadata(Bcfg2.Server.Plugin.Plugin,
del categories[None]
if hosts:
instances = {}
- clients = self.clients
- for client, profile in list(clients.items()):
+ for client in list(self.clients):
if include_client(client):
continue
- if profile in instances:
- instances[profile].append(client)
+ if client in self.clientgroups:
+ groups = self.clientgroups[client]
+ elif self.default:
+ groups = [self.default]
else:
- instances[profile] = [client]
- for profile, clist in list(instances.items()):
+ continue
+ for group in groups:
+ try:
+ instances[group].append(client)
+ except KeyError:
+ instances[group] = [client]
+ for group, clist in list(instances.items()):
clist.sort()
viz_str.append('"%s-instances" [ label="%s", shape="record" ];' %
- (profile, '|'.join(clist)))
+ (group, '|'.join(clist)))
viz_str.append('"%s-instances" -> "group-%s";' %
- (profile, profile))
+ (group, group))
if bundles:
bundles = []
[bundles.append(bund.get('name')) \
@@ -864,3 +1221,35 @@ class Metadata(Bcfg2.Server.Plugin.Plugin,
viz_str.append('"%s" [label="%s", shape="record", style="filled", fillcolor="%s"];' %
(category, category, categories[category]))
return "\n".join("\t" + s for s in viz_str)
+
+
+class MetadataLint(Bcfg2.Server.Lint.ServerPlugin):
+ def Run(self):
+ self.nested_clients()
+ self.deprecated_options()
+
+ @classmethod
+ def Errors(cls):
+ return {"nested-client-tags": "warning",
+ "deprecated-clients-options": "warning"}
+
+ def deprecated_options(self):
+ clientdata = self.metadata.clients_xml.xdata
+        for el in clientdata.xpath("//Client"):
+ loc = el.get("location")
+ if loc:
+ if loc == "floating":
+ floating = True
+ else:
+ floating = False
+ self.LintError("deprecated-clients-options",
+ "The location='%s' option is deprecated. "
+ "Please use floating='%s' instead: %s" %
+ (loc, floating, self.RenderXML(el)))
+
+ def nested_clients(self):
+ groupdata = self.metadata.groups_xml.xdata
+ for el in groupdata.xpath("//Client//Client"):
+ self.LintError("nested-client-tags",
+ "Client %s nested within Client tag: %s" %
+ (el.get("name"), self.RenderXML(el)))
diff --git a/src/lib/Bcfg2/Server/Plugins/NagiosGen.py b/src/lib/Bcfg2/Server/Plugins/NagiosGen.py
index 4dbd57d16..f2b8336e0 100644
--- a/src/lib/Bcfg2/Server/Plugins/NagiosGen.py
+++ b/src/lib/Bcfg2/Server/Plugins/NagiosGen.py
@@ -7,18 +7,23 @@ import glob
import socket
import logging
import lxml.etree
-
+import Bcfg2.Server
import Bcfg2.Server.Plugin
LOGGER = logging.getLogger('Bcfg2.Plugins.NagiosGen')
line_fmt = '\t%-32s %s'
-class NagiosGenConfig(Bcfg2.Server.Plugin.SingleXMLFileBacked,
- Bcfg2.Server.Plugin.StructFile):
+class NagiosGenConfig(Bcfg2.Server.Plugin.StructFile):
def __init__(self, filename, fam):
- Bcfg2.Server.Plugin.SingleXMLFileBacked.__init__(self, filename, fam)
- Bcfg2.Server.Plugin.StructFile.__init__(self, filename)
+ # create config.xml if missing
+ if not os.path.exists(filename):
+ LOGGER.warning("NagiosGen: %s missing. "
+ "Creating empty one for you." % filename)
+ open(filename, "w").write("<NagiosGen></NagiosGen>")
+
+ Bcfg2.Server.Plugin.StructFile.__init__(self, filename, fam=fam,
+ should_monitor=True)
class NagiosGen(Bcfg2.Server.Plugin.Plugin,
@@ -51,7 +56,12 @@ class NagiosGen(Bcfg2.Server.Plugin.Plugin,
def createhostconfig(self, entry, metadata):
"""Build host specific configuration file."""
- host_address = socket.gethostbyname(metadata.hostname)
+ try:
+ host_address = socket.gethostbyname(metadata.hostname)
+ except socket.gaierror:
+ LOGGER.error("Failed to find IP address for %s" %
+ metadata.hostname)
+ raise Bcfg2.Server.Plugin.PluginExecutionError
host_groups = [grp for grp in metadata.groups
if os.path.isfile('%s/%s-group.cfg' % (self.data, grp))]
host_config = ['define host {',
@@ -84,7 +94,8 @@ class NagiosGen(Bcfg2.Server.Plugin.Plugin,
LOGGER.warn("Parsing deprecated NagiosGen/parents.xml. "
"Update to the new-style config with "
"nagiosgen-convert.py.")
- parents = lxml.etree.parse(pfile)
+ parents = lxml.etree.parse(pfile,
+ parser=Bcfg2.Server.XMLParser)
for el in parents.xpath("//Depend[@name='%s']" % metadata.hostname):
if 'parent' in xtra:
xtra['parent'] += "," + el.get("on")
diff --git a/src/lib/Bcfg2/Server/Plugins/Ohai.py b/src/lib/Bcfg2/Server/Plugins/Ohai.py
index 5fff20d98..20f9ba877 100644
--- a/src/lib/Bcfg2/Server/Plugins/Ohai.py
+++ b/src/lib/Bcfg2/Server/Plugins/Ohai.py
@@ -41,7 +41,7 @@ class OhaiCache(object):
# simply return if the client returned nothing
return
self.cache[item] = json.loads(value)
- file("%s/%s.json" % (self.dirname, item), 'w').write(value)
+ open("%s/%s.json" % (self.dirname, item), 'w').write(value)
def __getitem__(self, item):
if item not in self.cache:
diff --git a/src/lib/Bcfg2/Server/Plugins/Packages/Apt.py b/src/lib/Bcfg2/Server/Plugins/Packages/Apt.py
index 49e9d417b..685cd5c1d 100644
--- a/src/lib/Bcfg2/Server/Plugins/Packages/Apt.py
+++ b/src/lib/Bcfg2/Server/Plugins/Packages/Apt.py
@@ -2,13 +2,15 @@ import re
import gzip
from Bcfg2.Server.Plugins.Packages.Collection import Collection
from Bcfg2.Server.Plugins.Packages.Source import Source
-from Bcfg2.Bcfg2Py3k import cPickle, file
+from Bcfg2.Bcfg2Py3k import cPickle
class AptCollection(Collection):
def get_group(self, group):
- self.logger.warning("Packages: Package groups are not supported by APT")
+ self.logger.warning("Packages: Package groups are not "
+ "supported by APT")
return []
+
class AptSource(Source):
basegroups = ['apt', 'debian', 'ubuntu', 'nexenta']
ptype = 'deb'
@@ -22,14 +24,15 @@ class AptSource(Source):
'components': self.components, 'arches': self.arches}]
def save_state(self):
- cache = file(self.cachefile, 'wb')
- cPickle.dump((self.pkgnames, self.deps, self.provides),
- cache, 2)
+ cache = open(self.cachefile, 'wb')
+ cPickle.dump((self.pkgnames, self.deps, self.provides,
+ self.essentialpkgs), cache, 2)
cache.close()
def load_state(self):
- data = file(self.cachefile)
- self.pkgnames, self.deps, self.provides = cPickle.load(data)
+ data = open(self.cachefile)
+ (self.pkgnames, self.deps, self.provides,
+ self.essentialpkgs) = cPickle.load(data)
def filter_unknown(self, unknown):
filtered = set([u for u in unknown if u.startswith('choice')])
diff --git a/src/lib/Bcfg2/Server/Plugins/Packages/Collection.py b/src/lib/Bcfg2/Server/Plugins/Packages/Collection.py
index 3ea14ce75..b05a69d4a 100644
--- a/src/lib/Bcfg2/Server/Plugins/Packages/Collection.py
+++ b/src/lib/Bcfg2/Server/Plugins/Packages/Collection.py
@@ -1,6 +1,7 @@
import sys
import copy
import logging
+import lxml
import Bcfg2.Server.Plugin
logger = logging.getLogger(__name__)
@@ -52,13 +53,40 @@ class Collection(Bcfg2.Server.Plugin.Debuggable):
@property
def cachekey(self):
- return md5(self.get_config()).hexdigest()
+ return md5(self.sourcelist()).hexdigest()
def get_config(self):
- self.logger.error("Packages: Cannot generate config for host with "
- "multiple source types (%s)" % self.metadata.hostname)
+ self.logger.error("Packages: Cannot generate config for host %s with "
+ "no sources or multiple source types" %
+ self.metadata.hostname)
return ""
+ def sourcelist(self):
+ srcs = []
+ for source in self.sources:
+ # get_urls() loads url_map as a side-effect
+ source.get_urls()
+ for url_map in source.url_map:
+            if url_map['arch'] not in self.metadata.groups:
+ continue
+ reponame = source.get_repo_name(url_map)
+ srcs.append("Name: %s" % reponame)
+ srcs.append(" Type: %s" % source.ptype)
+ if url_map['url']:
+ srcs.append(" URL: %s" % url_map['url'])
+ elif url_map['rawurl']:
+ srcs.append(" RAWURL: %s" % url_map['rawurl'])
+ if source.gpgkeys:
+ srcs.append(" GPG Key(s): %s" % ", ".join(source.gpgkeys))
+ else:
+ srcs.append(" GPG Key(s): None")
+ if len(source.blacklist):
+ srcs.append(" Blacklist: %s" % ", ".join(source.blacklist))
+ if len(source.whitelist):
+ srcs.append(" Whitelist: %s" % ", ".join(source.whitelist))
+ srcs.append("")
+ return "\n".join(srcs)
+
def get_relevant_groups(self):
groups = []
for source in self.sources:
@@ -79,6 +107,14 @@ class Collection(Bcfg2.Server.Plugin.Debuggable):
cachefiles.add(source.cachefile)
return list(cachefiles)
+ def get_groups(self, grouplist):
+ """ provided since some backends may be able to query multiple
+ groups at once faster than serially """
+ rv = dict()
+ for group, ptype in grouplist:
+ rv[group] = self.get_group(group, ptype)
+ return rv
+
def get_group(self, group, ptype=None):
for source in self.sources:
pkgs = source.get_group(self.metadata, group, ptype=ptype)
@@ -152,6 +188,28 @@ class Collection(Bcfg2.Server.Plugin.Debuggable):
""" do any collection-level data setup tasks """
pass
+ def packages_from_entry(self, entry):
+ """ given a Package or BoundPackage entry, get a list of the
+ package(s) described by it in a format appropriate for passing
+ to complete(). by default, that's just the name; only the Yum
+ backend supports getting versions"""
+ return [entry.get("name")]
+
+ def packages_to_entry(self, pkglist, entry):
+ for pkg in pkglist:
+ lxml.etree.SubElement(entry, 'BoundPackage', name=pkg,
+ version=self.setup.cfp.get("packages",
+ "version",
+ default="auto"),
+ type=self.ptype, origin='Packages')
+
+ def get_new_packages(self, initial, complete):
+ """ compute the difference between the complete package list
+ and the initial package list. this is necessary because the
+ format may be different between the two lists due to
+ packages_{to,from}_entry() """
+ return list(complete.difference(initial))
+
def complete(self, packagelist):
'''Build the transitive closure of all package dependencies
@@ -350,15 +408,7 @@ def factory(metadata, sources, basepath, debug=False):
",".join([s.__name__ for s in sclasses]))
cclass = Collection
elif len(sclasses) == 0:
- # you'd think this should be a warning, but it happens all the
- # freaking time if you have a) machines in your clients.xml
- # that do not have the proper groups set up yet (e.g., if you
- # have multiple Bcfg2 servers and Packages-relevant groups set
- # by probes); and b) templates that query all or multiple
- # machines (e.g., with metadata.query.all_clients())
- if debug:
- logger.error("Packages: No sources found for %s" %
- metadata.hostname)
+ logger.error("Packages: No sources found for %s" % metadata.hostname)
cclass = Collection
else:
cclass = get_collection_class(sclasses.pop().__name__.replace("Source",
@@ -373,4 +423,3 @@ def factory(metadata, sources, basepath, debug=False):
clients[metadata.hostname] = ckey
collections[ckey] = collection
return collection
-
diff --git a/src/lib/Bcfg2/Server/Plugins/Packages/Pac.py b/src/lib/Bcfg2/Server/Plugins/Packages/Pac.py
index 99a090739..34c7b42c1 100644
--- a/src/lib/Bcfg2/Server/Plugins/Packages/Pac.py
+++ b/src/lib/Bcfg2/Server/Plugins/Packages/Pac.py
@@ -1,6 +1,6 @@
import gzip
import tarfile
-from Bcfg2.Bcfg2Py3k import cPickle, file
+from Bcfg2.Bcfg2Py3k import cPickle
from Bcfg2.Server.Plugins.Packages.Collection import Collection
from Bcfg2.Server.Plugins.Packages.Source import Source
@@ -9,6 +9,7 @@ class PacCollection(Collection):
self.logger.warning("Packages: Package groups are not supported by Pacman")
return []
+
class PacSource(Source):
basegroups = ['arch', 'parabola']
ptype = 'pacman'
@@ -22,13 +23,13 @@ class PacSource(Source):
'components': self.components, 'arches': self.arches}]
def save_state(self):
- cache = file(self.cachefile, 'wb')
+ cache = open(self.cachefile, 'wb')
cPickle.dump((self.pkgnames, self.deps, self.provides),
cache, 2)
cache.close()
def load_state(self):
- data = file(self.cachefile)
+ data = open(self.cachefile)
self.pkgnames, self.deps, self.provides = cPickle.load(data)
def filter_unknown(self, unknown):
diff --git a/src/lib/Bcfg2/Server/Plugins/Packages/PackagesSources.py b/src/lib/Bcfg2/Server/Plugins/Packages/PackagesSources.py
index 7796b9e34..0d565be31 100644
--- a/src/lib/Bcfg2/Server/Plugins/Packages/PackagesSources.py
+++ b/src/lib/Bcfg2/Server/Plugins/Packages/PackagesSources.py
@@ -4,17 +4,15 @@ import lxml.etree
import Bcfg2.Server.Plugin
from Bcfg2.Server.Plugins.Packages.Source import SourceInitError
-class PackagesSources(Bcfg2.Server.Plugin.SingleXMLFileBacked,
- Bcfg2.Server.Plugin.StructFile,
+class PackagesSources(Bcfg2.Server.Plugin.StructFile,
Bcfg2.Server.Plugin.Debuggable):
__identifier__ = None
def __init__(self, filename, cachepath, fam, packages, setup):
Bcfg2.Server.Plugin.Debuggable.__init__(self)
try:
- Bcfg2.Server.Plugin.SingleXMLFileBacked.__init__(self,
- filename,
- fam)
+ Bcfg2.Server.Plugin.StructFile.__init__(self, filename, fam=fam,
+ should_monitor=True)
except OSError:
err = sys.exc_info()[1]
msg = "Packages: Failed to read configuration file: %s" % err
@@ -22,7 +20,6 @@ class PackagesSources(Bcfg2.Server.Plugin.SingleXMLFileBacked,
msg += " Have you created it?"
self.logger.error(msg)
raise Bcfg2.Server.Plugin.PluginInitError(msg)
- Bcfg2.Server.Plugin.StructFile.__init__(self, filename)
self.cachepath = cachepath
self.setup = setup
if not os.path.exists(self.cachepath):
@@ -42,18 +39,11 @@ class PackagesSources(Bcfg2.Server.Plugin.SingleXMLFileBacked,
source.toggle_debug()
def HandleEvent(self, event=None):
- Bcfg2.Server.Plugin.SingleXMLFileBacked.HandleEvent(self, event=event)
+ Bcfg2.Server.Plugin.XMLFileBacked.HandleEvent(self, event=event)
if event and event.filename != self.name:
- for fname in self.extras:
- fpath = None
- if fname.startswith("/"):
- fpath = os.path.abspath(fname)
- else:
- fpath = \
- os.path.abspath(os.path.join(os.path.dirname(self.name),
- fname))
+ for fpath in self.extras:
if fpath == os.path.abspath(event.filename):
- self.parsed.add(fname)
+ self.parsed.add(fpath)
break
if self.loaded:
@@ -65,7 +55,7 @@ class PackagesSources(Bcfg2.Server.Plugin.SingleXMLFileBacked,
return sorted(list(self.parsed)) == sorted(self.extras)
def Index(self):
- Bcfg2.Server.Plugin.SingleXMLFileBacked.Index(self)
+ Bcfg2.Server.Plugin.XMLFileBacked.Index(self)
self.entries = []
for xsource in self.xdata.findall('.//Source'):
source = self.source_from_xml(xsource)
@@ -87,7 +77,8 @@ class PackagesSources(Bcfg2.Server.Plugin.SingleXMLFileBacked,
stype.title())
cls = getattr(module, "%sSource" % stype.title())
except (ImportError, AttributeError):
- self.logger.error("Packages: Unknown source type %s" % stype)
+ ex = sys.exc_info()[1]
+ self.logger.error("Packages: Unknown source type %s (%s)" % (stype, ex))
return None
try:
@@ -106,4 +97,7 @@ class PackagesSources(Bcfg2.Server.Plugin.SingleXMLFileBacked,
return "PackagesSources: %s" % repr(self.entries)
def __str__(self):
- return "PackagesSources: %s" % str(self.entries)
+ return "PackagesSources: %s sources" % len(self.entries)
+
+ def __len__(self):
+ return len(self.entries)
diff --git a/src/lib/Bcfg2/Server/Plugins/Packages/Source.py b/src/lib/Bcfg2/Server/Plugins/Packages/Source.py
index edcdcd9f2..df3706fb1 100644
--- a/src/lib/Bcfg2/Server/Plugins/Packages/Source.py
+++ b/src/lib/Bcfg2/Server/Plugins/Packages/Source.py
@@ -1,11 +1,10 @@
import os
import re
import sys
-import base64
import Bcfg2.Server.Plugin
from Bcfg2.Bcfg2Py3k import HTTPError, HTTPBasicAuthHandler, \
HTTPPasswordMgrWithDefaultRealm, install_opener, build_opener, \
- urlopen, file, cPickle
+ urlopen, cPickle
try:
from hashlib import md5
@@ -51,7 +50,18 @@ class Source(Bcfg2.Server.Plugin.Debuggable):
for key, tag in [('components', 'Component'), ('arches', 'Arch'),
('blacklist', 'Blacklist'),
('whitelist', 'Whitelist')]:
- self.__dict__[key] = [item.text for item in xsource.findall(tag)]
+ setattr(self, key, [item.text for item in xsource.findall(tag)])
+ self.server_options = dict()
+ self.client_options = dict()
+ opts = xsource.findall("Options")
+ for el in opts:
+ repoopts = dict([(k, v)
+ for k, v in el.attrib.items()
+ if k != "clientonly" and k != "serveronly"])
+ if el.get("clientonly", "false").lower() == "false":
+ self.server_options.update(repoopts)
+ if el.get("serveronly", "false").lower() == "false":
+ self.client_options.update(repoopts)
self.gpgkeys = [el.text for el in xsource.findall("GPGKey")]
@@ -137,9 +147,8 @@ class Source(Bcfg2.Server.Plugin.Debuggable):
def get_repo_name(self, url_map):
# try to find a sensible name for a repo
- if 'components' in url_map and url_map['components']:
- # use the first component as the name
- rname = url_map['components'][0]
+ if 'component' in url_map and url_map['component']:
+ rname = url_map['component']
else:
name = None
for repo_re in (self.mrepo_re,
@@ -149,12 +158,15 @@ class Source(Bcfg2.Server.Plugin.Debuggable):
if match:
name = match.group(1)
break
- if name is None:
- # couldn't figure out the name from the URL or URL map
- # (which probably means its a screwy URL), so we just
- # generate a random one
- name = base64.b64encode(os.urandom(16))[:-2]
- rname = "%s-%s" % (self.groups[0], name)
+ if name and self.groups:
+ rname = "%s-%s" % (self.groups[0], name)
+ elif self.groups:
+ rname = self.groups[0]
+ else:
+ # a global source with no reasonable name. just use
+ # the full url and let the regex below make it even
+ # uglier.
+ rname = url_map['url']
# see yum/__init__.py in the yum source, lines 441-449, for
# the source of this regex. yum doesn't like anything but
# string.ascii_letters, string.digits, and [-_.:]. There
@@ -169,6 +181,9 @@ class Source(Bcfg2.Server.Plugin.Debuggable):
else:
return self.__class__.__name__
+ def __repr__(self):
+ return str(self)
+
def get_urls(self):
return []
urls = property(get_urls)
@@ -182,6 +197,10 @@ class Source(Bcfg2.Server.Plugin.Debuggable):
if a in metadata.groups]
vdict = dict()
for agrp in agroups:
+ if agrp not in self.provides:
+ self.logger.warning("%s provides no packages for %s" %
+ (self, agrp))
+ continue
for key, value in list(self.provides[agrp].items()):
if key not in vdict:
vdict[key] = set(value)
@@ -213,7 +232,7 @@ class Source(Bcfg2.Server.Plugin.Debuggable):
fname = self.escape_url(url)
try:
data = fetch_url(url)
- file(fname, 'w').write(data)
+ open(fname, 'w').write(data)
except ValueError:
self.logger.error("Packages: Bad url string %s" % url)
raise
diff --git a/src/lib/Bcfg2/Server/Plugins/Packages/Yum.py b/src/lib/Bcfg2/Server/Plugins/Packages/Yum.py
index 53344e200..cba3373c1 100644
--- a/src/lib/Bcfg2/Server/Plugins/Packages/Yum.py
+++ b/src/lib/Bcfg2/Server/Plugins/Packages/Yum.py
@@ -1,17 +1,14 @@
import os
+import re
import sys
-import time
import copy
-import glob
import socket
-import random
import logging
-import threading
import lxml.etree
-from UserDict import DictMixin
-from subprocess import Popen, PIPE, STDOUT
+from subprocess import Popen, PIPE
import Bcfg2.Server.Plugin
-from Bcfg2.Bcfg2Py3k import StringIO, cPickle, HTTPError, ConfigParser, file
+from Bcfg2.Bcfg2Py3k import StringIO, cPickle, HTTPError, URLError, \
+ ConfigParser
from Bcfg2.Server.Plugins.Packages.Collection import Collection
from Bcfg2.Server.Plugins.Packages.Source import SourceInitError, Source, \
fetch_url
@@ -96,19 +93,29 @@ class YumCollection(Collection):
if not os.path.exists(self.cachefile):
os.mkdir(self.cachefile)
- self.configdir = os.path.join(self.basepath, "yum")
- if not os.path.exists(self.configdir):
- os.mkdir(self.configdir)
- self.cfgfile = os.path.join(self.configdir,
- "%s-yum.conf" % self.cachekey)
+ self.cfgfile = os.path.join(self.cachefile, "yum.conf")
self.write_config()
if has_pulp and self.has_pulp_sources:
_setup_pulp(self.setup)
+ self._helper = None
+
@property
def helper(self):
- return self.setup.cfp.get("packages:yum", "helper",
- default="/usr/sbin/bcfg2-yum-helper")
+ try:
+ return self.setup.cfp.get("packages:yum", "helper")
+ except:
+ pass
+
+ if not self._helper:
+ # first see if bcfg2-yum-helper is in PATH
+ try:
+ Popen(['bcfg2-yum-helper'],
+ stdin=PIPE, stdout=PIPE, stderr=PIPE).wait()
+ self._helper = 'bcfg2-yum-helper'
+ except OSError:
+ self._helper = "/usr/sbin/bcfg2-yum-helper"
+ return self._helper
@property
def use_yum(self):
@@ -129,11 +136,21 @@ class YumCollection(Collection):
yumconf = self.get_config(raw=True)
yumconf.add_section("main")
- mainopts = dict(cachedir=self.cachefile,
+ # we set installroot to the cache directory so
+ # bcfg2-yum-helper works with an empty rpmdb. otherwise
+ # the rpmdb is so hopelessly intertwined with yum that we
+ # have to totally reinvent the dependency resolver.
+ mainopts = dict(cachedir='/',
+ installroot=self.cachefile,
keepcache="0",
- sslverify="0",
debuglevel="0",
+ sslverify="0",
reposdir="/dev/null")
+ if self.setup['debug']:
+ mainopts['debuglevel'] = "5"
+ elif self.setup['verbose']:
+ mainopts['debuglevel'] = "2"
+
try:
for opt in self.setup.cfp.options("packages:yum"):
if opt not in self.option_blacklist:
@@ -162,7 +179,7 @@ class YumCollection(Collection):
config.add_section(reponame)
added = True
except ConfigParser.DuplicateSectionError:
- match = re.match("-(\d)", reponame)
+ match = re.search("-(\d+)", reponame)
if match:
rid = int(match.group(1)) + 1
else:
@@ -186,6 +203,13 @@ class YumCollection(Collection):
config.set(reponame, "includepkgs",
" ".join(source.whitelist))
+ if raw:
+ opts = source.server_options
+ else:
+ opts = source.client_options
+ for opt, val in opts.items():
+ config.set(reponame, opt, val)
+
if raw:
return config
else:
@@ -346,6 +370,25 @@ class YumCollection(Collection):
# for API completeness
return self.call_helper("get_provides", package)
+ def get_groups(self, grouplist):
+ if not self.use_yum:
+ self.logger.warning("Packages: Package groups are not supported by "
+ "Bcfg2's internal Yum dependency generator")
+ return []
+
+ if not grouplist:
+ return dict()
+
+ gdicts = []
+ for group, ptype in grouplist:
+ if group.startswith("@"):
+ group = group[1:]
+ if not ptype:
+ ptype = "default"
+ gdicts.append(dict(group=group, type=ptype))
+
+ return self.call_helper("get_groups", gdicts)
+
def get_group(self, group, ptype="default"):
if not self.use_yum:
self.logger.warning("Packages: Package groups are not supported by "
@@ -355,32 +398,106 @@ class YumCollection(Collection):
if group.startswith("@"):
group = group[1:]
- pkgs = self.call_helper("get_group", dict(group=group, type=ptype))
- return pkgs
+ return self.call_helper("get_group", dict(group=group, type=ptype))
+
+ def packages_from_entry(self, entry):
+ rv = set()
+ name = entry.get("name")
+
+ def _tag_to_pkg(tag):
+ rv = (name, tag.get("arch"), tag.get("epoch"),
+ tag.get("version"), tag.get("release"))
+ # if a package requires no specific version, we just use
+ # the name, not the tuple. this limits the amount of JSON
+ # encoding/decoding that has to be done to pass the
+ # package list to bcfg2-yum-helper.
+ if rv[1:] == (None, None, None, None):
+ return name
+ else:
+ return rv
+
+ for inst in entry.getchildren():
+ if inst.tag != "Instance":
+ continue
+ rv.add(_tag_to_pkg(inst))
+ if not rv:
+ rv.add(_tag_to_pkg(entry))
+ return list(rv)
+
+ def packages_to_entry(self, pkglist, entry):
+ def _get_entry_attrs(pkgtup):
+ attrs = dict(version=self.setup.cfp.get("packages",
+ "version",
+ default="auto"))
+ if attrs['version'] == 'any':
+ return attrs
+
+ if pkgtup[1]:
+ attrs['arch'] = pkgtup[1]
+ if pkgtup[2]:
+ attrs['epoch'] = pkgtup[2]
+ if pkgtup[3]:
+ attrs['version'] = pkgtup[3]
+ if pkgtup[4]:
+ attrs['release'] = pkgtup[4]
+ return attrs
+
+ packages = dict()
+ for pkg in pkglist:
+ try:
+ packages[pkg[0]].append(pkg)
+ except KeyError:
+ packages[pkg[0]] = [pkg]
+ for name, instances in packages.items():
+ pkgattrs = dict(type=self.ptype,
+ origin='Packages',
+ name=name)
+ if len(instances) > 1:
+ pkg_el = lxml.etree.SubElement(entry, 'BoundPackage',
+ **pkgattrs)
+ for inst in instances:
+ lxml.etree.SubElement(pkg_el, "Instance",
+ _get_entry_attrs(inst))
+ else:
+ attrs = _get_entry_attrs(instances[0])
+ attrs.update(pkgattrs)
+ lxml.etree.SubElement(entry, 'BoundPackage', **attrs)
+
+ def get_new_packages(self, initial, complete):
+ initial_names = []
+ for pkg in initial:
+ if isinstance(pkg, tuple):
+ initial_names.append(pkg[0])
+ else:
+ initial_names.append(pkg)
+ new = []
+ for pkg in complete:
+ if pkg[0] not in initial_names:
+ new.append(pkg)
+ return new
def complete(self, packagelist):
if not self.use_yum:
return Collection.complete(self, packagelist)
- packages = set()
- unknown = set(packagelist)
-
- if unknown:
+ if packagelist:
result = \
self.call_helper("complete",
- dict(packages=list(unknown),
+ dict(packages=list(packagelist),
groups=list(self.get_relevant_groups())))
- if result and "packages" in result and "unknown" in result:
- # we stringify every package because it gets returned
- # in unicode; set.update() doesn't work if some
- # elements are unicode and other are strings. (I.e.,
- # u'foo' and 'foo' get treated as unique elements.)
- packages.update([str(p) for p in result['packages']])
- unknown = set([str(p) for p in result['unknown']])
-
+ if not result:
+ # some sort of error, reported by call_helper()
+ return set(), packagelist
+ # json doesn't understand sets or tuples, so we get back a
+ # lists of lists (packages) and a list of unicode strings
+ # (unknown). turn those into a set of tuples and a set of
+ # strings, respectively.
+ unknown = set([str(u) for u in result['unknown']])
+ packages = set([tuple(p) for p in result['packages']])
self.filter_unknown(unknown)
-
- return packages, unknown
+ return packages, unknown
+ else:
+ return set(), set()
def call_helper(self, command, input=None):
""" Make a call to bcfg2-yum-helper. The yum libs have
@@ -388,16 +505,12 @@ class YumCollection(Collection):
around that in long-running processes it to have a short-lived
helper. No, seriously -- check out the yum-updatesd code.
It's pure madness. """
- # it'd be nice if we could change this to be more verbose if
- # -v was given to bcfg2-server, but Collection objects don't
- # get the 'setup' variable, so we don't know how verbose
- # bcfg2-server is. It'd also be nice if we could tell yum to
- # log to syslog. So would a unicorn.
cmd = [self.helper, "-c", self.cfgfile]
- if self.debug_flag:
+ verbose = self.debug_flag or self.setup['verbose']
+ if verbose:
cmd.append("-v")
cmd.append(command)
- self.debug_log("Packages: running %s" % " ".join(cmd))
+ self.debug_log("Packages: running %s" % " ".join(cmd), flag=verbose)
try:
helper = Popen(cmd, stdin=PIPE, stdout=PIPE, stderr=PIPE)
except OSError:
@@ -415,9 +528,9 @@ class YumCollection(Collection):
if rv:
self.logger.error("Packages: error running bcfg2-yum-helper "
"(returned %d): %s" % (rv, stderr))
- elif self.debug_flag:
+ else:
self.debug_log("Packages: debug info from bcfg2-yum-helper: %s" %
- stderr)
+ stderr, flag=verbose)
try:
return json.loads(stdout)
except ValueError:
@@ -500,15 +613,14 @@ class YumSource(Source):
def save_state(self):
if not self.use_yum:
- cache = file(self.cachefile, 'wb')
+ cache = open(self.cachefile, 'wb')
cPickle.dump((self.packages, self.deps, self.provides,
self.filemap, self.url_map), cache, 2)
cache.close()
-
def load_state(self):
if not self.use_yum:
- data = file(self.cachefile)
+ data = open(self.cachefile)
(self.packages, self.deps, self.provides,
self.filemap, self.url_map) = cPickle.load(data)
@@ -520,7 +632,7 @@ class YumSource(Source):
usettings = [{'version':self.version, 'component':comp,
'arch':arch}
for comp in self.components]
- else: # rawurl given
+ else: # rawurl given
usettings = [{'version':self.version, 'component':None,
'arch':arch}]
@@ -546,6 +658,11 @@ class YumSource(Source):
except ValueError:
self.logger.error("Packages: Bad url string %s" % rmdurl)
return []
+ except URLError:
+ err = sys.exc_info()[1]
+ self.logger.error("Packages: Failed to fetch url %s. %s" %
+ (rmdurl, err))
+ return []
except HTTPError:
err = sys.exc_info()[1]
self.logger.error("Packages: Failed to fetch url %s. code=%s" %
diff --git a/src/lib/Bcfg2/Server/Plugins/Packages/__init__.py b/src/lib/Bcfg2/Server/Plugins/Packages/__init__.py
index d789a6d39..d3095300a 100644
--- a/src/lib/Bcfg2/Server/Plugins/Packages/__init__.py
+++ b/src/lib/Bcfg2/Server/Plugins/Packages/__init__.py
@@ -11,14 +11,16 @@ from Bcfg2.Bcfg2Py3k import ConfigParser, urlopen
from Bcfg2.Server.Plugins.Packages import Collection
from Bcfg2.Server.Plugins.Packages.PackagesSources import PackagesSources
+yum_config_default = "/etc/yum.repos.d/bcfg2.repo"
+apt_config_default = "/etc/apt/sources.d/bcfg2"
+
class Packages(Bcfg2.Server.Plugin.Plugin,
Bcfg2.Server.Plugin.StructureValidator,
Bcfg2.Server.Plugin.Generator,
Bcfg2.Server.Plugin.Connector,
- Bcfg2.Server.Plugin.GoalValidator):
+ Bcfg2.Server.Plugin.ClientRunHooks):
name = 'Packages'
conflicts = ['Pkgmgr']
- experimental = True
__rmi__ = Bcfg2.Server.Plugin.Plugin.__rmi__ + ['Refresh', 'Reload']
def __init__(self, core, datastore):
@@ -26,11 +28,15 @@ class Packages(Bcfg2.Server.Plugin.Plugin,
Bcfg2.Server.Plugin.StructureValidator.__init__(self)
Bcfg2.Server.Plugin.Generator.__init__(self)
Bcfg2.Server.Plugin.Connector.__init__(self)
- Bcfg2.Server.Plugin.Probing.__init__(self)
+ Bcfg2.Server.Plugin.ClientRunHooks.__init__(self)
self.sentinels = set()
- self.cachepath = os.path.join(self.data, 'cache')
- self.keypath = os.path.join(self.data, 'keys')
+ self.cachepath = \
+ self.core.setup.cfp.get("packages", "cache",
+ default=os.path.join(self.data, 'cache'))
+ self.keypath = \
+ self.core.setup.cfp.get("packages", "keycache",
+ default=os.path.join(self.data, 'keys'))
if not os.path.exists(self.keypath):
# create key directory if needed
os.makedirs(self.keypath)
@@ -40,11 +46,16 @@ class Packages(Bcfg2.Server.Plugin.Plugin,
self.core.setup)
def toggle_debug(self):
- Bcfg2.Server.Plugin.Plugin.toggle_debug(self)
+ rv = Bcfg2.Server.Plugin.Plugin.toggle_debug(self)
self.sources.toggle_debug()
+ return rv
@property
def disableResolver(self):
+ if self.disableMetaData:
+ # disabling metadata without disabling the resolver Breaks
+ # Things
+ return True
try:
return not self.core.setup.cfp.getboolean("packages", "resolver")
except (ConfigParser.NoSectionError, ConfigParser.NoOptionError):
@@ -87,16 +98,18 @@ class Packages(Bcfg2.Server.Plugin.Plugin,
if entry.tag == 'Package':
collection = self._get_collection(metadata)
entry.set('version', self.core.setup.cfp.get("packages",
- "version",
- default="auto"))
+ "version",
+ default="auto"))
entry.set('type', collection.ptype)
elif entry.tag == 'Path':
- if (entry.get("name") == self.core.setup.cfp.get("packages",
- "yum_config",
- default="") or
- entry.get("name") == self.core.setup.cfp.get("packages",
- "apt_config",
- default="")):
+ if (entry.get("name") == \
+ self.core.setup.cfp.get("packages",
+ "yum_config",
+ default=yum_config_default) or
+ entry.get("name") == \
+ self.core.setup.cfp.get("packages",
+ "apt_config",
+ default=apt_config_default)):
self.create_config(entry, metadata)
def HandlesEntry(self, entry, metadata):
@@ -110,12 +123,14 @@ class Packages(Bcfg2.Server.Plugin.Plugin,
return True
elif entry.tag == 'Path':
# managed entries for yum/apt configs
- if (entry.get("name") == self.core.setup.cfp.get("packages",
- "yum_config",
- default="") or
- entry.get("name") == self.core.setup.cfp.get("packages",
- "apt_config",
- default="")):
+ if (entry.get("name") == \
+ self.core.setup.cfp.get("packages",
+ "yum_config",
+ default=yum_config_default) or
+ entry.get("name") == \
+ self.core.setup.cfp.get("packages",
+ "apt_config",
+ default=apt_config_default)):
return True
return False
@@ -151,26 +166,24 @@ class Packages(Bcfg2.Server.Plugin.Plugin,
# essential pkgs are those marked as such by the distribution
essential = collection.get_essential()
to_remove = []
+ groups = []
for struct in structures:
for pkg in struct.xpath('//Package | //BoundPackage'):
if pkg.get("name"):
- initial.add(pkg.get("name"))
+ initial.update(collection.packages_from_entry(pkg))
elif pkg.get("group"):
- try:
- if pkg.get("type"):
- gpkgs = collection.get_group(pkg.get("group"),
- ptype=pkg.get("type"))
- else:
- gpkgs = collection.get_group(pkg.get("group"))
- base.update(gpkgs)
- except TypeError:
- raise
- self.logger.error("Could not resolve group %s" %
- pkg.get("group"))
+ groups.append((pkg.get("group"),
+ pkg.get("type")))
to_remove.append(pkg)
else:
self.logger.error("Packages: Malformed Package: %s" %
- lxml.etree.tostring(pkg))
+ lxml.etree.tostring(pkg,
+ xml_declaration=False).decode('UTF-8'))
+
+ gpkgs = collection.get_groups(groups)
+ for group, pkgs in gpkgs.items():
+ base.update(pkgs)
+
base.update(initial | essential)
for el in to_remove:
el.getparent().remove(el)
@@ -179,16 +192,11 @@ class Packages(Bcfg2.Server.Plugin.Plugin,
if unknown:
self.logger.info("Packages: Got %d unknown entries" % len(unknown))
self.logger.info("Packages: %s" % list(unknown))
- newpkgs = list(packages.difference(initial))
+ newpkgs = collection.get_new_packages(initial, packages)
self.debug_log("Packages: %d initial, %d complete, %d new" %
(len(initial), len(packages), len(newpkgs)))
newpkgs.sort()
- for pkg in newpkgs:
- lxml.etree.SubElement(independent, 'BoundPackage', name=pkg,
- version=self.core.setup.cfp.get("packages",
- "version",
- default="auto"),
- type=collection.ptype, origin='Packages')
+ collection.packages_to_entry(newpkgs, independent)
def Refresh(self):
'''Packages.Refresh() => True|False\nReload configuration
@@ -271,10 +279,11 @@ class Packages(Bcfg2.Server.Plugin.Plugin,
collection = self._get_collection(metadata)
return dict(sources=collection.get_additional_data())
- def validate_goals(self, metadata, _):
- """ we abuse the GoalValidator plugin since validate_goals()
- is the very last thing called during a client config run. so
- we use this to clear the collection cache for this client,
- which must persist only the duration of a client run """
+ def end_client_run(self, metadata):
+ """ clear the collection cache for this client, which must
+ persist only the duration of a client run"""
if metadata.hostname in Collection.clients:
del Collection.clients[metadata.hostname]
+
+ def end_statistics(self, metadata):
+ self.end_client_run(metadata)
diff --git a/src/lib/Bcfg2/Server/Plugins/Pkgmgr.py b/src/lib/Bcfg2/Server/Plugins/Pkgmgr.py
index e9254cdcc..7dac907e1 100644
--- a/src/lib/Bcfg2/Server/Plugins/Pkgmgr.py
+++ b/src/lib/Bcfg2/Server/Plugins/Pkgmgr.py
@@ -1,12 +1,17 @@
'''This module implements a package management scheme for all images'''
-import logging
+import os
import re
+import glob
+import logging
+import lxml.etree
import Bcfg2.Server.Plugin
-import lxml
+import Bcfg2.Server.Lint
+
try:
set
except NameError:
+ # deprecated since python 2.6
from sets import Set as set
logger = logging.getLogger('Bcfg2.Plugins.Pkgmgr')
@@ -24,12 +29,14 @@ class FuzzyDict(dict):
print("got non-string key %s" % str(key))
return dict.__getitem__(self, key)
- def has_key(self, key):
+ def __contains__(self, key):
if isinstance(key, str):
mdata = self.fuzzy.match(key)
- if self.fuzzy.match(key):
- return dict.has_key(self, mdata.groupdict()['name'])
- return dict.has_key(self, key)
+ if mdata:
+ return dict.__contains__(self, mdata.groupdict()['name'])
+ else:
+ print("got non-string key %s" % str(key))
+ return dict.__contains__(self, key)
def get(self, key, default=None):
try:
@@ -167,3 +174,40 @@ class Pkgmgr(Bcfg2.Server.Plugin.PrioDir):
def HandleEntry(self, entry, metadata):
self.BindEntry(entry, metadata)
+
+
+class PkgmgrLint(Bcfg2.Server.Lint.ServerlessPlugin):
+ """ find duplicate Pkgmgr entries with the same priority """
+ def Run(self):
+ pset = set()
+ for pfile in glob.glob(os.path.join(self.config['repo'], 'Pkgmgr',
+ '*.xml')):
+ if self.HandlesFile(pfile):
+ xdata = lxml.etree.parse(pfile).getroot()
+ # get priority, type, group
+ priority = xdata.get('priority')
+ ptype = xdata.get('type')
+ for pkg in xdata.xpath("//Package"):
+ if pkg.getparent().tag == 'Group':
+ grp = pkg.getparent().get('name')
+                    pkggrp = pkg.getparent().getparent()
+                    if pkggrp.tag == 'Group':
+                        pgrp = pkggrp.get('name')
+ else:
+ pgrp = 'none'
+ else:
+ grp = 'none'
+ pgrp = 'none'
+ ptuple = (pkg.get('name'), priority, ptype, grp, pgrp)
+ # check if package is already listed with same
+ # priority, type, grp
+ if ptuple in pset:
+ self.LintError("duplicate-package",
+ "Duplicate Package %s, priority:%s, type:%s" %
+ (pkg.get('name'), priority, ptype))
+ else:
+ pset.add(ptuple)
+
+ @classmethod
+ def Errors(cls):
+        return {"duplicate-package":"error"}
diff --git a/src/lib/Bcfg2/Server/Plugins/Probes.py b/src/lib/Bcfg2/Server/Plugins/Probes.py
index af908eee8..7f300ebe0 100644
--- a/src/lib/Bcfg2/Server/Plugins/Probes.py
+++ b/src/lib/Bcfg2/Server/Plugins/Probes.py
@@ -1,7 +1,17 @@
+import re
+import os
+import sys
import time
-import lxml.etree
import operator
-import re
+import lxml.etree
+import Bcfg2.Server
+import Bcfg2.Server.Plugin
+
+try:
+ from django.db import models
+ has_django = True
+except ImportError:
+ has_django = False
try:
import json
@@ -14,23 +24,36 @@ except ImportError:
has_json = False
try:
- import syck
- has_syck = True
+ import syck as yaml
+ has_yaml = True
+    yaml_error = yaml.error
except ImportError:
- has_syck = False
try:
import yaml
+ yaml_error = yaml.YAMLError
has_yaml = True
except ImportError:
has_yaml = False
import Bcfg2.Server.Plugin
-specific_probe_matcher = re.compile("(.*/)?(?P<basename>\S+)(.(?P<mode>[GH](\d\d)?)_\S+)")
-probe_matcher = re.compile("(.*/)?(?P<basename>\S+)")
+if has_django:
+ class ProbesDataModel(models.Model,
+ Bcfg2.Server.Plugin.PluginDatabaseModel):
+ hostname = models.CharField(max_length=255)
+ probe = models.CharField(max_length=255)
+ timestamp = models.DateTimeField(auto_now=True)
+ data = models.TextField(null=True)
+
+ class ProbesGroupsModel(models.Model,
+ Bcfg2.Server.Plugin.PluginDatabaseModel):
+ hostname = models.CharField(max_length=255)
+ group = models.CharField(max_length=255)
+
class ClientProbeDataSet(dict):
- """ dict of probe => [probe data] that records a for each host """
+ """ dict of probe => [probe data] that records a timestamp for
+ each host """
def __init__(self, *args, **kwargs):
if "timestamp" in kwargs and kwargs['timestamp'] is not None:
self.timestamp = kwargs.pop("timestamp")
@@ -39,61 +62,31 @@ class ClientProbeDataSet(dict):
dict.__init__(self, *args, **kwargs)
-class ProbeData(object):
- """ a ProbeData object emulates a str object, but also has .xdata
- and .json properties to provide convenient ways to use ProbeData
- objects as XML or JSON data """
+class ProbeData(str):
+ """ a ProbeData object emulates a str object, but also has .xdata,
+ .json, and .yaml properties to provide convenient ways to use
+ ProbeData objects as XML, JSON, or YAML data """
+ def __new__(cls, data):
+ return str.__new__(cls, data)
+
def __init__(self, data):
- self.data = data
+ str.__init__(self)
self._xdata = None
self._json = None
self._yaml = None
- def __str__(self):
- return str(self.data)
-
- def __repr__(self):
- return repr(self.data)
-
- def __getattr__(self, name):
- """ make ProbeData act like a str object """
- return getattr(self.data, name)
-
- def __complex__(self):
- return complex(self.data)
-
- def __int__(self):
- return int(self.data)
-
- def __long__(self):
- return long(self.data)
-
- def __float__(self):
- return float(self.data)
-
- def __eq__(self, other):
- return str(self) == str(other)
-
- def __ne__(self, other):
- return str(self) != str(other)
-
- def __gt__(self, other):
- return str(self) > str(other)
-
- def __lt__(self, other):
- return str(self) < str(other)
-
- def __ge__(self, other):
- return self > other or self == other
-
- def __le__(self, other):
- return self < other or self == other
-
+ @property
+ def data(self):
+ """ provide backwards compatibility with broken ProbeData
+ object in bcfg2 1.2.0 thru 1.2.2 """
+ return str(self)
+
@property
def xdata(self):
if self._xdata is None:
try:
- self._xdata = lxml.etree.XML(self.data)
+ self._xdata = lxml.etree.XML(self.data,
+ parser=Bcfg2.Server.XMLParser)
except lxml.etree.XMLSyntaxError:
pass
return self._xdata
@@ -109,44 +102,30 @@ class ProbeData(object):
@property
def yaml(self):
- if self._yaml is None:
- if has_yaml:
- try:
- self._yaml = yaml.load(self.data)
- except yaml.YAMLError:
- pass
- elif has_syck:
- try:
- self._yaml = syck.load(self.data)
- except syck.error:
- pass
+ if self._yaml is None and has_yaml:
+ try:
+ self._yaml = yaml.load(self.data)
+ except yaml_error:
+ pass
return self._yaml
class ProbeSet(Bcfg2.Server.Plugin.EntrySet):
ignore = re.compile("^(\.#.*|.*~|\\..*\\.(tmp|sw[px])|probed\\.xml)$")
+ probename = re.compile("(.*/)?(?P<basename>\S+?)(\.(?P<mode>(?:G\d\d)|H)_\S+)?$")
+ bangline = re.compile('^#!\s*(?P<interpreter>.*)$')
+ basename_is_regex = True
def __init__(self, path, fam, encoding, plugin_name):
- fpattern = '[0-9A-Za-z_\-]+'
self.plugin_name = plugin_name
- Bcfg2.Server.Plugin.EntrySet.__init__(self, fpattern, path,
+ Bcfg2.Server.Plugin.EntrySet.__init__(self, '[0-9A-Za-z_\-]+', path,
Bcfg2.Server.Plugin.SpecificData,
encoding)
fam.AddMonitor(path, self)
- self.bangline = re.compile('^#!(?P<interpreter>.*)$')
def HandleEvent(self, event):
- if event.filename != self.path:
- if (event.code2str == 'changed' and
- event.filename.endswith("probed.xml") and
- event.filename not in self.entries):
- # for some reason, probed.xml is particularly prone to
- # getting changed events before created events,
- # because gamin is the worst ever. anyhow, we
- # specifically handle it here to avoid a warning on
- # every single server startup.
- self.entry_init(event)
- return
+ if (event.filename != self.path and
+ not event.filename.endswith("probed.xml")):
return self.handle_event(event)
def get_probe_data(self, metadata):
@@ -155,9 +134,7 @@ class ProbeSet(Bcfg2.Server.Plugin.EntrySet):
candidates = self.get_matching(metadata)
candidates.sort(key=operator.attrgetter('specific'))
for entry in candidates:
- rem = specific_probe_matcher.match(entry.name)
- if not rem:
- rem = probe_matcher.match(entry.name)
+ rem = self.probename.match(entry.name)
pname = rem.group('basename')
if pname not in build:
build[pname] = entry
@@ -176,30 +153,37 @@ class ProbeSet(Bcfg2.Server.Plugin.EntrySet):
return ret
-class Probes(Bcfg2.Server.Plugin.Plugin,
- Bcfg2.Server.Plugin.Probing,
- Bcfg2.Server.Plugin.Connector):
+class Probes(Bcfg2.Server.Plugin.Probing,
+ Bcfg2.Server.Plugin.Connector,
+ Bcfg2.Server.Plugin.DatabaseBacked):
"""A plugin to gather information from a client machine."""
name = 'Probes'
__author__ = 'bcfg-dev@mcs.anl.gov'
def __init__(self, core, datastore):
- Bcfg2.Server.Plugin.Plugin.__init__(self, core, datastore)
Bcfg2.Server.Plugin.Connector.__init__(self)
Bcfg2.Server.Plugin.Probing.__init__(self)
+ Bcfg2.Server.Plugin.DatabaseBacked.__init__(self, core, datastore)
try:
self.probes = ProbeSet(self.data, core.fam, core.encoding,
self.name)
except:
- raise Bcfg2.Server.Plugin.PluginInitError
+ err = sys.exc_info()[1]
+ raise Bcfg2.Server.Plugin.PluginInitError(err)
self.probedata = dict()
self.cgroups = dict()
self.load_data()
- def write_data(self):
+ def write_data(self, client):
"""Write probe data out for use with bcfg2-info."""
+ if self._use_db:
+ return self._write_data_db(client)
+ else:
+ return self._write_data_xml(client)
+
+ def _write_data_xml(self, _):
top = lxml.etree.Element("Probed")
for client, probed in sorted(self.probedata.items()):
cx = lxml.etree.SubElement(top, 'Client', name=client,
@@ -209,20 +193,47 @@ class Probes(Bcfg2.Server.Plugin.Plugin,
value=str(self.probedata[client][probe]))
for group in sorted(self.cgroups[client]):
lxml.etree.SubElement(cx, "Group", name=group)
- data = lxml.etree.tostring(top, encoding='UTF-8',
- xml_declaration=True,
- pretty_print='true')
try:
- datafile = open("%s/%s" % (self.data, 'probed.xml'), 'w')
+ datafile = open(os.path.join(self.data, 'probed.xml'), 'w')
+ datafile.write(lxml.etree.tostring(top, xml_declaration=False,
+ pretty_print='true').decode('UTF-8'))
except IOError:
- self.logger.error("Failed to write probed.xml")
- datafile.write(data.decode('utf-8'))
+ err = sys.exc_info()[1]
+ self.logger.error("Failed to write probed.xml: %s" % err)
+
+ def _write_data_db(self, client):
+ for probe, data in self.probedata[client.hostname].items():
+ pdata = \
+ ProbesDataModel.objects.get_or_create(hostname=client.hostname,
+ probe=probe)[0]
+ if pdata.data != data:
+ pdata.data = data
+ pdata.save()
+ ProbesDataModel.objects.filter(hostname=client.hostname).exclude(probe__in=self.probedata[client.hostname]).delete()
+
+ for group in self.cgroups[client.hostname]:
+ try:
+ ProbesGroupsModel.objects.get(hostname=client.hostname,
+ group=group)
+ except ProbesGroupsModel.DoesNotExist:
+ grp = ProbesGroupsModel(hostname=client.hostname,
+ group=group)
+ grp.save()
+ ProbesGroupsModel.objects.filter(hostname=client.hostname).exclude(group__in=self.cgroups[client.hostname]).delete()
def load_data(self):
+ if self._use_db:
+ return self._load_data_db()
+ else:
+ return self._load_data_xml()
+
+ def _load_data_xml(self):
try:
- data = lxml.etree.parse(self.data + '/probed.xml').getroot()
+ data = lxml.etree.parse(os.path.join(self.data, 'probed.xml'),
+ parser=Bcfg2.Server.XMLParser).getroot()
except:
- self.logger.error("Failed to read file probed.xml")
+ err = sys.exc_info()[1]
+ self.logger.error("Failed to read file probed.xml: %s" % err)
return
self.probedata = {}
self.cgroups = {}
@@ -231,12 +242,25 @@ class Probes(Bcfg2.Server.Plugin.Plugin,
ClientProbeDataSet(timestamp=client.get("timestamp"))
self.cgroups[client.get('name')] = []
for pdata in client:
- if (pdata.tag == 'Probe'):
+ if pdata.tag == 'Probe':
self.probedata[client.get('name')][pdata.get('name')] = \
- ProbeData(pdata.get('value'))
- elif (pdata.tag == 'Group'):
+ ProbeData(pdata.get("value"))
+ elif pdata.tag == 'Group':
self.cgroups[client.get('name')].append(pdata.get('name'))
+ def _load_data_db(self):
+ self.probedata = {}
+ self.cgroups = {}
+ for pdata in ProbesDataModel.objects.all():
+ if pdata.hostname not in self.probedata:
+ self.probedata[pdata.hostname] = \
+ ClientProbeDataSet(timestamp=time.mktime(pdata.timestamp.timetuple()))
+ self.probedata[pdata.hostname][pdata.probe] = ProbeData(pdata.data)
+ for pgroup in ProbesGroupsModel.objects.all():
+ if pgroup.hostname not in self.cgroups:
+ self.cgroups[pgroup.hostname] = []
+ self.cgroups[pgroup.hostname].append(pgroup.group)
+
def GetProbes(self, meta, force=False):
"""Return a set of probes for execution on client."""
return self.probes.get_probe_data(meta)
@@ -246,25 +270,24 @@ class Probes(Bcfg2.Server.Plugin.Plugin,
self.probedata[client.hostname] = ClientProbeDataSet()
for data in datalist:
self.ReceiveDataItem(client, data)
- self.write_data()
+ self.write_data(client)
def ReceiveDataItem(self, client, data):
"""Receive probe results pertaining to client."""
if client.hostname not in self.cgroups:
self.cgroups[client.hostname] = []
+ if client.hostname not in self.probedata:
+ self.probedata[client.hostname] = ClientProbeDataSet()
if data.text == None:
- self.logger.error("Got null response to probe %s from %s" % \
- (data.get('name'), client.hostname))
- try:
- self.probedata[client.hostname].update({data.get('name'):
+ self.logger.info("Got null response to probe %s from %s" %
+ (data.get('name'), client.hostname))
+ self.probedata[client.hostname].update({data.get('name'):
ProbeData('')})
- except KeyError:
- self.probedata[client.hostname] = \
- ClientProbeDataSet([(data.get('name'), ProbeData(''))])
return
dlines = data.text.split('\n')
- self.logger.debug("%s:probe:%s:%s" % (client.hostname,
- data.get('name'), [line.strip() for line in dlines]))
+ self.logger.debug("%s:probe:%s:%s" %
+ (client.hostname, data.get('name'),
+ [line.strip() for line in dlines]))
for line in dlines[:]:
if line.split(':')[0] == 'group':
newgroup = line.split(':')[1].strip()
@@ -272,11 +295,7 @@ class Probes(Bcfg2.Server.Plugin.Plugin,
self.cgroups[client.hostname].append(newgroup)
dlines.remove(line)
dobj = ProbeData("\n".join(dlines))
- try:
- self.probedata[client.hostname].update({data.get('name'): dobj})
- except KeyError:
- self.probedata[client.hostname] = \
- ClientProbeDataSet([(data.get('name'), dobj)])
+ self.probedata[client.hostname].update({data.get('name'): dobj})
def get_additional_groups(self, meta):
return self.cgroups.get(meta.hostname, list())
diff --git a/src/lib/Bcfg2/Server/Plugins/Properties.py b/src/lib/Bcfg2/Server/Plugins/Properties.py
index 680881858..78019933a 100644
--- a/src/lib/Bcfg2/Server/Plugins/Properties.py
+++ b/src/lib/Bcfg2/Server/Plugins/Properties.py
@@ -5,26 +5,53 @@ import copy
import logging
import lxml.etree
import Bcfg2.Server.Plugin
+try:
+ from Bcfg2.Encryption import ssl_decrypt, EVPError
+ have_crypto = True
+except ImportError:
+ have_crypto = False
+
+logger = logging.getLogger(__name__)
+
+SETUP = None
+
+def passphrases():
+ section = "encryption"
+ if SETUP.cfp.has_section(section):
+ return dict([(o, SETUP.cfp.get(section, o))
+ for o in SETUP.cfp.options(section)])
+ else:
+ return dict()
-logger = logging.getLogger('Bcfg2.Plugins.Properties')
class PropertyFile(Bcfg2.Server.Plugin.StructFile):
"""Class for properties files."""
def write(self):
""" Write the data in this data structure back to the property
file """
- if self.validate_data():
- try:
- open(self.name,
- "wb").write(lxml.etree.tostring(self.xdata,
- pretty_print=True))
- return True
- except IOError:
- err = sys.exc_info()[1]
- logger.error("Failed to write %s: %s" % (self.name, err))
- return False
- else:
- return False
+ if not SETUP.cfp.getboolean("properties", "writes_enabled",
+ default=True):
+ msg = "Properties files write-back is disabled in the configuration"
+ logger.error(msg)
+ raise Bcfg2.Server.Plugin.PluginExecutionError(msg)
+ try:
+ self.validate_data()
+ except Bcfg2.Server.Plugin.PluginExecutionError:
+ msg = "Cannot write %s: %s" % (self.name, sys.exc_info()[1])
+ logger.error(msg)
+ raise Bcfg2.Server.Plugin.PluginExecutionError(msg)
+
+ try:
+ open(self.name,
+ "wb").write(lxml.etree.tostring(self.xdata,
+ xml_declaration=False,
+ pretty_print=True).decode('UTF-8'))
+ return True
+ except IOError:
+ err = sys.exc_info()[1]
+ msg = "Failed to write %s: %s" % (self.name, err)
+ logger.error(msg)
+ raise Bcfg2.Server.Plugin.PluginExecutionError(msg)
def validate_data(self):
""" ensure that the data in this object validates against the
@@ -34,19 +61,51 @@ class PropertyFile(Bcfg2.Server.Plugin.StructFile):
try:
schema = lxml.etree.XMLSchema(file=schemafile)
except:
- logger.error("Failed to process schema for %s" % self.name)
- return False
+ err = sys.exc_info()[1]
+ raise Bcfg2.Server.Plugin.PluginExecutionError("Failed to process schema for %s: %s" % (self.name, err))
else:
# no schema exists
return True
if not schema.validate(self.xdata):
- logger.error("Data for %s fails to validate; run bcfg2-lint for "
- "more details" % self.name)
- return False
+ raise Bcfg2.Server.Plugin.PluginExecutionError("Data for %s fails to validate; run bcfg2-lint for more details" % self.name)
else:
return True
+ def Index(self):
+ Bcfg2.Server.Plugin.StructFile.Index(self)
+ if self.xdata.get("encryption", "false").lower() != "false":
+ if not have_crypto:
+ msg = "Properties: M2Crypto is not available: %s" % self.name
+ logger.error(msg)
+ raise Bcfg2.Server.Plugin.PluginExecutionError(msg)
+ for el in self.xdata.xpath("*[@encrypted]"):
+ try:
+ el.text = self._decrypt(el)
+ except EVPError:
+ msg = "Failed to decrypt %s element in %s" % (el.tag,
+ self.name)
+ logger.error(msg)
+                    raise Bcfg2.Server.Plugin.PluginExecutionError(msg)
+
+ def _decrypt(self, element):
+ if not element.text.strip():
+ return
+ passes = passphrases()
+ try:
+ passphrase = passes[element.get("encrypted")]
+ try:
+ return ssl_decrypt(element.text, passphrase)
+ except EVPError:
+ # error is raised below
+ pass
+ except KeyError:
+ for passwd in passes.values():
+ try:
+ return ssl_decrypt(element.text, passwd)
+ except EVPError:
+ pass
+ raise EVPError("Failed to decrypt")
class PropDirectoryBacked(Bcfg2.Server.Plugin.DirectoryBacked):
__child__ = PropertyFile
@@ -62,6 +121,7 @@ class Properties(Bcfg2.Server.Plugin.Plugin,
name = 'Properties'
def __init__(self, core, datastore):
+ global SETUP
Bcfg2.Server.Plugin.Plugin.__init__(self, core, datastore)
Bcfg2.Server.Plugin.Connector.__init__(self)
try:
@@ -72,5 +132,16 @@ class Properties(Bcfg2.Server.Plugin.Plugin,
(e.strerror, e.filename))
raise Bcfg2.Server.Plugin.PluginInitError
- def get_additional_data(self, _):
- return copy.copy(self.store.entries)
+ SETUP = core.setup
+
+ def get_additional_data(self, metadata):
+ autowatch = self.core.setup.cfp.getboolean("properties", "automatch",
+ default=False)
+ rv = dict()
+ for fname, pfile in self.store.entries.items():
+ if (autowatch or
+ pfile.xdata.get("automatch", "false").lower() == "true"):
+ rv[fname] = pfile.XMLMatch(metadata)
+ else:
+ rv[fname] = copy.copy(pfile)
+ return rv
diff --git a/src/lib/Bcfg2/Server/Plugins/PuppetENC.py b/src/lib/Bcfg2/Server/Plugins/PuppetENC.py
new file mode 100644
index 000000000..46182e9a2
--- /dev/null
+++ b/src/lib/Bcfg2/Server/Plugins/PuppetENC.py
@@ -0,0 +1,118 @@
+import os
+import sys
+import Bcfg2.Server
+import Bcfg2.Server.Plugin
+from subprocess import Popen, PIPE
+
+try:
+ from syck import load as yaml_load, error as yaml_error
+except ImportError:
+ try:
+ from yaml import load as yaml_load, YAMLError as yaml_error
+ except ImportError:
+ raise ImportError("No yaml library could be found")
+
+class PuppetENCFile(Bcfg2.Server.Plugin.FileBacked):
+ def HandleEvent(self, event=None):
+ return
+
+
+class PuppetENC(Bcfg2.Server.Plugin.Plugin,
+ Bcfg2.Server.Plugin.Connector,
+ Bcfg2.Server.Plugin.ClientRunHooks,
+ Bcfg2.Server.Plugin.DirectoryBacked):
+ """ A plugin to run Puppet external node classifiers
+ (http://docs.puppetlabs.com/guides/external_nodes.html) """
+ name = 'PuppetENC'
+ experimental = True
+ __child__ = PuppetENCFile
+
+ def __init__(self, core, datastore):
+ Bcfg2.Server.Plugin.Plugin.__init__(self, core, datastore)
+ Bcfg2.Server.Plugin.Connector.__init__(self)
+ Bcfg2.Server.Plugin.ClientRunHooks.__init__(self)
+ Bcfg2.Server.Plugin.DirectoryBacked.__init__(self, self.data,
+ self.core.fam)
+ self.cache = dict()
+
+ def _run_encs(self, metadata):
+ cache = dict(groups=[], params=dict())
+ for enc in self.entries.keys():
+ epath = os.path.join(self.data, enc)
+ self.debug_log("PuppetENC: Running ENC %s for %s" %
+ (enc, metadata.hostname))
+ proc = Popen([epath, metadata.hostname], stdin=PIPE, stdout=PIPE,
+ stderr=PIPE)
+ (out, err) = proc.communicate()
+ rv = proc.wait()
+ if rv != 0:
+                msg = "PuppetENC: Error running ENC %s for %s (%s)" % \
+                    (enc, metadata.hostname, rv)
+ self.logger.error("%s: %s" % (msg, err))
+ raise Bcfg2.Server.Plugin.PluginExecutionError(msg)
+ if err:
+ self.debug_log("ENC Error: %s" % err)
+
+ try:
+ yaml = yaml_load(out)
+ self.debug_log("Loaded data from %s for %s: %s" %
+ (enc, metadata.hostname, yaml))
+ except yaml_error:
+ err = sys.exc_info()[1]
+ msg = "Error decoding YAML from %s for %s: %s" % \
+ (enc, metadata.hostname, err)
+ self.logger.error(msg)
+ raise Bcfg2.Server.Plugin.PluginExecutionError(msg)
+
+ groups = []
+ if "classes" in yaml:
+ # stock Puppet ENC output format
+ groups = yaml['classes']
+ elif "groups" in yaml:
+ # more Bcfg2-ish output format
+ groups = yaml['groups']
+ if groups:
+ if isinstance(groups, list):
+ self.debug_log("ENC %s adding groups to %s: %s" %
+ (enc, metadata.hostname, groups))
+ cache['groups'].extend(groups)
+ else:
+ self.debug_log("ENC %s adding groups to %s: %s" %
+ (enc, metadata.hostname, groups.keys()))
+ for group, params in groups.items():
+ cache['groups'].append(group)
+ if params:
+ cache['params'].update(params)
+ if "parameters" in yaml and yaml['parameters']:
+ cache['params'].update(yaml['parameters'])
+ if "environment" in yaml:
+ self.logger.info("Ignoring unsupported environment section of "
+ "ENC %s for %s" % (enc, metadata.hostname))
+
+ self.cache[metadata.hostname] = cache
+
+ def get_additional_groups(self, metadata):
+ if metadata.hostname not in self.cache:
+ self._run_encs(metadata)
+ return self.cache[metadata.hostname]['groups']
+
+ def get_additional_data(self, metadata):
+ if metadata.hostname not in self.cache:
+ self._run_encs(metadata)
+ return self.cache[metadata.hostname]['params']
+
+ def end_client_run(self, metadata):
+ """ clear the entire cache at the end of each client run. this
+ guarantees that each client will run all ENCs at or near the
+ start of each run; we have to clear the entire cache instead
+ of just the cache for this client because a client that builds
+ templates that use metadata for other clients will populate
+ the cache for those clients, which we don't want. This makes
+ the caching less than stellar, but it does prevent multiple
+ runs of ENCs for a single host a) for groups and data
+ separately; and b) when a single client's metadata is
+ generated multiple times by separate templates """
+ self.cache = dict()
+
+ def end_statistics(self, metadata):
+        self.end_client_run(metadata)
diff --git a/src/lib/Bcfg2/Server/Plugins/SEModules.py b/src/lib/Bcfg2/Server/Plugins/SEModules.py
new file mode 100644
index 000000000..62b3fb10a
--- /dev/null
+++ b/src/lib/Bcfg2/Server/Plugins/SEModules.py
@@ -0,0 +1,45 @@
+import os
+import logging
+import Bcfg2.Server.Plugin
+from Bcfg2.Bcfg2Py3k import b64encode
+
+logger = logging.getLogger(__name__)
+
+class SEModuleData(Bcfg2.Server.Plugin.SpecificData):
+ def bind_entry(self, entry, _):
+ entry.set('encoding', 'base64')
+ entry.text = b64encode(self.data)
+
+
+class SEModules(Bcfg2.Server.Plugin.GroupSpool):
+ """ Handle SELinux 'module' entries """
+ name = 'SEModules'
+ __author__ = 'chris.a.st.pierre@gmail.com'
+ es_child_cls = SEModuleData
+ entry_type = 'SELinux'
+ experimental = True
+
+ def _get_module_name(self, entry):
+ """ GroupSpool stores entries as /foo.pp, but we want people
+ to be able to specify module entries as name='foo' or
+ name='foo.pp', so we put this abstraction in between """
+ if entry.get("name").endswith(".pp"):
+ name = entry.get("name")
+ else:
+ name = entry.get("name") + ".pp"
+ return "/" + name
+
+ def HandlesEntry(self, entry, metadata):
+ if entry.tag in self.Entries and entry.get('type') == 'module':
+ return self._get_module_name(entry) in self.Entries[entry.tag]
+ return Bcfg2.Server.Plugin.GroupSpool.HandlesEntry(self, entry,
+ metadata)
+
+ def HandleEntry(self, entry, metadata):
+ entry.set("name", self._get_module_name(entry))
+ return self.Entries[entry.tag][entry.get("name")](entry, metadata)
+
+ def add_entry(self, event):
+ self.filename_pattern = \
+ os.path.basename(os.path.dirname(self.event_path(event)))
+ Bcfg2.Server.Plugin.GroupSpool.add_entry(self, event)
diff --git a/src/lib/Bcfg2/Server/Plugins/SGenshi.py b/src/lib/Bcfg2/Server/Plugins/SGenshi.py
deleted file mode 100644
index 0ba08125e..000000000
--- a/src/lib/Bcfg2/Server/Plugins/SGenshi.py
+++ /dev/null
@@ -1,97 +0,0 @@
-'''This module implements a templating generator based on Genshi'''
-
-import genshi.input
-import genshi.template
-import lxml.etree
-import logging
-import copy
-import sys
-import os.path
-
-import Bcfg2.Server.Plugin
-import Bcfg2.Server.Plugins.TGenshi
-
-logger = logging.getLogger('Bcfg2.Plugins.SGenshi')
-
-
-class SGenshiTemplateFile(Bcfg2.Server.Plugins.TGenshi.TemplateFile,
- Bcfg2.Server.Plugin.StructFile):
- def __init__(self, name, specific, encoding):
- Bcfg2.Server.Plugins.TGenshi.TemplateFile.__init__(self, name,
- specific, encoding)
- Bcfg2.Server.Plugin.StructFile.__init__(self, name)
-
- def get_xml_value(self, metadata):
- if not hasattr(self, 'template'):
- logger.error("No parsed template information for %s" % (self.name))
- raise Bcfg2.Server.Plugin.PluginExecutionError
- try:
- stream = self.template.generate(metadata=metadata).filter( \
- Bcfg2.Server.Plugins.TGenshi.removecomment)
- data = lxml.etree.XML(stream.render('xml', strip_whitespace=False))
- bundlename = os.path.splitext(os.path.basename(self.name))[0]
- bundle = lxml.etree.Element('Bundle', name=bundlename)
- for item in self.Match(metadata, data):
- bundle.append(copy.deepcopy(item))
- return bundle
- except LookupError:
- lerror = sys.exc_info()[1]
- logger.error('Genshi lookup error: %s' % lerror)
- except genshi.template.TemplateError:
- terror = sys.exc_info()[1]
- logger.error('Genshi template error: %s' % terror)
- raise
- except genshi.input.ParseError:
- perror = sys.exc_info()[1]
- logger.error('Genshi parse error: %s' % perror)
- raise
-
- def Match(self, metadata, xdata):
- """Return matching fragments of parsed template."""
- rv = []
- for child in xdata.getchildren():
- rv.extend(self._match(child, metadata))
- logger.debug("File %s got %d match(es)" % (self.name, len(rv)))
- return rv
-
-class SGenshiEntrySet(Bcfg2.Server.Plugin.EntrySet):
-
- def __init__(self, path, fam, encoding):
- fpattern = '\S+\.xml'
- Bcfg2.Server.Plugin.EntrySet.__init__(self, fpattern, path,
- SGenshiTemplateFile, encoding)
- fam.AddMonitor(path, self)
-
- def HandleEvent(self, event):
- '''passthrough event handler for old calling convention'''
- if event.filename != self.path:
- return self.handle_event(event)
-
- def BuildStructures(self, metadata):
- """Build SGenshi structures."""
- ret = []
- for entry in self.get_matching(metadata):
- try:
- ret.append(entry.get_xml_value(metadata))
- except:
- logger.error("SGenshi: Failed to template file %s" % entry.name)
- return ret
-
-
-class SGenshi(SGenshiEntrySet,
- Bcfg2.Server.Plugin.Plugin,
- Bcfg2.Server.Plugin.Structure):
- """The SGenshi plugin provides templated structures."""
- name = 'SGenshi'
- __author__ = 'bcfg-dev@mcs.anl.gov'
- deprecated = True
-
- def __init__(self, core, datastore):
- Bcfg2.Server.Plugin.Plugin.__init__(self, core, datastore)
- Bcfg2.Server.Plugin.Structure.__init__(self)
- try:
- SGenshiEntrySet.__init__(self, self.data, self.core.fam, core.encoding)
- except:
- logger.error("Failed to load %s repository; disabling %s" \
- % (self.name, self.name))
- raise Bcfg2.Server.Plugin.PluginInitError
diff --git a/src/lib/Bcfg2/Server/Plugins/SSHbase.py b/src/lib/Bcfg2/Server/Plugins/SSHbase.py
index a1a29727f..cbe8d0d9b 100644
--- a/src/lib/Bcfg2/Server/Plugins/SSHbase.py
+++ b/src/lib/Bcfg2/Server/Plugins/SSHbase.py
@@ -1,20 +1,16 @@
"""This module manages ssh key files for bcfg2"""
-import binascii
import re
import os
+import sys
import socket
import shutil
-import sys
+import logging
import tempfile
from subprocess import Popen, PIPE
import Bcfg2.Server.Plugin
-from Bcfg2.Bcfg2Py3k import u_str
+from Bcfg2.Bcfg2Py3k import u_str, reduce, b64encode
-if sys.hexversion >= 0x03000000:
- from functools import reduce
-
-import logging
logger = logging.getLogger(__name__)
class KeyData(Bcfg2.Server.Plugin.SpecificData):
@@ -31,7 +27,7 @@ class KeyData(Bcfg2.Server.Plugin.SpecificData):
def bind_entry(self, entry, metadata):
entry.set('type', 'file')
if entry.get('encoding') == 'base64':
- entry.text = binascii.b2a_base64(self.data)
+ entry.text = b64encode(self.data)
else:
try:
entry.text = u_str(self.data, self.encoding)
diff --git a/src/lib/Bcfg2/Server/Plugins/SSLCA.py b/src/lib/Bcfg2/Server/Plugins/SSLCA.py
index 0072dc62d..9d1c51a08 100644
--- a/src/lib/Bcfg2/Server/Plugins/SSLCA.py
+++ b/src/lib/Bcfg2/Server/Plugins/SSLCA.py
@@ -3,12 +3,15 @@ import Bcfg2.Options
import lxml.etree
import posixpath
import tempfile
-import pipes
import os
from subprocess import Popen, PIPE, STDOUT
# Compatibility import
from Bcfg2.Bcfg2Py3k import ConfigParser
+try:
+ from hashlib import md5
+except ImportError:
+ from md5 import md5
class SSLCA(Bcfg2.Server.Plugin.GroupSpool):
"""
@@ -22,6 +25,10 @@ class SSLCA(Bcfg2.Server.Plugin.GroupSpool):
cert_specs = {}
CAs = {}
+ def __init__(self, core, datastore):
+ Bcfg2.Server.Plugin.GroupSpool.__init__(self, core, datastore)
+ self.infoxml = dict()
+
def HandleEvent(self, event=None):
"""
Updates which files this plugin handles based upon filesystem events.
@@ -37,19 +44,21 @@ class SSLCA(Bcfg2.Server.Plugin.GroupSpool):
else:
ident = self.handles[event.requestID][:-1]
- fname = "".join([ident, '/', event.filename])
+ fname = os.path.join(ident, event.filename)
if event.filename.endswith('.xml'):
if action in ['exists', 'created', 'changed']:
if event.filename.endswith('key.xml'):
- key_spec = dict(list(lxml.etree.parse(epath).find('Key').items()))
+ key_spec = dict(list(lxml.etree.parse(epath,
+ parser=Bcfg2.Server.XMLParser).find('Key').items()))
self.key_specs[ident] = {
'bits': key_spec.get('bits', 2048),
'type': key_spec.get('type', 'rsa')
}
self.Entries['Path'][ident] = self.get_key
elif event.filename.endswith('cert.xml'):
- cert_spec = dict(list(lxml.etree.parse(epath).find('Cert').items()))
+ cert_spec = dict(list(lxml.etree.parse(epath,
+ parser=Bcfg2.Server.XMLParser).find('Cert').items()))
ca = cert_spec.get('ca', 'default')
self.cert_specs[ident] = {
'ca': ca,
@@ -67,6 +76,9 @@ class SSLCA(Bcfg2.Server.Plugin.GroupSpool):
cp.read(self.core.cfile)
self.CAs[ca] = dict(cp.items('sslca_' + ca))
self.Entries['Path'][ident] = self.get_cert
+ elif event.filename.endswith("info.xml"):
+ self.infoxml[ident] = Bcfg2.Server.Plugin.InfoXML(epath)
+ self.infoxml[ident].HandleEvent(event)
if action == 'deleted':
if ident in self.Entries['Path']:
del self.Entries['Path'][ident]
@@ -90,28 +102,27 @@ class SSLCA(Bcfg2.Server.Plugin.GroupSpool):
either grabs a prexisting key hostfile, or triggers the generation
of a new key if one doesn't exist.
"""
- # set path type and permissions, otherwise bcfg2 won't bind the file
- permdata = {'owner': 'root',
- 'group': 'root',
- 'type': 'file',
- 'perms': '644'}
- [entry.attrib.__setitem__(key, permdata[key]) for key in permdata]
-
# check if we already have a hostfile, or need to generate a new key
# TODO: verify key fits the specs
path = entry.get('name')
- filename = "".join([path, '/', path.rsplit('/', 1)[1],
- '.H_', metadata.hostname])
+ filename = os.path.join(path, "%s.H_%s" % (os.path.basename(path),
+ metadata.hostname))
if filename not in list(self.entries.keys()):
key = self.build_key(filename, entry, metadata)
open(self.data + filename, 'w').write(key)
entry.text = key
- self.entries[filename] = self.__child__("%s%s" % (self.data,
- filename))
+ self.entries[filename] = self.__child__(self.data + filename)
self.entries[filename].HandleEvent()
else:
entry.text = self.entries[filename].data
+ entry.set("type", "file")
+ if path in self.infoxml:
+ Bcfg2.Server.Plugin.bind_info(entry, metadata,
+ infoxml=self.infoxml[path])
+ else:
+ Bcfg2.Server.Plugin.bind_info(entry, metadata)
+
def build_key(self, filename, entry, metadata):
"""
generates a new key according the the specification
@@ -130,56 +141,61 @@ class SSLCA(Bcfg2.Server.Plugin.GroupSpool):
either grabs a prexisting cert hostfile, or triggers the generation
of a new cert if one doesn't exist.
"""
- # set path type and permissions, otherwise bcfg2 won't bind the file
- permdata = {'owner': 'root',
- 'group': 'root',
- 'type': 'file',
- 'perms': '644'}
- [entry.attrib.__setitem__(key, permdata[key]) for key in permdata]
-
path = entry.get('name')
- filename = "".join([path, '/', path.rsplit('/', 1)[1],
- '.H_', metadata.hostname])
+ filename = os.path.join(path, "%s.H_%s" % (os.path.basename(path),
+ metadata.hostname))
# first - ensure we have a key to work with
key = self.cert_specs[entry.get('name')].get('key')
- key_filename = "".join([key, '/', key.rsplit('/', 1)[1],
- '.H_', metadata.hostname])
+ key_filename = os.path.join(key, "%s.H_%s" % (os.path.basename(key),
+ metadata.hostname))
if key_filename not in self.entries:
e = lxml.etree.Element('Path')
- e.attrib['name'] = key
+ e.set('name', key)
self.core.Bind(e, metadata)
# check if we have a valid hostfile
- if filename in list(self.entries.keys()) and self.verify_cert(filename,
- key_filename,
- entry):
+ if (filename in list(self.entries.keys()) and
+ self.verify_cert(filename, key_filename, entry)):
entry.text = self.entries[filename].data
else:
cert = self.build_cert(key_filename, entry, metadata)
open(self.data + filename, 'w').write(cert)
- self.entries[filename] = self.__child__("%s%s" % (self.data,
- filename))
+ self.entries[filename] = self.__child__(self.data + filename)
self.entries[filename].HandleEvent()
entry.text = cert
+ entry.set("type", "file")
+ if path in self.infoxml:
+ Bcfg2.Server.Plugin.bind_info(entry, metadata,
+ infoxml=self.infoxml[path])
+ else:
+ Bcfg2.Server.Plugin.bind_info(entry, metadata)
+
def verify_cert(self, filename, key_filename, entry):
- if self.verify_cert_against_ca(filename, entry):
- if self.verify_cert_against_key(filename, key_filename):
- return True
- return False
+ do_verify = self.CAs[self.cert_specs[entry.get('name')]['ca']].get('verify_certs', True)
+ if do_verify:
+ return (self.verify_cert_against_ca(filename, entry) and
+ self.verify_cert_against_key(filename, key_filename))
+ return True
def verify_cert_against_ca(self, filename, entry):
"""
check that a certificate validates against the ca cert,
and that it has not expired.
"""
- chaincert = self.CAs[self.cert_specs[entry.get('name')]['ca']].get('chaincert')
+ chaincert = \
+ self.CAs[self.cert_specs[entry.get('name')]['ca']].get('chaincert')
cert = self.data + filename
- res = Popen(["openssl", "verify", "-CAfile", chaincert, cert],
+ res = Popen(["openssl", "verify", "-untrusted", chaincert, "-purpose",
+ "sslserver", cert],
stdout=PIPE, stderr=STDOUT).stdout.read()
if res == cert + ": OK\n":
+ self.debug_log("SSLCA: %s verified successfully against CA" %
+ entry.get("name"))
return True
+ self.logger.warning("SSLCA: %s failed verification against CA: %s" %
+ (entry.get("name"), res))
return False
def verify_cert_against_key(self, filename, key_filename):
@@ -188,14 +204,20 @@ class SSLCA(Bcfg2.Server.Plugin.GroupSpool):
"""
cert = self.data + filename
key = self.data + key_filename
- cmd = ("openssl x509 -noout -modulus -in %s | openssl md5" %
- pipes.quote(cert))
- cert_md5 = Popen(cmd, shell=True, stdout=PIPE, stderr=STDOUT).stdout.read()
- cmd = ("openssl rsa -noout -modulus -in %s | openssl md5" %
- pipes.quote(key))
- key_md5 = Popen(cmd, shell=True, stdout=PIPE, stderr=STDOUT).stdout.read()
+ cert_md5 = \
+ md5(Popen(["openssl", "x509", "-noout", "-modulus", "-in", cert],
+ stdout=PIPE,
+ stderr=STDOUT).stdout.read().strip()).hexdigest()
+ key_md5 = \
+ md5(Popen(["openssl", "rsa", "-noout", "-modulus", "-in", key],
+ stdout=PIPE,
+ stderr=STDOUT).stdout.read().strip()).hexdigest()
if cert_md5 == key_md5:
+ self.debug_log("SSLCA: %s verified successfully against key %s" %
+ (filename, key_filename))
return True
+ self.logger.warning("SSLCA: %s failed verification against key %s" %
+ (filename, key_filename))
return False
def build_cert(self, key_filename, entry, metadata):
diff --git a/src/lib/Bcfg2/Server/Plugins/ServiceCompat.py b/src/lib/Bcfg2/Server/Plugins/ServiceCompat.py
new file mode 100644
index 000000000..aad92b7c7
--- /dev/null
+++ b/src/lib/Bcfg2/Server/Plugins/ServiceCompat.py
@@ -0,0 +1,32 @@
+import Bcfg2.Server.Plugin
+
+class ServiceCompat(Bcfg2.Server.Plugin.Plugin,
+ Bcfg2.Server.Plugin.StructureValidator):
+ """ Use old-style service modes for older clients """
+ name = 'ServiceCompat'
+ __author__ = 'bcfg-dev@mcs.anl.gov'
+ mode_map = {('true', 'true'): 'default',
+ ('interactive', 'true'): 'interactive_only',
+ ('false', 'false'): 'manual'}
+
+ def validate_structures(self, metadata, structures):
+ """ Apply defaults """
+ if metadata.version_info and metadata.version_info > (1, 3, 0, '', 0):
+ # do not care about a client that is _any_ 1.3.0 release
+ # (including prereleases and RCs)
+ return
+
+ for struct in structures:
+ for entry in struct.xpath("//BoundService|//Service"):
+ mode_key = (entry.get("restart", "true").lower(),
+ entry.get("install", "true").lower())
+ try:
+ mode = self.mode_map[mode_key]
+ except KeyError:
+ self.logger.info("Could not map restart and install "
+ "settings of %s:%s to an old-style "
+ "Service mode for %s; using 'manual'" %
+ (entry.tag, entry.get("name"),
+ metadata.hostname))
+ mode = "manual"
+ entry.set("mode", mode)
diff --git a/src/lib/Bcfg2/Server/Plugins/Snapshots.py b/src/lib/Bcfg2/Server/Plugins/Snapshots.py
index aeb3b9f74..e62638b4f 100644
--- a/src/lib/Bcfg2/Server/Plugins/Snapshots.py
+++ b/src/lib/Bcfg2/Server/Plugins/Snapshots.py
@@ -1,9 +1,5 @@
-#import lxml.etree
import logging
-import binascii
import difflib
-#import sqlalchemy
-#import sqlalchemy.orm
import Bcfg2.Server.Plugin
import Bcfg2.Server.Snapshots
import Bcfg2.Logger
@@ -13,7 +9,7 @@ import time
import threading
# Compatibility import
-from Bcfg2.Bcfg2Py3k import Queue
+from Bcfg2.Bcfg2Py3k import Queue, u_str, b64decode
logger = logging.getLogger('Snapshots')
@@ -28,14 +24,6 @@ datafields = {
}
-# py3k compatibility
-def u_str(string):
- if sys.hexversion >= 0x03000000:
- return string
- else:
- return unicode(string)
-
-
def build_snap_ent(entry):
basefields = []
if entry.tag in ['Package', 'Service']:
@@ -52,13 +40,12 @@ def build_snap_ent(entry):
if entry.get('encoding', 'ascii') == 'ascii':
desired['contents'] = u_str(entry.text)
else:
- desired['contents'] = u_str(binascii.a2b_base64(entry.text))
+ desired['contents'] = u_str(b64decode(entry.text))
if 'current_bfile' in entry.attrib:
- state['contents'] = u_str(binascii.a2b_base64( \
- entry.get('current_bfile')))
+ state['contents'] = u_str(b64decode(entry.get('current_bfile')))
elif 'current_bdiff' in entry.attrib:
- diff = binascii.a2b_base64(entry.get('current_bdiff'))
+ diff = b64decode(entry.get('current_bdiff'))
state['contents'] = u_str( \
'\n'.join(difflib.restore(diff.split('\n'), 1)))
@@ -69,14 +56,12 @@ def build_snap_ent(entry):
return [desired, state]
-class Snapshots(Bcfg2.Server.Plugin.Statistics,
- Bcfg2.Server.Plugin.Plugin):
+class Snapshots(Bcfg2.Server.Plugin.Statistics):
name = 'Snapshots'
experimental = True
def __init__(self, core, datastore):
- Bcfg2.Server.Plugin.Plugin.__init__(self, core, datastore)
- Bcfg2.Server.Plugin.Statistics.__init__(self)
+ Bcfg2.Server.Plugin.Statistics.__init__(self, core, datastore)
self.session = Bcfg2.Server.Snapshots.setup_session(core.cfile)
self.work_queue = Queue()
self.loader = threading.Thread(target=self.load_snapshot)
diff --git a/src/lib/Bcfg2/Server/Plugins/Statistics.py b/src/lib/Bcfg2/Server/Plugins/Statistics.py
index 265ef95a8..984efb76c 100644
--- a/src/lib/Bcfg2/Server/Plugins/Statistics.py
+++ b/src/lib/Bcfg2/Server/Plugins/Statistics.py
@@ -1,15 +1,14 @@
'''This file manages the statistics collected by the BCFG2 Server'''
-import binascii
import copy
import difflib
import logging
-from lxml.etree import XML, SubElement, Element, XMLSyntaxError
import lxml.etree
import os
+import sys
from time import asctime, localtime, time, strptime, mktime
import threading
-
+from Bcfg2.Bcfg2Py3k import b64decode
import Bcfg2.Server.Plugin
@@ -19,7 +18,7 @@ class StatisticsStore(object):
def __init__(self, filename):
self.filename = filename
- self.element = Element('Dummy')
+ self.element = lxml.etree.Element('Dummy')
self.dirty = 0
self.lastwrite = 0
self.logger = logging.getLogger('Bcfg2.Server.Statistics')
@@ -35,7 +34,8 @@ class StatisticsStore(object):
ioerr = sys.exc_info()[1]
self.logger.error("Failed to open %s for writing: %s" % (self.filename + '.new', ioerr))
else:
- fout.write(lxml.etree.tostring(self.element, encoding='UTF-8', xml_declaration=True))
+ fout.write(lxml.etree.tostring(self.element,
+ xml_declaration=False).decode('UTF-8'))
fout.close()
os.rename(self.filename + '.new', self.filename)
self.dirty = 0
@@ -47,11 +47,11 @@ class StatisticsStore(object):
fin = open(self.filename, 'r')
data = fin.read()
fin.close()
- self.element = XML(data)
+ self.element = lxml.etree.XML(data)
self.dirty = 0
- except (IOError, XMLSyntaxError):
+ except (IOError, lxml.etree.XMLSyntaxError):
self.logger.error("Creating new statistics file %s"%(self.filename))
- self.element = Element('ConfigStatistics')
+ self.element = lxml.etree.Element('ConfigStatistics')
self.WriteBack()
self.dirty = 0
@@ -77,7 +77,7 @@ class StatisticsStore(object):
nummatch = len(nodes)
if nummatch == 0:
# Create an entry for this node
- node = SubElement(self.element, 'Node', name=client)
+ node = lxml.etree.SubElement(self.element, 'Node', name=client)
elif nummatch == 1 and not node_dirty:
# Delete old instance
node = nodes[0]
@@ -112,13 +112,11 @@ class StatisticsStore(object):
return (now-utime) > secondsPerDay
-class Statistics(Bcfg2.Server.Plugin.Plugin,
- Bcfg2.Server.Plugin.ThreadedStatistics,
+class Statistics(Bcfg2.Server.Plugin.ThreadedStatistics,
Bcfg2.Server.Plugin.PullSource):
name = 'Statistics'
def __init__(self, core, datastore):
- Bcfg2.Server.Plugin.Plugin.__init__(self, core, datastore)
Bcfg2.Server.Plugin.ThreadedStatistics.__init__(self, core, datastore)
Bcfg2.Server.Plugin.PullSource.__init__(self)
fpath = "%s/etc/statistics.xml" % datastore
@@ -151,9 +149,9 @@ class Statistics(Bcfg2.Server.Plugin.Plugin,
if cfentry.get('sensitive') in ['true', 'True']:
raise Bcfg2.Server.Plugin.PluginExecutionError
elif 'current_bfile' in cfentry.attrib:
- contents = binascii.a2b_base64(cfentry.get('current_bfile'))
+ contents = b64decode(cfentry.get('current_bfile'))
elif 'current_bdiff' in cfentry.attrib:
- diff = binascii.a2b_base64(cfentry.get('current_bdiff'))
+ diff = b64decode(cfentry.get('current_bdiff'))
contents = '\n'.join(difflib.restore(diff.split('\n'), 1))
else:
contents = None
diff --git a/src/lib/Bcfg2/Server/Plugins/Svcmgr.py b/src/lib/Bcfg2/Server/Plugins/Svcmgr.py
deleted file mode 100644
index f4232ad5c..000000000
--- a/src/lib/Bcfg2/Server/Plugins/Svcmgr.py
+++ /dev/null
@@ -1,10 +0,0 @@
-"""This generator provides service mappings."""
-
-import Bcfg2.Server.Plugin
-
-
-class Svcmgr(Bcfg2.Server.Plugin.PrioDir):
- """This is a generator that handles service assignments."""
- name = 'Svcmgr'
- __author__ = 'bcfg-dev@mcs.anl.gov'
- deprecated = True
diff --git a/src/lib/Bcfg2/Server/Plugins/TCheetah.py b/src/lib/Bcfg2/Server/Plugins/TCheetah.py
index 8879fdef1..2bf475363 100644
--- a/src/lib/Bcfg2/Server/Plugins/TCheetah.py
+++ b/src/lib/Bcfg2/Server/Plugins/TCheetah.py
@@ -1,13 +1,11 @@
'''This module implements a templating generator based on Cheetah'''
-import binascii
import logging
import sys
import traceback
import Bcfg2.Server.Plugin
-# py3k compatibility
-if sys.hexversion >= 0x03000000:
- unicode = str
+
+from Bcfg2.Bcfg2Py3k import unicode, b64encode
logger = logging.getLogger('Bcfg2.Plugins.TCheetah')
@@ -60,7 +58,7 @@ class TemplateFile:
else:
if entry.get('encoding') == 'base64':
# take care of case where file needs base64 encoding
- entry.text = binascii.b2a_base64(self.template)
+ entry.text = b64encode(self.template)
else:
entry.text = unicode(str(self.template), self.encoding)
except:
@@ -78,3 +76,4 @@ class TCheetah(Bcfg2.Server.Plugin.GroupSpool):
__author__ = 'bcfg-dev@mcs.anl.gov'
filename_pattern = 'template'
es_child_cls = TemplateFile
+ deprecated = True
diff --git a/src/lib/Bcfg2/Server/Plugins/TGenshi.py b/src/lib/Bcfg2/Server/Plugins/TGenshi.py
index c4dd40614..c7335a0c0 100644
--- a/src/lib/Bcfg2/Server/Plugins/TGenshi.py
+++ b/src/lib/Bcfg2/Server/Plugins/TGenshi.py
@@ -1,12 +1,10 @@
"""This module implements a templating generator based on Genshi."""
-import binascii
import logging
import sys
import Bcfg2.Server.Plugin
-# py3k compatibility
-if sys.hexversion >= 0x03000000:
- unicode = str
+
+from Bcfg2.Bcfg2Py3k import unicode, b64encode
logger = logging.getLogger('Bcfg2.Plugins.TGenshi')
@@ -18,7 +16,7 @@ try:
TextTemplate, MarkupTemplate, TemplateError
except ImportError:
logger.error("TGenshi: Failed to import Genshi. Is it installed?")
- raise Bcfg2.Server.Plugin.PluginInitError
+ raise
try:
from genshi.template import NewTextTemplate
have_ntt = True
@@ -33,7 +31,7 @@ def removecomment(stream):
yield kind, data, pos
-class TemplateFile:
+class TemplateFile(object):
"""Template file creates Genshi template structures for the loaded file."""
def __init__(self, name, specific, encoding):
@@ -99,7 +97,7 @@ class TemplateFile:
else:
if entry.get('encoding') == 'base64':
# take care of case where file needs base64 encoding
- entry.text = binascii.b2a_base64(textdata)
+ entry.text = b64encode(textdata)
else:
entry.text = unicode(textdata, self.encoding)
else:
@@ -123,6 +121,10 @@ class TemplateFile:
raise Bcfg2.Server.Plugin.PluginExecutionError('Genshi template loading error: %s' % err)
+class TemplateEntrySet(Bcfg2.Server.Plugin.EntrySet):
+ basename_is_regex = True
+
+
class TGenshi(Bcfg2.Server.Plugin.GroupSpool):
"""
The TGenshi generator implements a templating
@@ -132,4 +134,6 @@ class TGenshi(Bcfg2.Server.Plugin.GroupSpool):
name = 'TGenshi'
__author__ = 'jeff@ocjtech.us'
filename_pattern = 'template\.(txt|newtxt|xml)'
+ es_cls = TemplateEntrySet
es_child_cls = TemplateFile
+ deprecated = True
diff --git a/src/lib/Bcfg2/Server/Plugins/TemplateHelper.py b/src/lib/Bcfg2/Server/Plugins/TemplateHelper.py
index 2c0ee03e0..6d92bb530 100644
--- a/src/lib/Bcfg2/Server/Plugins/TemplateHelper.py
+++ b/src/lib/Bcfg2/Server/Plugins/TemplateHelper.py
@@ -1,26 +1,24 @@
 import re
 import imp
 import sys
+import os
+import glob
import logging
+import Bcfg2.Server.Lint
import Bcfg2.Server.Plugin
logger = logging.getLogger(__name__)
-class HelperModule(Bcfg2.Server.Plugin.SpecificData):
- _module_name_re = re.compile(r'([^/]+?)\.py')
-
- def __init__(self, name, specific, encoding):
- Bcfg2.Server.Plugin.SpecificData.__init__(self, name, specific,
- encoding)
- match = self._module_name_re.search(self.name)
- if match:
- self._module_name = match.group(1)
- else:
- self._module_name = name
+module_pattern = r'(?P<filename>(?P<module>[^\/]+)\.py)$'
+module_re = re.compile(module_pattern)
+
+class HelperModule(Bcfg2.Server.Plugin.FileBacked):
+ def __init__(self, name, fam=None):
+ Bcfg2.Server.Plugin.FileBacked.__init__(self, name, fam=fam)
+ self._module_name = module_re.search(self.name).group('module')
self._attrs = []
- def handle_event(self, event):
- Bcfg2.Server.Plugin.SpecificData.handle_event(self, event)
+ def Index(self):
try:
module = imp.load_source(self._module_name, self.name)
except:
@@ -34,32 +31,29 @@ class HelperModule(Bcfg2.Server.Plugin.SpecificData):
self.name)
return
+ newattrs = []
for sym in module.__export__:
if sym not in self._attrs and hasattr(self, sym):
logger.warning("TemplateHelper: %s: %s is a reserved keyword, "
"skipping export" % (self.name, sym))
- setattr(self, sym, getattr(module, sym))
+ continue
+ try:
+ setattr(self, sym, getattr(module, sym))
+ newattrs.append(sym)
+ except AttributeError:
+ logger.warning("TemplateHelper: %s: %s exports %s, but has no "
+ "such attribute" % (self.name, sym))
# remove old exports
- for sym in set(self._attrs) - set(module.__export__):
+ for sym in set(self._attrs) - set(newattrs):
delattr(self, sym)
- self._attrs = module.__export__
+ self._attrs = newattrs
-class HelperSet(Bcfg2.Server.Plugin.EntrySet):
+class HelperSet(Bcfg2.Server.Plugin.DirectoryBacked):
ignore = re.compile("^(\.#.*|.*~|\\..*\\.(sw[px])|.*\.py[co])$")
-
- def __init__(self, path, fam, encoding, plugin_name):
- fpattern = '[0-9A-Za-z_\-]+\.py'
- self.plugin_name = plugin_name
- Bcfg2.Server.Plugin.EntrySet.__init__(self, fpattern, path,
- HelperModule, encoding)
- fam.AddMonitor(path, self)
-
- def HandleEvent(self, event):
- if (event.filename != self.path and
- not self.ignore.match(event.filename)):
- return self.handle_event(event)
+ patterns = module_re
+ __child__ = HelperModule
class TemplateHelper(Bcfg2.Server.Plugin.Plugin,
@@ -71,13 +65,69 @@ class TemplateHelper(Bcfg2.Server.Plugin.Plugin,
def __init__(self, core, datastore):
Bcfg2.Server.Plugin.Plugin.__init__(self, core, datastore)
Bcfg2.Server.Plugin.Connector.__init__(self)
+ self.helpers = HelperSet(self.data, core.fam)
+
+ def get_additional_data(self, _):
+ return dict([(h._module_name, h)
+ for h in self.helpers.entries.values()])
+
+
+class TemplateHelperLint(Bcfg2.Server.Lint.ServerlessPlugin):
+    """ check TemplateHelper modules for common errors """
+ def __init__(self, *args, **kwargs):
+ Bcfg2.Server.Lint.ServerlessPlugin.__init__(self, *args, **kwargs)
+ hm = HelperModule("foo.py")
+ self.reserved_keywords = dir(hm)
+
+ def Run(self):
+ for fname in os.listdir(os.path.join(self.config['repo'],
+ "TemplateHelper")):
+ helper = os.path.join(self.config['repo'], "TemplateHelper",
+ fname)
+ if not module_re.search(helper) or not self.HandlesFile(helper):
+ continue
+ self.check_helper(helper)
+
+ def check_helper(self, helper):
+        module_name = module_re.search(helper).group('module')
try:
- self.helpers = HelperSet(self.data, core.fam, core.encoding,
- self.name)
+ module = imp.load_source(module_name, helper)
except:
- raise Bcfg2.Server.Plugin.PluginInitError
+ err = sys.exc_info()[1]
+ self.LintError("templatehelper-import-error",
+ "Failed to import %s: %s" %
+ (helper, err))
+ return
- def get_additional_data(self, metadata):
- return dict([(h._module_name, h)
- for h in list(self.helpers.entries.values())])
+ if not hasattr(module, "__export__"):
+ self.LintError("templatehelper-no-export",
+ "%s has no __export__ list" % helper)
+ return
+ elif not isinstance(module.__export__, list):
+ self.LintError("templatehelper-nonlist-export",
+ "__export__ is not a list in %s" % helper)
+ return
+
+ for sym in module.__export__:
+ if not hasattr(module, sym):
+ self.LintError("templatehelper-nonexistent-export",
+ "%s: exported symbol %s does not exist" %
+ (helper, sym))
+ elif sym in self.reserved_keywords:
+ self.LintError("templatehelper-reserved-export",
+ "%s: exported symbol %s is reserved" %
+ (helper, sym))
+ elif sym.startswith("_"):
+ self.LintError("templatehelper-underscore-export",
+ "%s: exported symbol %s starts with underscore" %
+ (helper, sym))
+
+ @classmethod
+ def Errors(cls):
+ return {"templatehelper-import-error":"error",
+ "templatehelper-no-export":"error",
+ "templatehelper-nonlist-export":"error",
+ "templatehelper-nonexistent-export":"error",
+ "templatehelper-reserved-export":"error",
+ "templatehelper-underscore-export":"warning"}
diff --git a/src/lib/Bcfg2/Server/Plugins/Trigger.py b/src/lib/Bcfg2/Server/Plugins/Trigger.py
index b0d21545c..313a1bf03 100644
--- a/src/lib/Bcfg2/Server/Plugins/Trigger.py
+++ b/src/lib/Bcfg2/Server/Plugins/Trigger.py
@@ -1,43 +1,52 @@
import os
+import pipes
import Bcfg2.Server.Plugin
+from subprocess import Popen, PIPE
+class TriggerFile(Bcfg2.Server.Plugin.FileBacked):
+ def HandleEvent(self, event=None):
+ return
-def async_run(prog, args):
- pid = os.fork()
- if pid:
- os.waitpid(pid, 0)
- else:
- dpid = os.fork()
- if not dpid:
- os.system(" ".join([prog] + args))
- os._exit(0)
+ def __str__(self):
+ return "%s: %s" % (self.__class__.__name__, self.name)
class Trigger(Bcfg2.Server.Plugin.Plugin,
- Bcfg2.Server.Plugin.Statistics):
+ Bcfg2.Server.Plugin.ClientRunHooks,
+ Bcfg2.Server.Plugin.DirectoryBacked):
"""Trigger is a plugin that calls external scripts (on the server)."""
name = 'Trigger'
__author__ = 'bcfg-dev@mcs.anl.gov'
def __init__(self, core, datastore):
Bcfg2.Server.Plugin.Plugin.__init__(self, core, datastore)
- Bcfg2.Server.Plugin.Statistics.__init__(self)
- try:
- os.stat(self.data)
- except:
- self.logger.error("Trigger: spool directory %s does not exist; "
- "unloading" % self.data)
- raise Bcfg2.Server.Plugin.PluginInitError
-
- def process_statistics(self, metadata, _):
+ Bcfg2.Server.Plugin.ClientRunHooks.__init__(self)
+ Bcfg2.Server.Plugin.DirectoryBacked.__init__(self, self.data,
+ self.core.fam)
+
+ def async_run(self, args):
+ pid = os.fork()
+ if pid:
+ os.waitpid(pid, 0)
+ else:
+ dpid = os.fork()
+ if not dpid:
+ self.debug_log("Running %s" % " ".join(pipes.quote(a)
+ for a in args))
+ proc = Popen(args, stdin=PIPE, stdout=PIPE, stderr=PIPE)
+ (out, err) = proc.communicate()
+ rv = proc.wait()
+ if rv != 0:
+ self.logger.error("Trigger: Error running %s (%s): %s" %
+ (args[0], rv, err))
+ elif err:
+ self.debug_log("Trigger: Error: %s" % err)
+ os._exit(0)
+
+
+ def end_client_run(self, metadata):
args = [metadata.hostname, '-p', metadata.profile, '-g',
':'.join([g for g in metadata.groups])]
- for notifier in os.listdir(self.data):
- if ((notifier[-1] == '~') or
- (notifier[:2] == '.#') or
- (notifier[-4:] == '.swp') or
- (notifier in ['SCCS', '.svn', '4913'])):
- continue
- npath = self.data + '/' + notifier
- self.logger.debug("Running %s %s" % (npath, " ".join(args)))
- async_run(npath, args)
+ for notifier in self.entries.keys():
+ npath = os.path.join(self.data, notifier)
+ self.async_run([npath] + args)
diff --git a/src/lib/Bcfg2/Server/Plugins/__init__.py b/src/lib/Bcfg2/Server/Plugins/__init__.py
index f9f1b4e52..b33eeba28 100644
--- a/src/lib/Bcfg2/Server/Plugins/__init__.py
+++ b/src/lib/Bcfg2/Server/Plugins/__init__.py
@@ -25,10 +25,8 @@ __all__ = [
'SSHbase',
'Snapshots',
'Statistics',
- 'Svcmgr',
'Svn',
'TCheetah',
'Trigger',
- 'SGenshi',
'TGenshi',
]
diff --git a/src/lib/Bcfg2/Server/Reports/importscript.py b/src/lib/Bcfg2/Server/Reports/importscript.py
index 16df86a9b..4eced8340 100755
--- a/src/lib/Bcfg2/Server/Reports/importscript.py
+++ b/src/lib/Bcfg2/Server/Reports/importscript.py
@@ -4,17 +4,17 @@ Imports statistics.xml and clients.xml files in to database backend for
new statistics engine
"""
-import binascii
import os
import sys
+import traceback
try:
- import Bcfg2.Server.Reports.settings
+ import Bcfg2.settings
except Exception:
e = sys.exc_info()[1]
sys.stderr.write("Failed to load configuration settings. %s\n" % e)
sys.exit(1)
-project_directory = os.path.dirname(Bcfg2.Server.Reports.settings.__file__)
+project_directory = os.path.dirname(Bcfg2.settings.__file__)
project_name = os.path.basename(project_directory)
sys.path.append(os.path.join(project_directory, '..'))
project_module = __import__(project_name, '', '', [''])
@@ -27,14 +27,14 @@ from lxml.etree import XML, XMLSyntaxError
from getopt import getopt, GetoptError
from datetime import datetime
from time import strptime
-from django.db import connection
-from Bcfg2.Server.Reports.updatefix import update_database
+from django.db import connection, transaction
+from Bcfg2.Server.Plugins.Metadata import ClientMetadata
import logging
import Bcfg2.Logger
import platform
# Compatibility import
-from Bcfg2.Bcfg2Py3k import ConfigParser
+from Bcfg2.Bcfg2Py3k import ConfigParser, b64decode
def build_reason_kwargs(r_ent, encoding, logger):
@@ -53,7 +53,7 @@ def build_reason_kwargs(r_ent, encoding, logger):
# No point in flagging binary if we have no data
binary_file = False
elif r_ent.get('current_bdiff', False):
- rc_diff = binascii.a2b_base64(r_ent.get('current_bdiff'))
+ rc_diff = b64decode(r_ent.get('current_bdiff'))
elif r_ent.get('current_diff', False):
rc_diff = r_ent.get('current_diff')
else:
@@ -86,130 +86,160 @@ def build_reason_kwargs(r_ent, encoding, logger):
is_sensitive=sensitive_file,
unpruned=unpruned_entries)
+def _fetch_reason(elem, kargs, logger):
+ try:
+ rr = None
+ try:
+ rr = Reason.objects.filter(**kargs)[0]
+ except IndexError:
+ rr = Reason(**kargs)
+ rr.save()
+ logger.debug("Created reason: %s" % rr.id)
+ except Exception:
+ ex = sys.exc_info()[1]
+ logger.error("Failed to create reason for %s: %s" % (elem.get('name'), ex))
+ rr = Reason(current_exists=elem.get('current_exists',
+ default="True").capitalize() == "True")
+ rr.save()
+ return rr
-def load_stats(cdata, sdata, encoding, vlevel, logger, quick=False, location=''):
- clients = {}
- [clients.__setitem__(c.name, c) \
- for c in Client.objects.all()]
-
- pingability = {}
- [pingability.__setitem__(n.get('name'), n.get('pingable', default='N')) \
- for n in cdata.findall('Client')]
+def load_stats(sdata, encoding, vlevel, logger, quick=False, location=''):
for node in sdata.findall('Node'):
name = node.get('name')
- c_inst, created = Client.objects.get_or_create(name=name)
- if vlevel > 0:
- logger.info("Client %s added to db" % name)
- clients[name] = c_inst
- try:
- pingability[name]
- except KeyError:
- pingability[name] = 'N'
for statistics in node.findall('Statistics'):
- timestamp = datetime(*strptime(statistics.get('time'))[0:6])
- ilist = Interaction.objects.filter(client=c_inst,
- timestamp=timestamp)
- if ilist:
- current_interaction = ilist[0]
- if vlevel > 0:
- logger.info("Interaction for %s at %s with id %s already exists" % \
- (c_inst.id, timestamp, current_interaction.id))
- continue
- else:
- newint = Interaction(client=c_inst,
- timestamp=timestamp,
- state=statistics.get('state',
+ try:
+ load_stat(name, statistics, encoding, vlevel, logger, quick, location)
+ except:
+ logger.error("Failed to create interaction for %s: %s" %
+ (name, traceback.format_exc().splitlines()[-1]))
+
+@transaction.commit_on_success
+def load_stat(cobj, statistics, encoding, vlevel, logger, quick, location):
+ if isinstance(cobj, ClientMetadata):
+ client_name = cobj.hostname
+ else:
+ client_name = cobj
+ client, created = Client.objects.get_or_create(name=client_name)
+ if created and vlevel > 0:
+ logger.info("Client %s added to db" % client_name)
+
+ timestamp = datetime(*strptime(statistics.get('time'))[0:6])
+ ilist = Interaction.objects.filter(client=client,
+ timestamp=timestamp)
+ if ilist:
+ current_interaction = ilist[0]
+ if vlevel > 0:
+ logger.info("Interaction for %s at %s with id %s already exists" % \
+ (client.id, timestamp, current_interaction.id))
+ return
+ else:
+ newint = Interaction(client=client,
+ timestamp=timestamp,
+ state=statistics.get('state',
+ default="unknown"),
+ repo_rev_code=statistics.get('revision',
default="unknown"),
- repo_rev_code=statistics.get('revision',
- default="unknown"),
- goodcount=statistics.get('good',
- default="0"),
- totalcount=statistics.get('total',
- default="0"),
- server=location)
- newint.save()
- current_interaction = newint
- if vlevel > 0:
- logger.info("Interaction for %s at %s with id %s INSERTED in to db" % (c_inst.id,
- timestamp, current_interaction.id))
-
- counter_fields = {TYPE_CHOICES[0]: 0,
- TYPE_CHOICES[1]: 0,
- TYPE_CHOICES[2]: 0}
- pattern = [('Bad/*', TYPE_CHOICES[0]),
- ('Extra/*', TYPE_CHOICES[2]),
- ('Modified/*', TYPE_CHOICES[1])]
- for (xpath, type) in pattern:
- for x in statistics.findall(xpath):
- counter_fields[type] = counter_fields[type] + 1
- kargs = build_reason_kwargs(x, encoding, logger)
-
- try:
- rr = None
- try:
- rr = Reason.objects.filter(**kargs)[0]
- except IndexError:
- rr = Reason(**kargs)
- rr.save()
- if vlevel > 0:
- logger.info("Created reason: %s" % rr.id)
- except Exception:
- ex = sys.exc_info()[1]
- logger.error("Failed to create reason for %s: %s" % (x.get('name'), ex))
- rr = Reason(current_exists=x.get('current_exists',
- default="True").capitalize() == "True")
- rr.save()
-
- entry, created = Entries.objects.get_or_create(\
- name=x.get('name'), kind=x.tag)
-
- Entries_interactions(entry=entry, reason=rr,
- interaction=current_interaction,
- type=type[0]).save()
- if vlevel > 0:
- logger.info("%s interaction created with reason id %s and entry %s" % (xpath, rr.id, entry.id))
-
- # Update interaction counters
- current_interaction.bad_entries = counter_fields[TYPE_CHOICES[0]]
- current_interaction.modified_entries = counter_fields[TYPE_CHOICES[1]]
- current_interaction.extra_entries = counter_fields[TYPE_CHOICES[2]]
- current_interaction.save()
-
- mperfs = []
- for times in statistics.findall('OpStamps'):
- for metric, value in list(times.items()):
- mmatch = []
- if not quick:
- mmatch = Performance.objects.filter(metric=metric, value=value)
-
- if mmatch:
- mperf = mmatch[0]
- else:
- mperf = Performance(metric=metric, value=value)
- mperf.save()
- mperfs.append(mperf)
- current_interaction.performance_items.add(*mperfs)
-
- for key in list(pingability.keys()):
- if key not in clients:
- continue
+ goodcount=statistics.get('good',
+ default="0"),
+ totalcount=statistics.get('total',
+ default="0"),
+ server=location)
+ newint.save()
+ current_interaction = newint
+ if vlevel > 0:
+ logger.info("Interaction for %s at %s with id %s INSERTED in to db" % (client.id,
+ timestamp, current_interaction.id))
+
+ if isinstance(cobj, ClientMetadata):
try:
- pmatch = Ping.objects.filter(client=clients[key]).order_by('-endtime')[0]
- if pmatch.status == pingability[key]:
- pmatch.endtime = datetime.now()
- pmatch.save()
- continue
- except IndexError:
- pass
- Ping(client=clients[key], status=pingability[key],
- starttime=datetime.now(),
- endtime=datetime.now()).save()
+ imeta = InteractionMetadata(interaction=current_interaction)
+ profile, created = Group.objects.get_or_create(name=cobj.profile)
+ imeta.profile = profile
+ imeta.save() # save here for m2m
+
+ #FIXME - this should be more efficient
+ group_set = []
+ for group_name in cobj.groups:
+ group, created = Group.objects.get_or_create(name=group_name)
+ if created:
+ logger.debug("Added group %s" % group)
+ imeta.groups.add(group)
+ for bundle_name in cobj.bundles:
+ bundle, created = Bundle.objects.get_or_create(name=bundle_name)
+ if created:
+ logger.debug("Added bundle %s" % bundle)
+ imeta.bundles.add(bundle)
+ imeta.save()
+ except:
+ logger.error("Failed to save interaction metadata for %s: %s" %
+ (client_name, traceback.format_exc().splitlines()[-1]))
+
+
+ entries_cache = {}
+ [entries_cache.__setitem__((e.kind, e.name), e) \
+ for e in Entries.objects.all()]
+ counter_fields = {TYPE_BAD: 0,
+ TYPE_MODIFIED: 0,
+ TYPE_EXTRA: 0}
+ pattern = [('Bad/*', TYPE_BAD),
+ ('Extra/*', TYPE_EXTRA),
+ ('Modified/*', TYPE_MODIFIED)]
+ for (xpath, type) in pattern:
+ for x in statistics.findall(xpath):
+ counter_fields[type] = counter_fields[type] + 1
+ rr = _fetch_reason(x, build_reason_kwargs(x, encoding, logger), logger)
- if vlevel > 1:
- logger.info("---------------PINGDATA SYNCED---------------------")
+ try:
+ entry = entries_cache[(x.tag, x.get('name'))]
+ except KeyError:
+ entry, created = Entries.objects.get_or_create(\
+ name=x.get('name'), kind=x.tag)
+
+ Entries_interactions(entry=entry, reason=rr,
+ interaction=current_interaction,
+ type=type).save()
+ if vlevel > 0:
+ logger.info("%s interaction created with reason id %s and entry %s" % (xpath, rr.id, entry.id))
+
+ # add good entries
+ good_reason = None
+ for x in statistics.findall('Good/*'):
+ if good_reason == None:
+ # Do this once. Really need to fix Reasons...
+ good_reason = _fetch_reason(x, build_reason_kwargs(x, encoding, logger), logger)
+ try:
+ entry = entries_cache[(x.tag, x.get('name'))]
+ except KeyError:
+ entry, created = Entries.objects.get_or_create(\
+ name=x.get('name'), kind=x.tag)
+ Entries_interactions(entry=entry, reason=good_reason,
+ interaction=current_interaction,
+ type=TYPE_GOOD).save()
+ if vlevel > 0:
+ logger.info("%s interaction created with reason id %s and entry %s" % (xpath, good_reason.id, entry.id))
+
+ # Update interaction counters
+ current_interaction.bad_entries = counter_fields[TYPE_BAD]
+ current_interaction.modified_entries = counter_fields[TYPE_MODIFIED]
+ current_interaction.extra_entries = counter_fields[TYPE_EXTRA]
+ current_interaction.save()
+
+ mperfs = []
+ for times in statistics.findall('OpStamps'):
+ for metric, value in list(times.items()):
+ mmatch = []
+ if not quick:
+ mmatch = Performance.objects.filter(metric=metric, value=value)
+
+ if mmatch:
+ mperf = mmatch[0]
+ else:
+ mperf = Performance(metric=metric, value=value)
+ mperf.save()
+ mperfs.append(mperf)
+ current_interaction.performance_items.add(*mperfs)
- #Clients are consistent
if __name__ == '__main__':
from sys import argv
@@ -231,18 +261,17 @@ if __name__ == '__main__':
except GetoptError:
mesg = sys.exc_info()[1]
# print help information and exit:
- print("%s\nUsage:\nimportscript.py [-h] [-v] [-u] [-d] [-S] [-C bcfg2 config file] [-c clients-file] [-s statistics-file]" % (mesg))
+ print("%s\nUsage:\nimportscript.py [-h] [-v] [-u] [-d] [-S] [-C bcfg2 config file] [-s statistics-file]" % (mesg))
raise SystemExit(2)
for o, a in opts:
if o in ("-h", "--help"):
- print("Usage:\nimportscript.py [-h] [-v] -c <clients-file> -s <statistics-file> \n")
+ print("Usage:\nimportscript.py [-h] [-v] -s <statistics-file> \n")
print("h : help; this message")
print("v : verbose; print messages on record insertion/skip")
print("u : updates; print status messages as items inserted semi-verbose")
print("d : debug; print most SQL used to manipulate database")
print("C : path to bcfg2.conf config file.")
- print("c : clients.xml file")
print("s : statistics.xml file")
print("S : syslog; output to syslog")
raise SystemExit
@@ -256,7 +285,7 @@ if __name__ == '__main__':
if o in ("-d", "--debug"):
verb = 3
if o in ("-c", "--clients"):
- clientspath = a
+ print("DeprecationWarning: %s is no longer used" % o)
if o in ("-s", "--stats"):
statpath = a
@@ -267,7 +296,7 @@ if __name__ == '__main__':
logging.getLogger().setLevel(logging.INFO)
Bcfg2.Logger.setup_logging('importscript.py',
True,
- syslog)
+ syslog, level=logging.INFO)
cf = ConfigParser.ConfigParser()
cf.read([cpath])
@@ -289,24 +318,16 @@ if __name__ == '__main__':
except:
encoding = 'UTF-8'
- if not clientpath:
- try:
- clientspath = "%s/Metadata/clients.xml" % \
- cf.get('server', 'repository')
- except (ConfigParser.NoSectionError, ConfigParser.NoOptionError):
- print("Could not read bcfg2.conf; exiting")
- raise SystemExit(1)
- try:
- clientsdata = XML(open(clientspath).read())
- except (IOError, XMLSyntaxError):
- print("StatReports: Failed to parse %s" % (clientspath))
- raise SystemExit(1)
-
q = '-O3' in sys.argv
+
+ # don't load this at the top. causes a circular import error
+ from Bcfg2.Server.SchemaUpdater import update_database, UpdaterError
# Be sure the database is ready for new schema
- update_database()
- load_stats(clientsdata,
- statsdata,
+ try:
+ update_database()
+ except UpdaterError:
+ raise SystemExit(1)
+ load_stats(statsdata,
encoding,
verb,
logger,
diff --git a/src/lib/Bcfg2/Server/Reports/reports/fixtures/initial_version.xml b/src/lib/Bcfg2/Server/Reports/reports/fixtures/initial_version.xml
deleted file mode 100644
index bde236989..000000000
--- a/src/lib/Bcfg2/Server/Reports/reports/fixtures/initial_version.xml
+++ /dev/null
@@ -1,43 +0,0 @@
-<?xml version='1.0' encoding='utf-8' ?>
-<django-objects version="1.0">
- <object pk="1" model="reports.internaldatabaseversion">
- <field type="IntegerField" name="version">0</field>
- <field type="DateTimeField" name="updated">2008-08-05 11:03:50</field>
- </object>
- <object pk="2" model="reports.internaldatabaseversion">
- <field type="IntegerField" name="version">1</field>
- <field type="DateTimeField" name="updated">2008-08-05 11:04:10</field>
- </object>
- <object pk="3" model="reports.internaldatabaseversion">
- <field type="IntegerField" name="version">2</field>
- <field type="DateTimeField" name="updated">2008-08-05 13:37:19</field>
- </object>
- <object pk="4" model="reports.internaldatabaseversion">
- <field type='IntegerField' name='version'>3</field>
- <field type='DateTimeField' name='updated'>2008-08-11 08:44:36</field>
- </object>
- <object pk="5" model="reports.internaldatabaseversion">
- <field type='IntegerField' name='version'>10</field>
- <field type='DateTimeField' name='updated'>2008-08-22 11:28:50</field>
- </object>
- <object pk="5" model="reports.internaldatabaseversion">
- <field type='IntegerField' name='version'>11</field>
- <field type='DateTimeField' name='updated'>2009-01-13 12:26:10</field>
- </object>
- <object pk="6" model="reports.internaldatabaseversion">
- <field type='IntegerField' name='version'>16</field>
- <field type='DateTimeField' name='updated'>2010-06-01 12:26:10</field>
- </object>
- <object pk="7" model="reports.internaldatabaseversion">
- <field type='IntegerField' name='version'>17</field>
- <field type='DateTimeField' name='updated'>2010-07-02 00:00:00</field>
- </object>
- <object pk="8" model="reports.internaldatabaseversion">
- <field type='IntegerField' name='version'>18</field>
- <field type='DateTimeField' name='updated'>2011-06-30 00:00:00</field>
- </object>
- <object pk="8" model="reports.internaldatabaseversion">
- <field type='IntegerField' name='version'>19</field>
- <field type='DateTimeField' name='updated'>2012-03-28 00:00:00</field>
- </object>
-</django-objects>
diff --git a/src/lib/Bcfg2/Server/Reports/reports/models.py b/src/lib/Bcfg2/Server/Reports/reports/models.py
index 35f2a4393..73adaaaaf 100644
--- a/src/lib/Bcfg2/Server/Reports/reports/models.py
+++ b/src/lib/Bcfg2/Server/Reports/reports/models.py
@@ -23,16 +23,13 @@ KIND_CHOICES = (
('Path', 'symlink'),
('Service', 'Service'),
)
-PING_CHOICES = (
- #These are possible ping states
- ('Up (Y)', 'Y'),
- ('Down (N)', 'N')
-)
+TYPE_GOOD = 0
TYPE_BAD = 1
TYPE_MODIFIED = 2
TYPE_EXTRA = 3
TYPE_CHOICES = (
+ (TYPE_GOOD, 'Good'),
(TYPE_BAD, 'Bad'),
(TYPE_MODIFIED, 'Modified'),
(TYPE_EXTRA, 'Extra'),
@@ -87,30 +84,9 @@ class Client(models.Model):
pass
-class Ping(models.Model):
- """Represents a ping of a client (sparsely)."""
- client = models.ForeignKey(Client, related_name="pings")
- starttime = models.DateTimeField()
- endtime = models.DateTimeField()
- status = models.CharField(max_length=4, choices=PING_CHOICES) # up/down
-
- class Meta:
- get_latest_by = 'endtime'
-
-
class InteractiveManager(models.Manager):
"""Manages interactions objects."""
- def recent_interactions_dict(self, maxdate=None, active_only=True):
- """
- Return the most recent interactions for clients as of a date.
-
- This method uses aggregated queries to return a ValuesQueryDict object.
- Faster then raw sql since this is executed as a single query.
- """
-
- return list(self.values('client').annotate(max_timestamp=Max('timestamp')).values())
-
def interaction_per_client(self, maxdate=None, active_only=True):
"""
Returns the most recent interactions for clients as of a date
@@ -154,15 +130,15 @@ class InteractiveManager(models.Manager):
cursor.execute(sql)
return [item[0] for item in cursor.fetchall()]
except:
- '''FIXME - really need some error hadling'''
+ '''FIXME - really need some error handling'''
pass
return []
class Interaction(models.Model):
"""Models each reconfiguration operation interaction between client and server."""
- client = models.ForeignKey(Client, related_name="interactions",)
- timestamp = models.DateTimeField() # Timestamp for this record
+ client = models.ForeignKey(Client, related_name="interactions")
+ timestamp = models.DateTimeField(db_index=True) # Timestamp for this record
state = models.CharField(max_length=32) # good/bad/modified/etc
repo_rev_code = models.CharField(max_length=64) # repo revision at time of interaction
goodcount = models.IntegerField() # of good config-items
@@ -270,27 +246,47 @@ class Interaction(models.Model):
class Reason(models.Model):
"""reason why modified or bad entry did not verify, or changed."""
- owner = models.TextField(max_length=128, blank=True)
- current_owner = models.TextField(max_length=128, blank=True)
- group = models.TextField(max_length=128, blank=True)
- current_group = models.TextField(max_length=128, blank=True)
- perms = models.TextField(max_length=4, blank=True) # txt fixes typing issue
- current_perms = models.TextField(max_length=4, blank=True)
- status = models.TextField(max_length=3, blank=True) # on/off/(None)
- current_status = models.TextField(max_length=1, blank=True) # on/off/(None)
- to = models.TextField(max_length=256, blank=True)
- current_to = models.TextField(max_length=256, blank=True)
- version = models.TextField(max_length=128, blank=True)
- current_version = models.TextField(max_length=128, blank=True)
+ owner = models.CharField(max_length=255, blank=True)
+ current_owner = models.CharField(max_length=255, blank=True)
+ group = models.CharField(max_length=255, blank=True)
+ current_group = models.CharField(max_length=255, blank=True)
+ perms = models.CharField(max_length=4, blank=True)
+ current_perms = models.CharField(max_length=4, blank=True)
+ status = models.CharField(max_length=128, blank=True)
+ current_status = models.CharField(max_length=128, blank=True)
+ to = models.CharField(max_length=1024, blank=True)
+ current_to = models.CharField(max_length=1024, blank=True)
+ version = models.CharField(max_length=1024, blank=True)
+ current_version = models.CharField(max_length=1024, blank=True)
current_exists = models.BooleanField() # False means its missing. Default True
- current_diff = models.TextField(max_length=1280, blank=True)
+ current_diff = models.TextField(max_length=1024*1024, blank=True)
is_binary = models.BooleanField(default=False)
is_sensitive = models.BooleanField(default=False)
- unpruned = models.TextField(max_length=1280, blank=True)
+ unpruned = models.TextField(max_length=4096, blank=True, default='')
def _str_(self):
return "Reason"
+ def short_list(self):
+ rv = []
+ if self.current_owner or self.current_group or self.current_perms:
+ rv.append("File permissions")
+ if self.current_status:
+ rv.append("Incorrect status")
+ if self.current_to:
+ rv.append("Incorrect target")
+ if self.current_version or self.version == 'auto':
+ rv.append("Wrong version")
+ if not self.current_exists:
+ rv.append("Missing")
+ if self.current_diff or self.is_sensitive:
+ rv.append("Incorrect data")
+ if self.unpruned:
+ rv.append("Directory has extra files")
+ if len(rv) == 0:
+ rv.append("Exists")
+ return rv
+
@staticmethod
@transaction.commit_on_success
def prune_orphans():
@@ -316,6 +312,9 @@ class Entries(models.Model):
cursor.execute('delete from reports_entries where not exists (select rei.id from reports_entries_interactions rei where rei.entry_id = reports_entries.id)')
transaction.set_dirty()
+ class Meta:
+ unique_together = ("name", "kind")
+
class Entries_interactions(models.Model):
"""Define the relation between the reason, the interaction and the entry."""
@@ -343,10 +342,52 @@ class Performance(models.Model):
transaction.set_dirty()
-class InternalDatabaseVersion(models.Model):
- """Object that tell us to witch version is the database."""
- version = models.IntegerField()
- updated = models.DateTimeField(auto_now_add=True)
+class Group(models.Model):
+ """
+ Groups extracted from interactions
+
+ name - The group name
+
+ TODO - Most of this is for future use
+ TODO - set a default group
+ """
+
+ name = models.CharField(max_length=255, unique=True)
+ profile = models.BooleanField(default=False)
+ public = models.BooleanField(default=False)
+ category = models.CharField(max_length=1024, blank=True)
+ comment = models.TextField(blank=True)
+
+ groups = models.ManyToManyField("self", symmetrical=False)
+ bundles = models.ManyToManyField("Bundle")
+
+ def __unicode__(self):
+ return self.name
+
+
+class Bundle(models.Model):
+ """
+ Bundles extracted from interactions
+
+ name - The bundle name
+ """
+
+ name = models.CharField(max_length=255, unique=True)
+
+ def __unicode__(self):
+ return self.name
+
+
+class InteractionMetadata(models.Model):
+ """
+ InteractionMetadata
+
+ Hold extra data associated with the client and interaction
+ """
+
+ interaction = models.OneToOneField(Interaction, primary_key=True, related_name='metadata')
+ profile = models.ForeignKey(Group, related_name="+")
+ groups = models.ManyToManyField(Group)
+ bundles = models.ManyToManyField(Bundle)
+
- def __str__(self):
- return "version %d updated the %s" % (self.version, self.updated.isoformat())
diff --git a/src/lib/Bcfg2/Server/Reports/reports/sql/client.sql b/src/lib/Bcfg2/Server/Reports/reports/sql/client.sql
deleted file mode 100644
index 28e785450..000000000
--- a/src/lib/Bcfg2/Server/Reports/reports/sql/client.sql
+++ /dev/null
@@ -1,7 +0,0 @@
-CREATE VIEW reports_current_interactions AS SELECT x.client_id AS client_id, reports_interaction.id AS interaction_id FROM (select client_id, MAX(timestamp) as timer FROM reports_interaction GROUP BY client_id) x, reports_interaction WHERE reports_interaction.client_id = x.client_id AND reports_interaction.timestamp = x.timer;
-
-create index reports_interaction_client_id on reports_interaction (client_id);
-create index reports_client_current_interaction_id on reports_client (current_interaction_id);
-create index reports_performance_interaction_performance_id on reports_performance_interaction (performance_id);
-create index reports_interaction_timestamp on reports_interaction (timestamp);
-create index reports_performance_interation_interaction_id on reports_performance_interaction (interaction_id);
diff --git a/src/lib/Bcfg2/Server/Reports/reports/templates/base-timeview.html b/src/lib/Bcfg2/Server/Reports/reports/templates/base-timeview.html
index 842de36f0..9a5ef651c 100644
--- a/src/lib/Bcfg2/Server/Reports/reports/templates/base-timeview.html
+++ b/src/lib/Bcfg2/Server/Reports/reports/templates/base-timeview.html
@@ -20,6 +20,9 @@ document.write(getCalendarStyles());
{% if not timestamp %}Rendered at {% now "Y-m-d H:i" %} | {% else %}View as of {{ timestamp|date:"Y-m-d H:i" }} | {% endif %}{% spaceless %}
<a id='cal_link' name='cal_link' href='#' onclick='showCalendar(); return false;'
>[change]</a>
- <form method='post' action='{{ path }}' id='cal_form' name='cal_form'><input id='cal_date' name='cal_date' type='hidden' value=''/></form>
+ <form method='post' action='{{ path }}' id='cal_form' name='cal_form'>
+ <input id='cal_date' name='cal_date' type='hidden' value=''/>
+ <input name='op' type='hidden' value='timeview'/>
+ </form>
{% endspaceless %}
{% endblock %}
diff --git a/src/lib/Bcfg2/Server/Reports/reports/templates/base.html b/src/lib/Bcfg2/Server/Reports/reports/templates/base.html
index f541c0d2b..3fa482a19 100644
--- a/src/lib/Bcfg2/Server/Reports/reports/templates/base.html
+++ b/src/lib/Bcfg2/Server/Reports/reports/templates/base.html
@@ -62,6 +62,7 @@
<li>Entries Configured</li>
</ul>
<ul class='menu-level2'>
+ <li><a href="{% url reports_common_problems %}">Common problems</a></li>
<li><a href="{% url reports_item_list "bad" %}">Bad</a></li>
<li><a href="{% url reports_item_list "modified" %}">Modified</a></li>
<li><a href="{% url reports_item_list "extra" %}">Extra</a></li>
@@ -87,7 +88,7 @@
<div style='clear:both'></div>
</div><!-- document -->
<div id="footer">
- <span>Bcfg2 Version 1.2.2</span>
+ <span>Bcfg2 Version 1.2.3</span>
</div>
<div id="calendar_div" style='position:absolute; visibility:hidden; background-color:white; layer-background-color:white;'></div>
diff --git a/src/lib/Bcfg2/Server/Reports/reports/templates/clients/detail.html b/src/lib/Bcfg2/Server/Reports/reports/templates/clients/detail.html
index dd4295f21..9b86b609f 100644
--- a/src/lib/Bcfg2/Server/Reports/reports/templates/clients/detail.html
+++ b/src/lib/Bcfg2/Server/Reports/reports/templates/clients/detail.html
@@ -50,6 +50,9 @@ span.history_links a {
{% if interaction.server %}
<tr><td>Served by</td><td>{{interaction.server}}</td></tr>
{% endif %}
+ {% if interaction.metadata %}
+ <tr><td>Profile</td><td>{{interaction.metadata.profile}}</td></tr>
+ {% endif %}
{% if interaction.repo_rev_code %}
<tr><td>Revision</td><td>{{interaction.repo_rev_code}}</td></tr>
{% endif %}
@@ -60,58 +63,57 @@ span.history_links a {
{% endif %}
</table>
- {% if interaction.bad_entry_count %}
+ {% if interaction.metadata.groups.count %}
<div class='entry_list'>
- <div class='entry_list_head dirty-lineitem' onclick='javascript:toggleMe("bad_table");'>
- <h3>Bad Entries &#8212; {{ interaction.bad_entry_count }}</h3>
- <div class='entry_expand_tab' id='plusminus_bad_table'>[+]</div>
+ <div class='entry_list_head' onclick='javascript:toggleMe("groups_table");'>
+ <h3>Group membership</h3>
+ <div class='entry_expand_tab' id='plusminus_groups_table'>[+]</div>
</div>
- <table id='bad_table' class='entry_list'>
- {% for e in interaction.bad|sortwell %}
+ <table id='groups_table' class='entry_list' style='display: none'>
+ {% for group in interaction.metadata.groups.all %}
<tr class='{% cycle listview,listview_alt %}'>
- <td class='entry_list_type'>{{e.entry.kind}}:</td>
- <td><a href="{% url reports_item "bad",e.id %}">
- {{e.entry.name}}</a></td>
+ <td class='entry_list_type'>{{group}}</td>
</tr>
{% endfor %}
</table>
</div>
{% endif %}
- {% if interaction.modified_entry_count %}
+ {% if interaction.metadata.bundles.count %}
<div class='entry_list'>
- <div class='entry_list_head modified-lineitem' onclick='javascript:toggleMe("modified_table");'>
- <h3>Modified Entries &#8212; {{ interaction.modified_entry_count }}</h3>
- <div class='entry_expand_tab' id='plusminus_modified_table'>[+]</div>
+ <div class='entry_list_head' onclick='javascript:toggleMe("bundles_table");'>
+ <h3>Bundle membership</h3>
+ <div class='entry_expand_tab' id='plusminus_bundles_table'>[+]</div>
</div>
- <table id='modified_table' class='entry_list'>
- {% for e in interaction.modified|sortwell %}
+ <table id='bundles_table' class='entry_list' style='display: none'>
+ {% for bundle in interaction.metadata.bundles.all %}
<tr class='{% cycle listview,listview_alt %}'>
- <td class='entry_list_type'>{{e.entry.kind}}:</td>
- <td><a href="{% url reports_item "modified",e.id %}">
- {{e.entry.name}}</a></td>
+ <td class='entry_list_type'>{{bundle}}</td>
</tr>
{% endfor %}
</table>
</div>
{% endif %}
- {% if interaction.extra_entry_count %}
+ {% for type, ei_list in ei_lists %}
+ {% if ei_list %}
<div class='entry_list'>
- <div class='entry_list_head extra-lineitem' onclick='javascript:toggleMe("extra_table");'>
- <h3>Extra Entries &#8212; {{ interaction.extra_entry_count }}</h3>
- <div class='entry_expand_tab' id='plusminus_extra_table'>[+]</div>
+ <div class='entry_list_head {{type}}-lineitem' onclick='javascript:toggleMe("{{type}}_table");'>
+ <h3>{{ type|capfirst }} Entries &#8212; {{ ei_list|length }}</h3>
+ <div class='entry_expand_tab' id='plusminus_{{type}}_table'>[+]</div>
</div>
- <table id='extra_table' class='entry_list'>
- {% for e in interaction.extra|sortwell %}
+ <table id='{{type}}_table' class='entry_list'>
+ {% for ei in ei_list %}
<tr class='{% cycle listview,listview_alt %}'>
- <td class='entry_list_type'>{{e.entry.kind}}:</td>
- <td><a href="{% url reports_item "extra",e.id %}">{{e.entry.name}}</a></td>
+ <td class='entry_list_type'>{{ei.entry.kind}}</td>
+ <td><a href="{% url reports_item type ei.id %}">
+ {{ei.entry.name}}</a></td>
</tr>
- {% endfor %}
+ {% endfor %}
</table>
</div>
{% endif %}
+ {% endfor %}
{% if entry_list %}
<div class="entry_list recent_history_wrapper">
diff --git a/src/lib/Bcfg2/Server/Reports/reports/templates/clients/detailed-list.html b/src/lib/Bcfg2/Server/Reports/reports/templates/clients/detailed-list.html
index 84ac71d92..9be59e7d2 100644
--- a/src/lib/Bcfg2/Server/Reports/reports/templates/clients/detailed-list.html
+++ b/src/lib/Bcfg2/Server/Reports/reports/templates/clients/detailed-list.html
@@ -6,18 +6,18 @@
{% block content %}
<div class='client_list_box'>
-{% if entry_list %}
{% filter_navigator %}
+{% if entry_list %}
<table cellpadding="3">
<tr id='table_list_header' class='listview'>
- <td class='left_column'>Node</td>
- <td class='right_column' style='width:75px'>State</td>
- <td class='right_column_narrow'>Good</td>
- <td class='right_column_narrow'>Bad</td>
- <td class='right_column_narrow'>Modified</td>
- <td class='right_column_narrow'>Extra</td>
- <td class='right_column'>Last Run</td>
- <td class='right_column_wide'>Server</td>
+ <td class='left_column'>{% sort_link 'client' 'Node' %}</td>
+ <td class='right_column' style='width:75px'>{% sort_link 'state' 'State' %}</td>
+ <td class='right_column_narrow'>{% sort_link '-good' 'Good' %}</td>
+ <td class='right_column_narrow'>{% sort_link '-bad' 'Bad' %}</td>
+ <td class='right_column_narrow'>{% sort_link '-modified' 'Modified' %}</td>
+ <td class='right_column_narrow'>{% sort_link '-extra' 'Extra' %}</td>
+ <td class='right_column'>{% sort_link 'timestamp' 'Last Run' %}</td>
+ <td class='right_column_wide'>{% sort_link 'server' 'Server' %}</td>
</tr>
{% for entry in entry_list %}
<tr class='{% cycle listview,listview_alt %}'>
diff --git a/src/lib/Bcfg2/Server/Reports/reports/templates/clients/index.html b/src/lib/Bcfg2/Server/Reports/reports/templates/clients/index.html
index 134e237d6..45ba20b86 100644
--- a/src/lib/Bcfg2/Server/Reports/reports/templates/clients/index.html
+++ b/src/lib/Bcfg2/Server/Reports/reports/templates/clients/index.html
@@ -9,6 +9,7 @@
{% block pagebanner %}Clients - Grid View{% endblock %}
{% block content %}
+{% filter_navigator %}
{% if inter_list %}
<table class='grid-view' align='center'>
{% for inter in inter_list %}
diff --git a/src/lib/Bcfg2/Server/Reports/reports/templates/clients/manage.html b/src/lib/Bcfg2/Server/Reports/reports/templates/clients/manage.html
index 5725ae577..443ec8ccb 100644
--- a/src/lib/Bcfg2/Server/Reports/reports/templates/clients/manage.html
+++ b/src/lib/Bcfg2/Server/Reports/reports/templates/clients/manage.html
@@ -38,8 +38,8 @@
</tr>
{% endfor %}
</table>
- </div>
{% else %}
<p>No client records are available.</p>
{% endif %}
+ </div>
{% endblock %}
diff --git a/src/lib/Bcfg2/Server/Reports/reports/templates/config_items/common.html b/src/lib/Bcfg2/Server/Reports/reports/templates/config_items/common.html
new file mode 100644
index 000000000..d6ad303fc
--- /dev/null
+++ b/src/lib/Bcfg2/Server/Reports/reports/templates/config_items/common.html
@@ -0,0 +1,42 @@
+{% extends "base-timeview.html" %}
+{% load bcfg2_tags %}
+
+{% block title %}Bcfg2 - Common Problems{% endblock %}
+
+{% block extra_header_info %}
+{% endblock%}
+
+{% block pagebanner %}Common configuration problems{% endblock %}
+
+{% block content %}
+ <div id='threshold_box'>
+ <form method='post' action='{{ request.path }}'>
+ <span>Showing items with more than {{ threshold }} entries</span>
+ <input type='text' name='threshold' value='{{ threshold }}' maxlength='5' size='5' />
+ <input type='submit' value='Change' />
+ </form>
+ </div>
+ {% for type_name, type_list in lists %}
+ <div class='entry_list'>
+ <div class='entry_list_head element_list_head' onclick='javascript:toggleMe("table_{{ type_name }}");'>
+ <h3>{{ type_name|capfirst }} entries</h3>
+ <div class='entry_expand_tab' id='plusminus_table_{{ type_name }}'>[&ndash;]</div>
+ </div>
+ {% if type_list %}
+ <table id='table_{{ type_name }}' class='entry_list'>
+ <tr style='text-align: left'><th>Type</th><th>Name</th><th>Count</th><th>Reason</th></tr>
+ {% for entry, reason, interaction in type_list %}
+ <tr class='{% cycle listview,listview_alt %}'>
+ <td>{{ entry.kind }}</td>
+ <td><a href="{% url reports_entry eid=entry.pk %}">{{ entry.name }}</a></td>
+ <td>{{ interaction|length }}</td>
+ <td><a href="{% url reports_item type=type_name pk=interaction.0 %}">{{ reason.short_list|join:"," }}</a></td>
+ </tr>
+ {% endfor %}
+ </table>
+ {% else %}
+ <p>There are currently no inconsistent {{ type_name }} configuration entries.</p>
+ {% endif %}
+ </div>
+ {% endfor %}
+{% endblock %}
diff --git a/src/lib/Bcfg2/Server/Reports/reports/templates/config_items/entry_status.html b/src/lib/Bcfg2/Server/Reports/reports/templates/config_items/entry_status.html
new file mode 100644
index 000000000..5f7579eb9
--- /dev/null
+++ b/src/lib/Bcfg2/Server/Reports/reports/templates/config_items/entry_status.html
@@ -0,0 +1,30 @@
+{% extends "base-timeview.html" %}
+{% load bcfg2_tags %}
+
+{% block title %}Bcfg2 - Entry Status{% endblock %}
+
+{% block extra_header_info %}
+{% endblock%}
+
+{% block pagebanner %}{{ entry.kind }} entry {{ entry.name }} status{% endblock %}
+
+{% block content %}
+{% filter_navigator %}
+{% if item_data %}
+ <div class='entry_list'>
+ <table class='entry_list'>
+ <tr style='text-align: left' ><th>Name</th><th>Timestamp</th><th>State</th><th>Reason</th></tr>
+ {% for ei, inter, reason in item_data %}
+ <tr class='{% cycle listview,listview_alt %}'>
+ <td><a href='{% url Bcfg2.Server.Reports.reports.views.client_detail hostname=inter.client.name, pk=inter.id %}'>{{ inter.client.name }}</a></td>
+ <td style='white-space: nowrap'><a href='{% url Bcfg2.Server.Reports.reports.views.client_detail hostname=inter.client.name, pk=inter.id %}'>{{ inter.timestamp|date:"Y-m-d\&\n\b\s\p\;H:i"|safe }}</a></td>
+ <td>{{ ei.get_type_display }}</td>
+ <td style='white-space: nowrap'><a href="{% url reports_item type=ei.get_type_display pk=ei.pk %}">{{ reason.short_list|join:"," }}</a></td>
+ </tr>
+ {% endfor %}
+ </table>
+ </div>
+{% else %}
+ <p>There are currently no hosts with this configuration entry.</p>
+{% endif %}
+{% endblock %}
diff --git a/src/lib/Bcfg2/Server/Reports/reports/templates/config_items/listing.html b/src/lib/Bcfg2/Server/Reports/reports/templates/config_items/listing.html
index 9b1026a08..0a92e7fc0 100644
--- a/src/lib/Bcfg2/Server/Reports/reports/templates/config_items/listing.html
+++ b/src/lib/Bcfg2/Server/Reports/reports/templates/config_items/listing.html
@@ -9,19 +9,21 @@
{% block pagebanner %}{{mod_or_bad|capfirst}} Element Listing{% endblock %}
{% block content %}
-{% if item_list_dict %}
- {% for kind, entries in item_list_dict.items %}
-
+{% filter_navigator %}
+{% if item_list %}
+ {% for type_name, type_data in item_list %}
<div class='entry_list'>
- <div class='entry_list_head element_list_head' onclick='javascript:toggleMe("table_{{ kind }}");'>
- <h3>{{ kind }} &#8212; {{ entries|length }}</h3>
- <div class='entry_expand_tab' id='plusminus_table_{{ kind }}'>[&ndash;]</div>
+ <div class='entry_list_head element_list_head' onclick='javascript:toggleMe("table_{{ type_name }}");'>
+ <h3>{{ type_name }} &#8212; {{ type_data|length }}</h3>
+ <div class='entry_expand_tab' id='plusminus_table_{{ type_name }}'>[&ndash;]</div>
</div>
-
- <table id='table_{{ kind }}' class='entry_list'>
- {% for e in entries %}
+ <table id='table_{{ type_name }}' class='entry_list'>
+ <tr style='text-align: left' ><th>Name</th><th>Count</th><th>Reason</th></tr>
+ {% for entry, reason, eis in type_data %}
<tr class='{% cycle listview,listview_alt %}'>
- <td><a href="{% url reports_item type=mod_or_bad,pk=e.id %}">{{e.entry.name}}</a></td>
+ <td><a href="{% url reports_entry eid=entry.pk %}">{{entry.name}}</a></td>
+ <td>{{ eis|length }}</td>
+ <td><a href="{% url reports_item type=mod_or_bad,pk=eis.0 %}">{{ reason.short_list|join:"," }}</a></td>
</tr>
{% endfor %}
</table>
diff --git a/src/lib/Bcfg2/Server/Reports/reports/templates/widgets/filter_bar.html b/src/lib/Bcfg2/Server/Reports/reports/templates/widgets/filter_bar.html
index 6fbe585ab..759415507 100644
--- a/src/lib/Bcfg2/Server/Reports/reports/templates/widgets/filter_bar.html
+++ b/src/lib/Bcfg2/Server/Reports/reports/templates/widgets/filter_bar.html
@@ -1,13 +1,25 @@
{% spaceless %}
+<div class="filter_bar">
+<form name='filter_form'>
{% if filters %}
{% for filter, filter_url in filters %}
{% if forloop.first %}
- <div class="filter_bar">Active filters (click to remove):
+ Active filters (click to remove):
{% endif %}
<a href='{{ filter_url }}'>{{ filter|capfirst }}</a>{% if not forloop.last %}, {% endif %}
{% if forloop.last %}
- </div>
+ {% if groups %}|{% endif %}
{% endif %}
{% endfor %}
{% endif %}
+{% if groups %}
+<label for="id_group">Group filter:</label>
+<select id="id_group" name="group" onchange="javascript:url=document.forms['filter_form'].group.value; if(url) { location.href=url }">
+ {% for group, group_url, selected in groups %}
+ <option label="{{group}}" value="{{group_url}}" {% if selected %}selected {% endif %}/>
+ {% endfor %}
+</select>
+{% endif %}
+</form>
+</div>
{% endspaceless %}
diff --git a/src/lib/Bcfg2/Server/Reports/reports/templatetags/bcfg2_tags.py b/src/lib/Bcfg2/Server/Reports/reports/templatetags/bcfg2_tags.py
index ac63cda3e..894353bba 100644
--- a/src/lib/Bcfg2/Server/Reports/reports/templatetags/bcfg2_tags.py
+++ b/src/lib/Bcfg2/Server/Reports/reports/templatetags/bcfg2_tags.py
@@ -1,11 +1,17 @@
import sys
+from copy import copy
from django import template
+from django.conf import settings
from django.core.urlresolvers import resolve, reverse, \
Resolver404, NoReverseMatch
+from django.template.loader import get_template, \
+ get_template_from_string,TemplateDoesNotExist
from django.utils.encoding import smart_unicode, smart_str
+from django.utils.safestring import mark_safe
from datetime import datetime, timedelta
from Bcfg2.Server.Reports.utils import filter_list
+from Bcfg2.Server.Reports.reports.models import Group
register = template.Library()
@@ -115,13 +121,27 @@ def filter_navigator(context):
filters = []
for filter in filter_list:
+ if filter == 'group':
+ continue
if filter in kwargs:
myargs = kwargs.copy()
del myargs[filter]
filters.append((filter,
reverse(view, args=args, kwargs=myargs)))
filters.sort(lambda x, y: cmp(x[0], y[0]))
- return {'filters': filters}
+
+ myargs = kwargs.copy()
+ selected=True
+ if 'group' in myargs:
+ del myargs['group']
+ selected=False
+ groups = [('---', reverse(view, args=args, kwargs=myargs), selected)]
+ for group in Group.objects.values('name'):
+ myargs['group'] = group['name']
+ groups.append((group['name'], reverse(view, args=args, kwargs=myargs),
+ group['name'] == kwargs.get('group', '')))
+
+ return {'filters': filters, 'groups': groups}
except (Resolver404, NoReverseMatch, ValueError, KeyError):
pass
return dict()
@@ -242,19 +262,6 @@ def add_url_filter(parser, token):
return AddUrlFilter(filter_name, filter_value)
-@register.filter
-def sortwell(value):
- """
- Sorts a list(or evaluates queryset to list) of bad, extra, or modified items in the best
- way for presentation
- """
-
- configItems = list(value)
- configItems.sort(lambda x, y: cmp(x.entry.name, y.entry.name))
- configItems.sort(lambda x, y: cmp(x.entry.kind, y.entry.kind))
- return configItems
-
-
class MediaTag(template.Node):
def __init__(self, filter_value):
self.filter_value = filter_value
@@ -311,3 +318,98 @@ def determine_client_state(entry):
else:
thisdirty = "very-dirty-lineitem"
return thisdirty
+
+
+@register.tag(name='qs')
+def do_qs(parser, token):
+ """
+ qs tag
+
+ accepts a name value pair and inserts or replaces it in the query string
+ """
+ try:
+ tag, name, value = token.split_contents()
+ except ValueError:
+ raise TemplateSyntaxError, "%r tag requires exactly two arguments" \
+ % token.contents.split()[0]
+ return QsNode(name, value)
+
+class QsNode(template.Node):
+ def __init__(self, name, value):
+ self.name = template.Variable(name)
+ self.value = template.Variable(value)
+
+ def render(self, context):
+ try:
+ name = self.name.resolve(context)
+ value = self.value.resolve(context)
+ request = context['request']
+ qs = copy(request.GET)
+ qs[name] = value
+ return "?%s" % qs.urlencode()
+ except template.VariableDoesNotExist:
+ return ''
+ except KeyError:
+ if settings.TEMPLATE_DEBUG:
+ raise Exception, "'qs' tag requires context['request']"
+ return ''
+ except:
+ return ''
+
+
+@register.tag
+def sort_link(parser, token):
+ '''
+ Create a sort anchor tag. Reverse it if active.
+
+ {% sort_link sort_key text %}
+ '''
+ try:
+ tag, sort_key, text = token.split_contents()
+ except ValueError:
+ raise TemplateSyntaxError("%r tag requires exactly two arguments" \
+ % token.split_contents()[0])
+
+ return SortLinkNode(sort_key, text)
+
+class SortLinkNode(template.Node):
+ __TMPL__ = "{% load bcfg2_tags %}<a href='{% qs 'sort' key %}'>{{ text }}</a>"
+
+ def __init__(self, sort_key, text):
+ self.sort_key = template.Variable(sort_key)
+ self.text = template.Variable(text)
+
+ def render(self, context):
+ try:
+ try:
+ sort = context['request'].GET['sort']
+ except KeyError:
+ #fall back on this
+ sort = context.get('sort', '')
+ sort_key = self.sort_key.resolve(context)
+ text = self.text.resolve(context)
+
+ # add arrows
+ try:
+ sort_base = sort_key.lstrip('-')
+ if sort[0] == '-' and sort[1:] == sort_base:
+ text = text + '&#x25BC;'
+ sort_key = sort_base
+ elif sort_base == sort:
+ text = text + '&#x25B2;'
+ sort_key = '-' + sort_base
+ except IndexError:
+ pass
+
+ context.push()
+ context['key'] = sort_key
+ context['text'] = mark_safe(text)
+ output = get_template_from_string(self.__TMPL__).render(context)
+ context.pop()
+ return output
+ except:
+ if settings.DEBUG:
+ raise
+ raise
+ return ''
+
diff --git a/src/lib/Bcfg2/Server/Reports/reports/templatetags/syntax_coloring.py b/src/lib/Bcfg2/Server/Reports/reports/templatetags/syntax_coloring.py
index 36d4cf693..0d4c6501d 100644
--- a/src/lib/Bcfg2/Server/Reports/reports/templatetags/syntax_coloring.py
+++ b/src/lib/Bcfg2/Server/Reports/reports/templatetags/syntax_coloring.py
@@ -4,6 +4,8 @@ from django.utils.encoding import smart_unicode
from django.utils.html import conditional_escape
from django.utils.safestring import mark_safe
+from Bcfg2.Bcfg2Py3k import u_str
+
register = template.Library()
try:
@@ -16,14 +18,6 @@ except:
colorize = False
-# py3k compatibility
-def u_str(string):
- if sys.hexversion >= 0x03000000:
- return string
- else:
- return unicode(string)
-
-
@register.filter
def syntaxhilight(value, arg="diff", autoescape=None):
"""
diff --git a/src/lib/Bcfg2/Server/Reports/reports/urls.py b/src/lib/Bcfg2/Server/Reports/reports/urls.py
index 434ce07b7..1cfe725c2 100644
--- a/src/lib/Bcfg2/Server/Reports/reports/urls.py
+++ b/src/lib/Bcfg2/Server/Reports/reports/urls.py
@@ -17,20 +17,23 @@ urlpatterns = patterns('Bcfg2.Server.Reports.reports',
url(r'^client/(?P<hostname>[^/]+)/(?P<pk>\d+)/?$', 'views.client_detail', name='reports_client_detail_pk'),
url(r'^client/(?P<hostname>[^/]+)/?$', 'views.client_detail', name='reports_client_detail'),
url(r'^elements/(?P<type>\w+)/(?P<pk>\d+)/?$', 'views.config_item', name='reports_item'),
+ url(r'^entry/(?P<eid>\w+)/?$', 'views.entry_status', name='reports_entry'),
)
urlpatterns += patterns('Bcfg2.Server.Reports.reports',
*timeviewUrls(
- (r'^grid/?$', 'views.client_index', None, 'reports_grid_view'),
(r'^summary/?$', 'views.display_summary', None, 'reports_summary'),
(r'^timing/?$', 'views.display_timing', None, 'reports_timing'),
- (r'^elements/(?P<type>\w+)/?$', 'views.config_item_list', None, 'reports_item_list'),
+ (r'^common/(?P<threshold>\d+)/?$', 'views.common_problems', None, 'reports_common_problems'),
+ (r'^common/?$', 'views.common_problems', None, 'reports_common_problems'),
))
urlpatterns += patterns('Bcfg2.Server.Reports.reports',
*filteredUrls(*timeviewUrls(
+ (r'^grid/?$', 'views.client_index', None, 'reports_grid_view'),
(r'^detailed/?$',
- 'views.client_detailed_list', None, 'reports_detailed_list')
+ 'views.client_detailed_list', None, 'reports_detailed_list'),
+ (r'^elements/(?P<type>\w+)/?$', 'views.config_item_list', None, 'reports_item_list'),
)))
urlpatterns += patterns('Bcfg2.Server.Reports.reports',
diff --git a/src/lib/Bcfg2/Server/Reports/reports/views.py b/src/lib/Bcfg2/Server/Reports/reports/views.py
index ccd71a60e..e4c38363f 100644
--- a/src/lib/Bcfg2/Server/Reports/reports/views.py
+++ b/src/lib/Bcfg2/Server/Reports/reports/views.py
@@ -13,16 +13,41 @@ from django.http import \
from django.shortcuts import render_to_response, get_object_or_404
from django.core.urlresolvers import \
resolve, reverse, Resolver404, NoReverseMatch
-from django.db import connection
+from django.db import connection, DatabaseError
+from django.db.models import Q
from Bcfg2.Server.Reports.reports.models import *
+__SORT_FIELDS__ = ( 'client', 'state', 'good', 'bad', 'modified', 'extra', \
+ 'timestamp', 'server' )
+
class PaginationError(Exception):
"""This error is raised when pagination cannot be completed."""
pass
+def _in_bulk(model, ids):
+ """
+ Short cut to fetch in bulk and trap database errors. sqlite will raise
+ a "too many SQL variables" exception if this list is too long. Try using
+ django and fetch manually if an error occurs
+
+ returns a dict of this form { id: <model instance> }
+ """
+
+ try:
+ return model.objects.in_bulk(ids)
+ except DatabaseError:
+ pass
+
+ # if objects.in_bulk fails so will objects.filter(pk__in=ids)
+ bulk_dict = {}
+ [bulk_dict.__setitem__(i.id, i) \
+ for i in model.objects.all() if i.id in ids]
+ return bulk_dict
+
+
def server_error(request):
"""
500 error handler.
@@ -44,7 +69,7 @@ def timeview(fn):
"""
def _handle_timeview(request, **kwargs):
"""Send any posts back."""
- if request.method == 'POST':
+ if request.method == 'POST' and request.POST.get('op', '') == 'timeview':
cal_date = request.POST['cal_date']
try:
fmt = "%Y/%m/%d"
@@ -84,6 +109,30 @@ def timeview(fn):
return _handle_timeview
+def _handle_filters(query, **kwargs):
+ """
+ Applies standard filters to a query object
+
+ Returns an updated query object
+
+ query - query object to filter
+
+ server -- Filter interactions by server
+ state -- Filter interactions by state
+ group -- Filter interactions by group
+
+ """
+ if 'state' in kwargs and kwargs['state']:
+ query = query.filter(state__exact=kwargs['state'])
+ if 'server' in kwargs and kwargs['server']:
+ query = query.filter(server__exact=kwargs['server'])
+
+ if 'group' in kwargs and kwargs['group']:
+ group = get_object_or_404(Group, name=kwargs['group'])
+ query = query.filter(metadata__groups__id=group.pk)
+ return query
+
+
def config_item(request, pk, type="bad"):
"""
Display a single entry.
@@ -121,47 +170,138 @@ def config_item(request, pk, type="bad"):
@timeview
-def config_item_list(request, type, timestamp=None):
+def config_item_list(request, type, timestamp=None, **kwargs):
"""Render a listing of affected elements"""
mod_or_bad = type.lower()
type = convert_entry_type_to_id(type)
if type < 0:
raise Http404
- current_clients = Interaction.objects.get_interaction_per_client_ids(timestamp)
- item_list_dict = {}
- seen = dict()
- for x in Entries_interactions.objects.filter(interaction__in=current_clients,
- type=type).select_related():
- if (x.entry, x.reason) in seen:
- continue
- seen[(x.entry, x.reason)] = 1
- if item_list_dict.get(x.entry.kind, None):
- item_list_dict[x.entry.kind].append(x)
- else:
- item_list_dict[x.entry.kind] = [x]
+ current_clients = Interaction.objects.interaction_per_client(timestamp)
+ current_clients = [q['id'] for q in _handle_filters(current_clients, **kwargs).values('id')]
+
+ ldata = list(Entries_interactions.objects.filter(
+ interaction__in=current_clients, type=type).values())
+ entry_ids = set([x['entry_id'] for x in ldata])
+ reason_ids = set([x['reason_id'] for x in ldata])
- for kind in item_list_dict:
- item_list_dict[kind].sort(lambda a, b: cmp(a.entry.name, b.entry.name))
+ entries = _in_bulk(Entries, entry_ids)
+ reasons = _in_bulk(Reason, reason_ids)
+
+ kind_list = {}
+ [kind_list.__setitem__(kind, {}) for kind in set([e.kind for e in entries.values()])]
+ for x in ldata:
+ kind = entries[x['entry_id']].kind
+ data_key = (x['entry_id'], x['reason_id'])
+ try:
+ kind_list[kind][data_key].append(x['id'])
+ except KeyError:
+ kind_list[kind][data_key] = [x['id']]
+
+ lists = []
+ for kind in kind_list.keys():
+ lists.append((kind, [(entries[e[0][0]], reasons[e[0][1]], e[1])
+ for e in sorted(kind_list[kind].iteritems(), key=lambda x: entries[x[0][0]].name)]))
return render_to_response('config_items/listing.html',
- {'item_list_dict': item_list_dict,
+ {'item_list': lists,
'mod_or_bad': mod_or_bad,
'timestamp': timestamp},
context_instance=RequestContext(request))
@timeview
-def client_index(request, timestamp=None):
+def entry_status(request, eid, timestamp=None, **kwargs):
+ """Render a listing of affected elements"""
+ entry = get_object_or_404(Entries, pk=eid)
+
+ current_clients = Interaction.objects.interaction_per_client(timestamp)
+ inters = {}
+ [inters.__setitem__(i.id, i) \
+ for i in _handle_filters(current_clients, **kwargs).select_related('client')]
+
+ eis = Entries_interactions.objects.filter(
+ interaction__in=inters.keys(), entry=entry)
+
+ reasons = _in_bulk(Reason, set([x.reason_id for x in eis]))
+
+ item_data = []
+ for ei in eis:
+ item_data.append((ei, inters[ei.interaction_id], reasons[ei.reason_id]))
+
+ return render_to_response('config_items/entry_status.html',
+ {'entry': entry,
+ 'item_data': item_data,
+ 'timestamp': timestamp},
+ context_instance=RequestContext(request))
+
+
+@timeview
+def common_problems(request, timestamp=None, threshold=None):
+ """Mine config entries"""
+
+ if request.method == 'POST':
+ try:
+ threshold = int(request.POST['threshold'])
+ view, args, kw = resolve(request.META['PATH_INFO'])
+ kw['threshold'] = threshold
+ return HttpResponseRedirect(reverse(view,
+ args=args,
+ kwargs=kw))
+ except:
+ pass
+
+ try:
+ threshold = int(threshold)
+ except:
+ threshold = 10
+
+ c_intr = Interaction.objects.get_interaction_per_client_ids(timestamp)
+ data_list = {}
+ [data_list.__setitem__(t_id, {}) \
+ for t_id, t_label in TYPE_CHOICES if t_id != TYPE_GOOD]
+ ldata = list(Entries_interactions.objects.filter(
+ interaction__in=c_intr).exclude(type=TYPE_GOOD).values())
+
+ entry_ids = set([x['entry_id'] for x in ldata])
+ reason_ids = set([x['reason_id'] for x in ldata])
+ for x in ldata:
+ type = x['type']
+ data_key = (x['entry_id'], x['reason_id'])
+ try:
+ data_list[type][data_key].append(x['id'])
+ except KeyError:
+ data_list[type][data_key] = [x['id']]
+
+ entries = _in_bulk(Entries, entry_ids)
+ reasons = _in_bulk(Reason, reason_ids)
+
+ lists = []
+ for type, type_name in TYPE_CHOICES:
+ if type == TYPE_GOOD:
+ continue
+ lists.append([type_name.lower(), [(entries[e[0][0]], reasons[e[0][1]], e[1])
+ for e in sorted(data_list[type].items(), key=lambda x: len(x[1]), reverse=True)
+ if len(e[1]) > threshold]])
+
+ return render_to_response('config_items/common.html',
+ {'lists': lists,
+ 'timestamp': timestamp,
+ 'threshold': threshold},
+ context_instance=RequestContext(request))
+
+
+@timeview
+def client_index(request, timestamp=None, **kwargs):
"""
Render a grid view of active clients.
Keyword parameters:
- timestamp -- datetime objectto render from
+ timestamp -- datetime object to render from
"""
- list = Interaction.objects.interaction_per_client(timestamp).select_related()\
- .order_by("client__name").all()
+ list = _handle_filters(Interaction.objects.interaction_per_client(timestamp), **kwargs).\
+ select_related().order_by("client__name").all()
return render_to_response('clients/index.html',
{'inter_list': list,
@@ -177,8 +317,29 @@ def client_detailed_list(request, timestamp=None, **kwargs):
"""
+ try:
+ sort = request.GET['sort']
+ if sort[0] == '-':
+ sort_key = sort[1:]
+ else:
+ sort_key = sort
+ if not sort_key in __SORT_FIELDS__:
+ raise ValueError
+
+ if sort_key == "client":
+ kwargs['orderby'] = "%s__name" % sort
+ elif sort_key == "good":
+ kwargs['orderby'] = "%scount" % sort
+ elif sort_key in ["bad", "modified", "extra"]:
+ kwargs['orderby'] = "%s_entries" % sort
+ else:
+ kwargs['orderby'] = sort
+ kwargs['sort'] = sort
+ except (ValueError, KeyError):
+ kwargs['orderby'] = "client__name"
+ kwargs['sort'] = "client"
+
kwargs['interaction_base'] = Interaction.objects.interaction_per_client(timestamp).select_related()
- kwargs['orderby'] = "client__name"
kwargs['page_limit'] = 0
return render_history_view(request, 'clients/detailed-list.html', **kwargs)
@@ -187,13 +348,25 @@ def client_detail(request, hostname=None, pk=None):
context = dict()
client = get_object_or_404(Client, name=hostname)
if(pk == None):
- context['interaction'] = client.current_interaction
- return render_history_view(request, 'clients/detail.html', page_limit=5,
- client=client, context=context)
+ inter = client.current_interaction
+ maxdate = None
else:
- context['interaction'] = client.interactions.get(pk=pk)
- return render_history_view(request, 'clients/detail.html', page_limit=5,
- client=client, maxdate=context['interaction'].timestamp, context=context)
+ inter = client.interactions.get(pk=pk)
+ maxdate = inter.timestamp
+
+ ei = Entries_interactions.objects.filter(interaction=inter).select_related('entry').order_by('entry__kind', 'entry__name')
+ #ei = Entries_interactions.objects.filter(interaction=inter).select_related('entry')
+ #ei = sorted(Entries_interactions.objects.filter(interaction=inter).select_related('entry'),
+ # key=lambda x: (x.entry.kind, x.entry.name))
+ context['ei_lists'] = (
+ ('bad', [x for x in ei if x.type == TYPE_BAD]),
+ ('modified', [x for x in ei if x.type == TYPE_MODIFIED]),
+ ('extra', [x for x in ei if x.type == TYPE_EXTRA])
+ )
+
+ context['interaction']=inter
+ return render_history_view(request, 'clients/detail.html', page_limit=5,
+ client=client, maxdate=maxdate, context=context)
def client_manage(request):
@@ -230,9 +403,9 @@ def display_summary(request, timestamp=None):
"""
Display a summary of the bcfg2 world
"""
- query = Interaction.objects.interaction_per_client(timestamp).select_related()
- node_count = query.count()
- recent_data = query.all()
+ recent_data = Interaction.objects.interaction_per_client(timestamp) \
+ .select_related().all()
+ node_count = len(recent_data)
if not timestamp:
timestamp = datetime.now()
@@ -240,18 +413,11 @@ def display_summary(request, timestamp=None):
bad=[],
modified=[],
extra=[],
- stale=[],
- pings=[])
+ stale=[])
for node in recent_data:
if timestamp - node.timestamp > timedelta(hours=24):
collected_data['stale'].append(node)
# If stale check for uptime
- try:
- if node.client.pings.latest().status == 'N':
- collected_data['pings'].append(node)
- except Ping.DoesNotExist:
- collected_data['pings'].append(node)
- continue
if node.bad_entry_count() > 0:
collected_data['bad'].append(node)
else:
@@ -281,9 +447,6 @@ def display_summary(request, timestamp=None):
if len(collected_data['stale']) > 0:
summary_data.append(get_dict('stale',
'nodes did not run within the last 24 hours.'))
- if len(collected_data['pings']) > 0:
- summary_data.append(get_dict('pings',
- 'are down.'))
return render_to_response('displays/summary.html',
{'summary_data': summary_data, 'node_count': node_count,
@@ -299,7 +462,11 @@ def display_timing(request, timestamp=None):
for inter in inters]
for metric in Performance.objects.filter(interaction__in=list(mdict.keys())).all():
for i in metric.interaction.all():
- mdict[i][metric.metric] = metric.value
+ try:
+ mdict[i][metric.metric] = metric.value
+ except KeyError:
+ #In the unlikely event two interactions share a metric, ignore it
+ pass
return render_to_response('displays/timing.html',
{'metrics': list(mdict.values()),
'timestamp': timestamp},
@@ -324,6 +491,7 @@ def render_history_view(request, template='clients/history.html', **kwargs):
not found
server -- Filter interactions by server
state -- Filter interactions by state
+ group -- Filter interactions by group
entry_max -- Most recent interaction to display
orderby -- Sort results using this field
@@ -345,15 +513,15 @@ def render_history_view(request, template='clients/history.html', **kwargs):
# Either filter by client or limit by clients
iquery = kwargs.get('interaction_base', Interaction.objects)
if client:
- iquery = iquery.filter(client__exact=client).select_related()
+ iquery = iquery.filter(client__exact=client)
+ iquery = iquery.select_related()
if 'orderby' in kwargs and kwargs['orderby']:
iquery = iquery.order_by(kwargs['orderby'])
+ if 'sort' in kwargs:
+ context['sort'] = kwargs['sort']
- if 'state' in kwargs and kwargs['state']:
- iquery = iquery.filter(state__exact=kwargs['state'])
- if 'server' in kwargs and kwargs['server']:
- iquery = iquery.filter(server__exact=kwargs['server'])
+ iquery = _handle_filters(iquery, **kwargs)
if entry_max:
iquery = iquery.filter(timestamp__lte=entry_max)
diff --git a/src/lib/Bcfg2/Server/Reports/settings.py b/src/lib/Bcfg2/Server/Reports/settings.py
deleted file mode 100644
index 4d567f1a2..000000000
--- a/src/lib/Bcfg2/Server/Reports/settings.py
+++ /dev/null
@@ -1,161 +0,0 @@
-import django
-import sys
-
-# Compatibility import
-from Bcfg2.Bcfg2Py3k import ConfigParser
-# Django settings for bcfg2 reports project.
-c = ConfigParser.ConfigParser()
-if len(c.read(['/etc/bcfg2.conf', '/etc/bcfg2-web.conf'])) == 0:
- raise ImportError("Please check that bcfg2.conf or bcfg2-web.conf exists "
- "and is readable by your web server.")
-
-try:
- DEBUG = c.getboolean('statistics', 'web_debug')
-except:
- DEBUG = False
-
-if DEBUG:
- print("Warning: Setting web_debug to True causes extraordinary memory "
- "leaks. Only use this setting if you know what you're doing.")
-
-TEMPLATE_DEBUG = DEBUG
-
-ADMINS = (
- ('Root', 'root'),
-)
-
-MANAGERS = ADMINS
-try:
- db_engine = c.get('statistics', 'database_engine')
-except ConfigParser.NoSectionError:
- e = sys.exc_info()[1]
- raise ImportError("Failed to determine database engine: %s" % e)
-db_name = ''
-if c.has_option('statistics', 'database_name'):
- db_name = c.get('statistics', 'database_name')
-if db_engine == 'sqlite3' and db_name == '':
- db_name = "%s/etc/brpt.sqlite" % c.get('server', 'repository')
-
-DATABASES = {
- 'default': {
- 'ENGINE': "django.db.backends.%s" % db_engine,
- 'NAME': db_name
- }
-}
-
-if db_engine != 'sqlite3':
- DATABASES['default']['USER'] = c.get('statistics', 'database_user')
- DATABASES['default']['PASSWORD'] = c.get('statistics', 'database_password')
- DATABASES['default']['HOST'] = c.get('statistics', 'database_host')
- try:
- DATABASES['default']['PORT'] = c.get('statistics', 'database_port')
- except: # An empty string tells Django to use the default port.
- DATABASES['default']['PORT'] = ''
-
-if django.VERSION[0] == 1 and django.VERSION[1] < 2:
- DATABASE_ENGINE = db_engine
- DATABASE_NAME = DATABASES['default']['NAME']
- if DATABASE_ENGINE != 'sqlite3':
- DATABASE_USER = DATABASES['default']['USER']
- DATABASE_PASSWORD = DATABASES['default']['PASSWORD']
- DATABASE_HOST = DATABASES['default']['HOST']
- DATABASE_PORT = DATABASES['default']['PORT']
-
-
-# Local time zone for this installation. All choices can be found here:
-# http://docs.djangoproject.com/en/dev/ref/settings/#time-zone
-if django.VERSION[0] == 1 and django.VERSION[1] > 2:
- try:
- TIME_ZONE = c.get('statistics', 'time_zone')
- except:
- TIME_ZONE = None
-
-# Language code for this installation. All choices can be found here:
-# http://www.w3.org/TR/REC-html40/struct/dirlang.html#langcodes
-# http://blogs.law.harvard.edu/tech/stories/storyReader$15
-LANGUAGE_CODE = 'en-us'
-
-SITE_ID = 1
-
-# Absolute path to the directory that holds media.
-# Example: "/home/media/media.lawrence.com/"
-MEDIA_ROOT = ''
-
-# URL that handles the media served from MEDIA_ROOT.
-# Example: "http://media.lawrence.com"
-MEDIA_URL = '/site_media'
-if c.has_option('statistics', 'web_prefix'):
- MEDIA_URL = c.get('statistics', 'web_prefix').rstrip('/') + MEDIA_URL
-
-# URL prefix for admin media -- CSS, JavaScript and images. Make sure to use a
-# trailing slash.
-# Examples: "http://foo.com/media/", "/media/".
-ADMIN_MEDIA_PREFIX = '/media/'
-
-# Make this unique, and don't share it with anybody.
-SECRET_KEY = 'eb5+y%oy-qx*2+62vv=gtnnxg1yig_odu0se5$h0hh#pc*lmo7'
-
-# List of callables that know how to import templates from various sources.
-TEMPLATE_LOADERS = (
- 'django.template.loaders.filesystem.load_template_source',
- 'django.template.loaders.app_directories.load_template_source',
-)
-
-MIDDLEWARE_CLASSES = (
- 'django.middleware.common.CommonMiddleware',
- 'django.contrib.sessions.middleware.SessionMiddleware',
- 'django.contrib.auth.middleware.AuthenticationMiddleware',
- 'django.middleware.doc.XViewMiddleware',
-)
-
-ROOT_URLCONF = 'Bcfg2.Server.Reports.urls'
-
-# Authentication Settings
-# Use NIS authentication backend defined in backends.py
-AUTHENTICATION_BACKENDS = ('django.contrib.auth.backends.ModelBackend',
- 'Bcfg2.Server.Reports.backends.NISBackend')
-# The NIS group authorized to login to BCFG2's reportinvg system
-AUTHORIZED_GROUP = ''
-#create login url area:
-try:
- import django.contrib.auth
-except ImportError:
- raise ImportError('Import of Django module failed. Is Django installed?')
-django.contrib.auth.LOGIN_URL = '/login'
-
-SESSION_EXPIRE_AT_BROWSER_CLOSE = True
-
-
-
-TEMPLATE_DIRS = (
- # Put strings here, like "/home/html/django_templates".
- # Always use forward slashes, even on Windows.
- '/usr/share/python-support/python-django/django/contrib/admin/templates/',
- 'Bcfg2.Server.Reports.reports'
-)
-
-if django.VERSION[0] == 1 and django.VERSION[1] < 2:
- TEMPLATE_CONTEXT_PROCESSORS = (
- 'django.core.context_processors.auth',
- 'django.core.context_processors.debug',
- 'django.core.context_processors.i18n',
- 'django.core.context_processors.media',
- 'django.core.context_processors.request'
- )
-else:
- TEMPLATE_CONTEXT_PROCESSORS = (
- 'django.contrib.auth.context_processors.auth',
- 'django.core.context_processors.debug',
- 'django.core.context_processors.i18n',
- 'django.core.context_processors.media',
- 'django.core.context_processors.request'
- )
-
-INSTALLED_APPS = (
- 'django.contrib.auth',
- 'django.contrib.contenttypes',
- 'django.contrib.sessions',
- 'django.contrib.sites',
- 'django.contrib.admin',
- 'Bcfg2.Server.Reports.reports'
-)
diff --git a/src/lib/Bcfg2/Server/Reports/updatefix.py b/src/lib/Bcfg2/Server/Reports/updatefix.py
deleted file mode 100644
index 192b94b61..000000000
--- a/src/lib/Bcfg2/Server/Reports/updatefix.py
+++ /dev/null
@@ -1,281 +0,0 @@
-import Bcfg2.Server.Reports.settings
-
-from django.db import connection, DatabaseError
-import django.core.management
-import logging
-import sys
-import traceback
-from Bcfg2.Server.Reports.reports.models import InternalDatabaseVersion, \
- TYPE_BAD, TYPE_MODIFIED, TYPE_EXTRA
-logger = logging.getLogger('Bcfg2.Server.Reports.UpdateFix')
-
-
-# all update function should go here
-def _merge_database_table_entries():
- cursor = connection.cursor()
- insert_cursor = connection.cursor()
- find_cursor = connection.cursor()
- cursor.execute("""
- Select name, kind from reports_bad
- union
- select name, kind from reports_modified
- union
- select name, kind from reports_extra
- """)
- # this fetch could be better done
- entries_map = {}
- for row in cursor.fetchall():
- insert_cursor.execute("insert into reports_entries (name, kind) \
- values (%s, %s)", (row[0], row[1]))
- entries_map[(row[0], row[1])] = insert_cursor.lastrowid
-
- cursor.execute("""
- Select name, kind, reason_id, interaction_id, 1 from reports_bad
- inner join reports_bad_interactions on reports_bad.id=reports_bad_interactions.bad_id
- union
- Select name, kind, reason_id, interaction_id, 2 from reports_modified
- inner join reports_modified_interactions on reports_modified.id=reports_modified_interactions.modified_id
- union
- Select name, kind, reason_id, interaction_id, 3 from reports_extra
- inner join reports_extra_interactions on reports_extra.id=reports_extra_interactions.extra_id
- """)
- for row in cursor.fetchall():
- key = (row[0], row[1])
- if entries_map.get(key, None):
- entry_id = entries_map[key]
- else:
- find_cursor.execute("Select id from reports_entries where name=%s and kind=%s", key)
- rowe = find_cursor.fetchone()
- entry_id = rowe[0]
- insert_cursor.execute("insert into reports_entries_interactions \
- (entry_id, interaction_id, reason_id, type) values (%s, %s, %s, %s)", (entry_id, row[3], row[2], row[4]))
-
-
-def _interactions_constraint_or_idx():
- """sqlite doesn't support alter tables.. or constraints"""
- cursor = connection.cursor()
- try:
- cursor.execute('alter table reports_interaction add constraint reports_interaction_20100601 unique (client_id,timestamp)')
- except:
- cursor.execute('create unique index reports_interaction_20100601 on reports_interaction (client_id,timestamp)')
-
-
-def _remove_table_column(tbl, col):
- """sqlite doesn't support deleting a column via alter table"""
- cursor = connection.cursor()
- db_engine = Bcfg2.Server.Reports.settings.DATABASES['default']['ENGINE']
- if db_engine == 'django.db.backends.mysql':
- db_name = Bcfg2.Server.Reports.settings.DATABASES['default']['NAME']
- column_exists = cursor.execute('select * from information_schema.columns '
- 'where table_schema="%s" and '
- 'table_name="%s" '
- 'and column_name="%s";' % (db_name, tbl, col))
- if not column_exists:
- # column doesn't exist
- return
- # if column exists from previous database, remove it
- cursor.execute('alter table %s '
- 'drop column %s;' % (tbl, col))
- elif db_engine == 'django.db.backends.sqlite3':
- # check if table exists
- try:
- cursor.execute('select * from sqlite_master where name=%s and type="table";' % tbl)
- except DatabaseError:
- # table doesn't exist
- return
-
- # sqlite wants us to create a new table containing the columns we want
- # and copy into it http://www.sqlite.org/faq.html#q11
- tmptbl_name = "t_backup"
- _tmptbl_create = \
-"""create temporary table "%s" (
- "id" integer NOT NULL PRIMARY KEY,
- "client_id" integer NOT NULL REFERENCES "reports_client" ("id"),
- "timestamp" datetime NOT NULL,
- "state" varchar(32) NOT NULL,
- "repo_rev_code" varchar(64) NOT NULL,
- "goodcount" integer NOT NULL,
- "totalcount" integer NOT NULL,
- "server" varchar(256) NOT NULL,
- "bad_entries" integer NOT NULL,
- "modified_entries" integer NOT NULL,
- "extra_entries" integer NOT NULL,
- UNIQUE ("client_id", "timestamp")
-);""" % tmptbl_name
- _newtbl_create = \
-"""create table "%s" (
- "id" integer NOT NULL PRIMARY KEY,
- "client_id" integer NOT NULL REFERENCES "reports_client" ("id"),
- "timestamp" datetime NOT NULL,
- "state" varchar(32) NOT NULL,
- "repo_rev_code" varchar(64) NOT NULL,
- "goodcount" integer NOT NULL,
- "totalcount" integer NOT NULL,
- "server" varchar(256) NOT NULL,
- "bad_entries" integer NOT NULL,
- "modified_entries" integer NOT NULL,
- "extra_entries" integer NOT NULL,
- UNIQUE ("client_id", "timestamp")
-);""" % tbl
- new_cols = "id,\
- client_id,\
- timestamp,\
- state,\
- repo_rev_code,\
- goodcount,\
- totalcount,\
- server,\
- bad_entries,\
- modified_entries,\
- extra_entries"
-
- delete_col = [_tmptbl_create,
- "insert into %s select %s from %s;" % (tmptbl_name, new_cols, tbl),
- "drop table %s" % tbl,
- _newtbl_create,
- "create index reports_interaction_client_id on %s (client_id);" % tbl,
- "insert into %s select %s from %s;" % (tbl, new_cols,
- tmptbl_name),
- "drop table %s;" % tmptbl_name]
-
- for sql in delete_col:
- cursor.execute(sql)
-
-
-def _populate_interaction_entry_counts():
- '''Populate up the type totals for the interaction table'''
- cursor = connection.cursor()
- count_field = {TYPE_BAD: 'bad_entries',
- TYPE_MODIFIED: 'modified_entries',
- TYPE_EXTRA: 'extra_entries'}
-
- for type in list(count_field.keys()):
- cursor.execute("select count(type), interaction_id " +
- "from reports_entries_interactions where type = %s group by interaction_id" % type)
- updates = []
- for row in cursor.fetchall():
- updates.append(row)
- try:
- cursor.executemany("update reports_interaction set " + count_field[type] + "=%s where id = %s", updates)
- except Exception:
- e = sys.exc_info()[1]
- print(e)
- cursor.close()
-
-
-# be sure to test your upgrade query before reflecting the change in the models
-# the list of function and sql command to do should go here
-_fixes = [_merge_database_table_entries,
- # this will remove unused tables
- "drop table reports_bad;",
- "drop table reports_bad_interactions;",
- "drop table reports_extra;",
- "drop table reports_extra_interactions;",
- "drop table reports_modified;",
- "drop table reports_modified_interactions;",
- "drop table reports_repository;",
- "drop table reports_metadata;",
- "alter table reports_interaction add server varchar(256) not null default 'N/A';",
- # fix revision data type to support $VCS hashes
- "alter table reports_interaction add repo_rev_code varchar(64) default '';",
- # Performance enhancements for large sites
- 'alter table reports_interaction add column bad_entries integer not null default -1;',
- 'alter table reports_interaction add column modified_entries integer not null default -1;',
- 'alter table reports_interaction add column extra_entries integer not null default -1;',
- _populate_interaction_entry_counts,
- _interactions_constraint_or_idx,
- 'alter table reports_reason add is_binary bool NOT NULL default False;',
- 'alter table reports_reason add is_sensitive bool NOT NULL default False;',
- _remove_table_column('reports_interaction', 'client_version'),
- "alter table reports_reason add unpruned varchar(1280) not null default '';",
-]
-
-# this will calculate the last possible version of the database
-lastversion = len(_fixes)
-
-
-def rollupdate(current_version):
- """function responsible to coordinates all the updates
- need current_version as integer
- """
- ret = None
- if current_version < lastversion:
- for i in range(current_version, lastversion):
- try:
- if type(_fixes[i]) == str:
- connection.cursor().execute(_fixes[i])
- else:
- _fixes[i]()
- except:
- logger.error("Failed to perform db update %s" % (_fixes[i]),
- exc_info=1)
- # since the array starts at 0 but version
- # starts at 1 we add 1 to the normal count
- ret = InternalDatabaseVersion.objects.create(version=i + 1)
- return ret
- else:
- return None
-
-
-def dosync():
- """Function to do the syncronisation for the models"""
- # try to detect if it's a fresh new database
- try:
- cursor = connection.cursor()
- # If this table goes missing,
- # don't forget to change it to the new one
- cursor.execute("Select * from reports_client")
- # if we get here with no error then the database has existing tables
- fresh = False
- except:
- logger.debug("there was an error while detecting "
- "the freshness of the database")
- #we should get here if the database is new
- fresh = True
-
- # ensure database connections are closed
- # so that the management can do its job right
- try:
- cursor.close()
- connection.close()
- except:
- # ignore any errors from missing/invalid dbs
- pass
- # Do the syncdb according to the django version
- if "call_command" in dir(django.core.management):
- # this is available since django 1.0 alpha.
- # not yet tested for full functionnality
- django.core.management.call_command("syncdb", interactive=False, verbosity=0)
- if fresh:
- django.core.management.call_command("loaddata", 'initial_version.xml', verbosity=0)
- elif "syncdb" in dir(django.core.management):
- # this exist only for django 0.96.*
- django.core.management.syncdb(interactive=False, verbosity=0)
- if fresh:
- logger.debug("loading the initial_version fixtures")
- django.core.management.load_data(fixture_labels=['initial_version'], verbosity=0)
- else:
- logger.warning("Don't forget to run syncdb")
-
-
-def update_database():
- """method to search where we are in the revision
- of the database models and update them"""
- try:
- logger.debug("Running upgrade of models to the new one")
- dosync()
- know_version = InternalDatabaseVersion.objects.order_by('-version')
- if not know_version:
- logger.debug("No version, creating initial version")
- know_version = InternalDatabaseVersion.objects.create(version=0)
- else:
- know_version = know_version[0]
- logger.debug("Presently at %s" % know_version)
- if know_version.version < lastversion:
- new_version = rollupdate(know_version.version)
- if new_version:
- logger.debug("upgraded to %s" % new_version)
- except:
- logger.error("Error while updating the database")
- for x in traceback.format_exc().splitlines():
- logger.error(x)
diff --git a/src/lib/Bcfg2/Server/Reports/utils.py b/src/lib/Bcfg2/Server/Reports/utils.py
index e0b6ead59..c47763e39 100755
--- a/src/lib/Bcfg2/Server/Reports/utils.py
+++ b/src/lib/Bcfg2/Server/Reports/utils.py
@@ -3,7 +3,7 @@ from django.conf.urls.defaults import *
import re
"""List of filters provided by filteredUrls"""
-filter_list = ('server', 'state')
+filter_list = ('server', 'state', 'group')
class BatchFetch(object):
@@ -97,6 +97,8 @@ def filteredUrls(pattern, view, kwargs=None, name=None):
tail = mtail.group(1)
pattern = pattern[:len(pattern) - len(tail)]
for filter in ('/state/(?P<state>\w+)',
+ '/group/(?P<group>[\w\-\.]+)',
+ '/group/(?P<group>[\w\-\.]+)/(?P<state>[A-Za-z]+)',
'/server/(?P<server>[\w\-\.]+)',
'/server/(?P<server>[\w\-\.]+)/(?P<state>[A-Za-z]+)'):
results += [(pattern + filter + tail, view, kwargs)]
diff --git a/src/lib/Bcfg2/Server/SchemaUpdater/Changes/1_0_x.py b/src/lib/Bcfg2/Server/SchemaUpdater/Changes/1_0_x.py
new file mode 100644
index 000000000..ff4c24328
--- /dev/null
+++ b/src/lib/Bcfg2/Server/SchemaUpdater/Changes/1_0_x.py
@@ -0,0 +1,11 @@
+"""
+1_0_x.py
+
+This file should contain updates relevant to the 1.0.x branches ONLY.
+The updates() method must be defined and it should return an Updater object
+"""
+from Bcfg2.Server.SchemaUpdater import UnsupportedUpdate
+
+def updates():
+ return UnsupportedUpdate("1.0", 10)
+
diff --git a/src/lib/Bcfg2/Server/SchemaUpdater/Changes/1_1_x.py b/src/lib/Bcfg2/Server/SchemaUpdater/Changes/1_1_x.py
new file mode 100644
index 000000000..0d28786fd
--- /dev/null
+++ b/src/lib/Bcfg2/Server/SchemaUpdater/Changes/1_1_x.py
@@ -0,0 +1,59 @@
+"""
+1_1_x.py
+
+This file should contain updates relevant to the 1.1.x branches ONLY.
+The updates() method must be defined and it should return an Updater object
+"""
+from Bcfg2.Server.SchemaUpdater import Updater
+from Bcfg2.Server.SchemaUpdater.Routines import updatercallable
+
+from django.db import connection
+import sys
+import Bcfg2.settings
+from Bcfg2.Server.Reports.reports.models import \
+ TYPE_BAD, TYPE_MODIFIED, TYPE_EXTRA
+
+@updatercallable
+def _interactions_constraint_or_idx():
+ """sqlite doesn't support alter tables.. or constraints"""
+ cursor = connection.cursor()
+ try:
+ cursor.execute('alter table reports_interaction add constraint reports_interaction_20100601 unique (client_id,timestamp)')
+ except:
+ cursor.execute('create unique index reports_interaction_20100601 on reports_interaction (client_id,timestamp)')
+
+
+@updatercallable
+def _populate_interaction_entry_counts():
+    '''Populate the type totals for the interaction table'''
+ cursor = connection.cursor()
+ count_field = {TYPE_BAD: 'bad_entries',
+ TYPE_MODIFIED: 'modified_entries',
+ TYPE_EXTRA: 'extra_entries'}
+
+ for type in list(count_field.keys()):
+ cursor.execute("select count(type), interaction_id " +
+ "from reports_entries_interactions where type = %s group by interaction_id" % type)
+ updates = []
+ for row in cursor.fetchall():
+ updates.append(row)
+ try:
+ cursor.executemany("update reports_interaction set " + count_field[type] + "=%s where id = %s", updates)
+ except Exception:
+ e = sys.exc_info()[1]
+ print(e)
+ cursor.close()
+
+
+def updates():
+ fixes = Updater("1.1")
+ fixes.override_base_version(12) # Do not do this in new code
+
+ fixes.add('alter table reports_interaction add column bad_entries integer not null default -1;')
+ fixes.add('alter table reports_interaction add column modified_entries integer not null default -1;')
+ fixes.add('alter table reports_interaction add column extra_entries integer not null default -1;')
+ fixes.add(_populate_interaction_entry_counts())
+ fixes.add(_interactions_constraint_or_idx())
+ fixes.add('alter table reports_reason add is_binary bool NOT NULL default False;')
+ return fixes
+
diff --git a/src/lib/Bcfg2/Server/SchemaUpdater/Changes/1_2_x.py b/src/lib/Bcfg2/Server/SchemaUpdater/Changes/1_2_x.py
new file mode 100644
index 000000000..024965bd5
--- /dev/null
+++ b/src/lib/Bcfg2/Server/SchemaUpdater/Changes/1_2_x.py
@@ -0,0 +1,15 @@
+"""
+1_2_x.py
+
+This file should contain updates relevant to the 1.2.x branches ONLY.
+The updates() method must be defined and it should return an Updater object
+"""
+from Bcfg2.Server.SchemaUpdater import Updater
+from Bcfg2.Server.SchemaUpdater.Routines import updatercallable
+
+def updates():
+ fixes = Updater("1.2")
+ fixes.override_base_version(18) # Do not do this in new code
+ fixes.add('alter table reports_reason add is_sensitive bool NOT NULL default False;')
+ return fixes
+
diff --git a/src/lib/Bcfg2/Server/SchemaUpdater/Changes/1_3_0.py b/src/lib/Bcfg2/Server/SchemaUpdater/Changes/1_3_0.py
new file mode 100644
index 000000000..4fc57c653
--- /dev/null
+++ b/src/lib/Bcfg2/Server/SchemaUpdater/Changes/1_3_0.py
@@ -0,0 +1,27 @@
+"""
+1_3_0.py
+
+This file should contain updates relevant to the 1.3.x branches ONLY.
+The updates() method must be defined and it should return an Updater object
+"""
+from Bcfg2.Server.SchemaUpdater import Updater, UpdaterError
+from Bcfg2.Server.SchemaUpdater.Routines import AddColumns, \
+ RemoveColumns, RebuildTable, DropTable
+
+from Bcfg2.Server.Reports.reports.models import Reason, Interaction
+
+
+def updates():
+ fixes = Updater("1.3")
+ fixes.add(RemoveColumns(Interaction, 'client_version'))
+ fixes.add(AddColumns(Reason))
+ fixes.add(RebuildTable(Reason, [
+ 'owner', 'current_owner',
+ 'group', 'current_group',
+ 'perms', 'current_perms',
+ 'status', 'current_status',
+ 'to', 'current_to']))
+ fixes.add(DropTable('reports_ping'))
+
+ return fixes
+
diff --git a/src/lib/Bcfg2/Server/SchemaUpdater/Changes/__init__.py b/src/lib/Bcfg2/Server/SchemaUpdater/Changes/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/src/lib/Bcfg2/Server/SchemaUpdater/Changes/__init__.py
diff --git a/src/lib/Bcfg2/Server/SchemaUpdater/Routines.py b/src/lib/Bcfg2/Server/SchemaUpdater/Routines.py
new file mode 100644
index 000000000..4fcf0e6bf
--- /dev/null
+++ b/src/lib/Bcfg2/Server/SchemaUpdater/Routines.py
@@ -0,0 +1,279 @@
+import logging
+import traceback
+from django.db.models.fields import NOT_PROVIDED
+from django.db import connection, DatabaseError, backend, models
+from django.core.management.color import no_style
+from django.core.management.sql import sql_create
+import django.core.management
+
+import Bcfg2.settings
+
+logger = logging.getLogger(__name__)
+
+def _quote(value):
+ """
+ Quote a string to use as a table name or column
+ """
+ return backend.DatabaseOperations().quote_name(value)
+
+
+def _rebuild_sqlite_table(model):
+    """Sqlite doesn't support most alter table statements. This streamlines the
+ rebuild process"""
+ try:
+ cursor = connection.cursor()
+ table_name = model._meta.db_table
+
+    # Build create statement from django
+ model._meta.db_table = "%s_temp" % table_name
+ sql, references = connection.creation.sql_create_model(model, no_style())
+ columns = ",".join([_quote(f.column) \
+ for f in model._meta.fields])
+
+ # Create a temp table
+ [cursor.execute(s) for s in sql]
+
+ # Fill the table
+ tbl_name = _quote(table_name)
+ tmp_tbl_name = _quote(model._meta.db_table)
+ # Reset this
+ model._meta.db_table = table_name
+ cursor.execute("insert into %s(%s) select %s from %s;" % (
+ tmp_tbl_name,
+ columns,
+ columns,
+ tbl_name))
+ cursor.execute("drop table %s" % tbl_name)
+
+ # Call syncdb to create the table again
+ django.core.management.call_command("syncdb", interactive=False, verbosity=0)
+ # syncdb closes our cursor
+ cursor = connection.cursor()
+ # Repopulate
+ cursor.execute('insert into %s(%s) select %s from %s;' % (tbl_name,
+ columns,
+ columns,
+ tmp_tbl_name))
+ cursor.execute('DROP TABLE %s;' % tmp_tbl_name)
+ except DatabaseError:
+ logger.error("Failed to rebuild sqlite table %s" % table_name, exc_info=1)
+ raise UpdaterRoutineException
+
+
+class UpdaterRoutineException(Exception):
+ pass
+
+
+class UpdaterRoutine(object):
+ """Base for routines."""
+ def __init__(self):
+ pass
+
+ def __str__(self):
+ return __name__
+
+ def run(self):
+ """Called to execute the action"""
+ raise UpdaterRoutineException
+
+
+
+class AddColumns(UpdaterRoutine):
+ """
+ Routine to add new columns to an existing model
+ """
+ def __init__(self, model):
+ self.model = model
+ self.model_name = model.__name__
+
+ def __str__(self):
+ return "Add new columns for model %s" % self.model_name
+
+ def run(self):
+ try:
+ cursor = connection.cursor()
+ except DatabaseError:
+ logger.error("Failed to connect to the db")
+ raise UpdaterRoutineException
+
+ try:
+ desc = {}
+ for d in connection.introspection.get_table_description(cursor,
+ self.model._meta.db_table):
+ desc[d[0]] = d
+ except DatabaseError:
+ logger.error("Failed to get table description", exc_info=1)
+ raise UpdaterRoutineException
+
+ for field in self.model._meta.fields:
+ if field.column in desc:
+ continue
+ logger.debug("Column %s does not exist yet" % field.column)
+ if field.default == NOT_PROVIDED:
+ logger.error("Cannot add a column with out a default value")
+ raise UpdaterRoutineException
+
+ sql = "ALTER TABLE %s ADD %s %s NOT NULL DEFAULT " % (
+ _quote(self.model._meta.db_table),
+ _quote(field.column), field.db_type(), )
+ db_engine = Bcfg2.settings.DATABASES['default']['ENGINE']
+ if db_engine == 'django.db.backends.sqlite3':
+ sql += _quote(field.default)
+ sql_values = ()
+ else:
+ sql += '%s'
+ sql_values = (field.default, )
+ try:
+ cursor.execute(sql, sql_values)
+ logger.debug("Added column %s to %s" %
+ (field.column, self.model._meta.db_table))
+ except DatabaseError:
+ logger.error("Unable to add column %s" % field.column)
+ raise UpdaterRoutineException
+
+
+class RebuildTable(UpdaterRoutine):
+ """
+ Rebuild the table for an existing model. Use this if field types have changed.
+ """
+ def __init__(self, model, columns):
+ self.model = model
+ self.model_name = model.__name__
+
+ if type(columns) == str:
+ self.columns = [columns]
+ elif type(columns) in (tuple, list):
+ self.columns = columns
+ else:
+ logger.error("Columns must be a str, tuple, or list")
+ raise UpdaterRoutineException
+
+
+ def __str__(self):
+ return "Rebuild columns for model %s" % self.model_name
+
+ def run(self):
+ try:
+ cursor = connection.cursor()
+ except DatabaseError:
+ logger.error("Failed to connect to the db")
+ raise UpdaterRoutineException
+
+ db_engine = Bcfg2.settings.DATABASES['default']['ENGINE']
+ if db_engine == 'django.db.backends.sqlite3':
+ """ Sqlite is a special case. Altering columns is not supported. """
+ _rebuild_sqlite_table(self.model)
+ return
+
+ if db_engine == 'django.db.backends.mysql':
+ modify_cmd = 'MODIFY '
+ else:
+ modify_cmd = 'ALTER COLUMN '
+
+ col_strings = []
+ for column in self.columns:
+ col_strings.append("%s %s %s" % ( \
+ modify_cmd,
+ _quote(column),
+ self.model._meta.get_field(column).db_type()
+ ))
+
+ try:
+ cursor.execute('ALTER TABLE %s %s' %
+ (_quote(self.model._meta.db_table), ", ".join(col_strings)))
+ except DatabaseError:
+ logger.debug("Failed modify table %s" % self.model._meta.db_table)
+ raise UpdaterRoutineException
+
+
+
+class RemoveColumns(RebuildTable):
+ """
+ Routine to remove columns from an existing model
+ """
+ def __init__(self, model, columns):
+ super(RemoveColumns, self).__init__(model, columns)
+
+
+ def __str__(self):
+ return "Remove columns from model %s" % self.model_name
+
+ def run(self):
+ try:
+ cursor = connection.cursor()
+ except DatabaseError:
+ logger.error("Failed to connect to the db")
+ raise UpdaterRoutineException
+
+ try:
+ columns = [d[0] for d in connection.introspection.get_table_description(cursor,
+ self.model._meta.db_table)]
+ except DatabaseError:
+ logger.error("Failed to get table description", exc_info=1)
+ raise UpdaterRoutineException
+
+ for column in self.columns:
+ if column not in columns:
+ logger.warning("Cannot drop column %s: does not exist" % column)
+ continue
+
+ logger.debug("Dropping column %s" % column)
+
+ db_engine = Bcfg2.settings.DATABASES['default']['ENGINE']
+ if db_engine == 'django.db.backends.sqlite3':
+ _rebuild_sqlite_table(self.model)
+ else:
+ sql = "alter table %s drop column %s" % \
+ (_quote(self.model._meta.db_table), _quote(column), )
+ try:
+ cursor.execute(sql)
+ except DatabaseError:
+ logger.debug("Failed to drop column %s from %s" %
+ (column, self.model._meta.db_table))
+ raise UpdaterRoutineException
+
+
+class DropTable(UpdaterRoutine):
+ """
+ Drop a table
+ """
+ def __init__(self, table_name):
+ self.table_name = table_name
+
+ def __str__(self):
+ return "Drop table %s" % self.table_name
+
+ def run(self):
+ try:
+ cursor = connection.cursor()
+ cursor.execute('DROP TABLE %s' % _quote(self.table_name))
+ except DatabaseError:
+ logger.error("Failed to drop table: %s" %
+ traceback.format_exc().splitlines()[-1])
+ raise UpdaterRoutineException
+
+
+class UpdaterCallable(UpdaterRoutine):
+ """Helper for routines. Basically delays execution"""
+ def __init__(self, fn):
+ self.fn = fn
+ self.args = []
+ self.kwargs = {}
+
+ def __call__(self, *args, **kwargs):
+ self.args = args
+ self.kwargs = kwargs
+ return self
+
+ def __str__(self):
+ return self.fn.__name__
+
+ def run(self):
+ self.fn(*self.args, **self.kwargs)
+
+def updatercallable(fn):
+ """Decorator for UpdaterCallable. Use for any function passed
+ into the fixes list"""
+ return UpdaterCallable(fn)
+
+
diff --git a/src/lib/Bcfg2/Server/SchemaUpdater/__init__.py b/src/lib/Bcfg2/Server/SchemaUpdater/__init__.py
new file mode 100644
index 000000000..304b36636
--- /dev/null
+++ b/src/lib/Bcfg2/Server/SchemaUpdater/__init__.py
@@ -0,0 +1,257 @@
+from django.db import connection, DatabaseError
+from django.core.exceptions import ImproperlyConfigured
+import django.core.management
+import logging
+import pkgutil
+import re
+import sys
+import traceback
+
+from Bcfg2.Bcfg2Py3k import CmpMixin
+from Bcfg2.Server.models import InternalDatabaseVersion
+from Bcfg2.Server.SchemaUpdater.Routines import UpdaterRoutineException, \
+ UpdaterRoutine
+from Bcfg2.Server.SchemaUpdater import Changes
+
+logger = logging.getLogger(__name__)
+
+class UpdaterError(Exception):
+ pass
+
+
+class SchemaTooOldError(UpdaterError):
+ pass
+
+
+def _walk_packages(paths):
+ """Python 2.4 lacks this routine"""
+ import glob
+ submodules = []
+ for path in paths:
+ for submodule in glob.glob("%s/*.py" % path):
+ mod = '.'.join(submodule.split("/")[-1].split('.')[:-1])
+ if mod != '__init__':
+ submodules.append((None, mod, False))
+ return submodules
+
+
+def _release_to_version(release):
+ """
+ Build a release base for a version
+
+ Expects a string of the form 00.00
+
+ returns an integer of the form MMmm00
+ """
+ regex = re.compile("^(\d+)\.(\d+)$")
+ m = regex.match(release)
+ if not m:
+ logger.error("Invalid release string: %s" % release)
+ raise TypeError
+ return int("%02d%02d00" % (int(m.group(1)), int(m.group(2))))
+
+
+class Updater(CmpMixin):
+ """Database updater to standardize updates"""
+
+ def __init__(self, release):
+ CmpMixin.__init__(self)
+
+ self._cursor = None
+ self._release = release
+ try:
+ self._base_version = _release_to_version(release)
+ except:
+ err = "Invalid release string: %s" % release
+ logger.error(err)
+ raise UpdaterError(err)
+
+ self._fixes = []
+ self._version = -1
+
+ def __cmp__(self, other):
+ return self._base_version - other._base_version
+
+ @property
+ def release(self):
+ return self._release
+
+ @property
+ def version(self):
+ if self._version < 0:
+ try:
+ iv = InternalDatabaseVersion.objects.latest()
+ self._version = iv.version
+ except InternalDatabaseVersion.DoesNotExist:
+ raise UpdaterError("No database version stored internally")
+ return self._version
+
+ @property
+ def cursor(self):
+ if not self._cursor:
+ self._cursor = connection.cursor()
+ return self._cursor
+
+ @property
+ def target_version(self):
+ if(len(self._fixes) == 0):
+ return self._base_version
+ else:
+ return self._base_version + len(self._fixes) - 1
+
+
+ def add(self, update):
+ if type(update) == str or isinstance(update, UpdaterRoutine):
+ self._fixes.append(update)
+ else:
+ raise TypeError
+
+
+ def override_base_version(self, version):
+ """Override our starting point for old releases. New code should
+ not use this method"""
+ self._base_version = int(version)
+
+
+ @staticmethod
+ def get_current_version():
+ """Queries the db for the latest version. Returns 0 for a
+ fresh install"""
+
+ if "call_command" in dir(django.core.management):
+ django.core.management.call_command("syncdb", interactive=False,
+ verbosity=0)
+ else:
+ msg = "Unable to call syndb routine"
+ logger.warning(msg)
+ raise UpdaterError(msg)
+
+ try:
+ iv = InternalDatabaseVersion.objects.latest()
+ version = iv.version
+ except InternalDatabaseVersion.DoesNotExist:
+ version = 0
+
+ return version
+
+
+ def syncdb(self):
+        """Function to do the synchronisation for the models"""
+
+ self._version = Updater.get_current_version()
+ self._cursor = None
+
+
+ def increment(self):
+ """Increment schema version in the database"""
+ if self._version < self._base_version:
+ self._version = self._base_version
+ else:
+ self._version += 1
+ InternalDatabaseVersion.objects.create(version=self._version)
+
+ def apply(self):
+ """Apply pending schema changes"""
+
+ if self.version >= self.target_version:
+ logger.debug("No updates for release %s" % self._release)
+ return
+
+ logger.debug("Applying updates for release %s" % self._release)
+
+ if self.version < self._base_version:
+ start = 0
+ else:
+ start = self.version - self._base_version + 1
+
+ try:
+ for fix in self._fixes[start:]:
+ if type(fix) == str:
+ self.cursor.execute(fix)
+ elif isinstance(fix, UpdaterRoutine):
+ fix.run()
+ else:
+ logger.error("Invalid schema change at %s" % \
+ self._version + 1)
+ self.increment()
+ logger.debug("Applied schema change number %s: %s" % \
+ (self.version, fix))
+ logger.info("Applied schema changes for release %s" % self._release)
+ except:
+ msg = "Failed to perform db update %s (%s): %s" % \
+ (self._version + 1, fix,
+ traceback.format_exc().splitlines()[-1])
+ logger.error(msg)
+ raise UpdaterError(msg)
+
+
+class UnsupportedUpdate(Updater):
+ """Handle an unsupported update"""
+
+ def __init__(self, release, version):
+ super(UnsupportedUpdate, self).__init__(release)
+ self._base_version = version
+
+ def apply(self):
+ """Raise an exception if we're too old"""
+
+ if self.version < self.target_version:
+ logger.error("Upgrade from release %s unsupported" % self._release)
+ raise SchemaTooOldError
+
+
+def update_database():
+ """method to search where we are in the revision
+ of the database models and update them"""
+ try:
+ logger.debug("Verifying database schema")
+
+ updaters = []
+ if hasattr(pkgutil, 'walk_packages'):
+ submodules = pkgutil.walk_packages(path=Changes.__path__)
+ else:
+ #python 2.4
+ submodules = _walk_packages(Changes.__path__)
+ for loader, submodule, ispkg in submodules:
+ if ispkg:
+ continue
+ try:
+ updates = getattr(
+ __import__("%s.%s" % (Changes.__name__, submodule),
+ globals(), locals(), ['*']),
+ "updates")
+ updaters.append(updates())
+ except ImportError:
+ logger.error("Failed to import %s" % submodule)
+ except AttributeError:
+ logger.warning("Module %s does not have an updates function" %
+ submodule)
+ except:
+ msg = "Failed to build updater for %s" % submodule
+ logger.error(msg, exc_info=1)
+ raise UpdaterError(msg)
+
+ current_version = Updater.get_current_version()
+ logger.debug("Database version at %s" % current_version)
+
+ updaters.sort()
+ if current_version > 0:
+ [u.apply() for u in updaters]
+ logger.debug("Database version at %s" %
+ Updater.get_current_version())
+ else:
+ target = updaters[-1].target_version
+ InternalDatabaseVersion.objects.create(version=target)
+ logger.info("A new database was created")
+
+ except UpdaterError:
+ raise
+ except ImproperlyConfigured:
+ logger.error("Django is not properly configured: %s" %
+ traceback.format_exc().splitlines()[-1])
+ raise UpdaterError
+ except:
+ logger.error("Error while updating the database")
+ for x in traceback.format_exc().splitlines():
+ logger.error(x)
+ raise UpdaterError
diff --git a/src/lib/Bcfg2/Server/Snapshots/model.py b/src/lib/Bcfg2/Server/Snapshots/model.py
index 5d7973c16..0bbd206da 100644
--- a/src/lib/Bcfg2/Server/Snapshots/model.py
+++ b/src/lib/Bcfg2/Server/Snapshots/model.py
@@ -6,13 +6,7 @@ import sqlalchemy.exceptions
from sqlalchemy.orm import relation, backref
from sqlalchemy.ext.declarative import declarative_base
-
-# py3k compatibility
-def u_str(string):
- if sys.hexversion >= 0x03000000:
- return string
- else:
- return unicode(string)
+from Bcfg2.Bcfg2Py3k import u_str
class Uniquer(object):
diff --git a/src/lib/Bcfg2/Server/__init__.py b/src/lib/Bcfg2/Server/__init__.py
index 96777b0bf..f79b51dd3 100644
--- a/src/lib/Bcfg2/Server/__init__.py
+++ b/src/lib/Bcfg2/Server/__init__.py
@@ -1,4 +1,13 @@
"""This is the set of modules for Bcfg2.Server."""
+import lxml.etree
+
__all__ = ["Admin", "Core", "FileMonitor", "Plugin", "Plugins",
- "Hostbase", "Reports", "Snapshots"]
+ "Hostbase", "Reports", "Snapshots", "XMLParser",
+ "XI", "XI_NAMESPACE"]
+
+XMLParser = lxml.etree.XMLParser(remove_blank_text=True)
+
+XI = 'http://www.w3.org/2001/XInclude'
+XI_NAMESPACE = '{%s}' % XI
+
diff --git a/src/lib/Bcfg2/Server/models.py b/src/lib/Bcfg2/Server/models.py
new file mode 100644
index 000000000..effd4d298
--- /dev/null
+++ b/src/lib/Bcfg2/Server/models.py
@@ -0,0 +1,77 @@
+import sys
+import logging
+import Bcfg2.Options
+import Bcfg2.Server.Plugins
+from django.db import models
+from Bcfg2.Bcfg2Py3k import ConfigParser
+
+logger = logging.getLogger('Bcfg2.Server.models')
+
+MODELS = []
+
+def load_models(plugins=None, cfile='/etc/bcfg2.conf', quiet=True):
+ global MODELS
+
+ if plugins is None:
+ # we want to provide a different default plugin list --
+ # namely, _all_ plugins, so that the database is guaranteed to
+ # work, even if /etc/bcfg2.conf isn't set up properly
+ plugin_opt = Bcfg2.Options.SERVER_PLUGINS
+ plugin_opt.default = Bcfg2.Server.Plugins.__all__
+
+ setup = Bcfg2.Options.OptionParser(dict(plugins=plugin_opt,
+ configfile=Bcfg2.Options.CFILE),
+ quiet=quiet)
+ setup.parse([Bcfg2.Options.CFILE.cmd, cfile])
+ plugins = setup['plugins']
+
+ if MODELS:
+ # load_models() has been called once, so first unload all of
+ # the models; otherwise we might call load_models() with no
+ # arguments, end up with _all_ models loaded, and then in a
+ # subsequent call only load a subset of models
+ for model in MODELS:
+ delattr(sys.modules[__name__], model)
+ MODELS = []
+
+ for plugin in plugins:
+ try:
+ mod = getattr(__import__("Bcfg2.Server.Plugins.%s" %
+ plugin).Server.Plugins, plugin)
+ except ImportError:
+ try:
+ err = sys.exc_info()[1]
+ mod = __import__(plugin)
+ except:
+ if plugins != Bcfg2.Server.Plugins.__all__:
+ # only produce errors if the default plugin list
+ # was not used -- i.e., if the config file was set
+ # up. don't produce errors when trying to load
+ # all plugins, IOW. the error from the first
+ # attempt to import is probably more accurate than
+ # the second attempt.
+ logger.error("Failed to load plugin %s: %s" % (plugin, err))
+ continue
+ for sym in dir(mod):
+ obj = getattr(mod, sym)
+ if hasattr(obj, "__bases__") and models.Model in obj.__bases__:
+ setattr(sys.modules[__name__], sym, obj)
+ MODELS.append(sym)
+
+# basic invocation to ensure that a default set of models is loaded,
+# and thus that this module will always work.
+load_models(quiet=True)
+
+# Monitor our internal db version
+class InternalDatabaseVersion(models.Model):
+ """Object that tell us to witch version is the database."""
+ version = models.IntegerField()
+ updated = models.DateTimeField(auto_now_add=True)
+
+ def __str__(self):
+ return "version %d updated the %s" % (self.version, self.updated.isoformat())
+
+ class Meta:
+ app_label = "reports"
+ get_latest_by = "version"
+