summaryrefslogtreecommitdiffstats
path: root/src/lib/Server/Plugins
diff options
context:
space:
mode:
Diffstat (limited to 'src/lib/Server/Plugins')
-rw-r--r--src/lib/Server/Plugins/Base.py59
-rw-r--r--src/lib/Server/Plugins/Bundler.py120
-rw-r--r--src/lib/Server/Plugins/Cfg.py147
-rw-r--r--src/lib/Server/Plugins/Hostbase.py391
-rw-r--r--src/lib/Server/Plugins/Pkgmgr.py128
-rw-r--r--src/lib/Server/Plugins/Svcmgr.py27
6 files changed, 431 insertions, 441 deletions
diff --git a/src/lib/Server/Plugins/Base.py b/src/lib/Server/Plugins/Base.py
index 1cdd7599c..3be30bc6a 100644
--- a/src/lib/Server/Plugins/Base.py
+++ b/src/lib/Server/Plugins/Base.py
@@ -1,62 +1,31 @@
'''This module sets up a base list of configuration entries'''
__revision__ = '$Revision$'
-from copy import deepcopy
-from lxml.etree import Element, XML, XMLSyntaxError, _Comment
+import Bcfg2.Server.Plugin
+import copy
+import lxml.etree
-from Bcfg2.Server.Plugin import Plugin, PluginInitError, SingleXMLFileBacked
-
-class Base(Plugin, SingleXMLFileBacked):
+class Base(Bcfg2.Server.Plugin.Plugin, Bcfg2.Server.Plugin.DirectoryBacked):
'''This Structure is good for the pile of independent configs needed for most actual systems'''
__name__ = 'Base'
__version__ = '$Id$'
__author__ = 'bcfg-dev@mcs.anl.gov'
+ __child__ = Bcfg2.Server.Plugin.StructFile
'''base creates independent clauses based on client metadata'''
def __init__(self, core, datastore):
- Plugin.__init__(self, core, datastore)
- self.store = {'all':[], 'Class':{'all':[]}, 'Image':{'all':[]}, 'all':[]}
+ Bcfg2.Server.Plugin.Plugin.__init__(self, core, datastore)
+ self.fragements = {}
try:
- SingleXMLFileBacked.__init__(self, "%s/etc/base.xml"%(datastore), self.core.fam)
+ Bcfg2.Server.Plugin.DirectoryBacked.__init__(self, self.data, self.core.fam)
except OSError:
- self.LogError("Failed to load base.xml")
- raise PluginInitError
+ self.LogError("Failed to load Base repository")
+ raise Bcfg2.Server.Plugin.PluginInitError
- def Index(self):
- '''Store XML data in reasonable structures'''
- try:
- xdata = XML(self.data)
- except XMLSyntaxError:
- self.LogError("Failed to parse base.xml")
- return
- self.store = {'all':[], 'Class':{'all':[]}, 'Image':{'all':[]}, 'all':[]}
- for entry in [ent for ent in xdata.getchildren() if not isinstance(ent, _Comment)]:
- if entry.tag in ['Image', 'Class']:
- if not self.store[entry.tag].has_key(entry.get('name')):
- self.store[entry.tag][entry.get('name')] = {'all':[], 'Class':{}, 'Image':{}}
- for child in [ent for ent in entry.getchildren() if not isinstance(ent, _Comment)]:
- if child.tag in ['Image', 'Class']:
- self.store[entry.tag][entry.get('name')][child.tag][child.get('name')] = \
- [ent for ent in child.getchildren() if \
- not isinstance(ent, _Comment)]
- else:
- self.store[entry.tag][entry.get('name')]['all'].append(child)
- else:
- self.store['all'].append(child)
-
def BuildStructures(self, metadata):
'''Build structures for client described by metadata'''
- ret = Element("Independant", version='2.0')
- [ret.append(deepcopy(entry)) for entry in self.store['all']]
- idata = self.store['Image'].get(metadata.image, {'all':[], 'Class':{}})
- for entry in idata['all']:
- ret.append(deepcopy(entry))
- for cls in metadata.classes:
- for entry in idata['Class'].get(cls, []):
- ret.append(deepcopy(entry))
- cdata = self.store['Class'].get(cls, {'all':[], 'Image':{}})
- for entry in cdata['all']:
- ret.append(deepcopy(entry))
- for entry in cdata['Image'].get(metadata.image, []):
- ret.append(deepcopy(entry))
+ ret = lxml.etree.Element("Independant", version='2.0')
+ fragments = reduce(lambda x, y: x+y,
+ [base.Match(metadata) for base in self.entries.values()])
+ [ret.append(copy.deepcopy(frag)) for frag in fragments]
return [ret]
diff --git a/src/lib/Server/Plugins/Bundler.py b/src/lib/Server/Plugins/Bundler.py
index 4b357f121..cbbb6c671 100644
--- a/src/lib/Server/Plugins/Bundler.py
+++ b/src/lib/Server/Plugins/Bundler.py
@@ -1,122 +1,36 @@
'''This provides bundle clauses with translation functionality'''
__revision__ = '$Revision$'
-from copy import deepcopy
-from syslog import LOG_ERR, syslog
-from lxml.etree import Element, XML, XMLSyntaxError, _Comment
+import Bcfg2.Server.Plugin
+import copy
+import lxml.etree
-from Bcfg2.Server.Plugin import Plugin, SingleXMLFileBacked, XMLFileBacked, DirectoryBacked
-
-
-class ImageFile(SingleXMLFileBacked):
- '''This file contains image -> system mappings'''
- def __init__(self, filename, fam):
- self.images = {}
- SingleXMLFileBacked.__init__(self, filename, fam)
-
- def Index(self):
- '''Build data structures out of the data'''
- try:
- xdata = XML(self.data)
- except XMLSyntaxError, err:
- syslog(LOG_ERR, "Failed to parse file %s" % (self.name))
- syslog(LOG_ERR, err)
- del self.data
- return
- self.images = {}
- for child in xdata.getchildren():
- [name, pkg, service] = [child.get(field) for field in ['name', 'package', 'service']]
- for grandchild in child.getchildren():
- self.images[grandchild.get('name')] = (name, pkg, service)
-
-class Bundle(XMLFileBacked):
- '''Bundles are configuration specifications (with image/translation abstraction)'''
-
- def __init__(self, filename):
- self.all = []
- self.attributes = {}
- self.systems = {}
- XMLFileBacked.__init__(self, filename)
-
- def Index(self):
- '''Build data structures from the source data'''
- try:
- xdata = XML(self.data)
- except XMLSyntaxError, err:
- syslog(LOG_ERR, "Failed to parse file %s" % (self.name))
- syslog(LOG_ERR, str(err))
- del self.data
- return
- self.all = []
- self.systems = {}
- self.attributes = {}
- for entry in [ent for ent in xdata.getchildren() if not isinstance(ent, _Comment)]:
- if entry.tag == 'System':
- self.systems[entry.attrib['name']] = [ent for ent in entry.getchildren() \
- if not isinstance(ent, _Comment)]
- elif entry.tag == 'Attribute':
- self.attributes[entry.get('name')] = [ent for ent in entry.getchildren() \
- if not isinstance(ent, _Comment)]
- else:
- self.all.append(entry)
- del self.data
-
- def BuildBundle(self, metadata, system):
- '''Build a bundle for a particular client'''
- bundlename = self.name.split('/')[-1]
- bundle = Element('Bundle', name=bundlename)
- for entry in self.all + self.systems.get(system, []):
- bundle.append(deepcopy(entry))
- for attribute in [aname for (scope, aname) in [item.split('.') for item in metadata.attributes]
- if scope == bundlename[:-4]]:
- for entry in self.attributes.get(attribute, []):
- bundle.append(deepcopy(entry))
- return bundle
-
-class BundleSet(DirectoryBacked):
- '''The Bundler handles creation of dependent clauses based on bundle definitions'''
- __child__ = Bundle
-
-class Bundler(Plugin):
+class Bundler(Bcfg2.Server.Plugin.Plugin, Bcfg2.Server.Plugin.DirectoryBacked):
'''The bundler creates dependent clauses based on the bundle/translation scheme from bcfg1'''
__name__ = 'Bundler'
__version__ = '$Id$'
__author__ = 'bcfg-dev@mcs.anl.gov'
+ __child__ = Bcfg2.Server.Plugin.StructFile
def __init__(self, core, datastore):
- Plugin.__init__(self, core, datastore)
- self.imageinfo = ImageFile("%s/etc/imageinfo.xml"%(datastore), self.core.fam)
- self.bundles = BundleSet(self.data, self.core.fam)
+ Bcfg2.Server.Plugin.Plugin.__init__(self, core, datastore)
+ try:
+ Bcfg2.Server.Plugin.DirectoryBacked.__init__(self, self.data, self.core.fam)
+ except OSError:
+ self.LogError("Failed to load Bundle repository")
+ raise Bcfg2.Server.Plugin.PluginInitError
def BuildStructures(self, metadata):
'''Build all structures for client (metadata)'''
- try:
- (system, package, service) = self.GetTransInfo(metadata)
- except KeyError:
- syslog(LOG_ERR, "Failed to find translation information for image %s" % metadata.image)
- return []
bundleset = []
for bundlename in metadata.bundles:
- if not self.bundles.entries.has_key("%s.xml"%(bundlename)):
- syslog(LOG_ERR, "Client %s requested nonexistent bundle %s"%(metadata.hostname, bundlename))
+ if not self.entries.has_key("%s.xml"%(bundlename)):
+ self.LogError("Client %s requested nonexistent bundle %s" % \
+ (metadata.hostname, bundlename))
continue
-
- bundle = self.bundles.entries["%s.xml" % (bundlename)].BuildBundle(metadata, system)
- # now we need to populate service/package types
- for entry in bundle.getchildren():
- if entry.tag == 'Package':
- entry.attrib['type'] = package
- elif entry.tag == 'Service':
- entry.attrib['type'] = service
+ bundle = lxml.etree.Element('Bundle', name=bundlename)
+ [bundle.append(copy.deepcopy(item))
+ for item in self.entries["%s.xml" % (bundlename)].Match(metadata)]
bundleset.append(bundle)
return bundleset
- def GetTransInfo(self, metadata):
- '''Get Translation info for metadata.image'''
- if self.imageinfo.images.has_key(metadata.image):
- return self.imageinfo.images[metadata.image]
- else:
- raise KeyError, metadata.image
-
-
-
diff --git a/src/lib/Server/Plugins/Cfg.py b/src/lib/Server/Plugins/Cfg.py
index 953401e7a..b325144e5 100644
--- a/src/lib/Server/Plugins/Cfg.py
+++ b/src/lib/Server/Plugins/Cfg.py
@@ -1,7 +1,6 @@
'''This module implements a config file repository'''
__revision__ = '$Revision$'
-from binascii import b2a_base64
from os import stat
from re import compile as regcompile
from stat import S_ISDIR, ST_MODE
@@ -9,54 +8,73 @@ from syslog import syslog, LOG_INFO, LOG_ERR
from Bcfg2.Server.Plugin import Plugin, PluginExecutionError, FileBacked
+import binascii
+import exceptions
+
+specific = regcompile('(.*/)(?P<filename>[\S\-.]+)\.((H_(?P<hostname>\S+))|' +
+ '(G(?P<prio>\d+)_(?P<group>\S+)))$')
+
+class SpecificityError(Exception):
+ '''Thrown in case of filename parse failure'''
+ pass
+
class FileEntry(FileBacked):
'''The File Entry class pertains to the config files contained in a particular directory.
This includes :info, all base files and deltas'''
-
- def __init__(self, name, all, image, classes, bundles, attribs, hostname):
+
+ def __init__(self, myid, name):
FileBacked.__init__(self, name)
- self.all = all
- self.image = image
- self.bundles = bundles
- self.classes = classes
- self.attributes = attribs
- self.hostname = hostname
+ self.name = name
+ self.identity = myid
+ self.all = False
+ self.hostname = False
+ self.group = False
+ self.op = False
+ self.prio = False
+ if name.split('.')[-1] in ['cat', 'diff']:
+ self.op = name.split('.')[-1]
+ name = name[:-(len(self.op) + 1)]
+ if self.name.split('/')[-1] == myid.split('/')[-1]:
+ self.all = True
+ else:
+ data = specific.match(name)
+ if not data:
+ syslog(LOG_ERR, "Cfg: Failed to match %s" % name)
+ raise SpecificityError
+ if data.group('hostname') != None:
+ self.hostname = data.group('hostname')
+ else:
+ self.group = data.group('group')
+ self.prio = int(data.group('prio'))
def __cmp__(self, other):
- fields = ['all', 'image', 'classes', 'bundles', 'attributes', 'hostname']
- try:
- most1 = [index for index in range(len(fields)) if getattr(self, fields[index])][0]
- except IndexError:
- most1 = 0
- try:
- most2 = [index for index in range(len(fields)) if getattr(other, fields[index])][0]
- except IndexError:
- most2 = 0
- if most1 == most2:
- if self.name.split('.')[-1] in ['cat', 'diff']:
- meta1 = self.name.split('.')[-2]
- else:
- meta1 = self.name.split('.')[-1]
- if other.name.split('.')[-1] in ['cat', 'diff']:
- meta2 = other.name.split('.')[-2]
+ data = [[getattr(self, field) for field in ['all', 'group', 'hostname']],
+ [getattr(other, field) for field in ['all', 'group', 'hostname']]]
+ for index in range(3):
+ if data[0][index] and not data[1][index]:
+ return -1
+ elif data[1][index] and not data[0][index]:
+ return 1
+ elif data[0][index] and data[1][index]:
+ if hasattr(self, 'prio') and hasattr(other, 'prio'):
+ return self.prio - other.prio
+ else:
+ return 0
else:
- meta2 = other.name.split('.')[-1]
+ pass
+ syslog(LOG_ERR, "Cfg: Critical: Ran off of the end of the world sorting %s" % (self.name))
- if meta1[0] not in ['C', 'B']:
- return 0
- # need to tiebreak with numeric prio
- prio1 = int(meta1[1:3])
- prio2 = int(meta2[1:3])
- return prio1 - prio2
+ def applies(self, metadata):
+        '''Return True when this fragment applies to the client described by metadata.'''
+ if self.all or (self.hostname == metadata.hostname) or \
+ (self.group in metadata.groups):
+ return True
else:
- return most1 - most2
+ return False
class ConfigFileEntry(object):
'''ConfigFileEntry is a repository entry for a single file, containing
all data for all clients.'''
- specific = regcompile('(.*/)(?P<filename>[\S\-.]+)\.((H_(?P<hostname>\S+))|' +
- '(B(?P<bprio>\d+)_(?P<bundle>\S+))|(A(?P<aprio>\d+)_(?P<attr>\S+))|' +
- '(I_(?P<image>\S+))|(C(?P<cprio>\d+)_(?P<class>\S+)))$')
info = regcompile('^owner:(\s)*(?P<owner>\w+)|group:(\s)*(?P<group>\w+)|' +
'perms:(\s)*(?P<perms>\w+)|encoding:(\s)*(?P<encoding>\w+)|' +
'(?P<paranoid>paranoid(\s)*)$')
@@ -65,8 +83,7 @@ class ConfigFileEntry(object):
object.__init__(self)
self.path = path
self.repopath = repopath
- self.basefiles = []
- self.deltas = []
+ self.fragments = []
self.metadata = {'encoding': 'ascii', 'owner':'root', 'group':'root', 'perms':'0644'}
self.paranoid = False
@@ -94,41 +111,15 @@ class ConfigFileEntry(object):
def AddEntry(self, name):
'''add new file additions for a single cf file'''
- delta = False
- oldname = name
if name[-5:] == ':info':
return self.read_info()
- if name.split('/')[-1] == self.path.split('/')[-1]:
- self.basefiles.append(FileEntry(name, True, None, [], [], [], None))
- self.basefiles.sort()
- return
-
- if name.split('/')[-1].split('.')[-1] in ['cat']:
- delta = True
- oldname = name
- name = name[:-4]
-
- specmatch = self.specific.match(name)
- if specmatch == None:
- syslog(LOG_ERR, "Cfg: Failed to match file %s" % (name))
+ try:
+ self.fragments.append(FileEntry(self.path, name))
+ self.fragments.sort()
+ except SpecificityError:
return
- data = {}
- for item, value in specmatch.groupdict().iteritems():
- if value != None:
- data[item] = value
-
- cfile = FileEntry(oldname, False, data.get('image', None), data.get('class', []),
- data.get('bundle', []), data.get('attr', []), data.get('hostname', None))
-
- if delta:
- self.deltas.append(cfile)
- self.deltas.sort()
- else:
- self.basefiles.append(cfile)
- self.basefiles.sort()
-
def HandleEvent(self, event):
'''Handle FAM updates'''
action = event.code2str()
@@ -136,11 +127,11 @@ class ConfigFileEntry(object):
if action in ['changed', 'exists', 'created']:
return self.read_info()
if event.filename != self.path.split('/')[-1]:
- if not self.specific.match('/' + event.filename):
+ if not specific.match('/' + event.filename):
syslog(LOG_INFO, 'Cfg: Suppressing event for bogus file %s' % event.filename)
return
- entries = [entry for entry in self.basefiles + self.deltas if
+ entries = [entry for entry in self.fragments if
entry.name.split('/')[-1] == event.filename]
if len(entries) == 0:
@@ -152,10 +143,8 @@ class ConfigFileEntry(object):
syslog(LOG_INFO, "Cfg: Removing entry %s" % event.filename)
for entry in entries:
syslog(LOG_INFO, "Cfg: Removing entry %s" % (entry.name))
- if entry in self.basefiles:
- self.basefiles.remove(entry)
- if entry in self.deltas:
- self.deltas.remove(entry)
+ self.fragments.remove(entry)
+ self.fragments.sort()
syslog(LOG_INFO, "Cfg: Entry deletion completed")
elif action in ['changed', 'exists', 'created']:
[entry.HandleEvent(event) for entry in entries]
@@ -168,13 +157,13 @@ class ConfigFileEntry(object):
filedata = ""
# first find basefile
try:
- basefile = [bfile for bfile in self.basefiles if metadata.Applies(bfile)][-1]
+ basefile = [bfile for bfile in self.fragments if bfile.applies(metadata) and not bfile.op][-1]
except IndexError:
syslog(LOG_ERR, "Cfg: Failed to locate basefile for %s" % name)
raise PluginExecutionError, ('basefile', name)
filedata += basefile.data
- for delta in [x for x in self.deltas if metadata.Applies(x)]:
+ for delta in [delta for delta in self.fragments if delta.applies(metadata) and delta.op]:
# find applicable deltas
lines = filedata.split('\n')
if not lines[-1]:
@@ -188,15 +177,15 @@ class ConfigFileEntry(object):
lines.append(line[1:])
filedata = "\n".join(lines) + "\n"
- [entry.attrib.__setitem__(x,y) for (x,y) in self.metadata.iteritems()]
+ [entry.attrib.__setitem__(key, value) for (key, value) in self.metadata.iteritems()]
if self.paranoid:
entry.attrib['paranoid'] = 'true'
if entry.attrib['encoding'] == 'base64':
- entry.text = b2a_base64(filedata)
+ entry.text = binascii.b2a_base64(filedata)
else:
try:
entry.text = filedata
- except:
+ except exceptions.AttributeError:
syslog(LOG_ERR, "Failed to marshall file %s. Mark it as base64" % (entry.get('name')))
class Cfg(Plugin):
diff --git a/src/lib/Server/Plugins/Hostbase.py b/src/lib/Server/Plugins/Hostbase.py
index 9a488fc74..e729030fb 100644
--- a/src/lib/Server/Plugins/Hostbase.py
+++ b/src/lib/Server/Plugins/Hostbase.py
@@ -2,9 +2,12 @@
__revision__ = '$Revision$'
from syslog import syslog, LOG_INFO
-from lxml.etree import XML
+from lxml.etree import XML, SubElement
from Cheetah.Template import Template
from Bcfg2.Server.Plugin import Plugin, PluginExecutionError, PluginInitError, DirectoryBacked
+from time import strftime
+from sets import Set
+import re
class DataNexus(DirectoryBacked):
'''DataNexus is an object that watches multiple files and
@@ -39,7 +42,7 @@ class Hostbase(Plugin, DataNexus):
def __init__(self, core, datastore):
self.ready = False
- files = ['dnsdata.xml', 'hostbase.xml', 'networks.xml']
+ files = ['zones.xml', 'hostbase.xml', 'hostbase-dns.xml', 'hostbase-dhcp.xml']
Plugin.__init__(self, core, datastore)
try:
DataNexus.__init__(self, datastore + '/Hostbase/data',
@@ -49,11 +52,15 @@ class Hostbase(Plugin, DataNexus):
raise PluginInitError
self.xdata = {}
self.filedata = {}
+ self.dnsservers = ['scotty.mcs.anl.gov']
+ self.dhcpservers = ['thwap.mcs.anl.gov', 'squeak.mcs.anl.gov']
self.templates = {'zone':Template(open(self.data + '/templates/' + 'zonetemplate.tmpl').read()),
'reversesoa':Template(open(self.data + '/templates/' + 'reversesoa.tmpl').read()),
'named':Template(open(self.data + '/templates/' + 'namedtemplate.tmpl').read()),
'reverseapp':Template(open(self.data + '/templates/' + 'reverseappend.tmpl').read()),
- 'dhcp':Template(open(self.data + '/templates/' + 'dhcpd_template.tmpl').read())}
+ 'dhcp':Template(open(self.data + '/templates/' + 'dhcpd_template.tmpl').read()),
+ 'hosts':Template(open(self.data + '/templates/' + 'hosts.tmpl').read()),
+ 'hostsapp':Template(open(self.data + '/templates/' + 'hostsappend.tmpl').read())}
self.Entries['ConfigFile'] = {}
def FetchFile(self, entry, metadata):
@@ -65,8 +72,38 @@ class Hostbase(Plugin, DataNexus):
[entry.attrib.__setitem__(key, value) for (key, value) in perms.iteritems()]
entry.text = self.filedata[fname]
+ def BuildStructures(self, metadata):
+ '''Build hostbase bundle'''
+ if metadata.hostname in self.dnsservers or metadata.hostname in self.dhcpservers:
+ output = []
+ if metadata.hostname in self.dnsservers:
+ dnsbundle = XML(self.entries['hostbase-dns.xml'].data)
+ for configfile in self.Entries['ConfigFile']:
+ if re.search('/etc/bind/', configfile):
+ SubElement(dnsbundle, "ConfigFile", name=configfile)
+ output.append(dnsbundle)
+ if metadata.hostname in self.dhcpservers:
+ dhcpbundle = XML(self.entries['hostbase-dhcp.xml'].data)
+ output.append(dhcpbundle)
+ return output
+ else:
+ return []
+
def rebuildState(self, event):
'''Pre-cache all state information for hostbase config files'''
+ def get_serial(zone):
+            '''Return the next zone serial: today's date (YYYYMMDD) with a two-digit revision, incremented when the stored serial is from today.'''
+ todaydate = (strftime('%Y%m%d'))
+ try:
+ if todaydate == zone.get('serial')[:8]:
+ serial = atoi(zone.get('serial')) + 1
+ else:
+ serial = atoi(todaydate) * 100
+ return str(serial)
+ except (KeyError):
+ serial = atoi(todaydate) * 100
+ return str(serial)
+
if self.entries.has_key(event.filename) and not self.xdata.has_key(event.filename):
self.xdata[event.filename] = XML(self.entries[event.filename].data)
if [item for item in self.files if not self.entries.has_key(item)]:
@@ -74,163 +111,293 @@ class Hostbase(Plugin, DataNexus):
# we might be able to rebuild data more sparsely,
# but hostbase.xml is the only one that will really change often
# rebuild zoneinfo
- iplist = []
- for zone in self.xdata['dnsdata.xml']:
+ hosts = {}
+ zones = self.xdata['zones.xml']
+ hostbase = self.xdata['hostbase.xml']
+ ## this now gets all hosts associated with the zone file being initialized
+ ## all ip addresses and cnames are grabbed from each host and passed to the appropriate template
+ for zone in zones:
+ hosts[zone.get('domain')] = []
+ for host in hostbase:
+ if host.get('domain') in hosts:
+ hosts[host.get('domain')].append(host)
+ for zone in zones:
zonehosts = []
- for host in [host for host in self.xdata['hostbase.xml']
- if host.get('domain') == zone.get('domain')]:
- hostname = host.get('hostname')
- if zone.get('domain') == 'mcs.anl.gov':
- ## special cases for the mcs.anl.gov domain
- ## all machines have a "-eth" entry as well as an entry identifying their subnet
- ## they also have their mail exchangers after every address
- ipnodes = host.findall("interface/ip")
- zonehosts.append((hostname, ipnodes[0].attrib['ip'], ipnodes[0].findall("name/mx"), None))
- [zonehosts.append(("-".join([hostname, ipnode.attrib['dnssuffix']]), \
- ipnode.attrib['ip'], ipnode.findall("name/mx"), None))
- for ipnode in ipnodes]
- [zonehosts.append(("-".join([hostname, namenode.attrib['name']]), \
- ipnode.attrib['ip'], namenode.findall("mx"), None))
- for ipnode in ipnodes
- for namenode in ipnode
- if namenode.attrib['name'] != ""]
- else:
- ipnodes = host.findall("interface/ip")
- zonehosts.append((host.attrib['hostname'], ipnodes[0].attrib['ip'], None, None))
- [zonehosts.append(("-".join([host.attrib['hostname'], namenode.attrib['name']]),
- ipnode.attrib['ip'], None, None))
- for ipnode in ipnodes
- for namenode in ipnode
- if namenode.attrib['name'] != ""]
-
- [zonehosts.append((host.attrib['hostname'], None, None, cnamenode.attrib['cname']))
- for cnamenode in host.findall("interface/ip/name/cname")
- if cnamenode.attrib['cname'] != ""]
-
- [iplist.append(ipnode.attrib['ip']) for ipnode in host.findall("interface/ip")]
+ for host in hosts[zone.get('domain')]:
+ hostname = host.attrib['hostname']
+ ipnodes = host.findall("interface/ip")
+                # forward lookups: collect (short name, ip, mx records) for every name on every ip
+ [zonehosts.append((namenode.get('name').split(".")[0], ipnode.get('ip'),
+ namenode.findall('mx')))
+ for ipnode in ipnodes
+ for namenode in ipnode]
+                # CNAME records: append (alias, canonical short name) pairs, skipping duplicates
+ [zonehosts.append((cnamenode.get('cname') + '.', namenode.get('name').split('.')[0], None))
+ for namenode in host.findall("interface/ip/name")
+ for cnamenode in namenode.findall("cname")
+ if (cnamenode.get('cname').split(".")[0], namenode.get('name').split('.')[0], None) not in zonehosts
+ and cnamenode.get('cname') is not None]
+
zonehosts.sort()
self.templates['zone'].zone = zone
- self.templates['zone'].root = self.xdata['dnsdata.xml']
+ self.templates['zone'].root = zones
self.templates['zone'].hosts = zonehosts
self.filedata[zone.get('domain')] = str(self.templates['zone'])
+ self.Entries['ConfigFile']["%s/%s" % (self.filepath, zone.get('domain'))] = self.FetchFile
# now all zone forward files are built
- iplist.sort()
filelist = []
- temp = None
- for x in range(len(iplist)-1):
- addressparts = iplist[x].split(".")
- if addressparts[:3] != iplist[x+1].split(".")[:3] and addressparts[:2] == iplist[x+1].split(".")[:2] \
- and ".".join([addressparts[1], addressparts[0]]) not in filelist:
- filelist.append(".".join([addressparts[1], addressparts[0]]))
- elif addressparts[:3] != iplist[x+1].split(".")[:3] and \
- addressparts[:2] != iplist[x+1].split(".")[:2] and \
- ".".join([addressparts[1], addressparts[0]]) not in filelist:
- filelist.append(".".join([addressparts[2], addressparts[1], addressparts[0]]))
- if x+1 == len(iplist) - 1:
- temp = iplist[x+1].split(".")
- if ".".join([temp[2], temp[1], temp[0]]) not in filelist \
- and ".".join([temp[1], temp[0]]) not in filelist:
- filelist.append(".".join([temp[2], temp[1], temp[0]]))
-
+ three_subnet = [ip.get('ip').rstrip('0123456789').rstrip('.')
+ for ip in hostbase.findall('host/interface/ip')]
+ three_subnet_set = Set(three_subnet)
+ two_subnet = [subnet.rstrip('0123456789').rstrip('.')
+ for subnet in three_subnet_set]
+ two_subnet_set = Set(two_subnet)
+ filelist = [each for each in two_subnet_set
+ if two_subnet.count(each) > 1]
+ [filelist.append(each) for each in three_subnet_set
+ if each.rstrip('0123456789').rstrip('.') not in filelist]
+
+ reversenames = []
for filename in filelist:
- self.templates['reversesoa'].inaddr = filename
+ towrite = filename.split('.')
+ towrite.reverse()
+ reversename = '.'.join(towrite)
+ self.templates['reversesoa'].inaddr = reversename
self.templates['reversesoa'].zone = zone
- self.templates['reversesoa'].root = self.xdata['dnsdata.xml']
- self.filedata["%s.rev" % filename] = str(self.templates['reversesoa'])
+ self.templates['reversesoa'].root = self.xdata['zones.xml']
+ self.filedata['%s.rev' % reversename] = str(self.templates['reversesoa'])
+ reversenames.append(reversename)
- self.templates['named'].zones = self.xdata['dnsdata.xml']
- self.templates['named'].reverses = filelist
+ self.templates['named'].zones = self.xdata['zones.xml']
+ self.templates['named'].reverses = reversenames
self.filedata["named.conf"] = str(self.templates['named'])
+ self.Entries['ConfigFile']["%s/%s" % (self.filepath, 'named.conf')] = self.FetchFile
- for filename in filelist:
+ reversenames.sort()
+ for filename in reversenames:
originlist = []
+ reversehosts = []
towrite = filename.split(".")
towrite.reverse()
if len(towrite) > 2:
- self.templates['reverseapp'].hosts = [(ipnode.get('ip').split('.'), host.get('hostname'),
- host.get('domain'), ipnode.get('num'), ipnode.get('dnssuffix'))
- for host in self.xdata['hostbase.xml']
- for ipnode in host.findall('interface/ip')
- if ipnode.get('ip').split('.')[:3] == towrite]
-
+ [reversehosts.append((ipnode.attrib['ip'].split("."), host.attrib['hostname'],
+ host.attrib['domain'], ipnode.get('num'), None))
+ for host in self.xdata['hostbase.xml']
+ for ipnode in host.findall("interface/ip")
+ if ipnode.attrib['ip'].split(".")[:3] == towrite]
+ self.templates['reverseapp'].hosts = reversehosts
self.templates['reverseapp'].inaddr = filename
self.templates['reverseapp'].fileorigin = None
self.filedata["%s.rev" % filename] += str(self.templates['reverseapp'])
else:
- revhosts = [(ipnode.get('ip').split('.'), host.get('hostname'), host.get('domain'),
- ipnode.get('num'), ipnode.get('dnssuffix'))
- for host in self.xdata['hostbase.xml']
- for ipnode in host.findall("interface/ip")
- if ipnode.get('ip').split(".")[:2] == towrite]
+ [reversehosts.append((ipnode.attrib['ip'].split("."), host.attrib['hostname'],
+ host.attrib['domain'], ipnode.get('num'), None))
+ for host in self.xdata['hostbase.xml']
+ for ipnode in host.findall("interface/ip")
+ if ipnode.attrib['ip'].split(".")[:2] == towrite]
[originlist.append(".".join([reversehost[0][2], reversehost[0][1], reversehost[0][0]]))
- for reversehost in revhosts
+ for reversehost in reversehosts
if ".".join([reversehost[0][2], reversehost[0][1], reversehost[0][0]]) not in originlist]
- revhosts.sort()
+ reversehosts.sort()
originlist.sort()
for origin in originlist:
- outputlist = [rhost for rhost in revhosts
- if ".".join([rhost[0][2], rhost[0][1], rhost[0][0]]) == origin]
+ outputlist = []
+ [outputlist.append(reversehost)
+ for reversehost in reversehosts
+ if ".".join([reversehost[0][2], reversehost[0][1], reversehost[0][0]]) == origin]
self.templates['reverseapp'].fileorigin = filename
self.templates['reverseapp'].hosts = outputlist
self.templates['reverseapp'].inaddr = origin
self.filedata["%s.rev" % filename] += str(self.templates['reverseapp'])
+ self.Entries['ConfigFile']["%s/%s.rev" % (self.filepath, filename)] = self.FetchFile
self.buildDHCP()
- for key in self.filedata:
- self.Entries['ConfigFile']["%s/%s" % (self.filepath, key)] = self.FetchFile
+ self.buildHosts()
+ self.buildHostsLPD()
+ self.buildPrinters()
+ self.buildNetgroups()
def buildDHCP(self):
'''Pre-build dhcpd.conf and stash in the filedata table'''
- if 'networks.xml' not in self.xdata.keys():
- print "not running before networks is cached"
- return
- networkroot = self.xdata['networks.xml']
if 'hostbase.xml' not in self.xdata.keys():
print "not running before hostbase is cached"
return
hostbase = self.xdata['hostbase.xml']
- vlanandsublist = []
- subnets = networkroot.findall("subnet")
- for vlan in networkroot.findall("vlan"):
- vlansubs = vlan.findall("subnet")
- vlansubs.sort(lambda x, y: cmp(x.get("address"), y.get("address")))
- vlanandsublist.append((vlan, vlansubs))
-
- subnets140 = [subnet for subnet in subnets if subnet.attrib['address'].split(".")[0] == "140"]
- privatesubnets = [subnet for subnet in subnets if subnet.attrib['address'].split(".")[0] != "140"]
- subnets140.sort(lambda x, y: cmp(x.get("address"), y.get("address")))
- privatesubnets.sort(lambda x, y: cmp(x.get("address"), y.get("address")))
-
- dhcphosts = [host for host in hostbase if host.get('dhcp') == 'y' \
- and host.find("interface").get('mac') != 'float' \
- and host.find("interface").get('mac') != ""]
+ dhcphosts = [host for host in hostbase if host.find('dhcp').get('dhcp') == 'y'
+ and host.find("interface").attrib['mac'] != 'float'
+ and host.find("interface").attrib['mac'] != ""
+ and host.find("interface").attrib['mac'] != "unknown"]
+ numips = 0
hosts = []
for host in dhcphosts:
if len(host.findall("interface")) == 1 and len(host.findall("interface/ip")) == 1:
- hosts.append([host.get('hostname'), host.get('domain'), \
- host.find("interface").get('mac'), \
- host.find("interface/ip").get('ip')])
- elif len(host.findall("interface")) > 1:
+ hosts.append([host.attrib['hostname'], host.attrib['domain'], \
+ host.find("interface").attrib['mac'], \
+ host.find("interface/ip").attrib['ip']])
+ else:
count = 0
- for interface in host.findall("interface"):
+ for interface in host.findall('interface'):
if count == 0 and interface.find("ip") is not None:
- hostdata = [host.get('hostname'), host.get('domain'), \
- interface.get('mac'), interface.find("ip").get('ip')]
+ hostdata = [host.attrib['hostname'], host.attrib['domain'],
+ interface.attrib['mac'], interface.find("ip").attrib['ip']]
elif count != 0 and interface.find("ip") is not None:
- hostdata = [host.get('hostname'), "-".join([host.get('domain'), str(count)]), \
- interface.get('mac'), interface.find("ip").get('ip')]
+ hostdata = [host.attrib['hostname'], "-".join([host.attrib['domain'], str(count)]),
+ interface.attrib['mac'], interface.find("ip").attrib['ip']]
if len(interface.findall("ip")) > 1:
- for ipnode in interface.findall("ip")[1:]:
- hostdata[3] = ", ".join([hostdata[3], ipnode.get('ip')])
+ for ip in interface.findall("ip")[1:]:
+ hostdata[3] = ", ".join([hostdata[3], ip.attrib['ip']])
count += 1
hosts.append(hostdata)
+
+ numips += len(host.findall("interface/ip"))
hosts.sort(lambda x, y: cmp(x[0], y[0]))
self.templates['dhcp'].hosts = hosts
- self.templates['dhcp'].privatesubnets = privatesubnets
- self.templates['dhcp'].subnets140 = subnets140
- self.templates['dhcp'].vlans = vlanandsublist
- self.templates['dhcp'].networkroot = networkroot
- self.filedata['/etc/dhcpd.conf'] = str(self.templates['dhcp'])
+ self.templates['dhcp'].numips = numips
+ self.templates['dhcp'].timecreated = strftime("%a %b %d %H:%M:%S %Z %Y")
+ self.filedata['dhcpd.conf'] = str(self.templates['dhcp'])
+ self.Entries['ConfigFile']['/etc/dhcpd.conf'] = self.FetchFile
+
+ def buildHosts(self):
+ '''This will rebuild the hosts file to include all important machines'''
+ hostbase = self.xdata['hostbase.xml']
+ domains = [host.get('domain') for host in hostbase]
+ domains_set = Set(domains)
+ domain_data = [(domain, domains.count(domain)) for domain in domains_set]
+ domain_data.sort()
+ ips = [(ip, host) for host in hostbase.findall('host')
+ for ip in host.findall("interface/ip")]
+ three_octets = [ip[0].get('ip').rstrip('0123456789').rstrip('.')
+ for ip in ips]
+ three_octets_set = list(Set(three_octets))
+ three_sort = [tuple([int(num) for num in each.split('.')]) for each in three_octets_set]
+ three_sort.sort()
+ three_octets_set = ['.'.join([str(num) for num in each]) for each in three_sort]
+ three_octets_data = [(octet, three_octets.count(octet))
+ for octet in three_octets_set]
+ append_data = [(subnet, [ip for ip in ips \
+ if ip[0].get('ip').rstrip("0123456789").rstrip('.')
+ == subnet[0]]) for subnet in three_octets_data]
+ for each in append_data:
+ each[1].sort(lambda x, y: cmp(int(x[0].get('ip').split('.')[-1]), int(y[0].get('ip').split('.')[-1])))
+ two_octets = [ip.rstrip('0123456789').rstrip('.') for ip in three_octets]
+ two_octets_set = list(Set(two_octets))
+ two_sort = [tuple([int(num) for num in each.split('.')]) for each in two_octets_set]
+ two_sort.sort()
+ two_octets_set = ['.'.join([str(num) for num in each]) for each in two_sort]
+ two_octets_data = [(octet, two_octets.count(octet)) for octet in two_octets_set]
+ self.templates['hosts'].domain_data = domain_data
+ self.templates['hosts'].three_octets_data = three_octets_data
+ self.templates['hosts'].two_octets_data = two_octets_data
+ self.templates['hosts'].three_octets = len(three_octets)
+ self.templates['hosts'].timecreated = strftime("%a %b %d %H:%M:%S %Z %Y")
+ self.filedata['hosts'] = str(self.templates['hosts'])
+ for subnet in append_data:
+ self.templates['hostsapp'].ips = subnet[1]
+ self.templates['hostsapp'].subnet = subnet[0]
+ self.filedata['hosts'] += str(self.templates['hostsapp'])
+ self.Entries['ConfigFile']['/mcs/etc/hosts'] = self.FetchFile
+
+
+ def buildPrinters(self):
+ '''this will rebuild the printers.data file used in
+ our local printers script'''
+ header = """# This file is automatically generated. DO NOT EDIT IT!
+# This datafile is for use with /mcs/bin/printers.
+#
+Name Room User Type Notes
+============== ========== ============ ======================== ====================
+"""
+
+ printers = [host for host in self.xdata['hostbase.xml']
+ if host.find('whatami').get('whatami') == "printer"
+ and host.get('domain') == 'mcs.anl.gov']
+ self.filedata['printers.data'] = header
+ output_list = []
+ for printer in printers:
+ if printer.find('printq').get('printq'):
+ for printq in re.split(',[ ]*', printer.find('printq').get('printq')):
+ output_list.append((printq, printer.find('room').get('room'), printer.find('user').get('user'),
+ printer.find('model').get('model'), printer.find('note').get('note')))
+ output_list.sort()
+ for printer in output_list:
+ self.filedata['printers.data'] += ("%-16s%-12s%-14s%-26s%s\n" % printer)
+ self.Entries['ConfigFile']['/mcs/etc/printers.data'] = self.FetchFile
+
+ def buildHostsLPD(self):
+ '''this rebuilds the hosts.lpd file'''
+ header = """+@machines
++@all-machines
+achilles.ctd.anl.gov
+raven.ops.anl.gov
+seagull.hr.anl.gov
+parrot.ops.anl.gov
+condor.ops.anl.gov
+delphi.esh.anl.gov
+anlcv1.ctd.anl.gov
+anlvms.ctd.anl.gov
+olivia.ctd.anl.gov\n\n"""
+
+ hostbase = self.xdata['hostbase.xml']
+ redmachines = [".".join([host.get('hostname'), host.get('domain')])
+ for host in hostbase if host.find('netgroup').get('netgroup') == 'red']
+ winmachines = [".".join([host.get('hostname'), host.get('domain')])
+ for host in hostbase if host.find('netgroup').get('netgroup') == 'win']
+ redmachines += [name.get('name') for host in hostbase
+ for name in host.findall('interface/ip/name')
+ if host.find('netgroup').get('netgroup') == 'red' and name.get('only') != 'no']
+ winmachines += [name.get('name') for host in hostbase
+ for name in host.findall('interface/ip/name')
+ if host.find('netgroup').get('netgroup') == 'win' and name.get('only') != 'no']
+ redmachines.sort()
+ winmachines.sort()
+ self.filedata['hosts.lpd'] = header
+ for machine in redmachines:
+ self.filedata['hosts.lpd'] += machine + "\n"
+ self.filedata['hosts.lpd'] += "\n"
+ for machine in winmachines:
+ self.filedata['hosts.lpd'] += machine + "\n"
+ self.Entries['ConfigFile']['/mcs/etc/hosts.lpd'] = self.FetchFile
+
+ def buildNetgroups(self):
+ '''this rebuilds the many different files that will eventually
+ get post processed and converted into a ypmap for netgroups'''
+ header = """###################################################################
+# This file lists hosts in the '%s' machine netgroup, it is
+# automatically generated. DO NOT EDIT THIS FILE! To update
+# the hosts in this file, edit hostbase and do a 'make nets'
+# in /mcs/adm/hostbase.
+#
+# Number of hosts in '%s' machine netgroup: %i
+#\n\n"""
+
+ netgroups = {}
+ for host in self.xdata['hostbase.xml']:
+ if host.find('netgroup').get('netgroup') == "" or host.find('netgroup').get('netgroup')== 'none':
+ continue
+ if host.find('netgroup').get('netgroup') not in netgroups:
+ netgroups.update({host.find('netgroup').get('netgroup') :
+ [".".join([host.get('hostname'), host.get('domain')])]})
+ else:
+ netgroups[host.find('netgroup').get('netgroup')].append(".".join([host.get('hostname'),
+ host.get('domain')]))
+
+ for name in host.findall('interface/ip/name'):
+ if name.get('only') != 'no':
+ netgroups[host.find('netgroup').get('netgroup')].append(name.get('name'))
+
+ for netgroup in netgroups:
+ self.filedata["%s-machines" % netgroup] = header % (netgroup, netgroup, len(netgroups[netgroup]))
+ netgroups[netgroup].sort()
+ for each in netgroups[netgroup]:
+ self.filedata["%s-machines" % netgroup] += each + "\n"
+ self.Entries['ConfigFile']["/var/yp/netgroups/%s-machines" % netgroup] = self.FetchFile
+
+ def dumpXML(self):
+ '''this just dumps the info in the hostbase.xml file to be used
+ with external programs'''
+ self.filedata['hostbase.xml'] = self.xdata['hostbase.xml']
+ self.Entries['ConfigFile']['/etc/hostbase.xml'] = self.FetchFile
+
diff --git a/src/lib/Server/Plugins/Pkgmgr.py b/src/lib/Server/Plugins/Pkgmgr.py
index 8521994e0..e77dd99e5 100644
--- a/src/lib/Server/Plugins/Pkgmgr.py
+++ b/src/lib/Server/Plugins/Pkgmgr.py
@@ -1,100 +1,54 @@
'''This module implements a package management scheme for all images'''
__revision__ = '$Revision$'
-from copy import deepcopy
-from re import compile as regcompile
+import re
from syslog import syslog, LOG_ERR
+import Bcfg2.Server.Plugin
-from Bcfg2.Server.Plugin import Plugin, PluginInitError, PluginExecutionError, DirectoryBacked, XMLFileBacked
-
-class PackageEntry(XMLFileBacked):
- '''PackageEntry is a set of packages and locations for a single image'''
- __identifier__ = 'image'
- splitters = {'rpm':regcompile('^(?P<name>[\w\+\d\.]+(-[\w\+\d\.]+)*)-' + \
+class PNode(Bcfg2.Server.Plugin.LNode):
+ '''PNode has a list of packages available at a particular group intersection'''
+ splitters = {'rpm':re.compile('^(?P<name>[\w\+\d\.]+(-[\w\+\d\.]+)*)-' + \
'(?P<version>[\w\d\.]+-([\w\d\.]+))\.(?P<arch>\w+)\.rpm$'),
- 'encap':regcompile('^(?P<name>\w+)-(?P<version>[\w\d\.-]+).encap.*$')}
-
- def __init__(self, filename):
- XMLFileBacked.__init__(self, filename)
- self.packages = {}
-
- def Index(self):
- '''Build internal data structures'''
- XMLFileBacked.Index(self)
- self.packages = {}
- for location in self.entries:
- for pkg in location.getchildren():
- if location.attrib.has_key('type'):
- pkg.set('type', location.get('type'))
- if pkg.attrib.has_key("simplefile"):
- self.packages[pkg.get('name')] = {}
- for key in pkg.attrib:
- self.packages[pkg.get('name')][key] = pkg.attrib[key]
- # most attribs will be set from pkg
- self.packages[pkg.get('name')]['url'] = "%s/%s" % (location.get('uri'), pkg.get('simplefile'))
- elif pkg.attrib.has_key("file"):
- if self.splitters.has_key(pkg.get('type')):
- mdata = self.splitters[pkg.get('type')].match(pkg.get('file'))
- if not mdata:
- syslog(LOG_ERR, "Failed to match pkg %s" % pkg.get('file'))
- continue
- pkgname = mdata.group('name')
- self.packages[pkgname] = mdata.groupdict()
- self.packages[pkgname]['url'] = location.get('uri') + '/' + pkg.get('file')
- self.packages[pkgname]['type'] = pkg.get('type')
- else:
- derived = [(ptype, self.splitters[ptype].match(pkg.get('file')).groupdict())
- for ptype in self.splitters if self.splitters[ptype].match(pkg.get('file'))]
- if not derived:
- syslog("Failed to match pkg %s" % pkg.get('file'))
- else:
- (ptype, mdata) = derived[0]
- pkgname = mdata['name']
- self.packages[pkgname] = mdata
- self.packages[pkgname]['url'] = location.get('uri') + '/' + pkg.get('file')
- self.packages[pkgname]['type'] = ptype
+ 'encap':re.compile('^(?P<name>\w+)-(?P<version>[\w\d\.-]+).encap.*$')}
+
+ def __init__(self, data, plist, parent=None):
+ # copy local attributes to all child nodes if no local attribute exists
+ for child in data.getchildren():
+ for attr in [key for key in data.attrib.keys() if key != 'name' and not child.attrib.has_key(key)]:
+ child.set(attr, data.get(attr))
+ Bcfg2.Server.Plugin.LNode.__init__(self, data, plist, parent)
+ for pkg in data.findall('./Package'):
+ if pkg.attrib.has_key('name') and pkg.get('name') not in plist:
+ plist.append(pkg.get('name'))
+ if pkg.attrib.has_key('simplefile'):
+ pkg.set('url', "%s/%s" % (pkg.get('uri'), pkg.get('simplefile')))
+ self.contents[pkg.get('name')] = pkg.attrib
+ else:
+ if pkg.attrib.has_key('file'):
+ pkg.set('url', '%s/%s' % (pkg.get('uri'), pkg.get('file')))
+ if self.splitters.has_key(pkg.get('type')):
+ mdata = self.splitters[pkg.get('type')].match(pkg.get('file'))
+ if not mdata:
+ syslog(LOG_ERR, "Pkgmgr: Failed to match pkg %s" % pkg.get('file'))
+ continue
+ pkgname = mdata.group('name')
+ self.contents[pkgname] = mdata.groupdict()
+ if pkg.attrib.get('file'):
+ self.contents[pkgname]['url'] = pkg.get('url')
+ self.contents[pkgname]['type'] = pkg.get('type')
+ if pkgname not in plist:
+ plist.append(pkgname)
else:
- self.packages[pkg.get('name')] = pkg.attrib
+ self.contents[pkg.get('name')] = pkg.attrib
-class PackageDir(DirectoryBacked):
- '''A directory of package files'''
- __child__ = PackageEntry
+class PkgSrc(Bcfg2.Server.Plugin.XMLSrc):
+ '''PkgSrc files contain a PNode hierarchy that returns matching package entries'''
+ __node__ = PNode
-class Pkgmgr(Plugin):
+class Pkgmgr(Bcfg2.Server.Plugin.XMLPrioDir):
'''This is a generator that handles package assignments'''
__name__ = 'Pkgmgr'
__version__ = '$Id$'
__author__ = 'bcfg-dev@mcs.anl.gov'
-
- def __init__(self, core, datastore):
- Plugin.__init__(self, core, datastore)
- try:
- self.pkgdir = PackageDir(self.data, self.core.fam)
- except OSError:
- self.LogError("Pkgmgr: Failed to load package indices")
- raise PluginInitError
-
- def FindHandler(self, entry):
- '''Non static mechanism of determining entry provisioning'''
- if entry.tag != 'Package':
- raise PluginExecutionError, (entry.tag, entry.get('name'))
- return self.LocatePackage
-
- def LocatePackage(self, entry, metadata):
- '''Locates a package entry for particular metadata'''
- pkgname = entry.get('name')
- if self.pkgdir.entries.has_key("%s.xml" % metadata.hostname):
- pkglist = self.pkgdir["%s.xml" % metadata.hostname]
- if pkglist.packages.has_key(pkgname):
- pkginfo = pkglist.packages[pkgname]
- [entry.attrib.__setitem__(field, pkginfo[field]) for field in pkginfo]
- return
- elif not self.pkgdir.entries.has_key("%s.xml" % metadata.image):
- self.LogError("Pkgmgr: no package index for image %s" % metadata.image)
- raise PluginExecutionError, ("Image", metadata.image)
- pkglist = self.pkgdir["%s.xml" % (metadata.image)]
- if pkglist.packages.has_key(pkgname):
- pkginfo = pkglist.packages[pkgname]
- [entry.attrib.__setitem__(x, pkginfo[x]) for x in pkginfo]
- else:
- raise PluginExecutionError, ("Package", pkgname)
+ __child__ = PkgSrc
+ __element__ = 'Package'
diff --git a/src/lib/Server/Plugins/Svcmgr.py b/src/lib/Server/Plugins/Svcmgr.py
index 2f2c7e5eb..da5ab341c 100644
--- a/src/lib/Server/Plugins/Svcmgr.py
+++ b/src/lib/Server/Plugins/Svcmgr.py
@@ -1,23 +1,20 @@
'''This generator provides service mappings'''
__revision__ = '$Revision$'
-from Bcfg2.Server.Plugin import Plugin, ScopedXMLFile, PluginInitError
+import Bcfg2.Server.Plugin
-class Svcmgr(Plugin):
+class SNode(Bcfg2.Server.Plugin.LNode):
+ '''SNode has a list of services available at a particular group intersection'''
+ __leaf__ = './Service'
+
+class SvcSrc(Bcfg2.Server.Plugin.XMLSrc):
+ '''SvcSrc files contain prioritized service definitions'''
+ __node__ = SNode
+
+class Svcmgr(Bcfg2.Server.Plugin.XMLPrioDir):
'''This is a generator that handles service assignments'''
__name__ = 'Svcmgr'
__version__ = '$Id$'
__author__ = 'bcfg-dev@mcs.anl.gov'
-
- def __init__(self, core, datastore):
- Plugin.__init__(self, core, datastore)
- try:
- self.svc = ScopedXMLFile("%s/etc/services.xml"%(datastore), self.core.fam)
- except OSError:
- self.LogError("Failed to load service definition file")
- raise PluginInitError
- self.Entries = self.svc.__provides__
-
-
-
-
+ __child__ = SvcSrc
+ __element__ = 'Service'