summaryrefslogtreecommitdiffstats
path: root/src/lib/Server
diff options
context:
space:
mode:
authorNarayan Desai <desai@mcs.anl.gov>2006-01-23 22:35:40 +0000
committerNarayan Desai <desai@mcs.anl.gov>2006-01-23 22:35:40 +0000
commitedca0b698637c3fd0a70af7e4752a46afca938d3 (patch)
tree658fad717833200ccb4e3725c811ccce7c10fc8d /src/lib/Server
parent8ca8a153dfc6bd81ede9f5cff1ee3f111ae053ee (diff)
downloadbcfg2-edca0b698637c3fd0a70af7e4752a46afca938d3.tar.gz
bcfg2-edca0b698637c3fd0a70af7e4752a46afca938d3.tar.bz2
bcfg2-edca0b698637c3fd0a70af7e4752a46afca938d3.zip
last step of repo switches
git-svn-id: https://svn.mcs.anl.gov/repos/bcfg/trunk/bcfg2@1716 ce84e21b-d406-0410-9b95-82705330c041
Diffstat (limited to 'src/lib/Server')
-rw-r--r--src/lib/Server/Component.py1
-rw-r--r--src/lib/Server/Core.py18
-rw-r--r--src/lib/Server/Metadata.py274
-rw-r--r--src/lib/Server/Plugin.py232
-rw-r--r--src/lib/Server/Plugins/Base.py59
-rw-r--r--src/lib/Server/Plugins/Bundler.py120
-rw-r--r--src/lib/Server/Plugins/Cfg.py147
-rw-r--r--src/lib/Server/Plugins/Hostbase.py391
-rw-r--r--src/lib/Server/Plugins/Pkgmgr.py128
-rw-r--r--src/lib/Server/Plugins/Svcmgr.py27
10 files changed, 751 insertions, 646 deletions
diff --git a/src/lib/Server/Component.py b/src/lib/Server/Component.py
index 97444bb10..5c19c3bdd 100644
--- a/src/lib/Server/Component.py
+++ b/src/lib/Server/Component.py
@@ -5,7 +5,6 @@ from ConfigParser import ConfigParser, NoOptionError
from cPickle import loads, dumps
from M2Crypto import SSL
from SimpleXMLRPCServer import SimpleXMLRPCRequestHandler
-from select import select
from socket import gethostname
from sys import exc_info
import sys
diff --git a/src/lib/Server/Core.py b/src/lib/Server/Core.py
index a2100ea53..91da366b8 100644
--- a/src/lib/Server/Core.py
+++ b/src/lib/Server/Core.py
@@ -11,9 +11,11 @@ from ConfigParser import ConfigParser
from lxml.etree import Element
from Bcfg2.Server.Plugin import PluginInitError, PluginExecutionError
-from Bcfg2.Server.Metadata import MetadataStore, MetadataConsistencyError
+
from Bcfg2.Server.Statistics import Statistics
+import Bcfg2.Server.Metadata
+
def log_failure(msg):
syslog(LOG_ERR, "Unexpected failure in %s" % (msg))
(trace, val, trb) = exc_info()
@@ -210,7 +212,7 @@ class Core(object):
mpath = cfile.get('server','repository')
try:
- self.metadata = MetadataStore("%s/etc/metadata.xml" % mpath, self.fam)
+ self.metadata = Bcfg2.Server.Metadata.Metadata(self.fam, mpath)
except OSError:
raise CoreInitError, "metadata path incorrect"
@@ -269,21 +271,15 @@ class Core(object):
generators = ", ".join([gen.__name__ for gen in glist])
syslog(LOG_ERR, "%s %s served by multiple generators: %s" % (entry.tag,
entry.get('name'), generators))
- raise PluginExecutionError, (entry.tag, entry.get('name'))
- else:
- for gen in self.generators:
- if hasattr(gen, "FindHandler"):
- return gen.FindHandler(entry)(entry, metadata)
- syslog(LOG_ERR, "Failed to find handler for %s:%s" % (entry.tag, entry.get('name')))
- raise PluginExecutionError, (entry.tag, entry.get('name'))
+ raise PluginExecutionError, (entry.tag, entry.get('name'))
def BuildConfiguration(self, client):
'''Build Configuration for client'''
start = time()
config = Element("Configuration", version='2.0')
try:
- meta = self.metadata.FetchMetadata(client)
- except MetadataConsistencyError:
+ meta = self.metadata.get_metadata(client)
+ except Bcfg2.Server.Metadata.MetadataConsistencyError:
syslog(LOG_ERR, "Metadata consistency error for client %s" % client)
return Element("error", type='metadata error')
diff --git a/src/lib/Server/Metadata.py b/src/lib/Server/Metadata.py
index 47bbb3ecb..ecf636476 100644
--- a/src/lib/Server/Metadata.py
+++ b/src/lib/Server/Metadata.py
@@ -1,130 +1,190 @@
'''This file stores persistent metadata for the BCFG Configuration Repository'''
__revision__ = '$Revision$'
-from lxml.etree import XML, SubElement, Element, _Comment, tostring
from syslog import syslog, LOG_ERR, LOG_INFO
-from Bcfg2.Server.Plugin import SingleXMLFileBacked
+import lxml.etree, os, time, threading
class MetadataConsistencyError(Exception):
'''This error gets raised when metadata is internally inconsistent'''
pass
-class Metadata(object):
- '''The Metadata class is a container for all classes of metadata used by Bcfg2'''
- def __init__(self, all, image, classes, bundles, attributes, hostname, toolset):
- self.all = all
- self.image = image
- self.classes = classes
+class MetadataRuntimeError(Exception):
+ '''This error is raised when the metadata engine is called prior to reading enough data'''
+ pass
+
+class ClientMetadata(object):
+ '''This object contains client metadata'''
+ def __init__(self, client, groups, bundles, toolset):
+ self.hostname = client
self.bundles = bundles
- self.attributes = attributes
- self.hostname = hostname
+ self.groups = groups
self.toolset = toolset
- def Applies(self, other):
- '''Check if metadata styled object applies to current metadata'''
- if (other.all or (other.image and (self.image == other.image)) or
- (other.classes and (other.classes in self.classes)) or
- (other.attributes and (other.attributes in self.attributes)) or
- (other.bundles and (other.bundles in self.bundles)) or
- (other.hostname and (self.hostname == other.hostname)) or
- (other.hostname and (self.hostname.split('.')[0] == other.hostname))):
- return True
- else:
- return False
-
-class Profile(object):
- '''Profiles are configuration containers for sets of classes and attributes'''
- def __init__(self, xml):
- object.__init__(self)
- self.classes = [cls.attrib['name'] for cls in xml.findall("Class")]
- self.attributes = ["%s.%s" % (attr.attrib['scope'], attr.attrib['name']) for
- attr in xml.findall("Attribute")]
-
-class MetadataStore(SingleXMLFileBacked):
- '''The MetadataStore is a filebacked xml repository that contains all setup info for all clients'''
+class Metadata:
+ '''This class contains data for bcfg2 server metadata'''
+ __name__ = 'Metadata'
+ __version__ = '$Id$'
+ __author__ = 'bcfg-dev@mcs.anl.gov'
- def __init__(self, filename, fam):
- # initialize Index data to avoid race
- self.defaults = {}
+ def __init__(self, fam, datastore):
+ self.data = "%s/%s" % (datastore, self.__name__)
+ fam.AddMonitor("%s/%s" % (self.data, "groups.xml"), self)
+ fam.AddMonitor("%s/%s" % (self.data, "clients.xml"), self)
+ self.states = {'groups.xml':False, 'clients.xml':False}
self.clients = {}
- self.profiles = {}
- self.classes = {}
- self.images = {}
- self.element = Element("dummy")
- SingleXMLFileBacked.__init__(self, filename, fam)
-
- def Index(self):
- '''Build data structures for XML data'''
- self.element = XML(self.data)
- self.defaults = {}
- self.clients = {}
- self.profiles = {}
- self.classes = {}
- self.images = {}
- for prof in self.element.findall("Profile"):
- self.profiles[prof.attrib['name']] = Profile(prof)
- for cli in self.element.findall("Client"):
- self.clients[cli.attrib['name']] = (cli.attrib['image'], cli.attrib['profile'])
- for cls in self.element.findall("Class"):
- self.classes[cls.attrib['name']] = [bundle.attrib['name'] for bundle in cls.findall("Bundle")]
- for img in self.element.findall("Image"):
- self.images[img.attrib['name']] = img.attrib['toolset']
- for key in [key[8:] for key in self.element.attrib if key[:8] == 'default_']:
- self.defaults[key] = self.element.get("default_%s" % key)
+ self.aliases = {}
+ self.groups = {}
+ self.public = []
+ self.profiles = []
+ self.toolsets = {}
+ self.categories = {}
+ self.clientdata = None
+ self.default = None
- def FetchMetadata(self, client, image=None, profile=None):
- '''Get metadata for client'''
- if ((image != None) and (profile != None)):
- # Client asserted profile/image
- self.clients[client] = (image, profile)
- syslog(LOG_INFO, "Metadata: Asserted metadata for %s: %s, %s" % (client, image, profile))
- [self.element.remove(cli) for cli in self.element.findall("Client") if cli.get('name') == client]
- SubElement(self.element, "Client", name=client, image=image, profile=profile)
- self.WriteBack()
+ def HandleEvent(self, event):
+ '''Handle update events for data files'''
+ filename = event.filename.split('/')[-1]
+ if filename not in ['groups.xml', 'clients.xml']:
+ return
+ if event.code2str() == 'endExist':
+ return
+ try:
+ xdata = lxml.etree.parse("%s/%s" % (self.data, filename))
+ except lxml.etree.XMLSyntaxError:
+ syslog(LOG_ERR, 'Metadata: Failed to parse %s' % (filename))
+ return
+ if filename == 'clients.xml':
+ self.clients = {}
+ self.aliases = {}
+ self.clientdata = xdata
+ for client in xdata.findall('./Client'):
+ self.clients.update({client.get('name'): client.get('profile')})
+ [self.aliases.update({alias.get('name'): client.get('name')}) for alias in client.findall('Alias')]
else:
- # no asserted metadata
- if self.clients.has_key(client):
- (image, profile) = self.clients[client]
- else:
- # default profile stuff goes here
- (image, profile) = (self.defaults['image'], self.defaults['profile'])
- SubElement(self.element, "Client", name=client, profile=profile, image=image)
- self.WriteBack()
+ self.public = []
+ self.profiles = []
+ self.toolsets = {}
+ self.groups = {}
+ grouptmp = {}
+ self.categories = {}
+ for group in xdata.findall('./Group'):
+ grouptmp[group.get('name')] = tuple([[item.get('name') for item in group.findall(spec)]
+ for spec in ['./Bundle', './Group']])
+ grouptmp[group.get('name')][1].append(group.get('name'))
+ if group.get('default', 'false') == 'true':
+ self.default = group.get('name')
+ if group.get('profile', 'false') == 'true':
+ self.profiles.append(group.get('name'))
+ if group.get('public', 'false') == 'true':
+ self.public.append(group.get('name'))
+ if group.attrib.has_key('toolset'):
+ self.toolsets[group.get('name')] = group.get('toolset')
+ if group.attrib.has_key('category'):
+ self.categories[group.get('name')] = group.get('category')
+ for group in grouptmp:
+ self.groups[group] = ([], [])
+ gcategories = []
+ tocheck = [group]
+ while tocheck:
+ now = tocheck.pop()
+ if now not in self.groups[group][1]:
+ self.groups[group][1].append(now)
+ if grouptmp.has_key(now):
+ (bundles, groups) = grouptmp[now]
+ for ggg in [ggg for ggg in groups if ggg not in self.groups[group][1]]:
+ if not self.categories.has_key(ggg) or (self.categories[ggg] not in gcategories):
+ self.groups[group][1].append(ggg)
+ tocheck.append(ggg)
+ if self.categories.has_key(ggg):
+ gcategories.append(self.categories[ggg])
+ [self.groups[group][0].append(bund) for bund in bundles
+ if bund not in self.groups[group][0]]
+ self.states[filename] = True
+ if False not in self.states.values():
+ # check that all client groups are real and complete
+ real = self.groups.keys()
+ for client in self.clients.keys():
+ if self.clients[client] not in real or self.clients[client] not in self.profiles:
+ syslog(LOG_ERR, "Metadata: Client %s set as nonexistant or incomplete group %s" \
+ % (client, self.clients[client]))
+ syslog(LOG_ERR, "Metadata: Removing client mapping for %s" % (client))
+ del self.clients[client]
- if not self.profiles.has_key(profile):
- syslog(LOG_ERR, "Metadata: profile %s not defined" % profile)
- raise MetadataConsistencyError
- prof = self.profiles[profile]
- # should we uniq here? V
- bundles = reduce(lambda x, y:x + y, [self.classes.get(cls, []) for cls in prof.classes])
- if not self.images.has_key(image):
- syslog(LOG_ERR, "Metadata: Image %s not defined" % image)
+ def set_group(self, client, group):
+ '''Set group parameter for provided client'''
+ if False in self.states.values():
+ raise MetadataRuntimeError
+ if group not in self.public:
+ syslog(LOG_ERR, "Metadata: Failed to set client %s to private group %s" % (client,
+ group))
raise MetadataConsistencyError
- toolset = self.images[image]
- return Metadata(False, image, prof.classes, bundles, prof.attributes, client, toolset)
+ if self.clients.has_key(client):
+ syslog(LOG_INFO, "Metadata: Changing %s group from %s to %s" % (client,
+ self.clients[client], group))
+ cli = self.clientdata.xpath('/Clients/Client[@name="%s"]' % (client))
+ cli[0].set('group', group)
+ else:
+ lxml.etree.SubElement(self.clientdata.getroot(), 'Client', name=client, group=group)
+ self.clients[client] = group
+ self.write_back_clients()
+
+ def write_back_clients(self):
+ '''Write changes to client.xml back to disk'''
+ try:
+ datafile = open("%s/%s" % (self.data, 'clients.xml'), 'w')
+ except IOError:
+ syslog(LOG_ERR, "Metadata: Failed to write clients.xml")
+ raise MetadataRuntimeError
+ datafile.write(lxml.etree.tostring(self.clientdata))
+ datafile.close()
- def pretty_print(self, element, level=0):
- '''Produce a pretty-printed text representation of element'''
- if isinstance(element, _Comment):
- return (level * " ") + tostring(element)
- if element.text:
- fmt = "%s<%%s %%s>%%s</%%s>" % (level*" ")
- data = (element.tag, (" ".join(["%s='%s'" % (key, element.attrib[key]) for key in element.attrib])),
- element.text, element.tag)
- numchild = len(element.getchildren())
- if numchild:
- fmt = "%s<%%s %%s>\n" % (level*" ",) + (numchild * "%s") + "%s</%%s>\n" % (level*" ")
- data = (element.tag, ) + (" ".join(["%s='%s'" % (key, element.attrib[key]) for key in element.attrib]),)
- data += tuple([self.pretty_print(entry, level+2) for entry in element.getchildren()]) + (element.tag, )
+ def find_toolset(self, client):
+ '''Find the toolset for a given client'''
+ tgroups = [self.toolsets[group] for group in self.groups[client][1] if self.toolsets.has_key(group)]
+ if len(tgroups) == 1:
+ return tgroups[0]
+ elif len(tgroups) == 0:
+ syslog(LOG_ERR, "Metadata: Couldn't find toolset for client %s" % (client))
+ raise MetadataConsistencyError
else:
- fmt = "%s<%%s %%s/>\n" % (level * " ")
- data = (element.tag, " ".join(["%s='%s'" % (key, element.attrib[key]) for key in element.attrib]))
- return fmt % data
+ syslog(LOG_ERR, "Metadata: Got goofy toolset result for client %s" % (client))
+ raise MetadataConsistencyError
- def WriteBack(self):
- '''Write metadata changes back to persistent store'''
- fout = open(self.name, 'w')
- fout.write(self.pretty_print(self.element))
- fout.close()
+ def get_config_template(self, client):
+ '''Build the configuration header for a client configuration'''
+ return lxml.etree.Element("Configuration", version='2.0', toolset=self.find_toolset(client))
+ def get_metadata(self, client):
+ '''Return the metadata for a given client'''
+ if self.aliases.has_key(client):
+ client = self.aliases[client]
+ if self.clients.has_key(client):
+ [bundles, groups] = self.groups[self.clients[client]]
+ else:
+ if self.default == None:
+ syslog(LOG_ERR, "Cannot set group for client %s; no default group set" % (client))
+ raise MetadataConsistencyError
+ [bundles, groups] = self.groups[self.default]
+ toolinfo = [self.toolsets[group] for group in groups if self.toolsets.has_key(group)]
+ if len(toolinfo) > 1:
+ syslog(LOG_ERR, "Metadata: Found multiple toolsets for client %s; choosing one" % (client))
+ elif len(toolinfo) == 0:
+ syslog(LOG_ERR, "Metadata: Cannot determine toolset for client %s" % (client))
+ raise MetadataConsistencyError
+ toolset = toolinfo[0]
+ return ClientMetadata(client, groups, bundles, toolset)
+
+ def ping_sweep_clients(self):
+ '''Find live and dead clients'''
+ live = {}
+ dead = {}
+ work = self.clients.keys()
+ while work:
+ client = work.pop()
+ rc = os.system("/bin/ping -w 5 -c 1 %s > /dev/null 2>&1" % client)
+ if not rc:
+ live[client] = time.time()
+ else:
+ dead[client] = time.time()
+
diff --git a/src/lib/Server/Plugin.py b/src/lib/Server/Plugin.py
index 262d6092d..890084c98 100644
--- a/src/lib/Server/Plugin.py
+++ b/src/lib/Server/Plugin.py
@@ -1,10 +1,12 @@
'''This module provides the baseclass for Bcfg2 Server Plugins'''
-__revision__ = '$Revision:$'
+__revision__ = '$Revision$'
-from lxml.etree import XML, XMLSyntaxError, _Comment, tostring
-from os import stat
-from stat import ST_MTIME
-from syslog import syslog, LOG_ERR, LOG_INFO
+import lxml.etree
+import os
+import stat
+import syslog
+
+from lxml.etree import XML, XMLSyntaxError
class PluginInitError(Exception):
'''Error raised in cases of Plugin initialization errors'''
@@ -39,7 +41,7 @@ class Plugin(object):
def LogError(self, msg):
'''Log error message tagged with Plugin name'''
- syslog(LOG_ERR, "%s: %s" % (self.__name__, msg))
+ syslog.syslog(syslog.LOG_ERR, "%s: %s" % (self.__name__, msg))
def BuildStructures(self, metadata):
'''Build a set of structures tailored to the client metadata'''
@@ -73,15 +75,15 @@ class FileBacked(object):
'''Read file upon update'''
oldmtime = self.mtime
try:
- self.mtime = stat(self.name)[ST_MTIME]
+ self.mtime = os.stat(self.name)[stat.ST_MTIME]
except OSError:
- syslog(LOG_ERR, "Failed to stat file %s" % (self.name))
+ syslog.syslog(syslog.LOG_ERR, "Failed to stat file %s" % (self.name))
try:
self.data = file(self.name).read()
self.Index()
except IOError:
- syslog(LOG_ERR, "Failed to read file %s" % (self.name))
+ syslog.syslog(syslog.LOG_ERR, "Failed to read file %s" % (self.name))
def Index(self):
'''Update local data structures based on current file state'''
@@ -108,9 +110,9 @@ class DirectoryBacked(object):
def AddEntry(self, name):
'''Add new entry to data structures upon file creation'''
if name == '':
- syslog(LOG_INFO, "got add for empty name")
+ syslog.syslog(syslog.LOG_INFO, "got add for empty name")
elif self.entries.has_key(name):
- syslog(LOG_INFO, "got multiple adds for %s" % name)
+ syslog.syslog(syslog.LOG_INFO, "got multiple adds for %s" % name)
else:
if ((name[-1] == '~') or (name[:2] == '.#') or (name[-4:] == '.swp') or (name in ['SCCS', '.svn'])):
return
@@ -121,7 +123,7 @@ class DirectoryBacked(object):
'''Propagate fam events to underlying objects'''
action = event.code2str()
if event.filename == '':
- syslog(LOG_INFO, "Got event for blank filename")
+ syslog.syslog(syslog.LOG_INFO, "Got event for blank filename")
return
if action == 'exists':
if event.filename != self.name:
@@ -153,7 +155,7 @@ class XMLFileBacked(FileBacked):
try:
xdata = XML(self.data)
except XMLSyntaxError:
- syslog(LOG_ERR, "Failed to parse %s"%(self.name))
+ syslog.syslog(syslog.LOG_ERR, "Failed to parse %s"%(self.name))
return
self.label = xdata.attrib[self.__identifier__]
self.entries = xdata.getchildren()
@@ -167,83 +169,141 @@ class SingleXMLFileBacked(XMLFileBacked):
XMLFileBacked.__init__(self, filename)
fam.AddMonitor(filename, self)
-class ScopedXMLFile(SingleXMLFileBacked):
- '''Scoped XML files are coherent files with Metadata structured data'''
- __containers__ = ['Class', 'Host', 'Image']
-
- def __init__(self, filename, fam):
- self.store = {}
- self.__provides__ = {}
- SingleXMLFileBacked.__init__(self, filename, fam)
+class StructFile(XMLFileBacked):
+ '''This file contains a set of structure file formatting logic'''
+ def __init__(self, name):
+ XMLFileBacked.__init__(self, name)
+ self.fragments = {}
- def StoreRecord(self, metadata, entry):
- '''Store scoped record based on metadata'''
- if isinstance(entry, _Comment):
- return
- elif not entry.attrib.has_key('name'):
- syslog(LOG_ERR, "Got malformed record %s" % (tostring(entry)))
- if not self.store.has_key(entry.tag):
- self.store[entry.tag] = {}
- if not self.store[entry.tag].has_key(entry.attrib['name']):
- self.store[entry.tag][entry.attrib['name']] = []
- self.store[entry.tag][entry.attrib['name']].append((metadata, entry))
-
def Index(self):
'''Build internal data structures'''
try:
- xdata = XML(self.data)
- except XMLSyntaxError, msg:
- syslog(LOG_ERR, "Failed to parse %s"%(self.name))
- # need to add in lxml error messages, once they are supported
+ xdata = lxml.etree.XML(self.data)
+ except lxml.etree.XMLSyntaxError:
+ syslog.syslog(syslog.LOG_ERR, "Failed to parse file %s" % self.name)
return
- self.store = {}
- for entry in [ent for ent in xdata.getchildren() if not isinstance(ent, _Comment)]:
- if entry.tag not in self.__containers__:
- self.StoreRecord(('Global','all'), entry)
+ self.fragments = {}
+ work = {lambda x:True: xdata.getchildren()}
+ while work:
+ (predicate, worklist) = work.popitem()
+ self.fragments[predicate] = [item for item in worklist if item.tag != 'Group'
+ and not isinstance(item, lxml.etree._Comment)]
+ for group in [item for item in worklist if item.tag == 'Group']:
+ # if only python had forceable early-binding
+ newpred = eval("lambda x:'%s' in x.groups and predicate(x)" % (group.get('name')),
+ {'predicate':predicate})
+ work[newpred] = group.getchildren()
+
+ def Match(self, metadata):
+ '''Return matching fragments of independant'''
+ return reduce(lambda x, y:x+y, [frag for (pred, frag) in self.fragments.iteritems()
+ if pred(metadata)])
+
+class LNode:
+ '''LNodes provide lists of things available at a particular group intersection'''
+ raw = {'Client':"lambda x:'%s' == x.hostname and predicate(x)",
+ 'Group':"lambda x:'%s' in x.groups and predicate(x)"}
+ __leaf__ = './Child'
+
+ def __init__(self, data, plist, parent=None):
+ self.data = data
+ self.contents = {}
+ if parent == None:
+ self.predicate = lambda x:True
+ else:
+ predicate = parent.predicate
+ if data.tag in self.raw.keys():
+ self.predicate = eval(self.raw[data.tag] % (data.get('name')), {'predicate':predicate})
else:
- name = (entry.tag, entry.get('name'))
- [self.StoreRecord(name, child)
- for child in entry.getchildren() if not isinstance(entry, _Comment)]
- # now to build the __provides__ table
- for key in self.__provides__.keys():
- del self.__provides__[key]
- for key in self.store.keys():
- self.__provides__[key] = {}
- for name in self.store[key].keys():
- self.__provides__[key][name] = self.FetchRecord
- # also need to sort all leaf node lists
- self.store[key][name].sort(self.Sort)
-
- def Sort(self, meta1, meta2):
- '''Sort based on specificity'''
- order = ['Global', 'Image', 'Profile', 'Class', 'Host']
- return order.index(meta1[0][0]) - order.index(meta2[0][0])
-
- def MatchMetadata(self, mdata, metadata):
- '''Match internal metadata representation against metadata'''
- (mtype, mvalue) = mdata
- if mtype == 'Global':
- return True
- elif mtype == 'Profile':
- if mvalue == metadata.profile:
- return True
- elif mtype == 'Image':
- if mvalue == metadata.image:
- return True
- elif mtype == 'Class':
- if mvalue in metadata.classes:
- return True
- elif mtype == 'Host':
- if mvalue == metadata.hostname:
- return True
- return False
-
- def FetchRecord(self, entry, metadata):
- '''Build a data for specified metadata'''
- dlist = self.store[entry.tag][entry.get('name')]
- useful = [ent for ent in dlist if self.MatchMetadata(ent[0], metadata)]
- if not useful:
- syslog(LOG_ERR, "Failed to FetchRecord %s:%s"%(entry.tag, entry.get('name')))
+ print data.tag
+ raise Exception
+ mytype = self.__class__
+ self.children = [mytype(child, plist, self) for child in data.getchildren()
+ if child.tag in ['Group', 'Client']]
+ for leaf in data.findall(self.__leaf__):
+ self.contents[leaf.get('name')] = leaf.attrib
+ if leaf.get('name') not in plist:
+ plist.append(leaf.get('name'))
+
+ def Match(self, metadata, data):
+ '''Return a dictionary of package mappings'''
+ if self.predicate(metadata):
+ data.update(self.contents)
+ for child in self.children:
+ child.Match(metadata, data)
+
+class XMLSrc(XMLFileBacked):
+ '''XMLSrc files contain a LNode hierarchy that returns matching entries'''
+ __node__ = LNode
+
+ def __init__(self, filename):
+ XMLFileBacked.__init__(self, filename)
+ self.names = []
+ self.cache = None
+ self.pnode = None
+ self.priority = '1000'
+
+ def Index(self):
+ self.names = []
+ xdata = XML(self.data)
+ self.pnode = self.__node__(xdata, self.names)
+ self.cache = None
+ self.priority = xdata.attrib['priority']
+
+ def Cache(self, metadata):
+ '''Build a package dict for a given host'''
+ if self.cache == None or self.cache[0] != metadata:
+ cache = (metadata, {})
+ if self.pnode == None:
+ syslog.syslog(syslog.LOG_ERR,
+ "Cache method called early for %s; forcing data load" % (self.name))
+ self.HandleEvent()
+ return
+ self.pnode.Match(metadata, cache[1])
+ self.cache = cache
+
+class XMLPrioDir(Plugin, DirectoryBacked):
+ '''This is a generator that handles package assignments'''
+ __name__ = 'XMLPrioDir'
+ __child__ = XMLSrc
+ __element__ = 'Dummy'
+
+ def __init__(self, core, datastore):
+ Plugin.__init__(self, core, datastore)
+ self.Entries[self.__element__] = {}
+ try:
+ DirectoryBacked.__init__(self, self.data, self.core.fam)
+ except OSError:
+ self.LogError("Failed to load %s indices" % (self.__element__.lower()))
+ raise PluginInitError
+
+ def HandleEvent(self, event):
+ '''Handle events and update dispatch table'''
+ DirectoryBacked.HandleEvent(self, event)
+ for src in self.entries.values():
+ for child in src.names:
+ self.Entries[self.__element__][child] = self.BindEntry
+
+ def BindEntry(self, entry, metadata):
+ '''Check package lists of package entries'''
+ [src.Cache(metadata) for src in self.entries.values()]
+ name = entry.get('name')
+ if not src.cache:
+ self.LogError("Called before data loaded")
+ raise PluginExecutionError
+ matching = [src for src in self.entries.values()
+ if src.cache[1].has_key(name)]
+ if len(matching) == 0:
+ raise PluginExecutionError
+ elif len(matching) == 1:
+ index = 0
else:
- data = useful[-1][-1]
- [entry.attrib.__setitem__(x, data.attrib[x]) for x in data.attrib]
+ prio = [int(src.priority) for src in matching]
+ if prio.count(max(prio)) > 1:
+ self.LogError("Found multiple %s sources with same priority for %s, pkg %s" %
+ (self.__element__.lower(), metadata.hostname, entry.get('name')))
+ raise PluginExecutionError
+ index = prio.index(max(prio))
+
+ data = matching[index].cache[1][name]
+ [entry.attrib.__setitem__(key, data[key]) for key in data.keys()]
diff --git a/src/lib/Server/Plugins/Base.py b/src/lib/Server/Plugins/Base.py
index 1cdd7599c..3be30bc6a 100644
--- a/src/lib/Server/Plugins/Base.py
+++ b/src/lib/Server/Plugins/Base.py
@@ -1,62 +1,31 @@
'''This module sets up a base list of configuration entries'''
__revision__ = '$Revision$'
-from copy import deepcopy
-from lxml.etree import Element, XML, XMLSyntaxError, _Comment
+import Bcfg2.Server.Plugin
+import copy
+import lxml.etree
-from Bcfg2.Server.Plugin import Plugin, PluginInitError, SingleXMLFileBacked
-
-class Base(Plugin, SingleXMLFileBacked):
+class Base(Bcfg2.Server.Plugin.Plugin, Bcfg2.Server.Plugin.DirectoryBacked):
'''This Structure is good for the pile of independent configs needed for most actual systems'''
__name__ = 'Base'
__version__ = '$Id$'
__author__ = 'bcfg-dev@mcs.anl.gov'
+ __child__ = Bcfg2.Server.Plugin.StructFile
'''base creates independent clauses based on client metadata'''
def __init__(self, core, datastore):
- Plugin.__init__(self, core, datastore)
- self.store = {'all':[], 'Class':{'all':[]}, 'Image':{'all':[]}, 'all':[]}
+ Bcfg2.Server.Plugin.Plugin.__init__(self, core, datastore)
+ self.fragements = {}
try:
- SingleXMLFileBacked.__init__(self, "%s/etc/base.xml"%(datastore), self.core.fam)
+ Bcfg2.Server.Plugin.DirectoryBacked.__init__(self, self.data, self.core.fam)
except OSError:
- self.LogError("Failed to load base.xml")
- raise PluginInitError
+ self.LogError("Failed to load Base repository")
+ raise Bcfg2.Server.Plugin.PluginInitError
- def Index(self):
- '''Store XML data in reasonable structures'''
- try:
- xdata = XML(self.data)
- except XMLSyntaxError:
- self.LogError("Failed to parse base.xml")
- return
- self.store = {'all':[], 'Class':{'all':[]}, 'Image':{'all':[]}, 'all':[]}
- for entry in [ent for ent in xdata.getchildren() if not isinstance(ent, _Comment)]:
- if entry.tag in ['Image', 'Class']:
- if not self.store[entry.tag].has_key(entry.get('name')):
- self.store[entry.tag][entry.get('name')] = {'all':[], 'Class':{}, 'Image':{}}
- for child in [ent for ent in entry.getchildren() if not isinstance(ent, _Comment)]:
- if child.tag in ['Image', 'Class']:
- self.store[entry.tag][entry.get('name')][child.tag][child.get('name')] = \
- [ent for ent in child.getchildren() if \
- not isinstance(ent, _Comment)]
- else:
- self.store[entry.tag][entry.get('name')]['all'].append(child)
- else:
- self.store['all'].append(child)
-
def BuildStructures(self, metadata):
'''Build structures for client described by metadata'''
- ret = Element("Independant", version='2.0')
- [ret.append(deepcopy(entry)) for entry in self.store['all']]
- idata = self.store['Image'].get(metadata.image, {'all':[], 'Class':{}})
- for entry in idata['all']:
- ret.append(deepcopy(entry))
- for cls in metadata.classes:
- for entry in idata['Class'].get(cls, []):
- ret.append(deepcopy(entry))
- cdata = self.store['Class'].get(cls, {'all':[], 'Image':{}})
- for entry in cdata['all']:
- ret.append(deepcopy(entry))
- for entry in cdata['Image'].get(metadata.image, []):
- ret.append(deepcopy(entry))
+ ret = lxml.etree.Element("Independant", version='2.0')
+ fragments = reduce(lambda x, y: x+y,
+ [base.Match(metadata) for base in self.entries.values()])
+ [ret.append(copy.deepcopy(frag)) for frag in fragments]
return [ret]
diff --git a/src/lib/Server/Plugins/Bundler.py b/src/lib/Server/Plugins/Bundler.py
index 4b357f121..cbbb6c671 100644
--- a/src/lib/Server/Plugins/Bundler.py
+++ b/src/lib/Server/Plugins/Bundler.py
@@ -1,122 +1,36 @@
'''This provides bundle clauses with translation functionality'''
__revision__ = '$Revision$'
-from copy import deepcopy
-from syslog import LOG_ERR, syslog
-from lxml.etree import Element, XML, XMLSyntaxError, _Comment
+import Bcfg2.Server.Plugin
+import copy
+import lxml.etree
-from Bcfg2.Server.Plugin import Plugin, SingleXMLFileBacked, XMLFileBacked, DirectoryBacked
-
-
-class ImageFile(SingleXMLFileBacked):
- '''This file contains image -> system mappings'''
- def __init__(self, filename, fam):
- self.images = {}
- SingleXMLFileBacked.__init__(self, filename, fam)
-
- def Index(self):
- '''Build data structures out of the data'''
- try:
- xdata = XML(self.data)
- except XMLSyntaxError, err:
- syslog(LOG_ERR, "Failed to parse file %s" % (self.name))
- syslog(LOG_ERR, err)
- del self.data
- return
- self.images = {}
- for child in xdata.getchildren():
- [name, pkg, service] = [child.get(field) for field in ['name', 'package', 'service']]
- for grandchild in child.getchildren():
- self.images[grandchild.get('name')] = (name, pkg, service)
-
-class Bundle(XMLFileBacked):
- '''Bundles are configuration specifications (with image/translation abstraction)'''
-
- def __init__(self, filename):
- self.all = []
- self.attributes = {}
- self.systems = {}
- XMLFileBacked.__init__(self, filename)
-
- def Index(self):
- '''Build data structures from the source data'''
- try:
- xdata = XML(self.data)
- except XMLSyntaxError, err:
- syslog(LOG_ERR, "Failed to parse file %s" % (self.name))
- syslog(LOG_ERR, str(err))
- del self.data
- return
- self.all = []
- self.systems = {}
- self.attributes = {}
- for entry in [ent for ent in xdata.getchildren() if not isinstance(ent, _Comment)]:
- if entry.tag == 'System':
- self.systems[entry.attrib['name']] = [ent for ent in entry.getchildren() \
- if not isinstance(ent, _Comment)]
- elif entry.tag == 'Attribute':
- self.attributes[entry.get('name')] = [ent for ent in entry.getchildren() \
- if not isinstance(ent, _Comment)]
- else:
- self.all.append(entry)
- del self.data
-
- def BuildBundle(self, metadata, system):
- '''Build a bundle for a particular client'''
- bundlename = self.name.split('/')[-1]
- bundle = Element('Bundle', name=bundlename)
- for entry in self.all + self.systems.get(system, []):
- bundle.append(deepcopy(entry))
- for attribute in [aname for (scope, aname) in [item.split('.') for item in metadata.attributes]
- if scope == bundlename[:-4]]:
- for entry in self.attributes.get(attribute, []):
- bundle.append(deepcopy(entry))
- return bundle
-
-class BundleSet(DirectoryBacked):
- '''The Bundler handles creation of dependent clauses based on bundle definitions'''
- __child__ = Bundle
-
-class Bundler(Plugin):
+class Bundler(Bcfg2.Server.Plugin.Plugin, Bcfg2.Server.Plugin.DirectoryBacked):
'''The bundler creates dependent clauses based on the bundle/translation scheme from bcfg1'''
__name__ = 'Bundler'
__version__ = '$Id$'
__author__ = 'bcfg-dev@mcs.anl.gov'
+ __child__ = Bcfg2.Server.Plugin.StructFile
def __init__(self, core, datastore):
- Plugin.__init__(self, core, datastore)
- self.imageinfo = ImageFile("%s/etc/imageinfo.xml"%(datastore), self.core.fam)
- self.bundles = BundleSet(self.data, self.core.fam)
+ Bcfg2.Server.Plugin.Plugin.__init__(self, core, datastore)
+ try:
+ Bcfg2.Server.Plugin.DirectoryBacked.__init__(self, self.data, self.core.fam)
+ except OSError:
+ self.LogError("Failed to load Bundle repository")
+ raise Bcfg2.Server.Plugin.PluginInitError
def BuildStructures(self, metadata):
'''Build all structures for client (metadata)'''
- try:
- (system, package, service) = self.GetTransInfo(metadata)
- except KeyError:
- syslog(LOG_ERR, "Failed to find translation information for image %s" % metadata.image)
- return []
bundleset = []
for bundlename in metadata.bundles:
- if not self.bundles.entries.has_key("%s.xml"%(bundlename)):
- syslog(LOG_ERR, "Client %s requested nonexistent bundle %s"%(metadata.hostname, bundlename))
+ if not self.entries.has_key("%s.xml"%(bundlename)):
+ self.LogError("Client %s requested nonexistent bundle %s" % \
+ (metadata.hostname, bundlename))
continue
-
- bundle = self.bundles.entries["%s.xml" % (bundlename)].BuildBundle(metadata, system)
- # now we need to populate service/package types
- for entry in bundle.getchildren():
- if entry.tag == 'Package':
- entry.attrib['type'] = package
- elif entry.tag == 'Service':
- entry.attrib['type'] = service
+ bundle = lxml.etree.Element('Bundle', name=bundlename)
+ [bundle.append(copy.deepcopy(item))
+ for item in self.entries["%s.xml" % (bundlename)].Match(metadata)]
bundleset.append(bundle)
return bundleset
- def GetTransInfo(self, metadata):
- '''Get Translation info for metadata.image'''
- if self.imageinfo.images.has_key(metadata.image):
- return self.imageinfo.images[metadata.image]
- else:
- raise KeyError, metadata.image
-
-
-
diff --git a/src/lib/Server/Plugins/Cfg.py b/src/lib/Server/Plugins/Cfg.py
index 953401e7a..b325144e5 100644
--- a/src/lib/Server/Plugins/Cfg.py
+++ b/src/lib/Server/Plugins/Cfg.py
@@ -1,7 +1,6 @@
'''This module implements a config file repository'''
__revision__ = '$Revision$'
-from binascii import b2a_base64
from os import stat
from re import compile as regcompile
from stat import S_ISDIR, ST_MODE
@@ -9,54 +8,73 @@ from syslog import syslog, LOG_INFO, LOG_ERR
from Bcfg2.Server.Plugin import Plugin, PluginExecutionError, FileBacked
+import binascii
+import exceptions
+
+specific = regcompile('(.*/)(?P<filename>[\S\-.]+)\.((H_(?P<hostname>\S+))|' +
+ '(G(?P<prio>\d+)_(?P<group>\S+)))$')
+
+class SpecificityError(Exception):
+ '''Thrown in case of filename parse failure'''
+ pass
+
class FileEntry(FileBacked):
'''The File Entry class pertains to the config files contained in a particular directory.
This includes :info, all base files and deltas'''
-
- def __init__(self, name, all, image, classes, bundles, attribs, hostname):
+
+ def __init__(self, myid, name):
FileBacked.__init__(self, name)
- self.all = all
- self.image = image
- self.bundles = bundles
- self.classes = classes
- self.attributes = attribs
- self.hostname = hostname
+ self.name = name
+ self.identity = myid
+ self.all = False
+ self.hostname = False
+ self.group = False
+ self.op = False
+ self.prio = False
+ if name.split('.')[-1] in ['cat', 'diff']:
+ self.op = name.split('.')[-1]
+ name = name[:-(len(self.op) + 1)]
+ if self.name.split('/')[-1] == myid.split('/')[-1]:
+ self.all = True
+ else:
+ data = specific.match(name)
+ if not data:
+ syslog(LOG_ERR, "Cfg: Failed to match %s" % name)
+ raise SpecificityError
+ if data.group('hostname') != None:
+ self.hostname = data.group('hostname')
+ else:
+ self.group = data.group('group')
+ self.prio = int(data.group('prio'))
def __cmp__(self, other):
- fields = ['all', 'image', 'classes', 'bundles', 'attributes', 'hostname']
- try:
- most1 = [index for index in range(len(fields)) if getattr(self, fields[index])][0]
- except IndexError:
- most1 = 0
- try:
- most2 = [index for index in range(len(fields)) if getattr(other, fields[index])][0]
- except IndexError:
- most2 = 0
- if most1 == most2:
- if self.name.split('.')[-1] in ['cat', 'diff']:
- meta1 = self.name.split('.')[-2]
- else:
- meta1 = self.name.split('.')[-1]
- if other.name.split('.')[-1] in ['cat', 'diff']:
- meta2 = other.name.split('.')[-2]
+ data = [[getattr(self, field) for field in ['all', 'group', 'hostname']],
+ [getattr(other, field) for field in ['all', 'group', 'hostname']]]
+ for index in range(3):
+ if data[0][index] and not data[1][index]:
+ return -1
+ elif data[1][index] and not data[0][index]:
+ return 1
+ elif data[0][index] and data[1][index]:
+ if hasattr(self, 'prio') and hasattr(other, 'prio'):
+ return self.prio - other.prio
+ else:
+ return 0
else:
- meta2 = other.name.split('.')[-1]
+ pass
+ syslog(LOG_ERR, "Cfg: Critical: Ran off of the end of the world sorting %s" % (self.name))
- if meta1[0] not in ['C', 'B']:
- return 0
- # need to tiebreak with numeric prio
- prio1 = int(meta1[1:3])
- prio2 = int(meta2[1:3])
- return prio1 - prio2
+ def applies(self, metadata):
+ '''Predicate if fragment matches client metadata'''
+ if self.all or (self.hostname == metadata.hostname) or \
+ (self.group in metadata.groups):
+ return True
else:
- return most1 - most2
+ return False
class ConfigFileEntry(object):
'''ConfigFileEntry is a repository entry for a single file, containing
all data for all clients.'''
- specific = regcompile('(.*/)(?P<filename>[\S\-.]+)\.((H_(?P<hostname>\S+))|' +
- '(B(?P<bprio>\d+)_(?P<bundle>\S+))|(A(?P<aprio>\d+)_(?P<attr>\S+))|' +
- '(I_(?P<image>\S+))|(C(?P<cprio>\d+)_(?P<class>\S+)))$')
info = regcompile('^owner:(\s)*(?P<owner>\w+)|group:(\s)*(?P<group>\w+)|' +
'perms:(\s)*(?P<perms>\w+)|encoding:(\s)*(?P<encoding>\w+)|' +
'(?P<paranoid>paranoid(\s)*)$')
@@ -65,8 +83,7 @@ class ConfigFileEntry(object):
object.__init__(self)
self.path = path
self.repopath = repopath
- self.basefiles = []
- self.deltas = []
+ self.fragments = []
self.metadata = {'encoding': 'ascii', 'owner':'root', 'group':'root', 'perms':'0644'}
self.paranoid = False
@@ -94,41 +111,15 @@ class ConfigFileEntry(object):
def AddEntry(self, name):
'''add new file additions for a single cf file'''
- delta = False
- oldname = name
if name[-5:] == ':info':
return self.read_info()
- if name.split('/')[-1] == self.path.split('/')[-1]:
- self.basefiles.append(FileEntry(name, True, None, [], [], [], None))
- self.basefiles.sort()
- return
-
- if name.split('/')[-1].split('.')[-1] in ['cat']:
- delta = True
- oldname = name
- name = name[:-4]
-
- specmatch = self.specific.match(name)
- if specmatch == None:
- syslog(LOG_ERR, "Cfg: Failed to match file %s" % (name))
+ try:
+ self.fragments.append(FileEntry(self.path, name))
+ self.fragments.sort()
+ except SpecificityError:
return
- data = {}
- for item, value in specmatch.groupdict().iteritems():
- if value != None:
- data[item] = value
-
- cfile = FileEntry(oldname, False, data.get('image', None), data.get('class', []),
- data.get('bundle', []), data.get('attr', []), data.get('hostname', None))
-
- if delta:
- self.deltas.append(cfile)
- self.deltas.sort()
- else:
- self.basefiles.append(cfile)
- self.basefiles.sort()
-
def HandleEvent(self, event):
'''Handle FAM updates'''
action = event.code2str()
@@ -136,11 +127,11 @@ class ConfigFileEntry(object):
if action in ['changed', 'exists', 'created']:
return self.read_info()
if event.filename != self.path.split('/')[-1]:
- if not self.specific.match('/' + event.filename):
+ if not specific.match('/' + event.filename):
syslog(LOG_INFO, 'Cfg: Suppressing event for bogus file %s' % event.filename)
return
- entries = [entry for entry in self.basefiles + self.deltas if
+ entries = [entry for entry in self.fragments if
entry.name.split('/')[-1] == event.filename]
if len(entries) == 0:
@@ -152,10 +143,8 @@ class ConfigFileEntry(object):
syslog(LOG_INFO, "Cfg: Removing entry %s" % event.filename)
for entry in entries:
syslog(LOG_INFO, "Cfg: Removing entry %s" % (entry.name))
- if entry in self.basefiles:
- self.basefiles.remove(entry)
- if entry in self.deltas:
- self.deltas.remove(entry)
+ self.fragments.remove(entry)
+ self.fragments.sort()
syslog(LOG_INFO, "Cfg: Entry deletion completed")
elif action in ['changed', 'exists', 'created']:
[entry.HandleEvent(event) for entry in entries]
@@ -168,13 +157,13 @@ class ConfigFileEntry(object):
filedata = ""
# first find basefile
try:
- basefile = [bfile for bfile in self.basefiles if metadata.Applies(bfile)][-1]
+ basefile = [bfile for bfile in self.fragments if bfile.applies(metadata) and not bfile.op][-1]
except IndexError:
syslog(LOG_ERR, "Cfg: Failed to locate basefile for %s" % name)
raise PluginExecutionError, ('basefile', name)
filedata += basefile.data
- for delta in [x for x in self.deltas if metadata.Applies(x)]:
+ for delta in [delta for delta in self.fragments if delta.applies(metadata) and delta.op]:
# find applicable deltas
lines = filedata.split('\n')
if not lines[-1]:
@@ -188,15 +177,15 @@ class ConfigFileEntry(object):
lines.append(line[1:])
filedata = "\n".join(lines) + "\n"
- [entry.attrib.__setitem__(x,y) for (x,y) in self.metadata.iteritems()]
+ [entry.attrib.__setitem__(key, value) for (key, value) in self.metadata.iteritems()]
if self.paranoid:
entry.attrib['paranoid'] = 'true'
if entry.attrib['encoding'] == 'base64':
- entry.text = b2a_base64(filedata)
+ entry.text = binascii.b2a_base64(filedata)
else:
try:
entry.text = filedata
- except:
+ except exceptions.AttributeError:
syslog(LOG_ERR, "Failed to marshall file %s. Mark it as base64" % (entry.get('name')))
class Cfg(Plugin):
diff --git a/src/lib/Server/Plugins/Hostbase.py b/src/lib/Server/Plugins/Hostbase.py
index 9a488fc74..e729030fb 100644
--- a/src/lib/Server/Plugins/Hostbase.py
+++ b/src/lib/Server/Plugins/Hostbase.py
@@ -2,9 +2,12 @@
__revision__ = '$Revision$'
from syslog import syslog, LOG_INFO
-from lxml.etree import XML
+from lxml.etree import XML, SubElement
from Cheetah.Template import Template
from Bcfg2.Server.Plugin import Plugin, PluginExecutionError, PluginInitError, DirectoryBacked
+from time import strftime
+from sets import Set
+import re
class DataNexus(DirectoryBacked):
'''DataNexus is an object that watches multiple files and
@@ -39,7 +42,7 @@ class Hostbase(Plugin, DataNexus):
def __init__(self, core, datastore):
self.ready = False
- files = ['dnsdata.xml', 'hostbase.xml', 'networks.xml']
+ files = ['zones.xml', 'hostbase.xml', 'hostbase-dns.xml', 'hostbase-dhcp.xml']
Plugin.__init__(self, core, datastore)
try:
DataNexus.__init__(self, datastore + '/Hostbase/data',
@@ -49,11 +52,15 @@ class Hostbase(Plugin, DataNexus):
raise PluginInitError
self.xdata = {}
self.filedata = {}
+ self.dnsservers = ['scotty.mcs.anl.gov']
+ self.dhcpservers = ['thwap.mcs.anl.gov', 'squeak.mcs.anl.gov']
self.templates = {'zone':Template(open(self.data + '/templates/' + 'zonetemplate.tmpl').read()),
'reversesoa':Template(open(self.data + '/templates/' + 'reversesoa.tmpl').read()),
'named':Template(open(self.data + '/templates/' + 'namedtemplate.tmpl').read()),
'reverseapp':Template(open(self.data + '/templates/' + 'reverseappend.tmpl').read()),
- 'dhcp':Template(open(self.data + '/templates/' + 'dhcpd_template.tmpl').read())}
+ 'dhcp':Template(open(self.data + '/templates/' + 'dhcpd_template.tmpl').read()),
+ 'hosts':Template(open(self.data + '/templates/' + 'hosts.tmpl').read()),
+ 'hostsapp':Template(open(self.data + '/templates/' + 'hostsappend.tmpl').read())}
self.Entries['ConfigFile'] = {}
def FetchFile(self, entry, metadata):
@@ -65,8 +72,38 @@ class Hostbase(Plugin, DataNexus):
[entry.attrib.__setitem__(key, value) for (key, value) in perms.iteritems()]
entry.text = self.filedata[fname]
+ def BuildStructures(self, metadata):
+ '''Build hostbase bundle'''
+ if metadata.hostname in self.dnsservers or metadata.hostname in self.dhcpservers:
+ output = []
+ if metadata.hostname in self.dnsservers:
+ dnsbundle = XML(self.entries['hostbase-dns.xml'].data)
+ for configfile in self.Entries['ConfigFile']:
+ if re.search('/etc/bind/', configfile):
+ SubElement(dnsbundle, "ConfigFile", name=configfile)
+ output.append(dnsbundle)
+ if metadata.hostname in self.dhcpservers:
+ dhcpbundle = XML(self.entries['hostbase-dhcp.xml'].data)
+ output.append(dhcpbundle)
+ return output
+ else:
+ return []
+
def rebuildState(self, event):
'''Pre-cache all state information for hostbase config files'''
+ def get_serial(zone):
+            '''Compute the zone serial (YYYYMMDDnn): bump by one if already issued today, else start at date*100'''
+ todaydate = (strftime('%Y%m%d'))
+ try:
+ if todaydate == zone.get('serial')[:8]:
+ serial = atoi(zone.get('serial')) + 1
+ else:
+ serial = atoi(todaydate) * 100
+ return str(serial)
+ except (KeyError):
+ serial = atoi(todaydate) * 100
+ return str(serial)
+
if self.entries.has_key(event.filename) and not self.xdata.has_key(event.filename):
self.xdata[event.filename] = XML(self.entries[event.filename].data)
if [item for item in self.files if not self.entries.has_key(item)]:
@@ -74,163 +111,293 @@ class Hostbase(Plugin, DataNexus):
# we might be able to rebuild data more sparsely,
# but hostbase.xml is the only one that will really change often
# rebuild zoneinfo
- iplist = []
- for zone in self.xdata['dnsdata.xml']:
+ hosts = {}
+ zones = self.xdata['zones.xml']
+ hostbase = self.xdata['hostbase.xml']
+ ## this now gets all hosts associated with the zone file being initialized
+ ## all ip addresses and cnames are grabbed from each host and passed to the appropriate template
+ for zone in zones:
+ hosts[zone.get('domain')] = []
+ for host in hostbase:
+ if host.get('domain') in hosts:
+ hosts[host.get('domain')].append(host)
+ for zone in zones:
zonehosts = []
- for host in [host for host in self.xdata['hostbase.xml']
- if host.get('domain') == zone.get('domain')]:
- hostname = host.get('hostname')
- if zone.get('domain') == 'mcs.anl.gov':
- ## special cases for the mcs.anl.gov domain
- ## all machines have a "-eth" entry as well as an entry identifying their subnet
- ## they also have their mail exchangers after every address
- ipnodes = host.findall("interface/ip")
- zonehosts.append((hostname, ipnodes[0].attrib['ip'], ipnodes[0].findall("name/mx"), None))
- [zonehosts.append(("-".join([hostname, ipnode.attrib['dnssuffix']]), \
- ipnode.attrib['ip'], ipnode.findall("name/mx"), None))
- for ipnode in ipnodes]
- [zonehosts.append(("-".join([hostname, namenode.attrib['name']]), \
- ipnode.attrib['ip'], namenode.findall("mx"), None))
- for ipnode in ipnodes
- for namenode in ipnode
- if namenode.attrib['name'] != ""]
- else:
- ipnodes = host.findall("interface/ip")
- zonehosts.append((host.attrib['hostname'], ipnodes[0].attrib['ip'], None, None))
- [zonehosts.append(("-".join([host.attrib['hostname'], namenode.attrib['name']]),
- ipnode.attrib['ip'], None, None))
- for ipnode in ipnodes
- for namenode in ipnode
- if namenode.attrib['name'] != ""]
-
- [zonehosts.append((host.attrib['hostname'], None, None, cnamenode.attrib['cname']))
- for cnamenode in host.findall("interface/ip/name/cname")
- if cnamenode.attrib['cname'] != ""]
-
- [iplist.append(ipnode.attrib['ip']) for ipnode in host.findall("interface/ip")]
+ for host in hosts[zone.get('domain')]:
+ hostname = host.attrib['hostname']
+ ipnodes = host.findall("interface/ip")
+ #gets all the forward look up stuff
+ [zonehosts.append((namenode.get('name').split(".")[0], ipnode.get('ip'),
+ namenode.findall('mx')))
+ for ipnode in ipnodes
+ for namenode in ipnode]
+ #gets cname stuff
+ [zonehosts.append((cnamenode.get('cname') + '.', namenode.get('name').split('.')[0], None))
+ for namenode in host.findall("interface/ip/name")
+ for cnamenode in namenode.findall("cname")
+ if (cnamenode.get('cname').split(".")[0], namenode.get('name').split('.')[0], None) not in zonehosts
+ and cnamenode.get('cname') is not None]
+
zonehosts.sort()
self.templates['zone'].zone = zone
- self.templates['zone'].root = self.xdata['dnsdata.xml']
+ self.templates['zone'].root = zones
self.templates['zone'].hosts = zonehosts
self.filedata[zone.get('domain')] = str(self.templates['zone'])
+ self.Entries['ConfigFile']["%s/%s" % (self.filepath, zone.get('domain'))] = self.FetchFile
# now all zone forward files are built
- iplist.sort()
filelist = []
- temp = None
- for x in range(len(iplist)-1):
- addressparts = iplist[x].split(".")
- if addressparts[:3] != iplist[x+1].split(".")[:3] and addressparts[:2] == iplist[x+1].split(".")[:2] \
- and ".".join([addressparts[1], addressparts[0]]) not in filelist:
- filelist.append(".".join([addressparts[1], addressparts[0]]))
- elif addressparts[:3] != iplist[x+1].split(".")[:3] and \
- addressparts[:2] != iplist[x+1].split(".")[:2] and \
- ".".join([addressparts[1], addressparts[0]]) not in filelist:
- filelist.append(".".join([addressparts[2], addressparts[1], addressparts[0]]))
- if x+1 == len(iplist) - 1:
- temp = iplist[x+1].split(".")
- if ".".join([temp[2], temp[1], temp[0]]) not in filelist \
- and ".".join([temp[1], temp[0]]) not in filelist:
- filelist.append(".".join([temp[2], temp[1], temp[0]]))
-
+ three_subnet = [ip.get('ip').rstrip('0123456789').rstrip('.')
+ for ip in hostbase.findall('host/interface/ip')]
+ three_subnet_set = Set(three_subnet)
+ two_subnet = [subnet.rstrip('0123456789').rstrip('.')
+ for subnet in three_subnet_set]
+ two_subnet_set = Set(two_subnet)
+ filelist = [each for each in two_subnet_set
+ if two_subnet.count(each) > 1]
+ [filelist.append(each) for each in three_subnet_set
+ if each.rstrip('0123456789').rstrip('.') not in filelist]
+
+ reversenames = []
for filename in filelist:
- self.templates['reversesoa'].inaddr = filename
+ towrite = filename.split('.')
+ towrite.reverse()
+ reversename = '.'.join(towrite)
+ self.templates['reversesoa'].inaddr = reversename
self.templates['reversesoa'].zone = zone
- self.templates['reversesoa'].root = self.xdata['dnsdata.xml']
- self.filedata["%s.rev" % filename] = str(self.templates['reversesoa'])
+ self.templates['reversesoa'].root = self.xdata['zones.xml']
+ self.filedata['%s.rev' % reversename] = str(self.templates['reversesoa'])
+ reversenames.append(reversename)
- self.templates['named'].zones = self.xdata['dnsdata.xml']
- self.templates['named'].reverses = filelist
+ self.templates['named'].zones = self.xdata['zones.xml']
+ self.templates['named'].reverses = reversenames
self.filedata["named.conf"] = str(self.templates['named'])
+ self.Entries['ConfigFile']["%s/%s" % (self.filepath, 'named.conf')] = self.FetchFile
- for filename in filelist:
+ reversenames.sort()
+ for filename in reversenames:
originlist = []
+ reversehosts = []
towrite = filename.split(".")
towrite.reverse()
if len(towrite) > 2:
- self.templates['reverseapp'].hosts = [(ipnode.get('ip').split('.'), host.get('hostname'),
- host.get('domain'), ipnode.get('num'), ipnode.get('dnssuffix'))
- for host in self.xdata['hostbase.xml']
- for ipnode in host.findall('interface/ip')
- if ipnode.get('ip').split('.')[:3] == towrite]
-
+ [reversehosts.append((ipnode.attrib['ip'].split("."), host.attrib['hostname'],
+ host.attrib['domain'], ipnode.get('num'), None))
+ for host in self.xdata['hostbase.xml']
+ for ipnode in host.findall("interface/ip")
+ if ipnode.attrib['ip'].split(".")[:3] == towrite]
+ self.templates['reverseapp'].hosts = reversehosts
self.templates['reverseapp'].inaddr = filename
self.templates['reverseapp'].fileorigin = None
self.filedata["%s.rev" % filename] += str(self.templates['reverseapp'])
else:
- revhosts = [(ipnode.get('ip').split('.'), host.get('hostname'), host.get('domain'),
- ipnode.get('num'), ipnode.get('dnssuffix'))
- for host in self.xdata['hostbase.xml']
- for ipnode in host.findall("interface/ip")
- if ipnode.get('ip').split(".")[:2] == towrite]
+ [reversehosts.append((ipnode.attrib['ip'].split("."), host.attrib['hostname'],
+ host.attrib['domain'], ipnode.get('num'), None))
+ for host in self.xdata['hostbase.xml']
+ for ipnode in host.findall("interface/ip")
+ if ipnode.attrib['ip'].split(".")[:2] == towrite]
[originlist.append(".".join([reversehost[0][2], reversehost[0][1], reversehost[0][0]]))
- for reversehost in revhosts
+ for reversehost in reversehosts
if ".".join([reversehost[0][2], reversehost[0][1], reversehost[0][0]]) not in originlist]
- revhosts.sort()
+ reversehosts.sort()
originlist.sort()
for origin in originlist:
- outputlist = [rhost for rhost in revhosts
- if ".".join([rhost[0][2], rhost[0][1], rhost[0][0]]) == origin]
+ outputlist = []
+ [outputlist.append(reversehost)
+ for reversehost in reversehosts
+ if ".".join([reversehost[0][2], reversehost[0][1], reversehost[0][0]]) == origin]
self.templates['reverseapp'].fileorigin = filename
self.templates['reverseapp'].hosts = outputlist
self.templates['reverseapp'].inaddr = origin
self.filedata["%s.rev" % filename] += str(self.templates['reverseapp'])
+ self.Entries['ConfigFile']["%s/%s.rev" % (self.filepath, filename)] = self.FetchFile
self.buildDHCP()
- for key in self.filedata:
- self.Entries['ConfigFile']["%s/%s" % (self.filepath, key)] = self.FetchFile
+ self.buildHosts()
+ self.buildHostsLPD()
+ self.buildPrinters()
+ self.buildNetgroups()
def buildDHCP(self):
'''Pre-build dhcpd.conf and stash in the filedata table'''
- if 'networks.xml' not in self.xdata.keys():
- print "not running before networks is cached"
- return
- networkroot = self.xdata['networks.xml']
if 'hostbase.xml' not in self.xdata.keys():
print "not running before hostbase is cached"
return
hostbase = self.xdata['hostbase.xml']
- vlanandsublist = []
- subnets = networkroot.findall("subnet")
- for vlan in networkroot.findall("vlan"):
- vlansubs = vlan.findall("subnet")
- vlansubs.sort(lambda x, y: cmp(x.get("address"), y.get("address")))
- vlanandsublist.append((vlan, vlansubs))
-
- subnets140 = [subnet for subnet in subnets if subnet.attrib['address'].split(".")[0] == "140"]
- privatesubnets = [subnet for subnet in subnets if subnet.attrib['address'].split(".")[0] != "140"]
- subnets140.sort(lambda x, y: cmp(x.get("address"), y.get("address")))
- privatesubnets.sort(lambda x, y: cmp(x.get("address"), y.get("address")))
-
- dhcphosts = [host for host in hostbase if host.get('dhcp') == 'y' \
- and host.find("interface").get('mac') != 'float' \
- and host.find("interface").get('mac') != ""]
+ dhcphosts = [host for host in hostbase if host.find('dhcp').get('dhcp') == 'y'
+ and host.find("interface").attrib['mac'] != 'float'
+ and host.find("interface").attrib['mac'] != ""
+ and host.find("interface").attrib['mac'] != "unknown"]
+ numips = 0
hosts = []
for host in dhcphosts:
if len(host.findall("interface")) == 1 and len(host.findall("interface/ip")) == 1:
- hosts.append([host.get('hostname'), host.get('domain'), \
- host.find("interface").get('mac'), \
- host.find("interface/ip").get('ip')])
- elif len(host.findall("interface")) > 1:
+ hosts.append([host.attrib['hostname'], host.attrib['domain'], \
+ host.find("interface").attrib['mac'], \
+ host.find("interface/ip").attrib['ip']])
+ else:
count = 0
- for interface in host.findall("interface"):
+ for interface in host.findall('interface'):
if count == 0 and interface.find("ip") is not None:
- hostdata = [host.get('hostname'), host.get('domain'), \
- interface.get('mac'), interface.find("ip").get('ip')]
+ hostdata = [host.attrib['hostname'], host.attrib['domain'],
+ interface.attrib['mac'], interface.find("ip").attrib['ip']]
elif count != 0 and interface.find("ip") is not None:
- hostdata = [host.get('hostname'), "-".join([host.get('domain'), str(count)]), \
- interface.get('mac'), interface.find("ip").get('ip')]
+ hostdata = [host.attrib['hostname'], "-".join([host.attrib['domain'], str(count)]),
+ interface.attrib['mac'], interface.find("ip").attrib['ip']]
if len(interface.findall("ip")) > 1:
- for ipnode in interface.findall("ip")[1:]:
- hostdata[3] = ", ".join([hostdata[3], ipnode.get('ip')])
+ for ip in interface.findall("ip")[1:]:
+ hostdata[3] = ", ".join([hostdata[3], ip.attrib['ip']])
count += 1
hosts.append(hostdata)
+
+ numips += len(host.findall("interface/ip"))
hosts.sort(lambda x, y: cmp(x[0], y[0]))
self.templates['dhcp'].hosts = hosts
- self.templates['dhcp'].privatesubnets = privatesubnets
- self.templates['dhcp'].subnets140 = subnets140
- self.templates['dhcp'].vlans = vlanandsublist
- self.templates['dhcp'].networkroot = networkroot
- self.filedata['/etc/dhcpd.conf'] = str(self.templates['dhcp'])
+ self.templates['dhcp'].numips = numips
+ self.templates['dhcp'].timecreated = strftime("%a %b %d %H:%M:%S %Z %Y")
+ self.filedata['dhcpd.conf'] = str(self.templates['dhcp'])
+ self.Entries['ConfigFile']['/etc/dhcpd.conf'] = self.FetchFile
+
+ def buildHosts(self):
+ '''This will rebuild the hosts file to include all important machines'''
+ hostbase = self.xdata['hostbase.xml']
+ domains = [host.get('domain') for host in hostbase]
+ domains_set = Set(domains)
+ domain_data = [(domain, domains.count(domain)) for domain in domains_set]
+ domain_data.sort()
+ ips = [(ip, host) for host in hostbase.findall('host')
+ for ip in host.findall("interface/ip")]
+ three_octets = [ip[0].get('ip').rstrip('0123456789').rstrip('.')
+ for ip in ips]
+ three_octets_set = list(Set(three_octets))
+ three_sort = [tuple([int(num) for num in each.split('.')]) for each in three_octets_set]
+ three_sort.sort()
+ three_octets_set = ['.'.join([str(num) for num in each]) for each in three_sort]
+ three_octets_data = [(octet, three_octets.count(octet))
+ for octet in three_octets_set]
+ append_data = [(subnet, [ip for ip in ips \
+ if ip[0].get('ip').rstrip("0123456789").rstrip('.')
+ == subnet[0]]) for subnet in three_octets_data]
+ for each in append_data:
+ each[1].sort(lambda x, y: cmp(int(x[0].get('ip').split('.')[-1]), int(y[0].get('ip').split('.')[-1])))
+ two_octets = [ip.rstrip('0123456789').rstrip('.') for ip in three_octets]
+ two_octets_set = list(Set(two_octets))
+ two_sort = [tuple([int(num) for num in each.split('.')]) for each in two_octets_set]
+ two_sort.sort()
+ two_octets_set = ['.'.join([str(num) for num in each]) for each in two_sort]
+ two_octets_data = [(octet, two_octets.count(octet)) for octet in two_octets_set]
+ self.templates['hosts'].domain_data = domain_data
+ self.templates['hosts'].three_octets_data = three_octets_data
+ self.templates['hosts'].two_octets_data = two_octets_data
+ self.templates['hosts'].three_octets = len(three_octets)
+ self.templates['hosts'].timecreated = strftime("%a %b %d %H:%M:%S %Z %Y")
+ self.filedata['hosts'] = str(self.templates['hosts'])
+ for subnet in append_data:
+ self.templates['hostsapp'].ips = subnet[1]
+ self.templates['hostsapp'].subnet = subnet[0]
+ self.filedata['hosts'] += str(self.templates['hostsapp'])
+ self.Entries['ConfigFile']['/mcs/etc/hosts'] = self.FetchFile
+
+
+ def buildPrinters(self):
+ '''this will rebuild the printers.data file used in
+ our local printers script'''
+ header = """# This file is automatically generated. DO NOT EDIT IT!
+# This datafile is for use with /mcs/bin/printers.
+#
+Name Room User Type Notes
+============== ========== ============ ======================== ====================
+"""
+
+ printers = [host for host in self.xdata['hostbase.xml']
+ if host.find('whatami').get('whatami') == "printer"
+ and host.get('domain') == 'mcs.anl.gov']
+ self.filedata['printers.data'] = header
+ output_list = []
+ for printer in printers:
+ if printer.find('printq').get('printq'):
+ for printq in re.split(',[ ]*', printer.find('printq').get('printq')):
+ output_list.append((printq, printer.find('room').get('room'), printer.find('user').get('user'),
+ printer.find('model').get('model'), printer.find('note').get('note')))
+ output_list.sort()
+ for printer in output_list:
+ self.filedata['printers.data'] += ("%-16s%-12s%-14s%-26s%s\n" % printer)
+ self.Entries['ConfigFile']['/mcs/etc/printers.data'] = self.FetchFile
+
+ def buildHostsLPD(self):
+ '''this rebuilds the hosts.lpd file'''
+ header = """+@machines
++@all-machines
+achilles.ctd.anl.gov
+raven.ops.anl.gov
+seagull.hr.anl.gov
+parrot.ops.anl.gov
+condor.ops.anl.gov
+delphi.esh.anl.gov
+anlcv1.ctd.anl.gov
+anlvms.ctd.anl.gov
+olivia.ctd.anl.gov\n\n"""
+
+ hostbase = self.xdata['hostbase.xml']
+ redmachines = [".".join([host.get('hostname'), host.get('domain')])
+ for host in hostbase if host.find('netgroup').get('netgroup') == 'red']
+ winmachines = [".".join([host.get('hostname'), host.get('domain')])
+ for host in hostbase if host.find('netgroup').get('netgroup') == 'win']
+ redmachines += [name.get('name') for host in hostbase
+ for name in host.findall('interface/ip/name')
+ if host.find('netgroup').get('netgroup') == 'red' and name.get('only') != 'no']
+ winmachines += [name.get('name') for host in hostbase
+ for name in host.findall('interface/ip/name')
+ if host.find('netgroup').get('netgroup') == 'win' and name.get('only') != 'no']
+ redmachines.sort()
+ winmachines.sort()
+ self.filedata['hosts.lpd'] = header
+ for machine in redmachines:
+ self.filedata['hosts.lpd'] += machine + "\n"
+ self.filedata['hosts.lpd'] += "\n"
+ for machine in winmachines:
+ self.filedata['hosts.lpd'] += machine + "\n"
+ self.Entries['ConfigFile']['/mcs/etc/hosts.lpd'] = self.FetchFile
+
+ def buildNetgroups(self):
+ '''this rebuilds the many different files that will eventually
+ get post processed and converted into a ypmap for netgroups'''
+ header = """###################################################################
+# This file lists hosts in the '%s' machine netgroup, it is
+# automatically generated. DO NOT EDIT THIS FILE! To update
+# the hosts in this file, edit hostbase and do a 'make nets'
+# in /mcs/adm/hostbase.
+#
+# Number of hosts in '%s' machine netgroup: %i
+#\n\n"""
+
+ netgroups = {}
+ for host in self.xdata['hostbase.xml']:
+ if host.find('netgroup').get('netgroup') == "" or host.find('netgroup').get('netgroup')== 'none':
+ continue
+ if host.find('netgroup').get('netgroup') not in netgroups:
+ netgroups.update({host.find('netgroup').get('netgroup') :
+ [".".join([host.get('hostname'), host.get('domain')])]})
+ else:
+ netgroups[host.find('netgroup').get('netgroup')].append(".".join([host.get('hostname'),
+ host.get('domain')]))
+
+ for name in host.findall('interface/ip/name'):
+ if name.get('only') != 'no':
+ netgroups[host.find('netgroup').get('netgroup')].append(name.get('name'))
+
+ for netgroup in netgroups:
+ self.filedata["%s-machines" % netgroup] = header % (netgroup, netgroup, len(netgroups[netgroup]))
+ netgroups[netgroup].sort()
+ for each in netgroups[netgroup]:
+ self.filedata["%s-machines" % netgroup] += each + "\n"
+ self.Entries['ConfigFile']["/var/yp/netgroups/%s-machines" % netgroup] = self.FetchFile
+
+ def dumpXML(self):
+ '''this just dumps the info in the hostbase.xml file to be used
+ with external programs'''
+ self.filedata['hostbase.xml'] = self.xdata['hostbase.xml']
+ self.Entries['ConfigFile']['/etc/hostbase.xml'] = self.FetchFile
+
diff --git a/src/lib/Server/Plugins/Pkgmgr.py b/src/lib/Server/Plugins/Pkgmgr.py
index 8521994e0..e77dd99e5 100644
--- a/src/lib/Server/Plugins/Pkgmgr.py
+++ b/src/lib/Server/Plugins/Pkgmgr.py
@@ -1,100 +1,54 @@
'''This module implements a package management scheme for all images'''
__revision__ = '$Revision$'
-from copy import deepcopy
-from re import compile as regcompile
+import re
from syslog import syslog, LOG_ERR
+import Bcfg2.Server.Plugin
-from Bcfg2.Server.Plugin import Plugin, PluginInitError, PluginExecutionError, DirectoryBacked, XMLFileBacked
-
-class PackageEntry(XMLFileBacked):
- '''PackageEntry is a set of packages and locations for a single image'''
- __identifier__ = 'image'
- splitters = {'rpm':regcompile('^(?P<name>[\w\+\d\.]+(-[\w\+\d\.]+)*)-' + \
+class PNode(Bcfg2.Server.Plugin.LNode):
+ '''PNode has a list of packages available at a particular group intersection'''
+ splitters = {'rpm':re.compile('^(?P<name>[\w\+\d\.]+(-[\w\+\d\.]+)*)-' + \
'(?P<version>[\w\d\.]+-([\w\d\.]+))\.(?P<arch>\w+)\.rpm$'),
- 'encap':regcompile('^(?P<name>\w+)-(?P<version>[\w\d\.-]+).encap.*$')}
-
- def __init__(self, filename):
- XMLFileBacked.__init__(self, filename)
- self.packages = {}
-
- def Index(self):
- '''Build internal data structures'''
- XMLFileBacked.Index(self)
- self.packages = {}
- for location in self.entries:
- for pkg in location.getchildren():
- if location.attrib.has_key('type'):
- pkg.set('type', location.get('type'))
- if pkg.attrib.has_key("simplefile"):
- self.packages[pkg.get('name')] = {}
- for key in pkg.attrib:
- self.packages[pkg.get('name')][key] = pkg.attrib[key]
- # most attribs will be set from pkg
- self.packages[pkg.get('name')]['url'] = "%s/%s" % (location.get('uri'), pkg.get('simplefile'))
- elif pkg.attrib.has_key("file"):
- if self.splitters.has_key(pkg.get('type')):
- mdata = self.splitters[pkg.get('type')].match(pkg.get('file'))
- if not mdata:
- syslog(LOG_ERR, "Failed to match pkg %s" % pkg.get('file'))
- continue
- pkgname = mdata.group('name')
- self.packages[pkgname] = mdata.groupdict()
- self.packages[pkgname]['url'] = location.get('uri') + '/' + pkg.get('file')
- self.packages[pkgname]['type'] = pkg.get('type')
- else:
- derived = [(ptype, self.splitters[ptype].match(pkg.get('file')).groupdict())
- for ptype in self.splitters if self.splitters[ptype].match(pkg.get('file'))]
- if not derived:
- syslog("Failed to match pkg %s" % pkg.get('file'))
- else:
- (ptype, mdata) = derived[0]
- pkgname = mdata['name']
- self.packages[pkgname] = mdata
- self.packages[pkgname]['url'] = location.get('uri') + '/' + pkg.get('file')
- self.packages[pkgname]['type'] = ptype
+ 'encap':re.compile('^(?P<name>\w+)-(?P<version>[\w\d\.-]+).encap.*$')}
+
+ def __init__(self, data, plist, parent=None):
+ # copy local attributes to all child nodes if no local attribute exists
+ for child in data.getchildren():
+ for attr in [key for key in data.attrib.keys() if key != 'name' and not child.attrib.has_key(key)]:
+ child.set(attr, data.get(attr))
+ Bcfg2.Server.Plugin.LNode.__init__(self, data, plist, parent)
+ for pkg in data.findall('./Package'):
+ if pkg.attrib.has_key('name') and pkg.get('name') not in plist:
+ plist.append(pkg.get('name'))
+ if pkg.attrib.has_key('simplefile'):
+ pkg.set('url', "%s/%s" % (pkg.get('uri'), pkg.get('simplefile')))
+ self.contents[pkg.get('name')] = pkg.attrib
+ else:
+ if pkg.attrib.has_key('file'):
+ pkg.set('url', '%s/%s' % (pkg.get('uri'), pkg.get('file')))
+ if self.splitters.has_key(pkg.get('type')):
+ mdata = self.splitters[pkg.get('type')].match(pkg.get('file'))
+ if not mdata:
+ syslog(LOG_ERR, "Pkgmgr: Failed to match pkg %s" % pkg.get('file'))
+ continue
+ pkgname = mdata.group('name')
+ self.contents[pkgname] = mdata.groupdict()
+ if pkg.attrib.get('file'):
+ self.contents[pkgname]['url'] = pkg.get('url')
+ self.contents[pkgname]['type'] = pkg.get('type')
+ if pkgname not in plist:
+ plist.append(pkgname)
else:
- self.packages[pkg.get('name')] = pkg.attrib
+ self.contents[pkg.get('name')] = pkg.attrib
-class PackageDir(DirectoryBacked):
- '''A directory of package files'''
- __child__ = PackageEntry
+class PkgSrc(Bcfg2.Server.Plugin.XMLSrc):
+ '''PkgSrc files contain a PNode hierarchy that returns matching package entries'''
+ __node__ = PNode
-class Pkgmgr(Plugin):
+class Pkgmgr(Bcfg2.Server.Plugin.XMLPrioDir):
'''This is a generator that handles package assignments'''
__name__ = 'Pkgmgr'
__version__ = '$Id$'
__author__ = 'bcfg-dev@mcs.anl.gov'
-
- def __init__(self, core, datastore):
- Plugin.__init__(self, core, datastore)
- try:
- self.pkgdir = PackageDir(self.data, self.core.fam)
- except OSError:
- self.LogError("Pkgmgr: Failed to load package indices")
- raise PluginInitError
-
- def FindHandler(self, entry):
- '''Non static mechanism of determining entry provisioning'''
- if entry.tag != 'Package':
- raise PluginExecutionError, (entry.tag, entry.get('name'))
- return self.LocatePackage
-
- def LocatePackage(self, entry, metadata):
- '''Locates a package entry for particular metadata'''
- pkgname = entry.get('name')
- if self.pkgdir.entries.has_key("%s.xml" % metadata.hostname):
- pkglist = self.pkgdir["%s.xml" % metadata.hostname]
- if pkglist.packages.has_key(pkgname):
- pkginfo = pkglist.packages[pkgname]
- [entry.attrib.__setitem__(field, pkginfo[field]) for field in pkginfo]
- return
- elif not self.pkgdir.entries.has_key("%s.xml" % metadata.image):
- self.LogError("Pkgmgr: no package index for image %s" % metadata.image)
- raise PluginExecutionError, ("Image", metadata.image)
- pkglist = self.pkgdir["%s.xml" % (metadata.image)]
- if pkglist.packages.has_key(pkgname):
- pkginfo = pkglist.packages[pkgname]
- [entry.attrib.__setitem__(x, pkginfo[x]) for x in pkginfo]
- else:
- raise PluginExecutionError, ("Package", pkgname)
+ __child__ = PkgSrc
+ __element__ = 'Package'
diff --git a/src/lib/Server/Plugins/Svcmgr.py b/src/lib/Server/Plugins/Svcmgr.py
index 2f2c7e5eb..da5ab341c 100644
--- a/src/lib/Server/Plugins/Svcmgr.py
+++ b/src/lib/Server/Plugins/Svcmgr.py
@@ -1,23 +1,20 @@
'''This generator provides service mappings'''
__revision__ = '$Revision$'
-from Bcfg2.Server.Plugin import Plugin, ScopedXMLFile, PluginInitError
+import Bcfg2.Server.Plugin
-class Svcmgr(Plugin):
+class SNode(Bcfg2.Server.Plugin.LNode):
+ '''SNode has a list of services available at a particular group intersection'''
+ __leaf__ = './Service'
+
+class SvcSrc(Bcfg2.Server.Plugin.XMLSrc):
+ '''SvcSrc files contain prioritized service definitions'''
+ __node__ = SNode
+
+class Svcmgr(Bcfg2.Server.Plugin.XMLPrioDir):
'''This is a generator that handles service assignments'''
__name__ = 'Svcmgr'
__version__ = '$Id$'
__author__ = 'bcfg-dev@mcs.anl.gov'
-
- def __init__(self, core, datastore):
- Plugin.__init__(self, core, datastore)
- try:
- self.svc = ScopedXMLFile("%s/etc/services.xml"%(datastore), self.core.fam)
- except OSError:
- self.LogError("Failed to load service definition file")
- raise PluginInitError
- self.Entries = self.svc.__provides__
-
-
-
-
+ __child__ = SvcSrc
+ __element__ = 'Service'