author     Fabian Affolter <fabian@bernewireless.net>  2010-12-08 23:27:15 +0100
committer  Fabian Affolter <fabian@bernewireless.net>  2010-12-08 23:27:15 +0100
commit     e833a7a76b231cd346f09c9a422ecb855d1cc6b4 (patch)
tree       e2762fc31e1162a6859dbd6cd7b5c28208296d9d /build/lib
parent     baf5854f414c2a35887e3fbbcfd094e12e7c6ee7 (diff)
Merge with upstream
Diffstat (limited to 'build/lib')
-rw-r--r--  build/lib/Bcfg2/Client/Frame.py  418
-rw-r--r--  build/lib/Bcfg2/Client/Tools/APT.py  218
-rw-r--r--  build/lib/Bcfg2/Client/Tools/Action.py  83
-rw-r--r--  build/lib/Bcfg2/Client/Tools/Blast.py  34
-rw-r--r--  build/lib/Bcfg2/Client/Tools/Chkconfig.py  101
-rw-r--r--  build/lib/Bcfg2/Client/Tools/DebInit.py  117
-rw-r--r--  build/lib/Bcfg2/Client/Tools/Encap.py  55
-rw-r--r--  build/lib/Bcfg2/Client/Tools/FreeBSDInit.py  27
-rw-r--r--  build/lib/Bcfg2/Client/Tools/FreeBSDPackage.py  47
-rw-r--r--  build/lib/Bcfg2/Client/Tools/IPS.py  61
-rw-r--r--  build/lib/Bcfg2/Client/Tools/MacPorts.py  60
-rw-r--r--  build/lib/Bcfg2/Client/Tools/POSIX.py  766
-rw-r--r--  build/lib/Bcfg2/Client/Tools/Pacman.py  82
-rw-r--r--  build/lib/Bcfg2/Client/Tools/Portage.py  71
-rw-r--r--  build/lib/Bcfg2/Client/Tools/RPMng.py  1034
-rw-r--r--  build/lib/Bcfg2/Client/Tools/RcUpdate.py  93
-rw-r--r--  build/lib/Bcfg2/Client/Tools/SMF.py  132
-rw-r--r--  build/lib/Bcfg2/Client/Tools/SYSV.py  107
-rw-r--r--  build/lib/Bcfg2/Client/Tools/Upstart.py  78
-rw-r--r--  build/lib/Bcfg2/Client/Tools/YUMng.py  848
-rw-r--r--  build/lib/Bcfg2/Client/Tools/__init__.py  370
-rw-r--r--  build/lib/Bcfg2/Client/Tools/launchd.py  131
-rw-r--r--  build/lib/Bcfg2/Client/Tools/rpmtools.py  1115
-rw-r--r--  build/lib/Bcfg2/Client/XML.py  37
-rw-r--r--  build/lib/Bcfg2/Client/__init__.py  4
-rw-r--r--  build/lib/Bcfg2/Component.py  277
-rw-r--r--  build/lib/Bcfg2/Logger.py  233
-rw-r--r--  build/lib/Bcfg2/Options.py  334
-rw-r--r--  build/lib/Bcfg2/Proxy.py  316
-rw-r--r--  build/lib/Bcfg2/SSLServer.py  416
-rw-r--r--  build/lib/Bcfg2/Server/Admin/Backup.py  33
-rw-r--r--  build/lib/Bcfg2/Server/Admin/Bundle.py  100
-rw-r--r--  build/lib/Bcfg2/Server/Admin/Client.py  64
-rw-r--r--  build/lib/Bcfg2/Server/Admin/Compare.py  137
-rw-r--r--  build/lib/Bcfg2/Server/Admin/Examples.py  71
-rw-r--r--  build/lib/Bcfg2/Server/Admin/Group.py  66
-rw-r--r--  build/lib/Bcfg2/Server/Admin/Init.py  280
-rw-r--r--  build/lib/Bcfg2/Server/Admin/Minestruct.py  69
-rw-r--r--  build/lib/Bcfg2/Server/Admin/Perf.py  37
-rw-r--r--  build/lib/Bcfg2/Server/Admin/Pull.py  138
-rw-r--r--  build/lib/Bcfg2/Server/Admin/Query.py  78
-rw-r--r--  build/lib/Bcfg2/Server/Admin/Reports.py  357
-rw-r--r--  build/lib/Bcfg2/Server/Admin/Snapshots.py  163
-rw-r--r--  build/lib/Bcfg2/Server/Admin/Tidy.py  66
-rw-r--r--  build/lib/Bcfg2/Server/Admin/Viz.py  101
-rw-r--r--  build/lib/Bcfg2/Server/Admin/Web.py  79
-rw-r--r--  build/lib/Bcfg2/Server/Admin/Xcmd.py  49
-rw-r--r--  build/lib/Bcfg2/Server/Admin/__init__.py  114
-rw-r--r--  build/lib/Bcfg2/Server/Admin/test.py  73
-rw-r--r--  build/lib/Bcfg2/Server/Core.py  439
-rw-r--r--  build/lib/Bcfg2/Server/FileMonitor.py  307
-rw-r--r--  build/lib/Bcfg2/Server/Hostbase/__init__.py  0
-rw-r--r--  build/lib/Bcfg2/Server/Hostbase/backends.py  68
-rw-r--r--  build/lib/Bcfg2/Server/Hostbase/hostbase/__init__.py  0
-rw-r--r--  build/lib/Bcfg2/Server/Hostbase/hostbase/admin.py  15
-rw-r--r--  build/lib/Bcfg2/Server/Hostbase/hostbase/models.py  210
-rw-r--r--  build/lib/Bcfg2/Server/Hostbase/hostbase/urls.py  68
-rw-r--r--  build/lib/Bcfg2/Server/Hostbase/hostbase/views.py  972
-rw-r--r--  build/lib/Bcfg2/Server/Hostbase/ldapauth.py  172
-rw-r--r--  build/lib/Bcfg2/Server/Hostbase/manage.py  11
-rw-r--r--  build/lib/Bcfg2/Server/Hostbase/nisauth.py  42
-rw-r--r--  build/lib/Bcfg2/Server/Hostbase/regex.py  6
-rw-r--r--  build/lib/Bcfg2/Server/Hostbase/settings.py  142
-rw-r--r--  build/lib/Bcfg2/Server/Hostbase/urls.py  27
-rw-r--r--  build/lib/Bcfg2/Server/Plugin.py  886
-rw-r--r--  build/lib/Bcfg2/Server/Plugins/Account.py  93
-rw-r--r--  build/lib/Bcfg2/Server/Plugins/BB.py  84
-rw-r--r--  build/lib/Bcfg2/Server/Plugins/Base.py  38
-rw-r--r--  build/lib/Bcfg2/Server/Plugins/Bundler.py  76
-rw-r--r--  build/lib/Bcfg2/Server/Plugins/Bzr.py  36
-rw-r--r--  build/lib/Bcfg2/Server/Plugins/Cfg.py  165
-rw-r--r--  build/lib/Bcfg2/Server/Plugins/Cvs.py  47
-rw-r--r--  build/lib/Bcfg2/Server/Plugins/DBStats.py  110
-rw-r--r--  build/lib/Bcfg2/Server/Plugins/Darcs.py  49
-rw-r--r--  build/lib/Bcfg2/Server/Plugins/Decisions.py  64
-rw-r--r--  build/lib/Bcfg2/Server/Plugins/Deps.py  103
-rw-r--r--  build/lib/Bcfg2/Server/Plugins/Editor.py  73
-rw-r--r--  build/lib/Bcfg2/Server/Plugins/Fossil.py  52
-rw-r--r--  build/lib/Bcfg2/Server/Plugins/Git.py  45
-rw-r--r--  build/lib/Bcfg2/Server/Plugins/GroupPatterns.py  117
-rw-r--r--  build/lib/Bcfg2/Server/Plugins/Guppy.py  63
-rw-r--r--  build/lib/Bcfg2/Server/Plugins/Hg.py  47
-rw-r--r--  build/lib/Bcfg2/Server/Plugins/Hostbase.py  585
-rw-r--r--  build/lib/Bcfg2/Server/Plugins/Metadata.py  809
-rw-r--r--  build/lib/Bcfg2/Server/Plugins/NagiosGen.py  114
-rw-r--r--  build/lib/Bcfg2/Server/Plugins/Ohai.py  79
-rw-r--r--  build/lib/Bcfg2/Server/Plugins/Packages.py  869
-rw-r--r--  build/lib/Bcfg2/Server/Plugins/Pkgmgr.py  155
-rw-r--r--  build/lib/Bcfg2/Server/Plugins/Probes.py  150
-rw-r--r--  build/lib/Bcfg2/Server/Plugins/Properties.py  37
-rw-r--r--  build/lib/Bcfg2/Server/Plugins/Rules.py  11
-rw-r--r--  build/lib/Bcfg2/Server/Plugins/SGenshi.py  76
-rw-r--r--  build/lib/Bcfg2/Server/Plugins/SSHbase.py  279
-rw-r--r--  build/lib/Bcfg2/Server/Plugins/SSLCA.py  239
-rw-r--r--  build/lib/Bcfg2/Server/Plugins/Snapshots.py  130
-rw-r--r--  build/lib/Bcfg2/Server/Plugins/Statistics.py  161
-rw-r--r--  build/lib/Bcfg2/Server/Plugins/Svcmgr.py  12
-rw-r--r--  build/lib/Bcfg2/Server/Plugins/Svn.py  46
-rw-r--r--  build/lib/Bcfg2/Server/Plugins/TCheetah.py  78
-rw-r--r--  build/lib/Bcfg2/Server/Plugins/TGenshi.py  126
-rw-r--r--  build/lib/Bcfg2/Server/Plugins/Trigger.py  37
-rw-r--r--  build/lib/Bcfg2/Server/Plugins/Web.py  47
-rw-r--r--  build/lib/Bcfg2/Server/Plugins/__Web.py  47
-rw-r--r--  build/lib/Bcfg2/Server/Plugins/__init__.py  35
-rw-r--r--  build/lib/Bcfg2/Server/Reports/__init__.py  1
-rw-r--r--  build/lib/Bcfg2/Server/Reports/backends.py  35
-rw-r--r--  build/lib/Bcfg2/Server/Reports/importscript.py  270
-rw-r--r--  build/lib/Bcfg2/Server/Reports/manage.py  11
-rw-r--r--  build/lib/Bcfg2/Server/Reports/nisauth.py  43
-rw-r--r--  build/lib/Bcfg2/Server/Reports/reports/__init__.py  1
-rw-r--r--  build/lib/Bcfg2/Server/Reports/reports/fixtures/initial_version.xml  35
-rw-r--r--  build/lib/Bcfg2/Server/Reports/reports/models.py  328
-rw-r--r--  build/lib/Bcfg2/Server/Reports/reports/templates/404.html  8
-rw-r--r--  build/lib/Bcfg2/Server/Reports/reports/templates/base-timeview.html  25
-rw-r--r--  build/lib/Bcfg2/Server/Reports/reports/templates/base.html  95
-rw-r--r--  build/lib/Bcfg2/Server/Reports/reports/templates/clients/detail.html  127
-rw-r--r--  build/lib/Bcfg2/Server/Reports/reports/templates/clients/detailed-list.html  46
-rw-r--r--  build/lib/Bcfg2/Server/Reports/reports/templates/clients/history.html  20
-rw-r--r--  build/lib/Bcfg2/Server/Reports/reports/templates/clients/index.html  34
-rw-r--r--  build/lib/Bcfg2/Server/Reports/reports/templates/clients/manage.html  45
-rw-r--r--  build/lib/Bcfg2/Server/Reports/reports/templates/config_items/item.html  109
-rw-r--r--  build/lib/Bcfg2/Server/Reports/reports/templates/config_items/listing.html  33
-rw-r--r--  build/lib/Bcfg2/Server/Reports/reports/templates/displays/summary.html  42
-rw-r--r--  build/lib/Bcfg2/Server/Reports/reports/templates/displays/timing.html  38
-rw-r--r--  build/lib/Bcfg2/Server/Reports/reports/templates/widgets/filter_bar.html  13
-rw-r--r--  build/lib/Bcfg2/Server/Reports/reports/templates/widgets/interaction_list.inc  38
-rw-r--r--  build/lib/Bcfg2/Server/Reports/reports/templates/widgets/page_bar.html  23
-rw-r--r--  build/lib/Bcfg2/Server/Reports/reports/templatetags/__init__.py  0
-rw-r--r--  build/lib/Bcfg2/Server/Reports/reports/templatetags/bcfg2_tags.py  274
-rw-r--r--  build/lib/Bcfg2/Server/Reports/reports/templatetags/syntax_coloring.py  41
-rw-r--r--  build/lib/Bcfg2/Server/Reports/reports/urls.py  55
-rw-r--r--  build/lib/Bcfg2/Server/Reports/reports/views.py  379
-rw-r--r--  build/lib/Bcfg2/Server/Reports/settings.py  144
-rw-r--r--  build/lib/Bcfg2/Server/Reports/updatefix.py  184
-rw-r--r--  build/lib/Bcfg2/Server/Reports/urls.py  14
-rw-r--r--  build/lib/Bcfg2/Server/Reports/utils.py  116
-rw-r--r--  build/lib/Bcfg2/Server/Snapshots/__init__.py  30
-rw-r--r--  build/lib/Bcfg2/Server/Snapshots/model.py  278
-rw-r--r--  build/lib/Bcfg2/Server/__init__.py  6
-rw-r--r--  build/lib/Bcfg2/Statistics.py  32
-rw-r--r--  build/lib/Bcfg2/__init__.py  4
141 files changed, 21466 insertions, 0 deletions
diff --git a/build/lib/Bcfg2/Client/Frame.py b/build/lib/Bcfg2/Client/Frame.py
new file mode 100644
index 000000000..545d4b584
--- /dev/null
+++ b/build/lib/Bcfg2/Client/Frame.py
@@ -0,0 +1,418 @@
+"""
+Frame is the Client Framework that verifies and
+installs entries, and generates statistics.
+"""
+__revision__ = '$Revision$'
+
+import logging
+import time
+import Bcfg2.Client.Tools
+import Bcfg2.Client.XML
+
+def cmpent(ent1, ent2):
+ """Sort entries."""
+ if ent1.tag != ent2.tag:
+ return cmp(ent1.tag, ent2.tag)
+ else:
+ return cmp(ent1.get('name'), ent2.get('name'))
+
+def promptFilter(prompt, entries):
+ """Filter a supplied list based on user input."""
+ ret = []
+ entries.sort(cmpent)
+ for entry in entries[:]:
+ if 'qtext' in entry.attrib:
+ iprompt = entry.get('qtext')
+ else:
+ iprompt = prompt % (entry.tag, entry.get('name'))
+ try:
+ if raw_input(iprompt) in ['y', 'Y']:
+ ret.append(entry)
+ except EOFError:
+ # python 2.4.3 on CentOS doesn't like ^C for some reason
+ break
+ except:
+ print("Error while reading input")
+ continue
+ return ret
+
+def matches_entry(entryspec, entry):
+ # both are (tag, name)
+ if entryspec == entry:
+ return True
+ else:
+ for i in [0, 1]:
+ if entryspec[i] == entry[i]:
+ continue
+ elif entryspec[i] == '*':
+ continue
+ elif '*' in entryspec[i]:
+ starpt = entryspec[i].index('*')
+ if entry[i].startswith(entryspec[i][:starpt]):
+ continue
+ return False
+ return True
+
+def matches_white_list(entry, whitelist):
+ return True in [matches_entry(we, (entry.tag, entry.get('name'))) for we in whitelist]
+
+def passes_black_list(entry, blacklist):
+ return True not in [matches_entry(be, (entry.tag, entry.get('name'))) for be in blacklist]
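+
+# Illustrative example (editorial comment, not from upstream): with a whitelist
+# of [('Path', '/etc/*')], matches_white_list() returns True for a Path entry
+# named '/etc/motd', because matches_entry() treats the trailing '*' as a
+# prefix match on the name component.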
+
+class Frame:
+ """Frame is the container for all Tool objects and state information."""
+ def __init__(self, config, setup, times, drivers, dryrun):
+ self.config = config
+ self.times = times
+ self.dryrun = dryrun
+ self.times['initialization'] = time.time()
+ self.setup = setup
+ self.tools = []
+ self.states = {}
+ self.whitelist = []
+ self.blacklist = []
+ self.removal = []
+ self.logger = logging.getLogger("Bcfg2.Client.Frame")
+ for driver in drivers[:]:
+ if driver not in Bcfg2.Client.Tools.drivers and \
+ isinstance(driver, str):
+ self.logger.error("Tool driver %s is not available" % driver)
+ drivers.remove(driver)
+
+ tclass = {}
+ for tool in drivers:
+ if not isinstance(tool, str):
+ tclass[time.time()] = tool
+ tool_class = "Bcfg2.Client.Tools.%s" % tool
+ try:
+ tclass[tool] = getattr(__import__(tool_class, globals(),
+ locals(), ['*']),
+ tool)
+ except ImportError:
+ continue
+ except:
+ self.logger.error("Tool %s unexpectedly failed to load" % tool,
+ exc_info=1)
+
+ for tool in list(tclass.values()):
+ try:
+ self.tools.append(tool(self.logger, setup, config))
+ except Bcfg2.Client.Tools.toolInstantiationError:
+ continue
+ except:
+ self.logger.error("Failed to instantiate tool %s" % \
+ (tool), exc_info=1)
+
+ for tool in self.tools[:]:
+ for conflict in getattr(tool, 'conflicts', []):
+ [self.tools.remove(item) for item in self.tools \
+ if item.name == conflict]
+
+ self.logger.info("Loaded tool drivers:")
+ self.logger.info([tool.name for tool in self.tools])
+ if not self.dryrun and not self.setup['bundle']:
+ for cfile in [cfl for cfl in config.findall(".//Path") \
+ if cfl.get('name') in self.__important__ and \
+ cfl.get('type') == 'file']:
+ tl = [t for t in self.tools if t.handlesEntry(cfile) \
+ and t.canVerify(cfile)]
+ if tl:
+ if not tl[0].VerifyPath(cfile, []):
+ if self.setup['interactive'] and not \
+ promptFilter("Install %s: %s? (y/N):", [cfile]):
+ continue
+ try:
+ self.states[cfile] = tl[0].InstallPath(cfile)
+ except:
+ self.logger.error("Unexpected tool failure",
+ exc_info=1)
+ # find entries not handled by any tools
+ problems = [entry for struct in config for \
+ entry in struct if entry not in self.handled]
+
+ if problems:
+ self.logger.error("The following entries are not handled by any tool:")
+ self.logger.error(["%s:%s:%s" % (entry.tag, entry.get('type'), \
+ entry.get('name')) for entry in problems])
+ self.logger.error("")
+ entries = [(entry.tag, entry.get('name')) for struct in config for entry in struct]
+ pkgs = [(entry.get('name'), entry.get('origin')) for struct in config for entry in struct if entry.tag == 'Package']
+ multi = []
+ for entry in entries[:]:
+ if entries.count(entry) > 1:
+ multi.append(entry)
+ entries.remove(entry)
+ if multi:
+ self.logger.debug("The following entries are included multiple times:")
+ self.logger.debug(["%s:%s" % entry for entry in multi])
+ self.logger.debug("")
+ if pkgs:
+ self.logger.debug("The following packages are specified in bcfg2:")
+ self.logger.debug([pkg[0] for pkg in pkgs if pkg[1] == None])
+ self.logger.debug("The following packages are prereqs added by Packages:")
+ self.logger.debug([pkg[0] for pkg in pkgs if pkg[1] == 'Packages'])
+
+
+ def __getattr__(self, name):
+ if name in ['extra', 'handled', 'modified', '__important__']:
+ ret = []
+ for tool in self.tools:
+ ret += getattr(tool, name)
+ return ret
+ elif name in self.__dict__:
+ return self.__dict__[name]
+ raise AttributeError(name)
+
+ def Inventory(self):
+ """
+ Verify all entries,
+ find extra entries,
+ and build up workqueues
+
+ """
+ # initialize all states
+ for struct in self.config.getchildren():
+ for entry in struct.getchildren():
+ self.states[entry] = False
+ for tool in self.tools:
+ try:
+ tool.Inventory(self.states)
+ except:
+ self.logger.error("%s.Inventory() call failed:" % tool.name, exc_info=1)
+
+ def Decide(self):
+ """Set self.whitelist based on user interaction."""
+ prompt = "Install %s: %s? (y/N): "
+ rprompt = "Remove %s: %s? (y/N): "
+ if self.setup['remove']:
+ if self.setup['remove'] == 'all':
+ self.removal = self.extra
+ elif self.setup['remove'] == 'services':
+ self.removal = [entry for entry in self.extra \
+ if entry.tag == 'Service']
+ elif self.setup['remove'] == 'packages':
+ self.removal = [entry for entry in self.extra \
+ if entry.tag == 'Package']
+
+ candidates = [entry for entry in self.states \
+ if not self.states[entry]]
+ self.whitelist = [entry for entry in self.states \
+ if not self.states[entry]]
+ # Need to process decision stuff early so that dryrun mode works with it
+ if self.setup['decision'] == 'whitelist':
+ dwl = self.setup['decision_list']
+ w_to_rem = [e for e in self.whitelist \
+ if not matches_white_list(e, dwl)]
+ if w_to_rem:
+ self.logger.info("In whitelist mode: suppressing installation of:")
+ self.logger.info(["%s:%s" % (e.tag, e.get('name')) for e in w_to_rem])
+ self.whitelist = [x for x in self.whitelist \
+ if x not in w_to_rem]
+
+ elif self.setup['decision'] == 'blacklist':
+ b_to_rem = [e for e in self.whitelist \
+ if not passes_black_list(e, self.setup['decision_list'])]
+ if b_to_rem:
+ self.logger.info("In blacklist mode: suppressing installation of:")
+ self.logger.info(["%s:%s" % (e.tag, e.get('name')) for e in b_to_rem])
+ self.whitelist = [x for x in self.whitelist if x not in b_to_rem]
+
+ if self.dryrun:
+ if self.whitelist:
+ self.logger.info("In dryrun mode: suppressing entry installation for:")
+ self.logger.info(["%s:%s" % (entry.tag, entry.get('name')) for entry \
+ in self.whitelist])
+ self.whitelist = []
+ if self.removal:
+ self.logger.info("In dryrun mode: suppressing entry removal for:")
+ self.logger.info(["%s:%s" % (entry.tag, entry.get('name')) for entry \
+ in self.removal])
+ self.removal = []
+ return
+ # Here is where most of the work goes
+ # first perform bundle filtering
+ if self.setup['bundle']:
+ all_bundle_names = [b.get('name') for b in
+ self.config.findall('./Bundle')]
+ # warn if non-existent bundle given
+ for bundle in self.setup['bundle']:
+ if bundle not in all_bundle_names:
+ self.logger.info("Warning: Bundle %s not found" % bundle)
+ bundles = [b for b in self.config.findall('./Bundle') \
+ if b.get('name') in self.setup['bundle']]
+ self.whitelist = [e for e in self.whitelist if \
+ True in [e in b for b in bundles]]
+ elif self.setup['indep']:
+ bundles = [nb for nb in self.config.getchildren() if nb.tag != \
+ 'Bundle']
+ else:
+ bundles = self.config.getchildren()
+
+ # first process prereq actions
+ for bundle in bundles[:]:
+ if bundle.tag != 'Bundle':
+ continue
+ actions = [a for a in bundle.findall('./Action') \
+ if a.get('timing') != 'post']
+ # now we process all "always actions"
+ bmodified = len([item for item in bundle if item in self.whitelist])
+ for action in actions:
+ if bmodified or action.get('when') == 'always':
+ self.DispatchInstallCalls([action])
+ # need to test to fail entries in whitelist
+ if False in [self.states[a] for a in actions]:
+ # then display bundles forced off with entries
+ self.logger.info("Bundle %s failed prerequisite action" % \
+ (bundle.get('name')))
+ bundles.remove(bundle)
+ b_to_remv = [ent for ent in self.whitelist if ent in bundle]
+ if b_to_remv:
+ self.logger.info("Not installing entries from Bundle %s" % \
+ (bundle.get('name')))
+ self.logger.info(["%s:%s" % (e.tag, e.get('name')) for e in b_to_remv])
+ [self.whitelist.remove(ent) for ent in b_to_remv]
+
+ if self.setup['interactive']:
+ self.whitelist = promptFilter(prompt, self.whitelist)
+ self.removal = promptFilter(rprompt, self.removal)
+
+ for entry in candidates:
+ if entry not in self.whitelist:
+ self.blacklist.append(entry)
+
+ def DispatchInstallCalls(self, entries):
+ """Dispatch install calls to underlying tools."""
+ for tool in self.tools:
+ handled = [entry for entry in entries if tool.canInstall(entry)]
+ if not handled:
+ continue
+ try:
+ tool.Install(handled, self.states)
+ except:
+ self.logger.error("%s.Install() call failed:" % tool.name, exc_info=1)
+
+ def Install(self):
+ """Install all entries."""
+ self.DispatchInstallCalls(self.whitelist)
+ mods = self.modified
+ mbundles = [struct for struct in self.config.findall('Bundle') if \
+ [mod for mod in mods if mod in struct]]
+
+ if self.modified:
+ # Handle Bundle interdeps
+ if mbundles:
+ self.logger.info("The Following Bundles have been modified:")
+ self.logger.info([mbun.get('name') for mbun in mbundles])
+ self.logger.info("")
+ tbm = [(t, b) for t in self.tools for b in mbundles]
+ for tool, bundle in tbm:
+ try:
+ tool.Inventory(self.states, [bundle])
+ except:
+ self.logger.error("%s.Inventory() call failed:" % tool.name, exc_info=1)
+ clobbered = [entry for bundle in mbundles for entry in bundle \
+ if not self.states[entry] and entry not in self.blacklist]
+ if clobbered:
+ self.logger.debug("Found clobbered entries:")
+ self.logger.debug(["%s:%s" % (entry.tag, entry.get('name')) \
+ for entry in clobbered])
+ if not self.setup['interactive']:
+ self.DispatchInstallCalls(clobbered)
+
+ for bundle in self.config.findall('.//Bundle'):
+ if self.setup['bundle'] and \
+ bundle.get('name') not in self.setup['bundle']:
+ # prune out unspecified bundles when running with -b
+ continue
+ for tool in self.tools:
+ try:
+ if bundle in mbundles:
+ tool.BundleUpdated(bundle, self.states)
+ else:
+ tool.BundleNotUpdated(bundle, self.states)
+ except:
+ self.logger.error("%s.BundleNotUpdated() call failed:" % \
+ (tool.name), exc_info=1)
+
+ def Remove(self):
+ """Remove extra entries."""
+ for tool in self.tools:
+ extras = [entry for entry in self.removal if tool.handlesEntry(entry)]
+ if extras:
+ try:
+ tool.Remove(extras)
+ except:
+ self.logger.error("%s.Remove() failed" % tool.name, exc_info=1)
+
+ def CondDisplayState(self, phase):
+ """Conditionally print tracing information."""
+ self.logger.info('\nPhase: %s' % phase)
+ self.logger.info('Correct entries:\t%d' % list(self.states.values()).count(True))
+ self.logger.info('Incorrect entries:\t%d' % list(self.states.values()).count(False))
+ if phase == 'final' and list(self.states.values()).count(False):
+ self.logger.info(["%s:%s" % (entry.tag, entry.get('name')) for \
+ entry in self.states if not self.states[entry]])
+ self.logger.info('Total managed entries:\t%d' % len(list(self.states.values())))
+ self.logger.info('Unmanaged entries:\t%d' % len(self.extra))
+ if phase == 'final' and self.setup['extra']:
+ self.logger.info(["%s:%s" % (entry.tag, entry.get('name')) \
+ for entry in self.extra])
+
+ self.logger.info("")
+
+ if ((list(self.states.values()).count(False) == 0) and not self.extra):
+ self.logger.info('All entries correct.')
+
+ def ReInventory(self):
+ """Recheck everything."""
+ if not self.dryrun and self.setup['kevlar']:
+ self.logger.info("Rechecking system inventory")
+ self.Inventory()
+
+ def Execute(self):
+ """Run all methods."""
+ self.Inventory()
+ self.times['inventory'] = time.time()
+ self.CondDisplayState('initial')
+ self.Decide()
+ self.Install()
+ self.times['install'] = time.time()
+ self.Remove()
+ self.times['remove'] = time.time()
+ if self.modified:
+ self.ReInventory()
+ self.times['reinventory'] = time.time()
+ self.times['finished'] = time.time()
+ self.CondDisplayState('final')
+
+ def GenerateStats(self):
+ """Generate XML summary of execution statistics."""
+ feedback = Bcfg2.Client.XML.Element("upload-statistics")
+ stats = Bcfg2.Client.XML.SubElement(feedback,
+ 'Statistics',
+ total=str(len(self.states)),
+ client_version=__revision__,
+ version='2.0',
+ revision=self.config.get('revision', '-1'))
+ good = len([key for key, val in list(self.states.items()) if val])
+ stats.set('good', str(good))
+ if len([key for key, val in list(self.states.items()) if not val]) == 0:
+ stats.set('state', 'clean')
+ else:
+ stats.set('state', 'dirty')
+
+ # List bad elements of the configuration
+ for (data, ename) in [(self.modified, 'Modified'), (self.extra, "Extra"), \
+ ([entry for entry in self.states if not \
+ self.states[entry]], "Bad")]:
+ container = Bcfg2.Client.XML.SubElement(stats, ename)
+ for item in data:
+ item.set('qtext', '')
+ container.append(item)
+ item.text = None
+
+ timeinfo = Bcfg2.Client.XML.Element("OpStamps")
+ feedback.append(stats)
+ for (event, timestamp) in list(self.times.items()):
+ timeinfo.set(event, str(timestamp))
+ stats.append(timeinfo)
+ return feedback
diff --git a/build/lib/Bcfg2/Client/Tools/APT.py b/build/lib/Bcfg2/Client/Tools/APT.py
new file mode 100644
index 000000000..2afe2eab7
--- /dev/null
+++ b/build/lib/Bcfg2/Client/Tools/APT.py
@@ -0,0 +1,218 @@
+"""This is the bcfg2 support for apt-get"""
+__revision__ = '$Revision$'
+
+# suppress apt API warnings
+import warnings
+warnings.filterwarnings("ignore", "apt API not stable yet",
+ FutureWarning)
+warnings.filterwarnings("ignore", "Accessed deprecated property Package.installedVersion, please see the Version class for alternatives.", DeprecationWarning)
+warnings.filterwarnings("ignore", "Accessed deprecated property Package.candidateVersion, please see the Version class for alternatives.", DeprecationWarning)
+warnings.filterwarnings("ignore", "Deprecated, please use 'is_installed' instead", DeprecationWarning)
+warnings.filterwarnings("ignore", "Attribute 'IsUpgradable' of the 'apt_pkg.DepCache' object is deprecated, use 'is_upgradable' instead.", DeprecationWarning)
+import apt.cache
+import os
+
+import Bcfg2.Client.Tools
+import Bcfg2.Options
+
+# Options for tool locations
+opts = {'install_path': Bcfg2.Options.CLIENT_APT_TOOLS_INSTALL_PATH,
+ 'var_path': Bcfg2.Options.CLIENT_APT_TOOLS_VAR_PATH,
+ 'etc_path': Bcfg2.Options.CLIENT_SYSTEM_ETC_PATH}
+setup = Bcfg2.Options.OptionParser(opts)
+setup.parse([])
+install_path = setup['install_path']
+var_path = setup['var_path']
+etc_path = setup['etc_path']
+DEBSUMS = '%s/bin/debsums' % install_path
+APTGET = '%s/bin/apt-get' % install_path
+DPKG = '%s/bin/dpkg' % install_path
+
+class APT(Bcfg2.Client.Tools.Tool):
+ """The Debian toolset implements package and service operations and inherits
+ the rest from Toolset.Toolset.
+
+ """
+ name = 'APT'
+ __execs__ = [DEBSUMS, APTGET, DPKG]
+ __handles__ = [('Package', 'deb')]
+ __req__ = {'Package': ['name', 'version']}
+ pkgcmd = '%s ' % APTGET + \
+ '-o DPkg::Options::=--force-overwrite ' + \
+ '-o DPkg::Options::=--force-confold ' + \
+ '-o DPkg::Options::=--force-confmiss ' + \
+ '--reinstall ' + \
+ '-q=2 ' + \
+ '--force-yes ' + \
+ '-y install %s'
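+    # Illustrative note (editorial comment, not from upstream): Install() below
+    # fills the single remaining %s with space-joined "name=version" arguments,
+    # so the command run looks roughly like
+    #   <install_path>/bin/apt-get -o DPkg::Options::=... --reinstall -q=2 \
+    #       --force-yes -y install foo=1.2-1 bar=2.0-3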
+
+ def __init__(self, logger, setup, config):
+ Bcfg2.Client.Tools.Tool.__init__(self, logger, setup, config)
+ self.__important__ = self.__important__ + \
+ ["%s/cache/debconf/config.dat" % var_path,
+ "%s/cache/debconf/templates.dat" % var_path,
+ '/etc/passwd', '/etc/group',
+ '%s/apt/apt.conf' % etc_path,
+ '%s/dpkg/dpkg.cfg' % etc_path] + \
+ [entry.get('name') for struct in config for entry in struct \
+ if entry.tag == 'Path' and \
+ entry.get('name').startswith('%s/apt/sources.list' % etc_path)]
+ self.nonexistent = [entry.get('name') for struct in config for entry in struct \
+ if entry.tag == 'Path' and entry.get('type') == 'nonexistent']
+ os.environ["DEBIAN_FRONTEND"] = 'noninteractive'
+ self.actions = {}
+ if self.setup['kevlar'] and not self.setup['dryrun']:
+ self.cmd.run("%s --force-confold --configure --pending" % DPKG)
+ self.cmd.run("%s clean" % APTGET)
+ self.pkg_cache = apt.cache.Cache()
+ self.pkg_cache.update()
+ self.pkg_cache = apt.cache.Cache()
+
+ def FindExtra(self):
+ """Find extra packages."""
+ packages = [entry.get('name') for entry in self.getSupportedEntries()]
+ extras = [(p.name, p.installedVersion) for p in self.pkg_cache
+ if p.isInstalled and p.name not in packages]
+ return [Bcfg2.Client.XML.Element('Package', name=name, \
+ type='deb', version=version) \
+ for (name, version) in extras]
+
+ def VerifyDebsums(self, entry, modlist):
+ output = self.cmd.run("%s -as %s" % (DEBSUMS, entry.get('name')))[1]
+ if len(output) == 1 and "no md5sums for" in output[0]:
+ self.logger.info("Package %s has no md5sums. Cannot verify" % \
+ entry.get('name'))
+ entry.set('qtext', "Reinstall Package %s-%s to setup md5sums? (y/N) " \
+ % (entry.get('name'), entry.get('version')))
+ return False
+ files = []
+ for item in output:
+ if "checksum mismatch" in item:
+ files.append(item.split()[-1])
+ elif "changed file" in item:
+ files.append(item.split()[3])
+ elif "can't open" in item:
+ if item.split()[5] not in self.nonexistent:
+ files.append(item.split()[5])
+ elif "missing file" in item and \
+ item.split()[3] in self.nonexistent:
+ # these files should not exist
+ continue
+ elif "is not installed" in item or "missing file" in item:
+ self.logger.error("Package %s is not fully installed" \
+ % entry.get('name'))
+ else:
+ self.logger.error("Got Unsupported pattern %s from debsums" \
+ % item)
+ files.append(item)
+        # check whether debsums reported any problem files
+        if files:
+            # if problem files were found, resolve symlinks in the modlist so
+            # the comparison below is against canonical paths
+            modlist = [os.path.realpath(filename) for filename in modlist]
+ bad = [filename for filename in files if filename not in modlist]
+ if bad:
+ self.logger.info("Package %s failed validation. Bad files are:" % \
+ entry.get('name'))
+ self.logger.info(bad)
+ entry.set('qtext',
+ "Reinstall Package %s-%s to fix failing files? (y/N) " % \
+ (entry.get('name'), entry.get('version')))
+ return False
+ return True
+
+ def VerifyPackage(self, entry, modlist, checksums=True):
+ """Verify package for entry."""
+ if not 'version' in entry.attrib:
+ self.logger.info("Cannot verify unversioned package %s" %
+ (entry.attrib['name']))
+ return False
+ pkgname = entry.get('name')
+ if not self.pkg_cache.has_key(pkgname) \
+ or not self.pkg_cache[pkgname].isInstalled:
+ self.logger.info("Package %s not installed" % (entry.get('name')))
+ entry.set('current_exists', 'false')
+ return False
+
+ pkg = self.pkg_cache[pkgname]
+ if entry.get('version') == 'auto':
+ if self.pkg_cache._depcache.IsUpgradable(pkg._pkg):
+ desiredVersion = pkg.candidateVersion
+ else:
+ desiredVersion = pkg.installedVersion
+ elif entry.get('version') == 'any':
+ desiredVersion = pkg.installedVersion
+ else:
+ desiredVersion = entry.get('version')
+ if desiredVersion != pkg.installedVersion:
+ entry.set('current_version', pkg.installedVersion)
+ entry.set('qtext', "Modify Package %s (%s -> %s)? (y/N) " % \
+ (entry.get('name'), entry.get('current_version'),
+ desiredVersion))
+ return False
+ else:
+ # version matches
+ if not self.setup['quick'] and entry.get('verify', 'true') == 'true' \
+ and checksums:
+ pkgsums = self.VerifyDebsums(entry, modlist)
+ return pkgsums
+ return True
+
+ def Remove(self, packages):
+ """Deal with extra configuration detected."""
+ pkgnames = " ".join([pkg.get('name') for pkg in packages])
+ self.pkg_cache = apt.cache.Cache()
+ if len(packages) > 0:
+ self.logger.info('Removing packages:')
+ self.logger.info(pkgnames)
+ for pkg in pkgnames.split(" "):
+ try:
+ self.pkg_cache[pkg].markDelete(purge=True)
+ except:
+ self.pkg_cache[pkg].markDelete()
+ try:
+ self.pkg_cache.commit()
+ except SystemExit:
+ # thank you python-apt 0.6
+ pass
+ self.pkg_cache = apt.cache.Cache()
+ self.modified += packages
+ self.extra = self.FindExtra()
+
+ def Install(self, packages, states):
+ # it looks like you can't install arbitrary versions of software
+ # out of the pkg cache, we will still need to call apt-get
+ ipkgs = []
+ bad_pkgs = []
+ for pkg in packages:
+ if not self.pkg_cache.has_key(pkg.get('name')):
+ self.logger.error("APT has no information about package %s" % (pkg.get('name')))
+ continue
+ if pkg.get('version') in ['auto', 'any']:
+ ipkgs.append("%s=%s" % (pkg.get('name'),
+ self.pkg_cache[pkg.get('name')].candidateVersion))
+ continue
+ avail_vers = [x.VerStr for x in \
+ self.pkg_cache[pkg.get('name')]._pkg.VersionList]
+ if pkg.get('version') in avail_vers:
+ ipkgs.append("%s=%s" % (pkg.get('name'), pkg.get('version')))
+ continue
+ else:
+ self.logger.error("Package %s: desired version %s not in %s" \
+ % (pkg.get('name'), pkg.get('version'),
+ avail_vers))
+ bad_pkgs.append(pkg.get('name'))
+ if bad_pkgs:
+ self.logger.error("Cannot find correct versions of packages:")
+ self.logger.error(bad_pkgs)
+ if not ipkgs:
+ return
+ rc = self.cmd.run(self.pkgcmd % (" ".join(ipkgs)))[0]
+ if rc:
+ self.logger.error("APT command failed")
+ self.pkg_cache = apt.cache.Cache()
+ self.extra = self.FindExtra()
+ for package in packages:
+ states[package] = self.VerifyPackage(package, [], checksums=False)
+ if states[package]:
+ self.modified.append(package)
diff --git a/build/lib/Bcfg2/Client/Tools/Action.py b/build/lib/Bcfg2/Client/Tools/Action.py
new file mode 100644
index 000000000..452788f94
--- /dev/null
+++ b/build/lib/Bcfg2/Client/Tools/Action.py
@@ -0,0 +1,83 @@
+"""Action driver"""
+__revision__ = '$Revision$'
+
+import Bcfg2.Client.Tools
+
+"""
+<Action timing='pre|post|both'
+ name='name'
+ command='cmd text'
+ when='always|modified'
+ status='ignore|check'/>
+<PostInstall name='foo'/>
+ => <Action timing='post'
+ when='modified'
+ name='n'
+ command='foo'
+ status='ignore'/>
+"""
+
+
+class Action(Bcfg2.Client.Tools.Tool):
+ """Implement Actions"""
+ name = 'Action'
+ __handles__ = [('PostInstall', None), ('Action', None)]
+ __req__ = {'PostInstall': ['name'],
+ 'Action': ['name', 'timing', 'when', 'command', 'status']}
+
+ def RunAction(self, entry):
+ """This method handles command execution and status return."""
+ if not self.setup['dryrun']:
+ if self.setup['interactive']:
+ prompt = ('Run Action %s, %s: (y/N): ' %
+ (entry.get('name'), entry.get('command')))
+ if raw_input(prompt) not in ['y', 'Y']:
+ return False
+ if self.setup['servicemode'] == 'build':
+ if entry.get('build', 'true') == 'false':
+ self.logger.debug("Action: Deferring execution of %s due to build mode" % (entry.get('command')))
+ return False
+ self.logger.debug("Running Action %s" % (entry.get('name')))
+ rc = self.cmd.run(entry.get('command'))[0]
+ self.logger.debug("Action: %s got rc %s" % (entry.get('command'), rc))
+ entry.set('rc', str(rc))
+ if entry.get('status', 'check') == 'ignore':
+ return True
+ else:
+ return rc == 0
+ else:
+ self.logger.debug("In dryrun mode: not running action:\n %s" %
+ (entry.get('name')))
+ return False
+
+ def VerifyAction(self, dummy, _):
+ """Actions always verify true."""
+ return True
+
+ def VerifyPostInstall(self, dummy, _):
+ """Actions always verify true."""
+ return True
+
+ def InstallAction(self, entry):
+ """Run actions as pre-checks for bundle installation."""
+ if entry.get('timing') != 'post':
+ return self.RunAction(entry)
+ return True
+
+ def InstallPostInstall(self, entry):
+        return self.InstallAction(entry)
+
+ def BundleUpdated(self, bundle, states):
+ """Run postinstalls when bundles have been updated."""
+ for postinst in bundle.findall("PostInstall"):
+ self.cmd.run(postinst.get('name'))
+ for action in bundle.findall("Action"):
+ if action.get('timing') in ['post', 'both']:
+ states[action] = self.RunAction(action)
+
+ def BundleNotUpdated(self, bundle, states):
+ """Run Actions when bundles have not been updated."""
+ for action in bundle.findall("Action"):
+ if action.get('timing') in ['post', 'both'] and \
+ action.get('when') != 'modified':
+ states[action] = self.RunAction(action)
diff --git a/build/lib/Bcfg2/Client/Tools/Blast.py b/build/lib/Bcfg2/Client/Tools/Blast.py
new file mode 100644
index 000000000..4f2891fd6
--- /dev/null
+++ b/build/lib/Bcfg2/Client/Tools/Blast.py
@@ -0,0 +1,34 @@
+# This is the bcfg2 support for blastwave packages (pkg-get)
+"""This provides bcfg2 support for blastwave"""
+__revision__ = '$Revision$'
+
+import tempfile
+import Bcfg2.Client.Tools.SYSV
+
+
+class Blast(Bcfg2.Client.Tools.SYSV.SYSV):
+ """Support for Blastwave packages"""
+ pkgtype = 'blast'
+ pkgtool = ("/opt/csw/bin/pkg-get install %s", ("%s", ["bname"]))
+ name = 'Blast'
+ __execs__ = ['/opt/csw/bin/pkg-get', "/usr/bin/pkginfo"]
+ __handles__ = [('Package', 'blast')]
+ __ireq__ = {'Package': ['name', 'version', 'bname']}
+
+ def __init__(self, logger, setup, config):
+        # don't use the SYSV constructor
+ Bcfg2.Client.Tools.PkgTool.__init__(self, logger, setup, config)
+ noaskfile = tempfile.NamedTemporaryFile()
+ self.noaskname = noaskfile.name
+ try:
+ noaskfile.write(Bcfg2.Client.Tools.SYSV.noask)
+ except:
+ pass
+
+ # VerifyPackage comes from Bcfg2.Client.Tools.SYSV
+ # Install comes from Bcfg2.Client.Tools.PkgTool
+ # Extra comes from Bcfg2.Client.Tools.Tool
+ # Remove comes from Bcfg2.Client.Tools.SYSV
+ def FindExtraPackages(self):
+ """Pass through to null FindExtra call."""
+ return []
diff --git a/build/lib/Bcfg2/Client/Tools/Chkconfig.py b/build/lib/Bcfg2/Client/Tools/Chkconfig.py
new file mode 100644
index 000000000..b7227ec3d
--- /dev/null
+++ b/build/lib/Bcfg2/Client/Tools/Chkconfig.py
@@ -0,0 +1,101 @@
+# This is the bcfg2 support for chkconfig
+# $Id$
+
+"""This is chkconfig support."""
+__revision__ = '$Revision$'
+
+import Bcfg2.Client.Tools
+import Bcfg2.Client.XML
+
+
+class Chkconfig(Bcfg2.Client.Tools.SvcTool):
+ """Chkconfig support for Bcfg2."""
+ name = 'Chkconfig'
+ __execs__ = ['/sbin/chkconfig']
+ __handles__ = [('Service', 'chkconfig')]
+ __req__ = {'Service': ['name', 'status']}
+
+ def get_svc_command(self, service, action):
+ return "/sbin/service %s %s" % (service.get('name'), action)
+
+ def VerifyService(self, entry, _):
+ """Verify Service status for entry."""
+ try:
+ cmd = "/sbin/chkconfig --list %s " % (entry.get('name'))
+ raw = self.cmd.run(cmd)[1]
+ patterns = ["error reading information", "unknown service"]
+ srvdata = [line.split() for line in raw for pattern in patterns \
+ if pattern not in line][0]
+ except IndexError:
+            # Occurs when no lines are returned (service not installed)
+ entry.set('current_status', 'off')
+ return False
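+        # Illustrative note (editorial comment, not from upstream): for an
+        # init.d service, srvdata looks like ['sshd', '0:off', '1:off', '2:on',
+        # '3:on', '4:on', '5:on', '6:off']; xinetd-managed services yield a
+        # two-field line, which is why len(srvdata) == 2 is handled separately.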
+ if len(srvdata) == 2:
+ # This is an xinetd service
+ if entry.get('status') == srvdata[1]:
+ return True
+ else:
+ entry.set('current_status', srvdata[1])
+ return False
+
+ try:
+ onlevels = [level.split(':')[0] for level in srvdata[1:] \
+ if level.split(':')[1] == 'on']
+ except IndexError:
+ onlevels = []
+
+ if entry.get('status') == 'on':
+ status = (len(onlevels) > 0)
+ command = 'start'
+ else:
+ status = (len(onlevels) == 0)
+ command = 'stop'
+
+ if entry.get('mode', 'default') == 'supervised':
+ # turn on or off the service in supervised mode
+ pstatus = self.cmd.run('/sbin/service %s status' % \
+ entry.get('name'))[0]
+ needs_modification = ((command == 'start' and pstatus) or \
+ (command == 'stop' and not pstatus))
+ if not(self.setup.get('dryrun')) and needs_modification:
+ self.cmd.run('/sbin/service %s %s' % (entry.get('name'),
+ command))
+ pstatus = self.cmd.run('/sbin/service %s status' % \
+ entry.get('name'))[0]
+
+ # chkconfig/init.d service
+ if entry.get('status') == 'on':
+ status = status and not pstatus
+
+ if not status:
+ if entry.get('status') == 'on':
+ entry.set('current_status', 'off')
+ else:
+ entry.set('current_status', 'on')
+ return status
+
+ def InstallService(self, entry):
+ """Install Service entry."""
+ rcmd = "/sbin/chkconfig %s %s"
+ self.cmd.run("/sbin/chkconfig --add %s"%(entry.attrib['name']))
+ self.logger.info("Installing Service %s" % (entry.get('name')))
+ pass1 = True
+ if entry.get('status') == 'off':
+ rc = self.cmd.run(rcmd % (entry.get('name'),
+ entry.get('status')) + \
+ " --level 0123456")[0]
+ pass1 = rc == 0
+ rc = self.cmd.run(rcmd % (entry.get('name'), entry.get('status')))[0]
+ return pass1 and rc == 0
+
+ def FindExtra(self):
+ """Locate extra chkconfig Services."""
+ allsrv = [line.split()[0] for line in \
+ self.cmd.run("/sbin/chkconfig --list|grep :on")[1]]
+ self.logger.debug('Found active services:')
+ self.logger.debug(allsrv)
+ specified = [srv.get('name') for srv in self.getSupportedEntries()]
+ return [Bcfg2.Client.XML.Element('Service',
+ type='chkconfig',
+ name=name) \
+ for name in allsrv if name not in specified]
diff --git a/build/lib/Bcfg2/Client/Tools/DebInit.py b/build/lib/Bcfg2/Client/Tools/DebInit.py
new file mode 100644
index 000000000..aee8ffd65
--- /dev/null
+++ b/build/lib/Bcfg2/Client/Tools/DebInit.py
@@ -0,0 +1,117 @@
+"""Debian Init Support for Bcfg2"""
+__revision__ = '$Revision$'
+
+import glob
+import os
+import re
+import Bcfg2.Client.Tools
+
+# Debian squeeze and beyond use a dependency-based boot sequence
+DEBIAN_OLD_STYLE_BOOT_SEQUENCE = (
+ 'etch', '4.0',
+ 'lenny', '5.0', '5.0.1', '5.0.2', '5.0.3', '5.0.4', '5.0.4', '5.0.5',
+ )
+
+
+class DebInit(Bcfg2.Client.Tools.SvcTool):
+ """Debian Service Support for Bcfg2."""
+ name = 'DebInit'
+ __execs__ = ['/usr/sbin/update-rc.d', '/usr/sbin/invoke-rc.d']
+ __handles__ = [('Service', 'deb')]
+ __req__ = {'Service': ['name', 'status']}
+ svcre = re.compile("/etc/.*/(?P<action>[SK])(?P<sequence>\d+)(?P<name>\S+)")
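+    # Illustrative note (editorial comment, not from upstream): svcre maps a
+    # link such as "/etc/rc2.d/S20ssh" to action='S', sequence='20', name='ssh'.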
+
+ # implement entry (Verify|Install) ops
+ def VerifyService(self, entry, _):
+ """Verify Service status for entry."""
+ rawfiles = glob.glob("/etc/rc*.d/[SK]*%s" % (entry.get('name')))
+ files = []
+
+ try:
+ deb_version = open('/etc/debian_version', 'r').read().split('/', 1)[0]
+ except IOError:
+ deb_version = 'unknown'
+
+ if entry.get('sequence'):
+ if (deb_version in DEBIAN_OLD_STYLE_BOOT_SEQUENCE):
+ start_sequence = int(entry.get('sequence'))
+ kill_sequence = 100 - start_sequence
+ else:
+ self.logger.warning("Your debian version boot sequence is "
+ "dependency based \"sequence\" attribute "
+ "will be ignored.")
+ else:
+ start_sequence = None
+
+ for filename in rawfiles:
+ match = self.svcre.match(filename)
+ if not match:
+ self.logger.error("Failed to match file: %s" % filename)
+ continue
+ if match.group('name') == entry.get('name'):
+ files.append(filename)
+ if entry.get('status') == 'off':
+ if files:
+ entry.set('current_status', 'on')
+ return False
+ else:
+ return True
+ else:
+ if files:
+ if start_sequence:
+ for filename in files:
+ match = self.svcre.match(filename)
+ file_sequence = int(match.group('sequence'))
+ if match.group('action') == 'S' and file_sequence != start_sequence:
+ return False
+ if match.group('action') == 'K' and file_sequence != kill_sequence:
+ return False
+ return True
+ else:
+ entry.set('current_status', 'off')
+ return False
+
+ def InstallService(self, entry):
+ """Install Service for entry."""
+ self.logger.info("Installing Service %s" % (entry.get('name')))
+ try:
+ os.stat('/etc/init.d/%s' % entry.get('name'))
+ except OSError:
+ self.logger.debug("Init script for service %s does not exist" % entry.get('name'))
+ return False
+
+ if entry.get('status') == 'off':
+ self.cmd.run("/usr/sbin/invoke-rc.d %s stop" % (entry.get('name')))
+ cmdrc = self.cmd.run("/usr/sbin/update-rc.d -f %s remove" % entry.get('name'))[0]
+ else:
+ command = "/usr/sbin/update-rc.d %s defaults" % (entry.get('name'))
+ if entry.get('sequence'):
+ cmdrc = self.cmd.run("/usr/sbin/update-rc.d -f %s remove" % entry.get('name'))[0]
+ if cmdrc != 0:
+ return False
+ start_sequence = int(entry.get('sequence'))
+ kill_sequence = 100 - start_sequence
+ command = "%s %d %d" % (command, start_sequence, kill_sequence)
+ cmdrc = self.cmd.run(command)[0]
+ return cmdrc == 0
+
+ def FindExtra(self):
+ """Find Extra Debian Service entries."""
+ specified = [entry.get('name') for entry in self.getSupportedEntries()]
+ extra = []
+ for name in [self.svcre.match(fname).group('name') for fname in
+ glob.glob("/etc/rc[12345].d/S*") \
+ if self.svcre.match(fname).group('name') not in specified]:
+ if name not in extra:
+ extra.append(name)
+ return [Bcfg2.Client.XML.Element('Service', name=name, type='deb') for name \
+ in extra]
+
+ def Remove(self, _):
+ """Remove extra service entries."""
+ # Extra service removal is nonsensical
+ # Extra services need to be reflected in the config
+ return
+
+ def get_svc_command(self, service, action):
+ return '/usr/sbin/invoke-rc.d %s %s' % (service.get('name'), action)
diff --git a/build/lib/Bcfg2/Client/Tools/Encap.py b/build/lib/Bcfg2/Client/Tools/Encap.py
new file mode 100644
index 000000000..92062a750
--- /dev/null
+++ b/build/lib/Bcfg2/Client/Tools/Encap.py
@@ -0,0 +1,55 @@
+"""Bcfg2 Support for Encap Packages"""
+
+__revision__ = '$Revision$'
+
+import glob
+import re
+import Bcfg2.Client.Tools
+
+class Encap(Bcfg2.Client.Tools.PkgTool):
+ """Support for Encap packages."""
+ name = 'Encap'
+ __execs__ = ['/usr/local/bin/epkg']
+ __handles__ = [('Package', 'encap')]
+ __req__ = {'Package': ['version', 'url']}
+ pkgtype = 'encap'
+ pkgtool = ("/usr/local/bin/epkg -l -f -q %s", ("%s", ["url"]))
+ splitter = re.compile('.*/(?P<name>[\w-]+)\-(?P<version>[\w\.+-]+)')
+
+# If you define self.pkgtool and self.pkgtype, the inherited PkgTool.Install
+# method will do the installation work for you.
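+# Illustrative note (editorial comment, not from upstream): the splitter regex
+# maps an encap directory such as "/usr/local/encap/foo-1.2.3" to name='foo',
+# version='1.2.3'.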
+
+ def RefreshPackages(self):
+ """Try to find encap packages."""
+ self.installed = {}
+ for pkg in glob.glob("/usr/local/encap/*"):
+ match = self.splitter.match(pkg)
+ if match:
+ self.installed[match.group('name')] = match.group('version')
+ else:
+ print("Failed to split name %s" % pkg)
+ self.logger.debug("Encap.py: RefreshPackages: self.installed.keys() are:")
+ self.logger.debug("%s" % list(self.installed.keys()))
+
+ def VerifyPackage(self, entry, _):
+ """Verify Package status for entry."""
+ if not entry.get('version'):
+ self.logger.info("Insufficient information of Package %s; cannot Verify" % entry.get('name'))
+ return False
+ cmdrc = self.cmd.run("/usr/local/bin/epkg -q -S -k %s-%s >/dev/null" %
+ (entry.get('name'), entry.get('version')))[0]
+ if cmdrc != 0:
+ self.logger.debug("Package %s version incorrect" % entry.get('name'))
+ else:
+ return True
+ return False
+
+ # Can use the FindExtraPackages method from Bcfg2.Client.Tools.PkgTool
+
+ def RemovePackages(self, packages):
+ """Deal with extra configuration detected."""
+ names = " ".join([pkg.get('name') for pkg in packages])
+ self.logger.info("Removing packages: %s" % (names))
+ self.cmd.run("/usr/local/bin/epkg -l -q -r %s" % (names))
+ self.RefreshPackages()
+ self.extra = self.FindExtraPackages()
diff --git a/build/lib/Bcfg2/Client/Tools/FreeBSDInit.py b/build/lib/Bcfg2/Client/Tools/FreeBSDInit.py
new file mode 100644
index 000000000..10f0f2e93
--- /dev/null
+++ b/build/lib/Bcfg2/Client/Tools/FreeBSDInit.py
@@ -0,0 +1,27 @@
+"""FreeBSD Init Support for Bcfg2."""
+__revision__ = '$Rev$'
+
+# TODO
+# - hardcoded path to ports rc.d
+# - doesn't know about /etc/rc.d/
+
+import os
+import Bcfg2.Client.Tools
+
+
+class FreeBSDInit(Bcfg2.Client.Tools.SvcTool):
+ """FreeBSD service support for Bcfg2."""
+ name = 'FreeBSDInit'
+ __handles__ = [('Service', 'freebsd')]
+ __req__ = {'Service': ['name', 'status']}
+
+ def __init__(self, logger, cfg, setup):
+ Bcfg2.Client.Tools.Tool.__init__(self, logger, cfg, setup)
+ if os.uname()[0] != 'FreeBSD':
+ raise Bcfg2.Client.Tools.toolInstantiationError
+
+ def VerifyService(self, entry, _):
+ return True
+
+ def get_svc_command(self, service, action):
+ return "/usr/local/etc/rc.d/%s %s" % (service.get('name'), action)
diff --git a/build/lib/Bcfg2/Client/Tools/FreeBSDPackage.py b/build/lib/Bcfg2/Client/Tools/FreeBSDPackage.py
new file mode 100644
index 000000000..04c05adaa
--- /dev/null
+++ b/build/lib/Bcfg2/Client/Tools/FreeBSDPackage.py
@@ -0,0 +1,47 @@
+"""This is the Bcfg2 tool for the FreeBSD package system."""
+__revision__ = '$Rev$'
+
+# TODO
+# - actual package installation
+# - verification of package files
+
+import re
+import Bcfg2.Client.Tools
+
+
+class FreeBSDPackage(Bcfg2.Client.Tools.PkgTool):
+ """The FreeBSD toolset implements package operations and inherits
+ the rest from Toolset.Toolset."""
+ name = 'FreeBSDPackage'
+ __execs__ = ['/usr/sbin/pkg_add', '/usr/sbin/pkg_info']
+ __handles__ = [('Package', 'freebsdpkg')]
+ __req__ = {'Package': ['name', 'version']}
+ pkgtool = ('/usr/sbin/pkg_add -r %s', ('%s-%s', ['name', 'version']))
+ pkgtype = 'freebsdpkg'
+
+ def RefreshPackages(self):
+ self.installed = {}
+ packages = self.cmd.run("/usr/sbin/pkg_info -a -E")[1]
+ pattern = re.compile('(.*)-(\d.*)')
+ for pkg in packages:
+ if pattern.match(pkg):
+ name = pattern.match(pkg).group(1)
+ version = pattern.match(pkg).group(2)
+ self.installed[name] = version
+
+ def VerifyPackage(self, entry, modlist):
+ if not 'version' in entry.attrib:
+ self.logger.info("Cannot verify unversioned package %s" %
+ (entry.attrib['name']))
+ return False
+ if entry.attrib['name'] in self.installed:
+ if self.installed[entry.attrib['name']] == entry.attrib['version']:
+                # TODO: verification
+ return True
+ else:
+ entry.set('current_version', self.installed[entry.get('name')])
+ return False
+
+ self.logger.info("Package %s not installed" % (entry.get('name')))
+ entry.set('current_exists', 'false')
+ return False
diff --git a/build/lib/Bcfg2/Client/Tools/IPS.py b/build/lib/Bcfg2/Client/Tools/IPS.py
new file mode 100644
index 000000000..9afd23143
--- /dev/null
+++ b/build/lib/Bcfg2/Client/Tools/IPS.py
@@ -0,0 +1,61 @@
+"""This is the Bcfg2 support for OpenSolaris packages."""
+__revision__ = '$Revision$'
+
+import pkg.client.image as image
+import pkg.client.progress as progress
+
+import Bcfg2.Client.Tools
+
+
+class IPS(Bcfg2.Client.Tools.PkgTool):
+ """The IPS driver implements OpenSolaris package operations."""
+ name = 'IPS'
+ pkgtype = 'ips'
+ conflicts = ['SYSV']
+ __handles__ = [('Package', 'ips')]
+ __req__ = {'Package': ['name', 'version']}
+ pkgtool = ('pkg install --no-refresh %s', ('%s', ['name']))
+
+ def __init__(self, logger, setup, cfg):
+ self.installed = {}
+ self.pending_upgrades = set()
+ self.image = image.Image()
+ self.image.find_root('/', False)
+ self.image.load_config()
+ Bcfg2.Client.Tools.PkgTool.__init__(self, logger, setup, cfg)
+ self.cfg = cfg
+
+ def RefreshPackages(self):
+ self.installed = dict()
+ self.image.history.operation_name = "list"
+ self.image.load_catalogs(progress.NullProgressTracker())
+ for (pfmri, pinfo) in self.image.inventory([], False):
+ pname = pfmri.pkg_name
+ pversion = pfmri.version.get_short_version()
+ self.installed[pname] = pversion
+ if pinfo['upgradable']:
+ self.pending_upgrades.add(pname)
+
+ def VerifyPackage(self, entry, modlist):
+ """Verify package for entry."""
+ pname = entry.get('name')
+ if not 'version' in entry.attrib:
+ self.logger.info("Cannot verify unversioned package %s" % (pname))
+ return False
+ if pname not in self.installed:
+ self.logger.debug("IPS: Package %s not installed" % pname)
+ return False
+ if entry.get('version') == 'auto':
+ if pname in self.pending_upgrades:
+ return False
+ elif entry.get('version') == 'any':
+ pass
+ else:
+ if entry.get('version') != self.installed[pname]:
+ self.logger.debug("IPS: Package %s: have %s want %s" \
+ % (pname, self.installed[pname],
+ entry.get('version')))
+ return False
+
+ # need to implement pkg chksum validation
+ return True
diff --git a/build/lib/Bcfg2/Client/Tools/MacPorts.py b/build/lib/Bcfg2/Client/Tools/MacPorts.py
new file mode 100644
index 000000000..23b536451
--- /dev/null
+++ b/build/lib/Bcfg2/Client/Tools/MacPorts.py
@@ -0,0 +1,60 @@
+"""This provides Bcfg2 support for macports packages."""
+__revision__ = '$Revision$'
+
+import Bcfg2.Client.Tools
+
+
+class MacPorts(Bcfg2.Client.Tools.PkgTool):
+ """macports package support."""
+ name = 'MacPorts'
+ __execs__ = ["/opt/local/bin/port"]
+ __handles__ = [('Package', 'macport')]
+ __req__ = {'Package': ['name', 'version']}
+ pkgtype = 'macport'
+ pkgtool = ("/opt/local/bin/port install %s")
+
+ def __init__(self, logger, setup, config):
+ Bcfg2.Client.Tools.PkgTool.__init__(self, logger, setup, config)
+ self.installed = {}
+ self.RefreshPackages()
+
+ def RefreshPackages(self):
+ """Refresh memory hashes of packages."""
+ pkgcache = self.cmd.run("/opt/local/bin/port installed")[1]
+ self.installed = {}
+ for pkg in pkgcache:
+ if pkg.startswith("The following ports are currently installed"):
+ continue
+ pkgname = pkg.split('@')[0].strip()
+ version = pkg.split('@')[1].split(' ')[0]
+ self.logger.info(" pkgname: %s\n version: %s" % (pkgname, version))
+ self.installed[pkgname] = version
+
+ def VerifyPackage(self, entry, modlist):
+ """Verify Package status for entry."""
+ if not 'version' in entry.attrib:
+ self.logger.info("Cannot verify unversioned package %s" %
+ (entry.attrib['name']))
+ return False
+
+ if entry.attrib['name'] in self.installed:
+ if self.installed[entry.attrib['name']] == entry.attrib['version']:
+ #if not self.setup['quick'] and \
+ # entry.get('verify', 'true') == 'true':
+ #FIXME: We should be able to check this once
+ # http://trac.macports.org/ticket/15709 is implemented
+ return True
+ else:
+ entry.set('current_version', self.installed[entry.get('name')])
+ return False
+ entry.set('current_exists', 'false')
+ return False
+
+ def RemovePackages(self, packages):
+ """Remove extra packages."""
+ names = [pkg.get('name') for pkg in packages]
+ self.logger.info("Removing packages: %s" % " ".join(names))
+ self.cmd.run("/opt/local/bin/port uninstall %s" % \
+ " ".join(names))
+ self.RefreshPackages()
+ self.extra = self.FindExtraPackages()
diff --git a/build/lib/Bcfg2/Client/Tools/POSIX.py b/build/lib/Bcfg2/Client/Tools/POSIX.py
new file mode 100644
index 000000000..d2611130c
--- /dev/null
+++ b/build/lib/Bcfg2/Client/Tools/POSIX.py
@@ -0,0 +1,766 @@
+"""All POSIX Type client support for Bcfg2."""
+__revision__ = '$Revision$'
+
+from stat import S_ISVTX, S_ISGID, S_ISUID, S_IXUSR, S_IWUSR, S_IRUSR, S_IXGRP
+from stat import S_IWGRP, S_IRGRP, S_IXOTH, S_IWOTH, S_IROTH, ST_MODE, S_ISDIR
+from stat import S_IFREG, ST_UID, ST_GID, S_ISREG, S_IFDIR, S_ISLNK, ST_MTIME
+import binascii
+from datetime import datetime
+import difflib
+import errno
+import grp
+import logging
+import os
+import pwd
+import shutil
+import stat
+import time
+import Bcfg2.Client.Tools
+import Bcfg2.Options
+from Bcfg2.Client import XML
+
+log = logging.getLogger('posix')
+
+# map between dev_type attribute and stat constants
+device_map = {'block': stat.S_IFBLK,
+ 'char': stat.S_IFCHR,
+ 'fifo': stat.S_IFIFO}
+
+
+def calcPerms(initial, perms):
+ """This compares ondisk permissions with specified ones."""
+ pdisp = [{1:S_ISVTX, 2:S_ISGID, 4:S_ISUID},
+ {1:S_IXUSR, 2:S_IWUSR, 4:S_IRUSR},
+ {1:S_IXGRP, 2:S_IWGRP, 4:S_IRGRP},
+ {1:S_IXOTH, 2:S_IWOTH, 4:S_IROTH}]
+ tempperms = initial
+ if len(perms) == 3:
+ perms = '0%s' % (perms)
+ pdigits = [int(perms[digit]) for digit in range(4)]
+ for index in range(4):
+ for (num, perm) in list(pdisp[index].items()):
+ if pdigits[index] & num:
+ tempperms |= perm
+ return tempperms
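+
+# Illustrative example (editorial comment, not from upstream):
+# calcPerms(stat.S_IFDIR, '0755') ORs the rwxr-xr-x bits (S_IRUSR|S_IWUSR|
+# S_IXUSR, S_IRGRP|S_IXGRP, S_IROTH|S_IXOTH) into the S_IFDIR type bits.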
+
+
+def normGid(entry):
+ """
+ This takes a group name or gid and
+ returns the corresponding gid or False.
+ """
+ try:
+ try:
+ return int(entry.get('group'))
+ except:
+ return int(grp.getgrnam(entry.get('group'))[2])
+ except (OSError, KeyError):
+ log.error('GID normalization failed for %s' % (entry.get('name')))
+ return False
+
+
+def normUid(entry):
+ """
+ This takes a user name or uid and
+ returns the corresponding uid or False.
+ """
+ try:
+ try:
+ return int(entry.get('owner'))
+ except:
+ return int(pwd.getpwnam(entry.get('owner'))[2])
+ except (OSError, KeyError):
+ log.error('UID normalization failed for %s' % (entry.get('name')))
+ return False
+
+
+class POSIX(Bcfg2.Client.Tools.Tool):
+ """POSIX File support code."""
+ name = 'POSIX'
+ __handles__ = [('Path', 'device'),
+ ('Path', 'directory'),
+ ('Path', 'file'),
+ ('Path', 'hardlink'),
+ ('Path', 'nonexistent'),
+ ('Path', 'permissions'),
+ ('Path', 'symlink')]
+ __req__ = {'Path': ['name', 'type']}
+
+ # grab paranoid options from /etc/bcfg2.conf
+ opts = {'ppath': Bcfg2.Options.PARANOID_PATH,
+ 'max_copies': Bcfg2.Options.PARANOID_MAX_COPIES}
+ setup = Bcfg2.Options.OptionParser(opts)
+ setup.parse([])
+ ppath = setup['ppath']
+ max_copies = setup['max_copies']
+
+ def canInstall(self, entry):
+ """Check if entry is complete for installation."""
+ if Bcfg2.Client.Tools.Tool.canInstall(self, entry):
+ if (entry.tag,
+ entry.get('type'),
+ entry.text,
+ entry.get('empty', 'false')) == ('Path',
+ 'file',
+ None,
+ 'false'):
+ return False
+ return True
+ else:
+ return False
+
+ def gatherCurrentData(self, entry):
+ if entry.tag == 'Path' and entry.get('type') == 'file':
+ try:
+ ondisk = os.stat(entry.get('name'))
+ except OSError:
+ entry.set('current_exists', 'false')
+ self.logger.debug("%s %s does not exist" %
+ (entry.tag, entry.get('name')))
+ return False
+ try:
+ entry.set('current_owner', str(ondisk[ST_UID]))
+ entry.set('current_group', str(ondisk[ST_GID]))
+ except (OSError, KeyError):
+ pass
+ entry.set('perms', str(oct(ondisk[ST_MODE])[-4:]))
+ try:
+ content = open(entry.get('name')).read()
+ entry.set('current_bfile', binascii.b2a_base64(content))
+ except IOError, error:
+ self.logger.error("Failed to read %s: %s" % (error.filename,
+ error.strerror))
+
+ def Verifydevice(self, entry, _):
+ """Verify device entry."""
+ if entry.get('dev_type') == None or \
+ entry.get('owner') == None or \
+ entry.get('group') == None:
+ self.logger.error('Entry %s not completely specified. '
+ 'Try running bcfg2-repo-validate.' % (entry.get('name')))
+ return False
+ if entry.get('dev_type') in ['block', 'char']:
+ # check if major/minor are properly specified
+ if entry.get('major') == None or \
+ entry.get('minor') == None:
+ self.logger.error('Entry %s not completely specified. '
+ 'Try running bcfg2-repo-validate.' % (entry.get('name')))
+ return False
+ try:
+ # check for file existence
+ filestat = os.stat(entry.get('name'))
+ except OSError:
+ entry.set('current_exists', 'false')
+ self.logger.debug("%s %s does not exist" %
+ (entry.tag, entry.get('name')))
+ return False
+
+ try:
+ # attempt to verify device properties as specified in config
+ dev_type = entry.get('dev_type')
+ mode = calcPerms(device_map[dev_type],
+ entry.get('mode', '0600'))
+ owner = normUid(entry)
+ group = normGid(entry)
+ if dev_type in ['block', 'char']:
+ # check for incompletely specified entries
+ if entry.get('major') == None or \
+ entry.get('minor') == None:
+ self.logger.error('Entry %s not completely specified. '
+ 'Try running bcfg2-repo-validate.' % (entry.get('name')))
+ return False
+ major = int(entry.get('major'))
+ minor = int(entry.get('minor'))
+ if major == os.major(filestat.st_rdev) and \
+ minor == os.minor(filestat.st_rdev) and \
+ mode == filestat.st_mode and \
+ owner == filestat.st_uid and \
+ group == filestat.st_gid:
+ return True
+ else:
+ return False
+ elif dev_type == 'fifo' and \
+ mode == filestat.st_mode and \
+ owner == filestat.st_uid and \
+ group == filestat.st_gid:
+ return True
+ else:
+ self.logger.info('Device properties for %s incorrect' % \
+ entry.get('name'))
+ return False
+ except OSError:
+ self.logger.debug("%s %s failed to verify" %
+ (entry.tag, entry.get('name')))
+ return False
+
+ def Installdevice(self, entry):
+ """Install device entries."""
+ try:
+ # check for existing paths and remove them
+ os.lstat(entry.get('name'))
+ try:
+ os.unlink(entry.get('name'))
+ exists = False
+ except OSError:
+ self.logger.info('Failed to unlink %s' % \
+ entry.get('name'))
+ return False
+ except OSError:
+ exists = False
+
+ if not exists:
+ try:
+ dev_type = entry.get('dev_type')
+ mode = calcPerms(device_map[dev_type],
+ entry.get('mode', '0600'))
+ if dev_type in ['block', 'char']:
+ # check if major/minor are properly specified
+ if entry.get('major') == None or \
+ entry.get('minor') == None:
+ self.logger.error('Entry %s not completely specified. '
+ 'Try running bcfg2-repo-validate.' % (entry.get('name')))
+ return False
+ major = int(entry.get('major'))
+ minor = int(entry.get('minor'))
+ device = os.makedev(major, minor)
+ os.mknod(entry.get('name'), mode, device)
+ else:
+ os.mknod(entry.get('name'), mode)
+ os.chown(entry.get('name'), normUid(entry), normGid(entry))
+ return True
+ except KeyError:
+ self.logger.error('Failed to install %s' % entry.get('name'))
+ except OSError:
+ self.logger.error('Failed to install %s' % entry.get('name'))
+ return False
+
+ def Verifydirectory(self, entry, modlist):
+ """Verify Path type='directory' entry."""
+ if entry.get('perms') == None or \
+ entry.get('owner') == None or \
+ entry.get('group') == None:
+ self.logger.error('Entry %s not completely specified. '
+ 'Try running bcfg2-repo-validate.' % (entry.get('name')))
+ return False
+ while len(entry.get('perms', '')) < 4:
+ entry.set('perms', '0' + entry.get('perms', ''))
+ try:
+ ondisk = os.stat(entry.get('name'))
+ except OSError:
+ entry.set('current_exists', 'false')
+ self.logger.debug("%s %s does not exist" %
+ (entry.tag, entry.get('name')))
+ return False
+ try:
+ owner = str(ondisk[ST_UID])
+ group = str(ondisk[ST_GID])
+ except (OSError, KeyError):
+ self.logger.error('User/Group resolution failed for path %s' % \
+ entry.get('name'))
+ owner = 'root'
+ group = '0'
+ finfo = os.stat(entry.get('name'))
+ perms = oct(finfo[ST_MODE])[-4:]
+ if entry.get('mtime', '-1') != '-1':
+ mtime = str(finfo[ST_MTIME])
+ else:
+ mtime = '-1'
+ pTrue = ((owner == str(normUid(entry))) and
+ (group == str(normGid(entry))) and
+ (perms == entry.get('perms')) and
+ (mtime == entry.get('mtime', '-1')))
+
+ pruneTrue = True
+ ex_ents = []
+ if entry.get('prune', 'false') == 'true' \
+ and (entry.tag == 'Path' and entry.get('type') == 'directory'):
+ # check for any extra entries when prune='true' attribute is set
+ try:
+ entries = ['/'.join([entry.get('name'), ent]) \
+ for ent in os.listdir(entry.get('name'))]
+ ex_ents = [e for e in entries if e not in modlist]
+ if ex_ents:
+ pruneTrue = False
+ self.logger.debug("Directory %s contains extra entries:" % \
+ entry.get('name'))
+ self.logger.debug(ex_ents)
+ nqtext = entry.get('qtext', '') + '\n'
+ nqtext += "Directory %s contains extra entries:" % \
+ entry.get('name')
+ nqtext += ":".join(ex_ents)
+ entry.set('qtest', nqtext)
+ [entry.append(XML.Element('Prune', path=x)) \
+ for x in ex_ents]
+ except OSError:
+ ex_ents = []
+ pruneTrue = True
+
+ if not pTrue:
+ if owner != str(normUid(entry)):
+ entry.set('current_owner', owner)
+ self.logger.debug("%s %s ownership wrong" % \
+ (entry.tag, entry.get('name')))
+ nqtext = entry.get('qtext', '') + '\n'
+ nqtext += "%s owner wrong. is %s should be %s" % \
+ (entry.get('name'), owner, entry.get('owner'))
+ entry.set('qtext', nqtext)
+ if group != str(normGid(entry)):
+ entry.set('current_group', group)
+ self.logger.debug("%s %s group wrong" % \
+ (entry.tag, entry.get('name')))
+ nqtext = entry.get('qtext', '') + '\n'
+ nqtext += "%s group is %s should be %s" % \
+ (entry.get('name'), group, entry.get('group'))
+ entry.set('qtext', nqtext)
+ if perms != entry.get('perms'):
+ entry.set('current_perms', perms)
+ self.logger.debug("%s %s permissions are %s should be %s" %
+ (entry.tag,
+ entry.get('name'),
+ perms,
+ entry.get('perms')))
+ nqtext = entry.get('qtext', '') + '\n'
+ nqtext += "%s %s perms are %s should be %s" % \
+ (entry.tag,
+ entry.get('name'),
+ perms,
+ entry.get('perms'))
+ entry.set('qtext', nqtext)
+ if mtime != entry.get('mtime', '-1'):
+ entry.set('current_mtime', mtime)
+ self.logger.debug("%s %s mtime is %s should be %s" \
+ % (entry.tag, entry.get('name'), mtime,
+ entry.get('mtime')))
+ nqtext = entry.get('qtext', '') + '\n'
+ nqtext += "%s mtime is %s should be %s" % \
+ (entry.get('name'), mtime, entry.get('mtime'))
+ entry.set('qtext', nqtext)
+ if entry.get('type') != 'file':
+ nnqtext = entry.get('qtext')
+ nnqtext += '\nInstall %s %s: (y/N) ' % (entry.get('type'),
+ entry.get('name'))
+ entry.set('qtext', nnqtext)
+ return pTrue and pruneTrue
+
+ def Installdirectory(self, entry):
+ """Install Path type='directory' entry."""
+ if entry.get('perms') == None or \
+ entry.get('owner') == None or \
+ entry.get('group') == None:
+ self.logger.error('Entry %s not completely specified. '
+ 'Try running bcfg2-repo-validate.' % \
+ (entry.get('name')))
+ return False
+ self.logger.info("Installing directory %s" % (entry.get('name')))
+ try:
+ fmode = os.lstat(entry.get('name'))
+ if not S_ISDIR(fmode[ST_MODE]):
+ self.logger.debug("Found a non-directory entry at %s" % \
+ (entry.get('name')))
+ try:
+ os.unlink(entry.get('name'))
+ exists = False
+ except OSError:
+ self.logger.info("Failed to unlink %s" % \
+ (entry.get('name')))
+ return False
+ else:
+ self.logger.debug("Found a pre-existing directory at %s" % \
+ (entry.get('name')))
+ exists = True
+ except OSError:
+ # stat failed
+ exists = False
+
+ if not exists:
+ parent = "/".join(entry.get('name').split('/')[:-1])
+ if parent:
+ try:
+ os.stat(parent)
+ except:
+ self.logger.debug('Creating parent path for directory %s' % (entry.get('name')))
+ for idx in range(len(parent.split('/')[:-1])):
+ current = '/'+'/'.join(parent.split('/')[1:2+idx])
+ try:
+ sloc = os.stat(current)
+ except OSError:
+ try:
+ os.mkdir(current)
+ continue
+ except OSError:
+ return False
+ if not S_ISDIR(sloc[ST_MODE]):
+ try:
+ os.unlink(current)
+ os.mkdir(current)
+ except OSError:
+ return False
+
+ try:
+ os.mkdir(entry.get('name'))
+ except OSError:
+ self.logger.error('Failed to create directory %s' % \
+ (entry.get('name')))
+ return False
+ if entry.get('prune', 'false') == 'true' and entry.get("qtest"):
+ for pent in entry.findall('Prune'):
+ pname = pent.get('path')
+ ulfailed = False
+ if os.path.isdir(pname):
+ self.logger.info("Not removing extra directory %s, "
+ "please check and remove manually" % pname)
+ continue
+ try:
+ self.logger.debug("Unlinking file %s" % pname)
+ os.unlink(pname)
+ except OSError:
+ self.logger.error("Failed to unlink path %s" % pname)
+ ulfailed = True
+ if ulfailed:
+ return False
+ return self.Installpermissions(entry)
+
+ def Verifyfile(self, entry, _):
+ """Verify Path type='file' entry."""
+ # permissions check + content check
+ permissionStatus = self.Verifydirectory(entry, _)
+ tbin = False
+ if entry.get('encoding', 'ascii') == 'base64':
+ tempdata = binascii.a2b_base64(entry.text)
+ tbin = True
+ elif entry.get('empty', 'false') == 'true':
+ tempdata = ''
+ else:
+ if entry.text == None:
+ self.logger.error("Cannot verify incomplete Path type='%s' %s" % \
+ (entry.get('type'), entry.get('name')))
+ return False
+ tempdata = entry.text
+ if type(tempdata) == unicode:
+ try:
+ tempdata = tempdata.encode(self.setup['encoding'])
+ except UnicodeEncodeError, e:
+ self.logger.error("Error encoding file %s:\n %s" % \
+ (entry.get('name'), e))
+ try:
+ content = open(entry.get('name')).read()
+ except IOError, error:
+ if error.strerror == "No such file or directory":
+ # print diff for files that don't exist (yet)
+ content = ''
+ else:
+ self.logger.error("Failed to read %s: %s" % \
+ (error.filename, error.strerror))
+ return False
+ # comparison should be done with fingerprints or
+ # md5sum so it would be faster for big binary files
+ contentStatus = content == tempdata
+ if not contentStatus:
+ try:
+ content.decode('ascii')
+ isstring = True
+ except:
+ isstring = False
+ if tbin or not isstring:
+ entry.set('current_bfile', binascii.b2a_base64(content))
+ nqtext = entry.get('qtext', '')
+ nqtext += '\nBinary file, no printable diff'
+ else:
+ do_diff = True
+ rawdiff = []
+ start = time.time()
+ longtime = False
+ for x in difflib.ndiff(content.split('\n'),
+ tempdata.split('\n')):
+ now = time.time()
+ rawdiff.append(x)
+ if now - start > 5 and not longtime:
+ self.logger.info("Diff of %s taking a long time" % \
+ (entry.get('name')))
+ longtime = True
+ elif now - start > 30:
+ self.logger.error("Diff of %s took too long; giving up" % \
+ (entry.get('name')))
+ do_diff = False
+ break
+ if do_diff:
+ diff = '\n'.join(rawdiff)
+ entry.set("current_bdiff", binascii.b2a_base64(diff))
+# entry.set("current_diff", diff)
+ udiff = '\n'.join([x for x in \
+ difflib.unified_diff(content.split('\n'), \
+ tempdata.split('\n'))])
+ try:
+ eudiff = udiff.encode('ascii')
+ except:
+ eudiff = "Binary file: no diff printed"
+
+ nqtext = entry.get('qtext', '')
+
+ if nqtext:
+ nqtext += '\n'
+ nqtext += eudiff
+ else:
+ entry.set('current_bfile', binascii.b2a_base64(content))
+ nqtext = entry.get('qtext', '')
+ nqtext += '\nDiff took too long to compute, no printable diff'
+ entry.set('qtext', nqtext)
+ qtxt = entry.get('qtext', '')
+ qtxt += "\nInstall %s %s: (y/N): " % (entry.tag, entry.get('name'))
+ entry.set('qtext', qtxt)
+ return contentStatus and permissionStatus
+
+ def Installfile(self, entry):
+ """Install Path type='file' entry."""
+ self.logger.info("Installing file %s" % (entry.get('name')))
+
+ parent = "/".join(entry.get('name').split('/')[:-1])
+ if parent:
+ try:
+ os.stat(parent)
+ except:
+ self.logger.debug('Creating parent path for config file %s' % \
+ (entry.get('name')))
+ current = '/'
+ for next in parent.split('/')[1:]:
+ current += next + '/'
+ try:
+ sloc = os.stat(current)
+ try:
+ if not S_ISDIR(sloc[ST_MODE]):
+ self.logger.debug('%s is not a directory; recreating' \
+ % (current))
+ os.unlink(current)
+ os.mkdir(current)
+ except OSError:
+ return False
+ except OSError:
+ try:
+ self.logger.debug("Creating non-existent path %s" % current)
+ os.mkdir(current)
+ except OSError:
+ return False
+
+ # If we get here, then the parent directory should exist
+ if (entry.get("paranoid", False) == 'true') and \
+ self.setup.get("paranoid", False) and not \
+ (entry.get('current_exists', 'true') == 'false'):
+ bkupnam = entry.get('name').replace('/', '_')
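+ # Backups are stored flat in self.ppath; the backup name is the original
+ # path with '/' replaced by '_', plus a timestamp suffix (see the
+ # shutil.copy call below).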
+ # current list of backups for this file
+ bkuplist = [f for f in os.listdir(self.ppath) if
+ f.startswith(bkupnam)]
+ bkuplist.sort()
+ if len(bkuplist) == int(self.max_copies):
+ # remove the oldest backup available
+ oldest = bkuplist.pop(0)
+ self.logger.info("Removing %s" % oldest)
+ try:
+ os.remove("%s/%s" % (self.ppath, oldest))
+ except:
+ self.logger.error("Failed to remove %s/%s" % \
+ (self.ppath, oldest))
+ return False
+ try:
+ # backup existing file
+ shutil.copy(entry.get('name'),
+ "%s/%s_%s" % (self.ppath, bkupnam, datetime.now()))
+ self.logger.info("Backup of %s saved to %s" %
+ (entry.get('name'), self.ppath))
+ except IOError, e:
+ self.logger.error("Failed to create backup file for %s" % \
+ (entry.get('name')))
+ self.logger.error(e)
+ return False
+ try:
+ newfile = open("%s.new"%(entry.get('name')), 'w')
+ if entry.get('encoding', 'ascii') == 'base64':
+ filedata = binascii.a2b_base64(entry.text)
+ elif entry.get('empty', 'false') == 'true':
+ filedata = ''
+ else:
+ if type(entry.text) == unicode:
+ filedata = entry.text.encode(self.setup['encoding'])
+ else:
+ filedata = entry.text
+ newfile.write(filedata)
+ newfile.close()
+ try:
+ os.chown(newfile.name, normUid(entry), normGid(entry))
+ except KeyError:
+ self.logger.error("Failed to chown %s to %s:%s" % \
+ (entry.get('name'), entry.get('owner'),
+ entry.get('group')))
+ os.chown(newfile.name, 0, 0)
+ os.chmod(newfile.name, calcPerms(S_IFREG, entry.get('perms')))
+ os.rename(newfile.name, entry.get('name'))
+ if entry.get('mtime', '-1') != '-1':
+ try:
+ os.utime(entry.get('name'), (int(entry.get('mtime')),
+ int(entry.get('mtime'))))
+ except:
+ self.logger.error("File %s mtime fix failed" \
+ % (entry.get('name')))
+ return False
+ return True
+ except (OSError, IOError), err:
+ if err.errno == errno.EACCES:
+ self.logger.info("Failed to open %s for writing" % (entry.get('name')))
+ else:
+ print(err)
+ return False
+
+ def Verifyhardlink(self, entry, _):
+ """Verify HardLink entry."""
+ if entry.get('to') == None:
+ self.logger.error('Entry %s not completely specified. '
+ 'Try running bcfg2-repo-validate.' % \
+ (entry.get('name')))
+ return False
+ try:
+ if os.path.samefile(entry.get('name'), entry.get('to')):
+ return True
+ self.logger.debug("Hardlink %s is incorrect" % \
+ entry.get('name'))
+ entry.set('qtext', "Link %s to %s? [y/N] " % \
+ (entry.get('name'),
+ entry.get('to')))
+ return False
+ except OSError:
+ entry.set('current_exists', 'false')
+ entry.set('qtext', "Link %s to %s? [y/N] " % \
+ (entry.get('name'),
+ entry.get('to')))
+ return False
+
+ def Installhardlink(self, entry):
+ """Install HardLink entry."""
+ if entry.get('to') == None:
+ self.logger.error('Entry %s not completely specified. '
+ 'Try running bcfg2-repo-validate.' % \
+ (entry.get('name')))
+ return False
+ self.logger.info("Installing Hardlink %s" % (entry.get('name')))
+ if os.path.lexists(entry.get('name')):
+ try:
+ fmode = os.lstat(entry.get('name'))[ST_MODE]
+ if S_ISREG(fmode) or S_ISLNK(fmode):
+ self.logger.debug("Non-directory entry already exists at "
+ "%s. Unlinking entry." % (entry.get('name')))
+ os.unlink(entry.get('name'))
+ elif S_ISDIR(fmode):
+ self.logger.debug("Directory already exists at %s" % \
+ (entry.get('name')))
+ self.cmd.run("mv %s/ %s.bak" % \
+ (entry.get('name'),
+ entry.get('name')))
+ else:
+ os.unlink(entry.get('name'))
+ except OSError:
+ self.logger.info("Hardlink %s cleanup failed" % \
+ (entry.get('name')))
+ try:
+ os.link(entry.get('to'), entry.get('name'))
+ return True
+ except OSError:
+ return False
+
+ def Verifynonexistent(self, entry, _):
+ """Verify nonexistent entry."""
+ # return true if path does _not_ exist
+ return not os.path.lexists(entry.get('name'))
+
+ def Installnonexistent(self, entry):
+ '''Remove nonexistent entries'''
+ try:
+ os.remove(entry.get('name'))
+ return True
+ except OSError:
+ self.logger.error('Failed to remove %s' % entry.get('name'))
+ return False
+
+ def Verifypermissions(self, entry, _):
+ """Verify Path type='permissions' entry"""
+ return self.Verifydirectory(entry, _)
+
+ def Installpermissions(self, entry):
+ """Install POSIX permissions"""
+ if entry.get('perms') == None or \
+ entry.get('owner') == None or \
+ entry.get('group') == None:
+ self.logger.error('Entry %s not completely specified. '
+ 'Try running bcfg2-repo-validate.' % (entry.get('name')))
+ return False
+ try:
+ os.chown(entry.get('name'), normUid(entry), normGid(entry))
+ os.chmod(entry.get('name'), calcPerms(S_IFDIR, entry.get('perms')))
+ return True
+ except (OSError, KeyError):
+ self.logger.error('Permission fixup failed for %s' % \
+ (entry.get('name')))
+ return False
+
+ def Verifysymlink(self, entry, _):
+ """Verify Path type='symlink' entry."""
+ if entry.get('to') == None:
+ self.logger.error('Entry %s not completely specified. '
+ 'Try running bcfg2-repo-validate.' % \
+ (entry.get('name')))
+ return False
+ try:
+ sloc = os.readlink(entry.get('name'))
+ if sloc == entry.get('to'):
+ return True
+ self.logger.debug("Symlink %s points to %s, should be %s" % \
+ (entry.get('name'), sloc, entry.get('to')))
+ entry.set('current_to', sloc)
+ entry.set('qtext', "Link %s to %s? [y/N] " % (entry.get('name'),
+ entry.get('to')))
+ return False
+ except OSError:
+ entry.set('current_exists', 'false')
+ entry.set('qtext', "Link %s to %s? [y/N] " % (entry.get('name'),
+ entry.get('to')))
+ return False
+
+ def Installsymlink(self, entry):
+ """Install Path type='symlink' entry."""
+ if entry.get('to') == None:
+ self.logger.error('Entry %s not completely specified. '
+ 'Try running bcfg2-repo-validate.' % \
+ (entry.get('name')))
+ return False
+ self.logger.info("Installing symlink %s" % (entry.get('name')))
+ if os.path.lexists(entry.get('name')):
+ try:
+ fmode = os.lstat(entry.get('name'))[ST_MODE]
+ if S_ISREG(fmode) or S_ISLNK(fmode):
+ self.logger.debug("Non-directory entry already exists at "
+ "%s. Unlinking entry." % \
+ (entry.get('name')))
+ os.unlink(entry.get('name'))
+ elif S_ISDIR(fmode):
+ self.logger.debug("Directory already exists at %s" %\
+ (entry.get('name')))
+ self.cmd.run("mv %s/ %s.bak" % \
+ (entry.get('name'),
+ entry.get('name')))
+ else:
+ os.unlink(entry.get('name'))
+ except OSError:
+ self.logger.info("Symlink %s cleanup failed" %\
+ (entry.get('name')))
+ try:
+ os.symlink(entry.get('to'), entry.get('name'))
+ return True
+ except OSError:
+ return False
+
+ def InstallPath(self, entry):
+ """Dispatch install to the proper method according to type"""
+ ret = getattr(self, 'Install%s' % entry.get('type'))
+ return ret(entry)
+
+ def VerifyPath(self, entry, _):
+ """Dispatch verify to the proper method according to type"""
+ ret = getattr(self, 'Verify%s' % entry.get('type'))
+ return ret(entry, _)
diff --git a/build/lib/Bcfg2/Client/Tools/Pacman.py b/build/lib/Bcfg2/Client/Tools/Pacman.py
new file mode 100644
index 000000000..be3fb0c94
--- /dev/null
+++ b/build/lib/Bcfg2/Client/Tools/Pacman.py
@@ -0,0 +1,82 @@
+"""This is the bcfg2 support for pacman"""
+
+import Bcfg2.Client.Tools
+
+
+class Pacman(Bcfg2.Client.Tools.PkgTool):
+ '''Archlinux package support'''
+ name = 'Pacman'
+ __execs__ = ["/usr/bin/pacman"]
+ __handles__ = [('Package', 'pacman')]
+ __req__ = {'Package': ['name', 'version']}
+ pkgtype = 'pacman'
+ pkgtool = ("/usr/bin/pacman --needed --noconfirm --noprogressbar")
+
+ def __init__(self, logger, setup, config):
+ Bcfg2.Client.Tools.PkgTool.__init__(self, logger, setup, config)
+ self.installed = {}
+ self.RefreshPackages()
+
+ def RefreshPackages(self):
+ '''Refresh memory hashes of packages'''
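+ # "pacman -Q" lists installed packages one per line as "<name> <version>"
+ # (e.g. "bash 4.1.007-1"), which is what the split below relies on.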
+ pkgcache = self.cmd.run("/usr/bin/pacman -Q")[1]
+ self.installed = {}
+ for pkg in pkgcache:
+ pkgname = pkg.split(' ')[0].strip()
+ version = pkg.split(' ')[1].strip()
+ #self.logger.info(" pkgname: %s, version: %s" % (pkgname, version))
+ self.installed[pkgname] = version
+
+ def VerifyPackage(self, entry, modlist):
+ '''Verify Package status for entry'''
+
+ self.logger.info("VerifyPackage : %s : %s" % entry.get('name'),
+ entry.get('version'))
+
+ if not 'version' in entry.attrib:
+ self.logger.info("Cannot verify unversioned package %s" %
+ (entry.attrib['name']))
+ return False
+
+ if entry.attrib['name'] in self.installed:
+ if entry.attrib['version'] == 'auto':
+ return True
+ elif self.installed[entry.attrib['name']] == entry.attrib['version']:
+ #if not self.setup['quick'] and \
+ # entry.get('verify', 'true') == 'true':
+ #FIXME: need to figure out if pacman
+ # allows you to verify packages
+ return True
+ else:
+ entry.set('current_version', self.installed[entry.get('name')])
+ self.logger.info("attribname: %s" % (entry.attrib['name']))
+ self.logger.info("attribname: %s" % (entry.attrib['name']))
+ return False
+ entry.set('current_exists', 'false')
+ self.logger.info("attribname: %s" % (entry.attrib['name']))
+ return False
+
+ def RemovePackages(self, packages):
+ '''Remove extra packages'''
+ names = [pkg.get('name') for pkg in packages]
+ self.logger.info("Removing packages: %s" % " ".join(names))
+ self.cmd.run("%s --noconfirm --noprogressbar -R %s" % \
+ (self.pkgtool, " ".join(names)))
+ self.RefreshPackages()
+ self.extra = self.FindExtraPackages()
+
+ def Install(self, packages, states):
+ '''
+ Pacman Install
+ '''
+ pkgline = ""
+ for pkg in packages:
+ pkgline += " " + pkg.get('name')
+
+ print "packages : " + pkgline
+
+ try:
+ self.logger.debug("Running : %s -S %s" % (self.pkgtool, pkgline))
+ self.cmd.run("%s -S %s" % (self.pkgtool, pkgline))
+ except Exception, e:
+ self.logger.error("Error occurred during installation: %s" % e)
diff --git a/build/lib/Bcfg2/Client/Tools/Portage.py b/build/lib/Bcfg2/Client/Tools/Portage.py
new file mode 100644
index 000000000..58d2aad29
--- /dev/null
+++ b/build/lib/Bcfg2/Client/Tools/Portage.py
@@ -0,0 +1,71 @@
+"""This is the Bcfg2 tool for the Gentoo Portage system."""
+__revision__ = '$Revision$'
+
+import re
+import Bcfg2.Client.Tools
+
+
+class Portage(Bcfg2.Client.Tools.PkgTool):
+ """The Gentoo toolset implements package and service operations and
+ inherits the rest from Toolset.Toolset."""
+ name = 'Portage'
+ __execs__ = ['/usr/bin/emerge', '/usr/bin/equery']
+ __handles__ = [('Package', 'ebuild')]
+ __req__ = {'Package': ['name', 'version']}
+ pkgtype = 'ebuild'
+ # requires a working PORTAGE_BINHOST in make.conf
+ pkgtool = ('emerge --getbinpkgonly %s', ('=%s-%s', ['name', 'version']))
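+ # Editor's note: pkgtool appears to follow the shared PkgTool convention of
+ # (command template, (per-package format, [entry attributes])) -- each
+ # package is rendered as "=<name>-<version>" and substituted into the
+ # emerge command by the generic PkgTool install code (not shown here).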
+
+ def __init__(self, logger, cfg, setup):
+ Bcfg2.Client.Tools.PkgTool.__init__(self, logger, cfg, setup)
+ self.__important__ = self.__important__ + ['/etc/make.conf']
+ self.cfg = cfg
+ self.installed = {}
+ self.RefreshPackages()
+
+ def RefreshPackages(self):
+ """Refresh memory hashes of packages."""
+ ret, cache = self.cmd.run("equery -q list")
+ if ret == 2:
+ cache = self.cmd.run("equery -q list '*'")[1]
+ pattern = re.compile('(.*)-(\d.*)')
+ self.installed = {}
+ for pkg in cache:
+ if pattern.match(pkg):
+ name = pattern.match(pkg).group(1)
+ version = pattern.match(pkg).group(2)
+ self.installed[name] = version
+ else:
+ self.logger.info("Failed to parse pkg name %s" % pkg)
+
+ def VerifyPackage(self, entry, modlist):
+ """Verify package for entry."""
+ if not 'version' in entry.attrib:
+ self.logger.info("Cannot verify unversioned package %s" %
+ (entry.attrib['name']))
+ return False
+ if entry.attrib['name'] in self.installed:
+ if self.installed[entry.attrib['name']] == entry.attrib['version']:
+ if not self.setup['quick'] and \
+ entry.get('verify', 'true') == 'true':
+ output = self.cmd.run("/usr/bin/equery check '=%s-%s' 2>&1 |grep '!!!' | awk '{print $2}'" \
+ % (entry.get('name'), entry.get('version')))[1]
+ if [filename for filename in output \
+ if filename not in modlist]:
+ return False
+ return True
+ else:
+ entry.set('current_version', self.installed[entry.get('name')])
+ return False
+ entry.set('current_exists', 'false')
+ return False
+
+ def RemovePackages(self, packages):
+ """Deal with extra configuration detected."""
+ pkgnames = " ".join([pkg.get('name') for pkg in packages])
+ if len(packages) > 0:
+ self.logger.info('Removing packages:')
+ self.logger.info(pkgnames)
+ self.cmd.run("emerge --unmerge --quiet %s" % " ".join(pkgnames.split(' ')))
+ self.RefreshPackages()
+ self.extra = self.FindExtraPackages()
diff --git a/build/lib/Bcfg2/Client/Tools/RPMng.py b/build/lib/Bcfg2/Client/Tools/RPMng.py
new file mode 100644
index 000000000..a1e14b3a6
--- /dev/null
+++ b/build/lib/Bcfg2/Client/Tools/RPMng.py
@@ -0,0 +1,1034 @@
+"""Bcfg2 Support for RPMS"""
+
+__revision__ = '$Revision$'
+
+import ConfigParser
+import os.path
+import rpm
+import rpmtools
+import Bcfg2.Client.Tools
+
+# Fix for python2.3
+try:
+ set
+except NameError:
+ from sets import Set as set
+
+class RPMng(Bcfg2.Client.Tools.PkgTool):
+ """Support for RPM packages."""
+ name = 'RPMng'
+
+ __execs__ = ['/bin/rpm', '/var/lib/rpm']
+ __handles__ = [('Package', 'rpm')]
+
+ __req__ = {'Package': ['name', 'version']}
+ __ireq__ = {'Package': ['url']}
+
+ __new_req__ = {'Package': ['name'], 'Instance': ['version', 'release', 'arch']}
+ __new_ireq__ = {'Package': ['uri'], \
+ 'Instance': ['simplefile']}
+
+ __gpg_req__ = {'Package': ['name', 'version']}
+ __gpg_ireq__ = {'Package': ['name', 'version']}
+
+ __new_gpg_req__ = {'Package': ['name'], 'Instance': ['version', 'release']}
+ __new_gpg_ireq__ = {'Package': ['name'], 'Instance': ['version', 'release']}
+
+ conflicts = ['RPM']
+
+ pkgtype = 'rpm'
+ pkgtool = ("rpm --oldpackage --replacepkgs --quiet -U %s", ("%s", ["url"]))
+
+ def __init__(self, logger, setup, config):
+ Bcfg2.Client.Tools.PkgTool.__init__(self, logger, setup, config)
+
+ # create a global ignore list used when ignoring particular
+ # files during package verification
+ self.ignores = [entry.get('name') for struct in config for entry in struct \
+ if entry.get('type') == 'ignore']
+ self.instance_status = {}
+ self.extra_instances = []
+ self.modlists = {}
+ self.gpg_keyids = self.getinstalledgpg()
+
+ # Process the RPMng section from the config file.
+ RPMng_CP = ConfigParser.ConfigParser()
+ RPMng_CP.read(self.setup.get('setup'))
+
+ # installonlypackages
+ self.installOnlyPkgs = []
+ if RPMng_CP.has_option(self.name, 'installonlypackages'):
+ for i in RPMng_CP.get(self.name, 'installonlypackages').split(','):
+ self.installOnlyPkgs.append(i.strip())
+ if self.installOnlyPkgs == []:
+ self.installOnlyPkgs = ['kernel', 'kernel-bigmem', 'kernel-enterprise', 'kernel-smp',
+ 'kernel-modules', 'kernel-debug', 'kernel-unsupported',
+ 'kernel-source', 'kernel-devel', 'kernel-default',
+ 'kernel-largesmp-devel', 'kernel-largesmp', 'kernel-xen',
+ 'gpg-pubkey']
+ if 'gpg-pubkey' not in self.installOnlyPkgs:
+ self.installOnlyPkgs.append('gpg-pubkey')
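+ # gpg-pubkey entries are pseudo-packages created by 'rpm --import'; they
+ # can only be added or erased, never upgraded, so they must always be
+ # treated as install-only.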
+ self.logger.debug('installOnlyPackages = %s' % self.installOnlyPkgs)
+
+ # erase_flags
+ self.erase_flags = []
+ if RPMng_CP.has_option(self.name, 'erase_flags'):
+ for i in RPMng_CP.get(self.name, 'erase_flags').split(','):
+ self.erase_flags.append(i.strip())
+ if self.erase_flags == []:
+ self.erase_flags = ['allmatches']
+ self.logger.debug('erase_flags = %s' % self.erase_flags)
+
+ # pkg_checks
+ if RPMng_CP.has_option(self.name, 'pkg_checks'):
+ self.pkg_checks = RPMng_CP.get(self.name, 'pkg_checks').lower()
+ else:
+ self.pkg_checks = 'true'
+ self.logger.debug('pkg_checks = %s' % self.pkg_checks)
+
+ # pkg_verify
+ if RPMng_CP.has_option(self.name, 'pkg_verify'):
+ self.pkg_verify = RPMng_CP.get(self.name, 'pkg_verify').lower()
+ else:
+ self.pkg_verify = 'true'
+ self.logger.debug('pkg_verify = %s' % self.pkg_verify)
+
+ # installed_action
+ if RPMng_CP.has_option(self.name, 'installed_action'):
+ self.installed_action = RPMng_CP.get(self.name, 'installed_action').lower()
+ else:
+ self.installed_action = 'install'
+ self.logger.debug('installed_action = %s' % self.installed_action)
+
+ # version_fail_action
+ if RPMng_CP.has_option(self.name, 'version_fail_action'):
+ self.version_fail_action = RPMng_CP.get(self.name, 'version_fail_action').lower()
+ else:
+ self.version_fail_action = 'upgrade'
+ self.logger.debug('version_fail_action = %s' % self.version_fail_action)
+
+ # verify_fail_action
+ if self.name == "RPMng":
+ if RPMng_CP.has_option(self.name, 'verify_fail_action'):
+ self.verify_fail_action = RPMng_CP.get(self.name, 'verify_fail_action').lower()
+ else:
+ self.verify_fail_action = 'reinstall'
+ else: # yum can't reinstall packages.
+ self.verify_fail_action = 'none'
+ self.logger.debug('verify_fail_action = %s' % self.verify_fail_action)
+
+ # verify_flags
+ if RPMng_CP.has_option(self.name, 'verify_flags'):
+ self.verify_flags = RPMng_CP.get(self.name, 'verify_flags').lower().split(',')
+ else:
+ self.verify_flags = []
+ if '' in self.verify_flags:
+ self.verify_flags.remove('')
+ self.logger.debug('verify_flags = %s' % self.verify_flags)
+ # Force a re-prelink of all packages if prelink exists.
+ # Many, if not most package verifies can be caused by out of date prelinking.
+ if os.path.isfile('/usr/sbin/prelink') and not self.setup['dryrun']:
+ cmdrc, output = self.cmd.run('/usr/sbin/prelink -a -mR')
+ if cmdrc == 0:
+ self.logger.debug('Pre-emptive prelink succeeded')
+ else:
+ # FIXME : this is dumb - what if the output is huge?
+ self.logger.error('Pre-emptive prelink failed: %s' % output)
+
+
+ def RefreshPackages(self):
+ """
+ Creates self.installed{} which is a dict of installed packages.
+
+ The dict items are lists of nevra dicts. This loosely matches the
+ config from the server and what rpmtools uses to specify packages.
+
+ e.g.
+
+ self.installed['foo'] = [ {'name':'foo', 'epoch':None,
+ 'version':'1', 'release':2,
+ 'arch':'i386'},
+ {'name':'foo', 'epoch':None,
+ 'version':'1', 'release':2,
+ 'arch':'x86_64'} ]
+ """
+ self.installed = {}
+ refresh_ts = rpmtools.rpmtransactionset()
+ # Don't bother with signature checks at this stage. The GPG keys might
+ # not be installed.
+ refresh_ts.setVSFlags(rpm._RPMVSF_NODIGESTS|rpm._RPMVSF_NOSIGNATURES)
+ for nevra in rpmtools.rpmpackagelist(refresh_ts):
+ self.installed.setdefault(nevra['name'], []).append(nevra)
+ if self.setup['debug']:
+ print("The following package instances are installed:")
+ for name, instances in list(self.installed.items()):
+ self.logger.debug(" " + name)
+ for inst in instances:
+ self.logger.debug(" %s" %self.str_evra(inst))
+ refresh_ts.closeDB()
+ del refresh_ts
+
+ def VerifyPackage(self, entry, modlist, pinned_version=None):
+ """
+ Verify Package status for entry.
+ Performs the following:
+ - Checks for the presence of required Package Instances.
+ - Compares the evra 'version' info against self.installed{}.
+ - RPM level package verify (rpm --verify).
+ - Checks for the presence of unrequired package instances.
+
+ Produces the following dict and list for RPMng.Install() to use:
+ For installs/upgrades/fixes of required instances:
+ instance_status = { <Instance Element Object>:
+ { 'installed': True|False,
+ 'version_fail': True|False,
+ 'verify_fail': True|False,
+ 'pkg': <Package Element Object>,
+ 'modlist': [ <filename>, ... ],
+ 'verify' : [ <rpm --verify results> ]
+ }, ......
+ }
+
+ For deletions of unrequired instances:
+ extra_instances = [ <Package Element Object>, ..... ]
+
+ Constructs the text prompts for interactive mode.
+ """
+ instances = [inst for inst in entry if inst.tag == 'Instance' or inst.tag == 'Package']
+ if instances == []:
+ # We have an old-style entry with no Instance elements. Convert it to the new style.
+ instance = Bcfg2.Client.XML.SubElement(entry, 'Package')
+ for attrib in list(entry.attrib.keys()):
+ instance.attrib[attrib] = entry.attrib[attrib]
+ if self.pkg_checks == 'true' and entry.get('pkg_checks', 'true') == 'true':
+ if 'any' in [entry.get('version'), pinned_version]:
+ version, release = 'any', 'any'
+ elif entry.get('version') == 'auto':
+ if pinned_version != None:
+ version, release = pinned_version.split('-')
+ else:
+ return False
+ else:
+ version, release = entry.get('version').split('-')
+ instance.set('version', version)
+ instance.set('release', release)
+ if entry.get('verify', 'true') == 'false':
+ instance.set('verify', 'false')
+ instances = [ instance ]
+
+ self.logger.debug("Verifying package instances for %s" % entry.get('name'))
+ package_fail = False
+ qtext_versions = ''
+
+ if entry.get('name') in self.installed:
+ # There is at least one instance installed.
+ if self.pkg_checks == 'true' and entry.get('pkg_checks', 'true') == 'true':
+ rpmTs = rpm.TransactionSet()
+ rpmHeader = None
+ for h in rpmTs.dbMatch(rpm.RPMTAG_NAME, entry.get('name')):
+ if rpmHeader is None or rpm.versionCompare(h, rpmHeader) > 0:
+ rpmHeader = h
+ rpmProvides = [ h['provides'] for h in \
+ rpmTs.dbMatch(rpm.RPMTAG_NAME, entry.get('name')) ]
+ rpmIntersection = set(rpmHeader['provides']) & \
+ set(self.installOnlyPkgs)
+ if len(rpmIntersection) > 0:
+ # Packages that should only be installed or removed.
+ # e.g. kernels.
+ self.logger.debug(" Install only package.")
+ for inst in instances:
+ self.instance_status.setdefault(inst, {})['installed'] = False
+ self.instance_status[inst]['version_fail'] = False
+ if inst.tag == 'Package' and len(self.installed[entry.get('name')]) > 1:
+ self.logger.error("WARNING: Multiple instances of package %s are installed." % \
+ (entry.get('name')))
+ for pkg in self.installed[entry.get('name')]:
+ if inst.get('version') == 'any' or self.pkg_vr_equal(inst, pkg) \
+ or self.inst_evra_equal(inst, pkg):
+ if inst.get('version') == 'any':
+ self.logger.error("got any version")
+ self.logger.debug(" %s" % self.str_evra(inst))
+ self.instance_status[inst]['installed'] = True
+
+ if self.pkg_verify == 'true' and \
+ inst.get('pkg_verify', 'true') == 'true':
+ flags = inst.get('verify_flags', '').split(',') + self.verify_flags
+ if pkg.get('gpgkeyid', '')[-8:] not in self.gpg_keyids and \
+ entry.get('name') != 'gpg-pubkey':
+ flags += ['nosignature', 'nodigest']
+ self.logger.debug('WARNING: Package %s %s requires GPG Public key with ID %s'\
+ % (pkg.get('name'), self.str_evra(pkg), \
+ pkg.get('gpgkeyid', '')))
+ self.logger.debug(' Disabling signature check.')
+
+ if self.setup.get('quick', False):
+ if rpmtools.prelink_exists:
+ flags += ['nomd5', 'nosize']
+ else:
+ flags += ['nomd5']
+ self.logger.debug(" verify_flags = %s" % flags)
+
+ if inst.get('verify', 'true') == 'false':
+ self.instance_status[inst]['verify'] = None
+ else:
+ vp_ts = rpmtools.rpmtransactionset()
+ self.instance_status[inst]['verify'] = \
+ rpmtools.rpm_verify( vp_ts, pkg, flags)
+ vp_ts.closeDB()
+ del vp_ts
+
+ if self.instance_status[inst]['installed'] == False:
+ self.logger.info(" Package %s %s not installed." % \
+ (entry.get('name'), self.str_evra(inst)))
+
+ qtext_versions = qtext_versions + 'I(%s) ' % self.str_evra(inst)
+ entry.set('current_exists', 'false')
+ else:
+ # Normal Packages that can be upgraded.
+ for inst in instances:
+ self.instance_status.setdefault(inst, {})['installed'] = False
+ self.instance_status[inst]['version_fail'] = False
+
+ # Only installed packages with the same architecture are
+ # relevant.
+ if inst.get('arch', None) == None:
+ arch_match = self.installed[entry.get('name')]
+ else:
+ arch_match = [pkg for pkg in self.installed[entry.get('name')] \
+ if pkg.get('arch', None) == inst.get('arch', None)]
+
+ if len(arch_match) > 1:
+ self.logger.error("Multiple instances of package %s installed with the same achitecture." % \
+ (entry.get('name')))
+ elif len(arch_match) == 1:
+ # There is only one installed like there should be.
+ # Check that it is the right version.
+ for pkg in arch_match:
+ if inst.get('version') == 'any' or self.pkg_vr_equal(inst, pkg) or \
+ self.inst_evra_equal(inst, pkg):
+ self.logger.debug(" %s" % self.str_evra(inst))
+ self.instance_status[inst]['installed'] = True
+
+ if self.pkg_verify == 'true' and \
+ inst.get('pkg_verify', 'true') == 'true':
+ flags = inst.get('verify_flags', '').split(',') + self.verify_flags
+ if pkg.get('gpgkeyid', '')[-8:] not in self.gpg_keyids and \
+ 'nosignature' not in flags:
+ flags += ['nosignature', 'nodigest']
+ self.logger.info('WARNING: Package %s %s requires GPG Public key with ID %s'\
+ % (pkg.get('name'), self.str_evra(pkg), \
+ pkg.get('gpgkeyid', '')))
+ self.logger.info(' Disabling signature check.')
+
+ if self.setup.get('quick', False):
+ if rpmtools.prelink_exists:
+ flags += ['nomd5', 'nosize']
+ else:
+ flags += ['nomd5']
+ self.logger.debug(" verify_flags = %s" % flags)
+
+ if inst.get('verify', 'true') == 'false':
+ self.instance_status[inst]['verify'] = None
+ else:
+ vp_ts = rpmtools.rpmtransactionset()
+ self.instance_status[inst]['verify'] = \
+ rpmtools.rpm_verify( vp_ts, pkg, flags )
+ vp_ts.closeDB()
+ del vp_ts
+
+ else:
+ # Wrong version installed.
+ self.instance_status[inst]['version_fail'] = True
+ self.logger.info(" Wrong version installed. Want %s, but have %s"\
+ % (self.str_evra(inst), self.str_evra(pkg)))
+
+ qtext_versions = qtext_versions + 'U(%s -> %s) ' % \
+ (self.str_evra(pkg), self.str_evra(inst))
+ elif len(arch_match) == 0:
+ # This instance is not installed.
+ self.instance_status[inst]['installed'] = False
+ self.logger.info(" %s is not installed." % self.str_evra(inst))
+ qtext_versions = qtext_versions + 'I(%s) ' % self.str_evra(inst)
+
+ # Check the rpm verify results.
+ for inst in instances:
+ instance_fail = False
+ # Dump the rpm verify results.
+ #****Write something to format this nicely.*****
+ if self.setup['debug'] and self.instance_status[inst].get('verify', None):
+ self.logger.debug(self.instance_status[inst]['verify'])
+
+ self.instance_status[inst]['verify_fail'] = False
+ if self.instance_status[inst].get('verify', None):
+ if len(self.instance_status[inst].get('verify')) > 1:
+ self.logger.info("WARNING: Verification of more than one package instance.")
+
+ for result in self.instance_status[inst]['verify']:
+
+ # Check header results
+ if result.get('hdr', None):
+ instance_fail = True
+ self.instance_status[inst]['verify_fail'] = True
+
+ # Check dependency results
+ if result.get('deps', None):
+ instance_fail = True
+ self.instance_status[inst]['verify_fail'] = True
+
+ # Check the rpm verify file results against the modlist
+ # and entry and per Instance Ignores.
+ ignores = [ig.get('name') for ig in entry.findall('Ignore')] + \
+ [ig.get('name') for ig in inst.findall('Ignore')] + \
+ self.ignores
+ for file_result in result.get('files', []):
+ if file_result[-1] not in modlist + ignores:
+ instance_fail = True
+ self.instance_status[inst]['verify_fail'] = True
+ else:
+ self.logger.debug(" Modlist/Ignore match: %s" % \
+ (file_result[-1]))
+
+ if instance_fail == True:
+ self.logger.debug("*** Instance %s failed RPM verification ***" % \
+ self.str_evra(inst))
+ qtext_versions = qtext_versions + 'R(%s) ' % self.str_evra(inst)
+ self.modlists[entry] = modlist
+
+ # Attach status structure for return to server for reporting.
+ inst.set('verify_status', str(self.instance_status[inst]))
+
+ if self.instance_status[inst]['installed'] == False or \
+ self.instance_status[inst].get('version_fail', False)== True or \
+ self.instance_status[inst].get('verify_fail', False) == True:
+ package_fail = True
+ self.instance_status[inst]['pkg'] = entry
+ self.modlists[entry] = modlist
+
+ # Find Installed Instances that are not in the Config.
+ extra_installed = self.FindExtraInstances(entry, self.installed[entry.get('name')])
+ if extra_installed != None:
+ package_fail = True
+ self.extra_instances.append(extra_installed)
+ for inst in extra_installed.findall('Instance'):
+ qtext_versions = qtext_versions + 'D(%s) ' % self.str_evra(inst)
+ self.logger.debug("Found Extra Instances %s" % qtext_versions)
+
+ if package_fail == True:
+ self.logger.info(" Package %s failed verification." % \
+ (entry.get('name')))
+ qtext = 'Install/Upgrade/delete Package %s instance(s) - %s (y/N) ' % \
+ (entry.get('name'), qtext_versions)
+ entry.set('qtext', qtext)
+
+ bcfg2_versions = ''
+ for bcfg2_inst in [inst for inst in instances if inst.tag == 'Instance']:
+ bcfg2_versions = bcfg2_versions + '(%s) ' % self.str_evra(bcfg2_inst)
+ if bcfg2_versions != '':
+ entry.set('version', bcfg2_versions)
+ installed_versions = ''
+
+ for installed_inst in self.installed[entry.get('name')]:
+ installed_versions = installed_versions + '(%s) ' % \
+ self.str_evra(installed_inst)
+
+ entry.set('current_version', installed_versions)
+ return False
+
+ else:
+ # There are no Instances of this package installed.
+ self.logger.debug("Package %s has no instances installed" % (entry.get('name')))
+ entry.set('current_exists', 'false')
+ bcfg2_versions = ''
+ for inst in instances:
+ qtext_versions = qtext_versions + 'I(%s) ' % self.str_evra(inst)
+ self.instance_status.setdefault(inst, {})['installed'] = False
+ self.modlists[entry] = modlist
+ self.instance_status[inst]['pkg'] = entry
+ if inst.tag == 'Instance':
+ bcfg2_versions = bcfg2_versions + '(%s) ' % self.str_evra(inst)
+ if bcfg2_versions != '':
+ entry.set('version', bcfg2_versions)
+ entry.set('qtext', "Install Package %s Instance(s) %s? (y/N) " % \
+ (entry.get('name'), qtext_versions))
+
+ return False
+ return True
+
+ def RemovePackages(self, packages):
+ """
+ Remove specified entries.
+
+ packages is a list of Package Entries with Instances generated
+ by FindExtraPackages().
+
+ """
+ self.logger.debug('Running RPMng.RemovePackages()')
+
+ pkgspec_list = []
+ for pkg in packages:
+ for inst in pkg:
+ if pkg.get('name') != 'gpg-pubkey':
+ pkgspec = { 'name':pkg.get('name'),
+ 'epoch':inst.get('epoch', None),
+ 'version':inst.get('version'),
+ 'release':inst.get('release'),
+ 'arch':inst.get('arch') }
+ pkgspec_list.append(pkgspec)
+ else:
+ pkgspec = { 'name':pkg.get('name'),
+ 'version':inst.get('version'),
+ 'release':inst.get('release')}
+ self.logger.info("WARNING: gpg-pubkey package not in configuration %s %s"\
+ % (pkgspec.get('name'), self.str_evra(pkgspec)))
+ self.logger.info(" This package will be deleted in a future version of the RPMng driver.")
+ #pkgspec_list.append(pkg_spec)
+
+ erase_results = rpmtools.rpm_erase(pkgspec_list, self.erase_flags)
+ if erase_results == []:
+ self.modified += packages
+ for pkg in pkgspec_list:
+ self.logger.info("Deleted %s %s" % (pkg.get('name'), self.str_evra(pkg)))
+ else:
+ self.logger.info("Bulk erase failed with errors:")
+ self.logger.debug("Erase results = %s" % erase_results)
+ self.logger.info("Attempting individual erase for each package.")
+ pkgspec_list = []
+ for pkg in packages:
+ pkg_modified = False
+ for inst in pkg:
+ if pkg.get('name') != 'gpg-pubkey':
+ pkgspec = { 'name':pkg.get('name'),
+ 'epoch':inst.get('epoch', None),
+ 'version':inst.get('version'),
+ 'release':inst.get('release'),
+ 'arch':inst.get('arch') }
+ pkgspec_list.append(pkgspec)
+ else:
+ pkgspec = { 'name':pkg.get('name'),
+ 'version':inst.get('version'),
+ 'release':inst.get('release')}
+ self.logger.info("WARNING: gpg-pubkey package not in configuration %s %s"\
+ % (pkgspec.get('name'), self.str_evra(pkgspec)))
+ self.logger.info(" This package will be deleted in a future version of the RPMng driver.")
+ continue # Don't delete the gpg-pubkey packages for now.
+ erase_results = rpmtools.rpm_erase([pkgspec], self.erase_flags)
+ if erase_results == []:
+ pkg_modified = True
+ self.logger.info("Deleted %s %s" % \
+ (pkgspec.get('name'), self.str_evra(pkgspec)))
+ else:
+ self.logger.error("unable to delete %s %s" % \
+ (pkgspec.get('name'), self.str_evra(pkgspec)))
+ self.logger.debug("Failure = %s" % erase_results)
+ if pkg_modified == True:
+ self.modified.append(pkg)
+
+ self.RefreshPackages()
+ self.extra = self.FindExtraPackages()
+
+ def FixInstance(self, instance, inst_status):
+ """"
+ Control if a reinstall of a package happens or not based on the
+ results from RPMng.VerifyPackage().
+
+ Return True to reinstall, False to not reinstall.
+
+ """
+ fix = False
+
+ if inst_status.get('installed', False) == False:
+ if instance.get('installed_action', 'install') == "install" and \
+ self.installed_action == "install":
+ fix = True
+ else:
+ self.logger.debug('Installed Action for %s %s is to not install' % \
+ (inst_status.get('pkg').get('name'),
+ self.str_evra(instance)))
+
+ elif inst_status.get('version_fail', False) == True:
+ if instance.get('version_fail_action', 'upgrade') == "upgrade" and \
+ self.version_fail_action == "upgrade":
+ fix = True
+ else:
+ self.logger.debug('Version Fail Action for %s %s is to not upgrade' % \
+ (inst_status.get('pkg').get('name'),
+ self.str_evra(instance)))
+
+ elif inst_status.get('verify_fail', False) == True and self.name == "RPMng":
+ # yum can't reinstall packages so only do this for rpm.
+ if instance.get('verify_fail_action', 'reinstall') == "reinstall" and \
+ self.verify_fail_action == "reinstall":
+ for inst in inst_status.get('verify'):
+ # This needs to be a for loop rather than a straight get()
+ # because the underlying routines handle multiple packages
+ # and return a list of results.
+ self.logger.debug('reinstall_check: %s %s:%s-%s.%s' % inst.get('nevra'))
+
+ if inst.get("hdr", False):
+ fix = True
+
+ elif inst.get('files', False):
+ # Parse rpm verify file results
+ for file_result in inst.get('files', []):
+ self.logger.debug('reinstall_check: file: %s' % file_result)
+ if file_result[-2] != 'c':
+ fix = True
+ break
+
+ # Shouldn't really need this, but included for clarity.
+ elif inst.get("deps", False):
+ fix = False
+ else:
+ self.logger.debug('Verify Fail Action for %s %s is to not reinstall' % \
+ (inst_status.get('pkg').get('name'),
+ self.str_evra(instance)))
+
+ return fix
+
+ def Install(self, packages, states):
+ """
+ Try to fix everything that RPMng.VerifyPackage() found wrong for
+ each Package Entry. This can result in individual RPMs being
+ installed (for the first time), reinstalled, deleted, downgraded
+ or upgraded.
+
+ packages is a list of Package Elements that has
+ states[<Package Element>] == False
+
+ The following effects occur:
+ - states{} is conditionally updated for each package.
+ - self.installed{} is rebuilt, possibly multiple times.
+ - self.instance_status{} is conditionally updated for each instance
+ of a package.
+ - Each package will be added to self.modified[] if its states{}
+ entry is set to True.
+
+ """
+ self.logger.info('Running RPMng.Install()')
+
+ install_only_pkgs = []
+ gpg_keys = []
+ upgrade_pkgs = []
+
+ # Remove extra instances.
+ # Can not reverify because we don't have a package entry.
+ if len(self.extra_instances) > 0:
+ if (self.setup.get('remove') == 'all' or \
+ self.setup.get('remove') == 'packages') and\
+ not self.setup.get('dryrun'):
+ self.RemovePackages(self.extra_instances)
+ else:
+ self.logger.info("The following extra package instances will be removed by the '-r' option:")
+ for pkg in self.extra_instances:
+ for inst in pkg:
+ self.logger.info(" %s %s" % (pkg.get('name'), self.str_evra(inst)))
+
+ # Figure out which instances of the packages actually need something
+ # doing to them and place in the appropriate work 'queue'.
+ for pkg in packages:
+ for inst in [instn for instn in pkg if instn.tag \
+ in ['Instance', 'Package']]:
+ if self.FixInstance(inst, self.instance_status[inst]):
+ if pkg.get('name') == 'gpg-pubkey':
+ gpg_keys.append(inst)
+ elif pkg.get('name') in self.installOnlyPkgs:
+ install_only_pkgs.append(inst)
+ else:
+ upgrade_pkgs.append(inst)
+
+ # Fix installOnlyPackages
+ if len(install_only_pkgs) > 0:
+ self.logger.info("Attempting to install 'install only packages'")
+ install_args = " ".join([os.path.join(self.instance_status[inst].get('pkg').get('uri'), \
+ inst.get('simplefile')) \
+ for inst in install_only_pkgs])
+ self.logger.debug("rpm --install --quiet --oldpackage %s" % install_args)
+ cmdrc, output = self.cmd.run("rpm --install --quiet --oldpackage --replacepkgs %s" % \
+ install_args)
+ if cmdrc == 0:
+ # The rpm command succeeded. All packages installed.
+ self.logger.info("Single Pass for InstallOnlyPkgs Succeded")
+ self.RefreshPackages()
+
+ else:
+ # The rpm command failed. No packages installed.
+ # Try installing instances individually.
+ self.logger.error("Single Pass for InstallOnlyPackages Failed")
+ installed_instances = []
+ for inst in install_only_pkgs:
+ install_args = os.path.join(self.instance_status[inst].get('pkg').get('uri'), \
+ inst.get('simplefile'))
+ self.logger.debug("rpm --install --quiet --oldpackage %s" % install_args)
+ cmdrc, output = self.cmd.run("rpm --install --quiet --oldpackage --replacepkgs %s" % \
+ install_args)
+ if cmdrc == 0:
+ installed_instances.append(inst)
+ else:
+ self.logger.debug("InstallOnlyPackage %s %s would not install." % \
+ (self.instance_status[inst].get('pkg').get('name'), \
+ self.str_evra(inst)))
+
+ install_pkg_set = set([self.instance_status[inst].get('pkg') \
+ for inst in install_only_pkgs])
+ self.RefreshPackages()
+
+ # Install GPG keys.
+ if len(gpg_keys) > 0:
+ for inst in gpg_keys:
+ self.logger.info("Installing GPG keys.")
+ key_arg = os.path.join(self.instance_status[inst].get('pkg').get('uri'), \
+ inst.get('simplefile'))
+ cmdrc, output = self.cmd.run("rpm --import %s" % key_arg)
+ if cmdrc != 0:
+ self.logger.debug("Unable to install %s-%s" % \
+ (self.instance_status[inst].get('pkg').get('name'), \
+ self.str_evra(inst)))
+ else:
+ self.logger.debug("Installed %s-%s-%s" % \
+ (self.instance_status[inst].get('pkg').get('name'), \
+ inst.get('version'), inst.get('release')))
+ self.RefreshPackages()
+ self.gpg_keyids = self.getinstalledgpg()
+ pkg = self.instance_status[gpg_keys[0]].get('pkg')
+ states[pkg] = self.VerifyPackage(pkg, [])
+
+ # Fix upgradeable packages.
+ if len(upgrade_pkgs) > 0:
+ self.logger.info("Attempting to upgrade packages")
+ upgrade_args = " ".join([os.path.join(self.instance_status[inst].get('pkg').get('uri'), \
+ inst.get('simplefile')) \
+ for inst in upgrade_pkgs])
+ cmdrc, output = self.cmd.run("rpm --upgrade --quiet --oldpackage --replacepkgs %s" % \
+ upgrade_args)
+ if cmdrc == 0:
+ # The rpm command succeeded. All packages upgraded.
+ self.logger.info("Single Pass for Upgraded Packages Succeded")
+ upgrade_pkg_set = set([self.instance_status[inst].get('pkg') \
+ for inst in upgrade_pkgs])
+ self.RefreshPackages()
+ else:
+ # The rpm command failed. No packages upgraded.
+ # Try upgrading instances individually.
+ self.logger.error("Single Pass for Upgrading Packages Failed")
+ upgraded_instances = []
+ for inst in upgrade_pkgs:
+ upgrade_args = os.path.join(self.instance_status[inst].get('pkg').get('uri'), \
+ inst.get('simplefile'))
+ #self.logger.debug("rpm --upgrade --quiet --oldpackage --replacepkgs %s" % \
+ # upgrade_args)
+ cmdrc, output = self.cmd.run("rpm --upgrade --quiet --oldpackage --replacepkgs %s" % upgrade_args)
+ if cmdrc == 0:
+ upgraded_instances.append(inst)
+ else:
+ self.logger.debug("Package %s %s would not upgrade." % \
+ (self.instance_status[inst].get('pkg').get('name'), \
+ self.str_evra(inst)))
+
+ upgrade_pkg_set = set([self.instance_status[inst].get('pkg') \
+ for inst in upgrade_pkgs])
+ self.RefreshPackages()
+
+ if not self.setup['kevlar']:
+ for pkg_entry in packages:
+ self.logger.debug("Reverifying Failed Package %s" % (pkg_entry.get('name')))
+ states[pkg_entry] = self.VerifyPackage(pkg_entry, \
+ self.modlists.get(pkg_entry, []))
+
+ for entry in [ent for ent in packages if states[ent]]:
+ self.modified.append(entry)
+
+ def canInstall(self, entry):
+ """Test if entry has enough information to be installed."""
+ if not self.handlesEntry(entry):
+ return False
+
+ if 'failure' in entry.attrib:
+ self.logger.error("Cannot install entry %s:%s with bind failure" % \
+ (entry.tag, entry.get('name')))
+ return False
+
+
+ instances = entry.findall('Instance')
+
+ # If the entry wasn't verifiable, then we really don't want to try and fix something
+ # that we don't know is broken.
+ if not self.canVerify(entry):
+ self.logger.debug("WARNING: Package %s was not verifiable, not passing to Install()" \
+ % entry.get('name'))
+ return False
+
+ if not instances:
+ # Old non Instance format, unmodified.
+ if entry.get('name') == 'gpg-pubkey':
+ # gpg-pubkey packages aren't really packages, so we have to do
+ # something a little different.
+ # Check that the Package Level has what we need for verification.
+ if [attr for attr in self.__gpg_ireq__[entry.tag] if attr not in entry.attrib]:
+ self.logger.error("Incomplete information for entry %s:%s; cannot install" \
+ % (entry.tag, entry.get('name')))
+ return False
+ else:
+ if [attr for attr in self.__ireq__[entry.tag] if attr not in entry.attrib]:
+ self.logger.error("Incomplete information for entry %s:%s; cannot install" \
+ % (entry.tag, entry.get('name')))
+ return False
+ else:
+ if entry.get('name') == 'gpg-pubkey':
+ # gpg-pubkey packages aren't really packages, so we have to do
+ # something a little different.
+ # Check that the Package Level has what we need for verification.
+ if [attr for attr in self.__new_gpg_ireq__[entry.tag] if attr not in entry.attrib]:
+ self.logger.error("Incomplete information for entry %s:%s; cannot install" \
+ % (entry.tag, entry.get('name')))
+ return False
+ # Check that the Instance Level has what we need for verification.
+ for inst in instances:
+ if [attr for attr in self.__new_gpg_ireq__[inst.tag] \
+ if attr not in inst.attrib]:
+ self.logger.error("Incomplete information for entry %s:%s; cannot install"\
+ % (inst.tag, entry.get('name')))
+ return False
+ else:
+ # New format with Instances.
+ # Check that the Package Level has what we need for verification.
+ if [attr for attr in self.__new_ireq__[entry.tag] if attr not in entry.attrib]:
+ self.logger.error("Incomplete information for entry %s:%s; cannot install" \
+ % (entry.tag, entry.get('name')))
+ self.logger.error(" Required attributes that may not be present are %s" \
+ % (self.__new_ireq__[entry.tag]))
+ return False
+ # Check that the Instance Level has what we need for verification.
+ for inst in instances:
+ if inst.tag == 'Instance':
+ if [attr for attr in self.__new_ireq__[inst.tag] \
+ if attr not in inst.attrib]:
+ self.logger.error("Incomplete information for %s of package %s; cannot install" \
+ % (inst.tag, entry.get('name')))
+ self.logger.error(" Required attributes that may not be present are %s" \
+ % (self.__new_ireq__[inst.tag]))
+ return False
+ return True
+
+ def canVerify(self, entry):
+ """
+ Test if entry has enough information to be verified.
+
+ Three types of entries are checked.
+ Old style Package
+ New style Package with Instances
+ gpg-pubkey packages
+
+ Also the old style entries get modified after the first
+ VerifyPackage() run, so there needs to be a second test.
+
+ """
+ if not self.handlesEntry(entry):
+ return False
+
+ if 'failure' in entry.attrib:
+ self.logger.error("Entry %s:%s reports bind failure: %s" % \
+ (entry.tag, entry.get('name'), entry.get('failure')))
+ return False
+
+ # We don't want to do any checks so we don't care what the entry has in it.
+ if self.pkg_checks == 'false' or \
+ entry.get('pkg_checks', 'true').lower() == 'false':
+ return True
+
+ instances = entry.findall('Instance')
+
+ if not instances:
+ # Old non Instance format, unmodified.
+ if entry.get('name') == 'gpg-pubkey':
+ # gpg-pubkey packages aren't really packages, so we have to do
+ # something a little different.
+ # Check that the Package Level has what we need for verification.
+ if [attr for attr in self.__gpg_req__[entry.tag] if attr not in entry.attrib]:
+ self.logger.error("Incomplete information for entry %s:%s; cannot verify" \
+ % (entry.tag, entry.get('name')))
+ return False
+ elif entry.tag == 'Path' and entry.get('type') == 'ignore':
+ # ignored Paths are only relevant during failed package
+ # verification
+ pass
+ else:
+ if [attr for attr in self.__req__[entry.tag] if attr not in entry.attrib]:
+ self.logger.error("Incomplete information for entry %s:%s; cannot verify" \
+ % (entry.tag, entry.get('name')))
+ return False
+ else:
+ if entry.get('name') == 'gpg-pubkey':
+ # gpg-pubkey packages aren't really packages, so we have to do
+ # something a little different.
+ # Check that the Package Level has what we need for verification.
+ if [attr for attr in self.__new_gpg_req__[entry.tag] if attr not in entry.attrib]:
+ self.logger.error("Incomplete information for entry %s:%s; cannot verify" \
+ % (entry.tag, entry.get('name')))
+ return False
+ # Check that the Instance Level has what we need for verification.
+ for inst in instances:
+ if [attr for attr in self.__new_gpg_req__[inst.tag] \
+ if attr not in inst.attrib]:
+ self.logger.error("Incomplete information for entry %s:%s; cannot verify" \
+ % (inst.tag, inst.get('name')))
+ return False
+ else:
+ # New format with Instances, or old style modified.
+ # Check that the Package Level has what we need for verification.
+ if [attr for attr in self.__new_req__[entry.tag] if attr not in entry.attrib]:
+ self.logger.error("Incomplete information for entry %s:%s; cannot verify" \
+ % (entry.tag, entry.get('name')))
+ return False
+ # Check that the Instance Level has what we need for verification.
+ for inst in instances:
+ if inst.tag == 'Instance':
+ if [attr for attr in self.__new_req__[inst.tag] \
+ if attr not in inst.attrib]:
+ self.logger.error("Incomplete information for entry %s:%s; cannot verify" \
+ % (inst.tag, inst.get('name')))
+ return False
+ return True
+
+ def FindExtraPackages(self):
+ """Find extra packages."""
+ packages = [entry.get('name') for entry in self.getSupportedEntries()]
+ extras = []
+
+ for (name, instances) in list(self.installed.items()):
+ if name not in packages:
+ extra_entry = Bcfg2.Client.XML.Element('Package', name=name, type=self.pkgtype)
+ for installed_inst in instances:
+ if self.setup['extra']:
+ self.logger.info("Extra Package %s %s." % \
+ (name, self.str_evra(installed_inst)))
+ tmp_entry = Bcfg2.Client.XML.SubElement(extra_entry, 'Instance', \
+ version = installed_inst.get('version'), \
+ release = installed_inst.get('release'))
+                if installed_inst.get('epoch') is not None:
+                    tmp_entry.set('epoch', str(installed_inst.get('epoch')))
+                if installed_inst.get('arch') is not None:
+                    tmp_entry.set('arch', installed_inst.get('arch'))
+ extras.append(extra_entry)
+ return extras
+
+
+ def FindExtraInstances(self, pkg_entry, installed_entry):
+ """
+ Check for installed instances that are not in the config.
+ Return a Package Entry with Instances to remove, or None if there
+ are no Instances to remove.
+
+ """
+ name = pkg_entry.get('name')
+ extra_entry = Bcfg2.Client.XML.Element('Package', name=name, type=self.pkgtype)
+ instances = [inst for inst in pkg_entry if inst.tag == 'Instance' or inst.tag == 'Package']
+ if name in self.installOnlyPkgs:
+ for installed_inst in installed_entry:
+ not_found = True
+ for inst in instances:
+ if self.pkg_vr_equal(inst, installed_inst) or \
+ self.inst_evra_equal(inst, installed_inst):
+ not_found = False
+ break
+                if not_found:
+ # Extra package.
+ self.logger.info("Extra InstallOnlyPackage %s %s." % \
+ (name, self.str_evra(installed_inst)))
+ tmp_entry = Bcfg2.Client.XML.SubElement(extra_entry, 'Instance', \
+ version = installed_inst.get('version'), \
+ release = installed_inst.get('release'))
+                    if installed_inst.get('epoch') is not None:
+                        tmp_entry.set('epoch', str(installed_inst.get('epoch')))
+                    if installed_inst.get('arch') is not None:
+                        tmp_entry.set('arch', installed_inst.get('arch'))
+ else:
+ # Normal package, only check arch.
+ for installed_inst in installed_entry:
+ not_found = True
+ for inst in instances:
+ if installed_inst.get('arch', None) == inst.get('arch', None) or\
+ inst.tag == 'Package':
+ not_found = False
+ break
+ if not_found:
+ self.logger.info("Extra Normal Package Instance %s %s" % \
+ (name, self.str_evra(installed_inst)))
+ tmp_entry = Bcfg2.Client.XML.SubElement(extra_entry, 'Instance', \
+ version = installed_inst.get('version'), \
+ release = installed_inst.get('release'))
+                    if installed_inst.get('epoch') is not None:
+                        tmp_entry.set('epoch', str(installed_inst.get('epoch')))
+                    if installed_inst.get('arch') is not None:
+                        tmp_entry.set('arch', installed_inst.get('arch'))
+
+ if len(extra_entry) == 0:
+ extra_entry = None
+
+ return extra_entry
+
+ def str_evra(self, instance):
+ """Convert evra dict entries to a string."""
+ if instance.get('epoch', '*') in ['*', None]:
+ return '%s-%s.%s' % (instance.get('version', '*'),
+ instance.get('release', '*'),
+ instance.get('arch', '*'))
+ else:
+ return '%s:%s-%s.%s' % (instance.get('epoch', '*'),
+ instance.get('version', '*'),
+ instance.get('release', '*'),
+ instance.get('arch', '*'))
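+    # Illustrative examples of the format above (made-up values): an
+    # instance with epoch '2', version '1.0', release '3', arch 'x86_64'
+    # renders as '2:1.0-3.x86_64'; the same instance with no epoch
+    # renders as '1.0-3.x86_64'.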
+
+    def pkg_vr_equal(self, config_entry, installed_entry):
+        '''
+        Compare an old-style config entry to an installed entry,
+        ignoring the epoch and arch.
+        '''
+        return (config_entry.tag == 'Package' and
+                config_entry.get('version') == installed_entry.get('version') and
+                config_entry.get('release') == installed_entry.get('release'))
+
+ def inst_evra_equal(self, config_entry, installed_entry):
+ """Compare new style instance to installed entry."""
+
+        if config_entry.get('epoch') is not None:
+            epoch = int(config_entry.get('epoch'))
+        else:
+            epoch = None
+
+        return (config_entry.tag == 'Instance' and
+                (epoch == installed_entry.get('epoch', 0) or
+                 (epoch == 0 and installed_entry.get('epoch', 0) is None) or
+                 (epoch is None and installed_entry.get('epoch', 0) == 0)) and
+                config_entry.get('version') == installed_entry.get('version') and
+                config_entry.get('release') == installed_entry.get('release') and
+                config_entry.get('arch', None) == installed_entry.get('arch', None))
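+        # Note on the epoch test above: a config epoch of 0 is treated as
+        # equal to an installed epoch of None and vice versa, matching
+        # rpm's convention that a missing epoch means 0.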
+
+ def getinstalledgpg(self):
+ """
+ Create a list of installed GPG key IDs.
+
+        The gpg-pubkey package version is the least significant 4 bytes
+        (big-endian) of the key ID, which is good enough for our purposes.
+
+ """
+ init_ts = rpmtools.rpmtransactionset()
+ init_ts.setVSFlags(rpm._RPMVSF_NODIGESTS|rpm._RPMVSF_NOSIGNATURES)
+        gpg_hdrs = rpmtools.getheadersbykeyword(init_ts, name='gpg-pubkey')
+        keyids = [header[rpm.RPMTAG_VERSION] for header in gpg_hdrs]
+ keyids.append('None')
+ init_ts.closeDB()
+ del init_ts
+ return keyids
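+        # Illustrative example (made-up key): an installed
+        # gpg-pubkey-37017186-45761324 package contributes the key ID
+        # string '37017186' to this list.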
+
+ def VerifyPath(self, entry, _):
+ """
+ We don't do anything here since all
+ Paths are processed in __init__
+ """
+ return True
diff --git a/build/lib/Bcfg2/Client/Tools/RcUpdate.py b/build/lib/Bcfg2/Client/Tools/RcUpdate.py
new file mode 100644
index 000000000..159172b78
--- /dev/null
+++ b/build/lib/Bcfg2/Client/Tools/RcUpdate.py
@@ -0,0 +1,93 @@
+"""This is rc-update support."""
+__revision__ = '$Revision$'
+
+import os
+import Bcfg2.Client.Tools
+import Bcfg2.Client.XML
+
+
+class RcUpdate(Bcfg2.Client.Tools.SvcTool):
+ """RcUpdate support for Bcfg2."""
+ name = 'RcUpdate'
+ __execs__ = ['/sbin/rc-update', '/bin/rc-status']
+ __handles__ = [('Service', 'rc-update')]
+ __req__ = {'Service': ['name', 'status']}
+
+ def VerifyService(self, entry, _):
+ """
+ Verify Service status for entry.
+ Assumes we run in the "default" runlevel.
+
+ """
+ # check if service is enabled
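+        # Note: grep matches substrings, so this can false-positive when
+        # another enabled service's name contains this one; a word match
+        # (e.g. "grep -w %s") would be stricter.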
+ cmd = '/sbin/rc-update show default | grep %s'
+ rc = self.cmd.run(cmd % entry.get('name'))[0]
+ is_enabled = (rc == 0)
+
+ if entry.get('mode', 'default') == 'supervised':
+ # check if init script exists
+ try:
+ os.stat('/etc/init.d/%s' % entry.get('name'))
+ except OSError:
+ self.logger.debug('Init script for service %s does not exist' %
+ entry.get('name'))
+ return False
+
+            # check if service is running
+            cmd = '/etc/init.d/%s status | grep started'
+            rc = self.cmd.run(cmd % entry.get('name'))[0]
+            is_running = (rc == 0)
+ else:
+ # we don't care
+ is_running = is_enabled
+
+ if entry.get('status') == 'on' and not (is_enabled and is_running):
+ entry.set('current_status', 'off')
+ return False
+
+ elif entry.get('status') == 'off' and (is_enabled or is_running):
+ entry.set('current_status', 'on')
+ return False
+
+ return True
+
+ def InstallService(self, entry):
+ """
+        Install Service entry.
+        In supervised mode we also make sure it is (not) running.
+
+ """
+ self.logger.info('Installing Service %s' % entry.get('name'))
+ if entry.get('status') == 'on':
+ # make sure it's running if in supervised mode
+ if entry.get('mode', 'default') == 'supervised' \
+ and entry.get('current_status') == 'off':
+ self.start_service(entry)
+ # make sure it's enabled
+ cmd = '/sbin/rc-update add %s default'
+ rc = self.cmd.run(cmd % entry.get('name'))[0]
+ return (rc == 0)
+
+ elif entry.get('status') == 'off':
+ # make sure it's not running if in supervised mode
+ if entry.get('mode', 'default') == 'supervised' \
+ and entry.get('current_status') == 'on':
+ self.stop_service(entry)
+ # make sure it's disabled
+ cmd = '/sbin/rc-update del %s default'
+ rc = self.cmd.run(cmd % entry.get('name'))[0]
+ return (rc == 0)
+
+ return False
+
+ def FindExtra(self):
+ """Locate extra rc-update services."""
+ cmd = '/bin/rc-status -s | grep started'
+ allsrv = [line.split()[0] for line in self.cmd.run(cmd)[1]]
+ self.logger.debug('Found active services:')
+ self.logger.debug(allsrv)
+ specified = [srv.get('name') for srv in self.getSupportedEntries()]
+ return [Bcfg2.Client.XML.Element('Service',
+ type='rc-update',
+ name=name) \
+ for name in allsrv if name not in specified]
diff --git a/build/lib/Bcfg2/Client/Tools/SMF.py b/build/lib/Bcfg2/Client/Tools/SMF.py
new file mode 100644
index 000000000..f0bc6bd05
--- /dev/null
+++ b/build/lib/Bcfg2/Client/Tools/SMF.py
@@ -0,0 +1,132 @@
+"""SMF support for Bcfg2"""
+__revision__ = '$Revision$'
+
+import glob
+import os
+
+import Bcfg2.Client.Tools
+
+
+class SMF(Bcfg2.Client.Tools.SvcTool):
+ """Support for Solaris SMF Services."""
+ __handles__ = [('Service', 'smf')]
+ __execs__ = ['/usr/sbin/svcadm', '/usr/bin/svcs']
+ name = 'SMF'
+ __req__ = {'Service': ['name', 'status']}
+ __ireq__ = {'Service': ['name', 'status', 'FMRI']}
+
+ def get_svc_command(self, service, action):
+ if service.get('type') == 'lrc':
+ return Bcfg2.Client.Tools.SvcTool.get_svc_command(self,
+ service, action)
+ if action == 'stop':
+ return "/usr/sbin/svcadm disable %s" % (service.get('FMRI'))
+ elif action == 'restart':
+ return "/usr/sbin/svcadm restart %s" % (service.get('FMRI'))
+ elif action == 'start':
+ return "/usr/sbin/svcadm enable %s" % (service.get('FMRI'))
+
+ def GetFMRI(self, entry):
+ """Perform FMRI resolution for service."""
+        if 'FMRI' not in entry.attrib:
+ name = self.cmd.run("/usr/bin/svcs -H -o FMRI %s 2>/dev/null" % \
+ entry.get('name'))[1]
+ if name:
+ entry.set('FMRI', name[0])
+ return True
+ else:
+ self.logger.info('Failed to locate FMRI for service %s' % \
+ entry.get('name'))
+ return False
+ return True
+
+ def VerifyService(self, entry, _):
+ """Verify SMF Service entry."""
+ if not self.GetFMRI(entry):
+ self.logger.error("smf service %s doesn't have FMRI set" % \
+ entry.get('name'))
+ return False
+ if entry.get('FMRI').startswith('lrc'):
+ filename = entry.get('FMRI').split('/')[-1]
+ # this is a legacy service
+ gname = "/etc/rc*.d/%s" % filename
+ files = glob.glob(gname.replace('_', '.'))
+ if files:
+ self.logger.debug("Matched %s with %s" % \
+ (entry.get("FMRI"), ":".join(files)))
+ return entry.get('status') == 'on'
+ else:
+ self.logger.debug("No service matching %s" % \
+ (entry.get("FMRI")))
+ return entry.get('status') == 'off'
+ try:
+ srvdata = self.cmd.run("/usr/bin/svcs -H -o STA %s" % \
+ entry.get('FMRI'))[1][0].split()
+ except IndexError:
+ # Occurs when no lines are returned (service not installed)
+ return False
+
+ if entry.get('status') == 'on':
+ return srvdata[0] == 'ON'
+ else:
+ return srvdata[0] in ['OFF', 'UN', 'MNT', 'DIS', 'DGD']
+
+ def InstallService(self, entry):
+ """Install SMF Service entry."""
+ self.logger.info("Installing Service %s" % (entry.get('name')))
+ if entry.get('status') == 'off':
+ if entry.get("FMRI").startswith('lrc'):
+ try:
+ loc = entry.get("FMRI")[4:].replace('_', '.')
+ self.logger.debug("Renaming file %s to %s" % \
+ (loc, loc.replace('/S', '/DISABLED.S')))
+ os.rename(loc, loc.replace('/S', '/DISABLED.S'))
+ return True
+ except OSError:
+ self.logger.error("Failed to rename init script %s" % \
+ (loc))
+ return False
+ else:
+ cmdrc = self.cmd.run("/usr/sbin/svcadm disable %s" % \
+ (entry.get('FMRI')))[0]
+ else:
+ if entry.get('FMRI').startswith('lrc'):
+ loc = entry.get("FMRI")[4:].replace('_', '.')
+ try:
+                    os.stat(loc.replace('/S', '/DISABLED.S'))
+ self.logger.debug("Renaming file %s to %s" % \
+ (loc.replace('/S', '/DISABLED.S'), loc))
+ os.rename(loc.replace('/S', '/DISABLED.S'), loc)
+ cmdrc = 0
+ except OSError:
+ self.logger.debug("Failed to rename %s to %s" % \
+ (loc.replace('/S', '/DISABLED.S'), loc))
+ cmdrc = 1
+ else:
+                srvdata = self.cmd.run("/usr/bin/svcs -H -o STA %s" %
+                                       entry.get('FMRI'))[1][0].split()
+ if srvdata[0] == 'MNT':
+ cmdarg = 'clear'
+ else:
+ cmdarg = 'enable'
+ cmdrc = self.cmd.run("/usr/sbin/svcadm %s -r %s" % \
+ (cmdarg, entry.get('FMRI')))[0]
+ return cmdrc == 0
+
+ def Remove(self, svcs):
+ """Remove Extra SMF entries."""
+ # Extra service entry removal is nonsensical
+ # Extra service entries should be reflected in config, even if disabled
+ pass
+
+ def FindExtra(self):
+ """Find Extra SMF Services."""
+        allsrv = [name for name, state in
+                  [srvc.split() for srvc in
+                   self.cmd.run("/usr/bin/svcs -a -H -o FMRI,STATE")[1]]
+                  if state != 'disabled']
+
+        for svc in self.getSupportedEntries():
+            if svc.get("FMRI") in allsrv:
+                allsrv.remove(svc.get('FMRI'))
+ return [Bcfg2.Client.XML.Element("Service", type='smf', name=name) \
+ for name in allsrv]
diff --git a/build/lib/Bcfg2/Client/Tools/SYSV.py b/build/lib/Bcfg2/Client/Tools/SYSV.py
new file mode 100644
index 000000000..b5e1f1c59
--- /dev/null
+++ b/build/lib/Bcfg2/Client/Tools/SYSV.py
@@ -0,0 +1,107 @@
+"""This provides bcfg2 support for Solaris SYSV packages."""
+__revision__ = '$Revision$'
+
+import tempfile
+
+import Bcfg2.Client.Tools
+import Bcfg2.Client.XML
+
+
+noask = '''
+mail=
+instance=overwrite
+partial=nocheck
+runlevel=nocheck
+idepend=nocheck
+rdepend=nocheck
+space=ask
+setuid=nocheck
+conflict=nocheck
+action=nocheck
+basedir=default
+'''
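+# "noask" is the body of a Solaris admin(4) file; passing it to pkgadd/pkgrm
+# via the -a flag pre-answers the interactive prompts so package operations
+# can run unattended (see the temporary file set up in __init__ below).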
+
+
+class SYSV(Bcfg2.Client.Tools.PkgTool):
+ """Solaris SYSV package support."""
+ __execs__ = ["/usr/sbin/pkgadd", "/usr/bin/pkginfo"]
+ __handles__ = [('Package', 'sysv')]
+ __req__ = {'Package': ['name', 'version']}
+ __ireq__ = {'Package': ['name', 'url', 'version']}
+ name = 'SYSV'
+ pkgtype = 'sysv'
+ pkgtool = ("/usr/sbin/pkgadd %s -n -d %%s", (('%s %s', ['url', 'name'])))
+
+ def __init__(self, logger, setup, config):
+ Bcfg2.Client.Tools.PkgTool.__init__(self, logger, setup, config)
+ # noaskfile needs to live beyond __init__ otherwise file is removed
+ self.noaskfile = tempfile.NamedTemporaryFile()
+ self.noaskname = self.noaskfile.name
+ try:
+ self.noaskfile.write(noask)
+ # flush admin file contents to disk
+ self.noaskfile.flush()
+ self.pkgtool = (self.pkgtool[0] % ("-a %s" % (self.noaskname)), \
+ self.pkgtool[1])
+ except:
+ self.pkgtool = (self.pkgtool[0] % (""), self.pkgtool[1])
+
+ def RefreshPackages(self):
+ """Refresh memory hashes of packages."""
+ self.installed = {}
+ # Build list of packages
+ lines = self.cmd.run("/usr/bin/pkginfo -x")[1]
+ while lines:
+ # Splitting on whitespace means that packages with spaces in
+ # their version numbers don't work right. Found this with
+ # IBM TSM software with package versions like
+ # "Version 6 Release 1 Level 0.0"
+ # Should probably be done with a regex but this works.
+ version = lines.pop().split(') ')[1]
+ pkg = lines.pop().split()[0]
+ self.installed[pkg] = version
+
+ def VerifyPackage(self, entry, modlist):
+ """Verify Package status for entry."""
+ if not entry.get('version'):
+ self.logger.info("Insufficient information of Package %s; cannot Verify" % entry.get('name'))
+ return False
+
+ desiredVersion = entry.get('version')
+ if desiredVersion == 'any':
+ desiredVersion = self.installed.get(entry.get('name'), desiredVersion)
+
+ cmdrc = self.cmd.run("/usr/bin/pkginfo -q -v \"%s\" %s" % \
+ (desiredVersion, entry.get('name')))[0]
+
+ if cmdrc != 0:
+ if entry.get('name') in self.installed:
+ self.logger.debug("Package %s version incorrect: have %s want %s" \
+ % (entry.get('name'), self.installed[entry.get('name')],
+ desiredVersion))
+ else:
+ self.logger.debug("Package %s not installed" % (entry.get("name")))
+ else:
+ if self.setup['quick'] or entry.attrib.get('verify', 'true') == 'false':
+ return True
+ (vstat, odata) = self.cmd.run("/usr/sbin/pkgchk -n %s" % (entry.get('name')))
+ if vstat == 0:
+ return True
+ else:
+ output = [line for line in odata if line[:5] == 'ERROR']
+ if len([name for name in output if name.split()[-1] not in modlist]):
+ self.logger.debug("Package %s content verification failed" % \
+ (entry.get('name')))
+ else:
+ return True
+ return False
+
+ def RemovePackages(self, packages):
+ """Remove specified Sysv packages."""
+ names = [pkg.get('name') for pkg in packages]
+ self.logger.info("Removing packages: %s" % (names))
+ self.cmd.run("/usr/sbin/pkgrm -a %s -n %s" % \
+ (self.noaskname, names))
+ self.RefreshPackages()
+ self.extra = self.FindExtraPackages()
diff --git a/build/lib/Bcfg2/Client/Tools/Upstart.py b/build/lib/Bcfg2/Client/Tools/Upstart.py
new file mode 100644
index 000000000..113f28d23
--- /dev/null
+++ b/build/lib/Bcfg2/Client/Tools/Upstart.py
@@ -0,0 +1,78 @@
+"""Upstart support for Bcfg2."""
+__revision__ = '$Revision$'
+
+import glob
+import re
+
+import Bcfg2.Client.Tools
+import Bcfg2.Client.XML
+
+
+class Upstart(Bcfg2.Client.Tools.SvcTool):
+ """Upstart service support for Bcfg2."""
+ name = 'Upstart'
+ __execs__ = ['/lib/init/upstart-job',
+ '/sbin/initctl',
+ '/usr/sbin/service']
+ __handles__ = [('Service', 'upstart')]
+ __req__ = {'Service': ['name', 'status']}
+ svcre = re.compile("/etc/init/(?P<name>.*).conf")
+
+ def get_svc_command(self, service, action):
+ return "/usr/sbin/service %s %s" % (service.get('name'), action)
+
+ def VerifyService(self, entry, _):
+ """Verify Service status for entry
+
+ Verifying whether or not the service is enabled can be done
+ at the file level with upstart using the contents of
+ /etc/init/servicename.conf. All we need to do is make sure
+ the service is running when it should be.
+ """
+ try:
+ output = self.cmd.run('/usr/sbin/service %s status' % \
+ entry.get('name'))[1][0]
+ except IndexError:
+ self.logger.error("Service %s not an Upstart service" % \
+ entry.get('name'))
+ return False
+ try:
+ running = output.split(' ')[1].split('/')[1].startswith('running')
+ if running:
+ entry.set('current_status', 'on')
+ if entry.get('status') == 'off':
+ status = False
+ else:
+ status = True
+ else:
+ entry.set('current_status', 'off')
+ if entry.get('status') == 'on':
+ status = False
+ else:
+ status = True
+ except IndexError:
+ # service does not exist
+ entry.set('current_status', 'off')
+ status = False
+
+ return status
+
+ def InstallService(self, entry):
+ """Install Service for entry."""
+ if entry.get('status') == 'on':
+ pstatus = self.cmd.run(self.get_svc_command(entry, 'start'))[0]
+ elif entry.get('status') == 'off':
+ pstatus = self.cmd.run(self.get_svc_command(entry, 'stop'))[0]
+        # pstatus is the exit status; nonzero means the command failed
+ return not pstatus
+
+ def FindExtra(self):
+ """Locate extra Upstart services."""
+ specified = [entry.get('name') for entry in self.getSupportedEntries()]
+ extra = []
+        for fname in glob.glob("/etc/init/*.conf"):
+            name = self.svcre.match(fname).group('name')
+            if name not in specified:
+                extra.append(name)
+ return [Bcfg2.Client.XML.Element('Service', type='upstart', name=name) \
+ for name in extra]
diff --git a/build/lib/Bcfg2/Client/Tools/YUMng.py b/build/lib/Bcfg2/Client/Tools/YUMng.py
new file mode 100644
index 000000000..f0d906717
--- /dev/null
+++ b/build/lib/Bcfg2/Client/Tools/YUMng.py
@@ -0,0 +1,848 @@
+"""This provides bcfg2 support for yum."""
+__revision__ = '$Revision$'
+
+import ConfigParser
+import copy
+import os.path
+import yum
+import yum.packages
+import yum.rpmtrans
+import yum.callbacks
+import yum.Errors
+import yum.misc
+import rpmUtils.arch
+import Bcfg2.Client.XML
+import Bcfg2.Client.Tools
+
+# Fix for python2.3
+try:
+ set
+except NameError:
+ from sets import Set as set
+
+
+def build_yname(pkgname, inst):
+ """Build yum appropriate package name."""
+ d = {}
+ if isinstance(inst, yum.packages.PackageObject):
+ for i in ['name', 'epoch', 'version', 'release', 'arch']:
+ d[i] = getattr(inst, i)
+ else:
+ d['name'] = pkgname
+ if inst.get('version') != 'any':
+ d['version'] = inst.get('version')
+ if inst.get('epoch', False):
+ d['epoch'] = inst.get('epoch')
+ if inst.get('release', False) and inst.get('release') != 'any':
+ d['release'] = inst.get('release')
+ if inst.get('arch', False) and inst.get('arch') != 'any':
+ d['arch'] = inst.get('arch')
+ return d
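+# Illustrative sketch (made-up values): for pkgname 'bash' and an Instance
+# with version '4.0', release '1.fc12', arch 'x86_64' and no epoch, this
+# returns {'name': 'bash', 'version': '4.0', 'release': '1.fc12',
+# 'arch': 'x86_64'}, suitable as keyword arguments for yb.install()/update().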
+
+
+def short_yname(nevra):
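+    """Rename 'version'/'release' keys to the 'ver'/'rel' keys searchNevra expects."""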
+ d = nevra.copy()
+ if 'version' in d:
+ d['ver'] = d['version']
+ del d['version']
+ if 'release' in d:
+ d['rel'] = d['release']
+ del d['release']
+ return d
+
+
+def nevraString(p):
+ if isinstance(p, yum.packages.PackageObject):
+ return str(p)
+ else:
+ ret = ""
+ for i, j in [('epoch', '%s:'), ('name', '%s'), ('version', '-%s'),
+ ('release', '-%s'), ('arch', '.%s')]:
+ if i in p:
+ ret = "%s%s" % (ret, j % p[i])
+ return ret
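+# For example (made-up values), nevraString({'name': 'foo', 'epoch': '1',
+# 'version': '2.0', 'release': '3', 'arch': 'noarch'}) returns
+# '1:foo-2.0-3.noarch'.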
+
+
+class Parser(ConfigParser.ConfigParser):
+
+ def get(self, section, option, default):
+ """
+        Override ConfigParser.get: if the requested option is not in the
+ config file then return the value of default rather than raise
+ an exception. We still raise exceptions on missing sections.
+ """
+ try:
+ return ConfigParser.ConfigParser.get(self, section, option)
+ except (ConfigParser.NoOptionError, ConfigParser.NoSectionError):
+ return default
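+    # Used by _loadConfig() below, e.g. CP.get(self.name, "pkg_checks", "true")
+    # returns "true" when the config file has no pkg_checks option in that
+    # section.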
+
+
+class RPMDisplay(yum.rpmtrans.RPMBaseCallback):
+ """We subclass the default RPM transaction callback so that we
+ can control Yum's verbosity and pipe it through the right logger."""
+
+ def __init__(self, logger):
+ yum.rpmtrans.RPMBaseCallback.__init__(self)
+ self.logger = logger
+ self.state = None
+ self.package = None
+
+ def event(self, package, action, te_current, te_total,
+ ts_current, ts_total):
+ """
+ @param package: A yum package object or simple string of a package name
+ @param action: A yum.constant transaction set state or in the obscure
+ rpm repackage case it could be the string 'repackaging'
+ @param te_current: Current number of bytes processed in the transaction
+ element being processed
+ @param te_total: Total number of bytes in the transaction element being
+ processed
+ @param ts_current: number of processes completed in whole transaction
+ @param ts_total: total number of processes in the transaction.
+ """
+
+ if self.package != str(package) or action != self.state:
+ msg = "%s: %s" % (self.action[action], package)
+ self.logger.info(msg)
+ self.state = action
+ self.package = str(package)
+
+ def scriptout(self, package, msgs):
+ """Handle output from package scripts."""
+
+ if msgs:
+ msg = "%s: %s" % (package, msgs)
+ self.logger.debug(msg)
+
+ def errorlog(self, msg):
+ """Deal with error reporting."""
+ self.logger.error(msg)
+
+
+class YumDisplay(yum.callbacks.ProcessTransBaseCallback):
+ """Class to handle display of what step we are in the Yum transaction
+ such as downloading packages, etc."""
+
+ def __init__(self, logger):
+ self.logger = logger
+
+
+class YUMng(Bcfg2.Client.Tools.PkgTool):
+ """Support for Yum packages."""
+ pkgtype = 'yum'
+
+ name = 'YUMng'
+ __execs__ = []
+ __handles__ = [('Package', 'yum'),
+ ('Package', 'rpm'),
+ ('Path', 'ignore')]
+
+ __req__ = {'Package': ['name'],
+ 'Path': ['type']}
+ __ireq__ = {'Package': ['name']}
+
+ conflicts = ['RPMng']
+
+ def __init__(self, logger, setup, config):
+ self.yb = yum.YumBase()
+ Bcfg2.Client.Tools.PkgTool.__init__(self, logger, setup, config)
+ self.ignores = [entry.get('name') for struct in config \
+ for entry in struct \
+ if entry.tag == 'Path' and \
+ entry.get('type') == 'ignore']
+ self.instance_status = {}
+ self.extra_instances = []
+ self.modlists = {}
+ self._loadConfig()
+ self.__important__ = self.__important__ + \
+ [entry.get('name') for struct in config \
+ for entry in struct \
+ if entry.tag == 'Path' and \
+ (entry.get('name').startswith('/etc/yum.d') \
+ or entry.get('name').startswith('/etc/yum.repos.d')) \
+ or entry.get('name') == '/etc/yum.conf']
+ self.yum_avail = dict()
+ self.yum_installed = dict()
+ try:
+ self.yb.doConfigSetup()
+ self.yb.doTsSetup()
+ self.yb.doRpmDBSetup()
+ except yum.Errors.RepoError, e:
+ self.logger.error("YUMng Repository error: %s" % e)
+ raise Bcfg2.Client.Tools.toolInstantiationError
+ except yum.Errors.YumBaseError, e:
+ self.logger.error("YUMng error: %s" % e)
+ raise Bcfg2.Client.Tools.toolInstantiationError
+
+ yup = self.yb.doPackageLists(pkgnarrow='updates')
+ if hasattr(self.yb.rpmdb, 'pkglist'):
+ yinst = self.yb.rpmdb.pkglist
+ else:
+ yinst = self.yb.rpmdb.getPkgList()
+ for dest, source in [(self.yum_avail, yup.updates),
+ (self.yum_installed, yinst)]:
+ for pkg in source:
+ if dest is self.yum_avail:
+ pname = pkg.name
+ data = [(pkg.arch, (pkg.epoch, pkg.version, pkg.release))]
+ else:
+ pname = pkg[0]
+ data = [(pkg[1], (pkg[2], pkg[3], pkg[4]))]
+ if pname in dest:
+ dest[pname].update(data)
+ else:
+ dest[pname] = dict(data)
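+        # The resulting maps are arch -> (epoch, version, release), e.g.
+        # (made-up values) self.yum_installed['bash'] =
+        # {'x86_64': ('0', '4.0', '1.fc12')}; _fixAutoVersion() consumes them.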
+
+ def _loadConfig(self):
+ # Process the YUMng section from the config file.
+ CP = Parser()
+ CP.read(self.setup.get('setup'))
+ truth = ['true', 'yes', '1']
+
+ # These are all boolean flags, either we do stuff or we don't
+ self.pkg_checks = CP.get(self.name, "pkg_checks", "true").lower() \
+ in truth
+ self.pkg_verify = CP.get(self.name, "pkg_verify", "true").lower() \
+ in truth
+ self.doInstall = CP.get(self.name, "installed_action",
+ "install").lower() == "install"
+ self.doUpgrade = CP.get(self.name,
+ "version_fail_action", "upgrade").lower() == "upgrade"
+ self.doReinst = CP.get(self.name, "verify_fail_action",
+ "reinstall").lower() == "reinstall"
+ self.verifyFlags = CP.get(self.name, "verify_flags",
+ "").lower().replace(' ', ',')
+
+ self.installOnlyPkgs = self.yb.conf.installonlypkgs
+ if 'gpg-pubkey' not in self.installOnlyPkgs:
+ self.installOnlyPkgs.append('gpg-pubkey')
+
+ self.logger.debug("YUMng: Install missing: %s" \
+ % self.doInstall)
+ self.logger.debug("YUMng: pkg_checks: %s" % self.pkg_checks)
+ self.logger.debug("YUMng: pkg_verify: %s" % self.pkg_verify)
+ self.logger.debug("YUMng: Upgrade on version fail: %s" \
+ % self.doUpgrade)
+ self.logger.debug("YUMng: Reinstall on verify fail: %s" \
+ % self.doReinst)
+ self.logger.debug("YUMng: installOnlyPkgs: %s" \
+ % str(self.installOnlyPkgs))
+ self.logger.debug("YUMng: verify_flags: %s" % self.verifyFlags)
+
+ def _fixAutoVersion(self, entry):
+ # old style entry; synthesize Instances from current installed
+ if entry.get('name') not in self.yum_installed and \
+ entry.get('name') not in self.yum_avail:
+ # new entry; fall back to default
+ entry.set('version', 'any')
+ else:
+ data = copy.copy(self.yum_installed[entry.get('name')])
+ if entry.get('name') in self.yum_avail:
+ # installed but out of date
+ data.update(self.yum_avail[entry.get('name')])
+ for (arch, (epoch, vers, rel)) in list(data.items()):
+ x = Bcfg2.Client.XML.SubElement(entry, "Instance",
+ name=entry.get('name'),
+ version=vers, arch=arch,
+ release=rel, epoch=epoch)
+ if 'verify_flags' in entry.attrib:
+ x.set('verify_flags', entry.get('verify_flags'))
+ if 'verify' in entry.attrib:
+ x.set('verify', entry.get('verify'))
+
+ def _buildInstances(self, entry):
+ instances = [inst for inst in entry \
+ if inst.tag == 'Instance' or inst.tag == 'Package']
+
+        # XXX: We should uniquify the instances here; duplicates are
+        # sometimes returned, but the elements aren't comparable.
+
+ if instances == []:
+ # We have an old style no Instance entry. Convert it to new style.
+ instance = Bcfg2.Client.XML.SubElement(entry, 'Package')
+ for attrib in list(entry.attrib.keys()):
+ instance.attrib[attrib] = entry.attrib[attrib]
+ instances = [instance]
+
+ return instances
+
+ def _getGPGKeysAsPackages(self):
+ """Return a list of the GPG RPM signing keys installed on the
+ system as a list of Package Objects."""
+
+        # XXX: GPG keys stored in the RPMDB have their days numbered;
+        # newer Yum versions will not return information about them
+ if hasattr(self.yb.rpmdb, 'returnGPGPubkeyPackages'):
+ return self.yb.rpmdb.returnGPGPubkeyPackages()
+ return self.yb.rpmdb.searchNevra(name='gpg-pubkey')
+
+ def _verifyHelper(self, po):
+        # This code primarily deals with a yum bug where the PO.verify()
+        # method does not properly take multilib sharing of files into
+        # account. Neither does RPM proper, really... it just ignores the problem.
+ def verify(p):
+            # disabling file checksums is a new feature in yum 3.2.17-ish
+ try:
+ vResult = p.verify(fast=self.setup.get('quick', False))
+ except TypeError:
+ # Older Yum API
+ vResult = p.verify()
+ return vResult
+
+ key = (po.name, po.epoch, po.version, po.release, po.arch)
+ if key in self.verifyCache:
+ results = self.verifyCache[key]
+ else:
+ results = verify(po)
+ self.verifyCache[key] = results
+ if not rpmUtils.arch.isMultiLibArch():
+ return results
+
+        # Okay, deal with yum's buggy multilib verify results
+ packages = self.yb.rpmdb.searchNevra(name=po.name, epoch=po.epoch,
+ ver=po.version, rel=po.release) # find all arches of pkg
+ if len(packages) == 1:
+            return results  # No matching multilib packages
+
+ files = set(po.returnFileEntries()) # Will be the list of common fns
+ common = {}
+ for p in packages:
+ if p != po:
+ files = files & set(p.returnFileEntries())
+ for p in packages:
+ k = (p.name, p.epoch, p.version, p.release, p.arch)
+ self.logger.debug("Multilib Verify: comparing %s to %s" \
+ % (po, p))
+ if k in self.verifyCache:
+ v = self.verifyCache[k]
+ else:
+ v = verify(p)
+ self.verifyCache[k] = v
+
+ for fn, probs in v.items():
+ # file problems must exist in ALL multilib packages to be real
+ if fn in files:
+ common[fn] = common.get(fn, 0) + 1
+
+ flag = len(packages) - 1
+ for fn, i in common.items():
+ if i == flag:
+                # this fn had verify problems in all but one of the multilib
+                # packages. That means it's correct in the package that's
+                # "on top." Therefore, this is a fake verify problem.
+ if fn in results:
+ del results[fn]
+
+ return results
+
+ def RefreshPackages(self):
+ """
+ Creates self.installed{} which is a dict of installed packages.
+
+        The dict items are lists of nevra dicts. This loosely matches the
+        config from the server and what rpmtools uses to specify packages.
+
+ e.g.
+
+ self.installed['foo'] = [ {'name':'foo', 'epoch':None,
+ 'version':'1', 'release':2,
+ 'arch':'i386'},
+ {'name':'foo', 'epoch':None,
+ 'version':'1', 'release':2,
+ 'arch':'x86_64'} ]
+ """
+
+ self.installed = {}
+ packages = self._getGPGKeysAsPackages() + \
+ self.yb.rpmdb.returnPackages()
+ for po in packages:
+ d = {}
+ for i in ['name', 'epoch', 'version', 'release', 'arch']:
+ d[i] = getattr(po, i)
+ self.installed.setdefault(po.name, []).append(d)
+
+ def VerifyPackage(self, entry, modlist, pinned_version=None):
+ """
+ Verify Package status for entry.
+ Performs the following:
+ - Checks for the presence of required Package Instances.
+ - Compares the evra 'version' info against self.installed{}.
+ - RPM level package verify (rpm --verify).
+ - Checks for the presence of unrequired package instances.
+
+ Produces the following dict and list for YUMng.Install() to use:
+ For installs/upgrades/fixes of required instances:
+ instance_status = { <Instance Element Object>:
+ { 'installed': True|False,
+ 'version_fail': True|False,
+ 'verify_fail': True|False,
+ 'pkg': <Package Element Object>,
+ 'modlist': [ <filename>, ... ],
+ 'verify' : [ <rpm --verify results> ]
+ }, ......
+ }
+
+ For deletions of unrequired instances:
+ extra_instances = [ <Package Element Object>, ..... ]
+
+ Constructs the text prompts for interactive mode.
+ """
+
+ if entry.get('version', False) == 'auto':
+ self._fixAutoVersion(entry)
+
+ self.logger.debug("Verifying package instances for %s" \
+ % entry.get('name'))
+
+ self.verifyCache = {} # Used for checking multilib packages
+ self.modlists[entry] = modlist
+ instances = self._buildInstances(entry)
+ packageCache = []
+ package_fail = False
+ qtext_versions = []
+ virtPkg = False
+ pkg_checks = self.pkg_checks and \
+ entry.get('pkg_checks', 'true').lower() == 'true'
+ pkg_verify = self.pkg_verify and \
+ entry.get('pkg_verify', 'true').lower() == 'true'
+
+ if entry.get('name') == 'gpg-pubkey':
+ POs = self._getGPGKeysAsPackages()
+ pkg_verify = False # No files here to verify
+ else:
+ POs = self.yb.rpmdb.searchNevra(name=entry.get('name'))
+ if len(POs) == 0:
+ # Some sort of virtual capability? Try to resolve it
+ POs = self.yb.rpmdb.searchProvides(entry.get('name'))
+ if len(POs) > 0:
+ virtPkg = True
+ self.logger.info("%s appears to be provided by:" \
+ % entry.get('name'))
+ for p in POs:
+ self.logger.info(" %s" % p)
+
+ for inst in instances:
+ nevra = build_yname(entry.get('name'), inst)
+ snevra = short_yname(nevra)
+ if nevra in packageCache:
+ continue # Ignore duplicate instances
+ else:
+ packageCache.append(nevra)
+
+ self.logger.debug("Verifying: %s" % nevraString(nevra))
+
+ # Set some defaults here
+ stat = self.instance_status.setdefault(inst, {})
+ stat['installed'] = True
+ stat['version_fail'] = False
+ stat['verify'] = {}
+ stat['verify_fail'] = False
+ stat['pkg'] = entry
+ stat['modlist'] = modlist
+ verify_flags = inst.get('verify_flags', self.verifyFlags)
+ verify_flags = verify_flags.lower().replace(' ', ',').split(',')
+
+ if len(POs) == 0:
+ # Package not installed
+ self.logger.debug(" %s is not installed" % nevraString(nevra))
+ stat['installed'] = False
+ package_fail = True
+ qtext_versions.append("I(%s)" % nevra)
+ continue
+
+ if not pkg_checks:
+ continue
+
+ # Check EVR
+ if virtPkg:
+ self.logger.debug(" Not checking version for virtual package")
+ _POs = [po for po in POs] # Make a copy
+ elif entry.get('name') == 'gpg-pubkey':
+ _POs = [p for p in POs if p.version == nevra['version'] \
+ and p.release == nevra['release']]
+ else:
+ _POs = self.yb.rpmdb.searchNevra(**snevra)
+ if len(_POs) == 0:
+ package_fail = True
+ stat['version_fail'] = True
+                # Just choose the first pkg for the error message
+ self.logger.info(" Wrong version installed. "\
+ "Want %s, but have %s" % (nevraString(nevra),
+ nevraString(POs[0])))
+ qtext_versions.append("U(%s)" % str(POs[0]))
+ continue
+
+ if not (pkg_verify and \
+ inst.get('pkg_verify', 'true').lower() == 'true'):
+ continue
+
+            # XXX: We ignore GPG sig checking the package as it
+            # has nothing to do with the individual file hash/size/etc.
+            # GPG checking the package only examines some header/rpmdb
+            # wackiness, and will not properly detect a compromised rpmdb.
+            # Yum's verify routine does not support it for that reason.
+
+ if len(_POs) > 1:
+ self.logger.debug(" Verify Instance found many packages:")
+ for po in _POs:
+ self.logger.debug(" %s" % str(po))
+
+ try:
+ vResult = self._verifyHelper(_POs[0])
+ except Exception, e:
+ # Unknown Yum exception
+ self.logger.warning(" Verify Exception: %s" % str(e))
+ package_fail = True
+ continue
+
+            # Now filter out problems that are covered by the modlist,
+            # the Ignore entries, or the verify_flags
+ ignores = [ig.get('name') for ig in entry.findall('Ignore')] + \
+ [ig.get('name') for ig in inst.findall('Ignore')] + \
+ self.ignores
+ for fn, probs in vResult.items():
+ if fn in modlist:
+ self.logger.debug(" %s in modlist, skipping" % fn)
+ continue
+ if fn in ignores:
+ self.logger.debug(" %s in ignore list, skipping" % fn)
+ continue
+ tmp = []
+ for p in probs:
+ if p.type == 'missing' and os.path.islink(fn):
+ continue
+ elif 'no' + p.type in verify_flags:
+ continue
+ if p.type not in ['missingok', 'ghost']:
+ tmp.append((p.type, p.message))
+ if tmp != []:
+ stat['verify'][fn] = tmp
+
+ if stat['verify'] != {}:
+ stat['verify_fail'] = True
+ package_fail = True
+ self.logger.debug(" Verify Problems:")
+ for fn, probs in stat['verify'].items():
+ self.logger.debug(" %s" % fn)
+ for p in probs:
+ self.logger.debug(" %s: %s" % p)
+
+ if len(POs) > 0:
+ # Is this an install only package? We just look at the first one
+ provides = set([p[0] for p in POs[0].provides] + [POs[0].name])
+ install_only = len(set(self.installOnlyPkgs) & provides) > 0
+ else:
+ install_only = False
+
+ if virtPkg or (install_only and not self.setup['kevlar']):
+            # XXX: a virtual capability was supplied; we are probably
+            # dealing with multiple packages of different names. This
+            # check doesn't make a lot of sense in that case.
+            # XXX: install_only: Yum may clean some of these up itself.
+            # Otherwise, having multiple instances of install only
+            # packages is considered correct.
+ self.extra_instances = None
+ else:
+ self.extra_instances = self.FindExtraInstances(entry, POs)
+ if self.extra_instances is not None:
+ package_fail = True
+
+ return not package_fail
+
+ def FindExtraInstances(self, entry, POs):
+ """
+ Check for installed instances that are not in the config.
+ Return a Package Entry with Instances to remove, or None if there
+ are no Instances to remove.
+
+ """
+ if len(POs) == 0:
+ return None
+ name = entry.get('name')
+ extra_entry = Bcfg2.Client.XML.Element('Package', name=name,
+ type=self.pkgtype)
+ instances = self._buildInstances(entry)
+ _POs = [p for p in POs] # Shallow copy
+
+        # The algorithm is sensitive to duplicates, so check for them
+ checked = []
+ for inst in instances:
+ nevra = build_yname(name, inst)
+ snevra = short_yname(nevra)
+ pkgs = self.yb.rpmdb.searchNevra(**snevra)
+ if len(pkgs) > 0:
+ if pkgs[0] in checked:
+ continue # We've already taken care of this Instance
+ else:
+ checked.append(pkgs[0])
+ _POs.remove(pkgs[0])
+
+ for p in _POs:
+ self.logger.debug(" Extra Instance Found: %s" % str(p))
+ Bcfg2.Client.XML.SubElement(extra_entry, 'Instance',
+ epoch=p.epoch, name=p.name, version=p.version,
+ release=p.release, arch=p.arch)
+
+ if _POs == []:
+ return None
+ else:
+ return extra_entry
+
+ def FindExtraPackages(self):
+ """Find extra packages."""
+ packages = [e.get('name') for e in self.getSupportedEntries()]
+ extras = []
+
+ for p in self.installed.keys():
+ if p not in packages:
+ entry = Bcfg2.Client.XML.Element('Package', name=p,
+ type=self.pkgtype)
+ for i in self.installed[p]:
+ inst = Bcfg2.Client.XML.SubElement(entry, 'Instance', \
+ epoch = i['epoch'],
+ version = i['version'],
+ release = i['release'],
+ arch = i['arch'])
+
+ extras.append(entry)
+
+ return extras
+
+ def _installGPGKey(self, inst, key_file):
+ """Examine the GPG keys carefully before installation. Avoid
+ installing duplicate keys. Returns True on successful install."""
+
+ # RPM Transaction Set
+ ts = self.yb.rpmdb.readOnlyTS()
+
+ if not os.path.exists(key_file):
+ self.logger.debug("GPG Key file %s not installed" % key_file)
+ return False
+
+ rawkey = open(key_file).read()
+ gpg = yum.misc.getgpgkeyinfo(rawkey)
+
+ ver = yum.misc.keyIdToRPMVer(gpg['keyid'])
+ rel = yum.misc.keyIdToRPMVer(gpg['timestamp'])
+ if not (ver == inst.get('version') and rel == inst.get('release')):
+ self.logger.info("GPG key file %s does not match gpg-pubkey-%s-%s"\
+ % (key_file, inst.get('version'),
+ inst.get('release')))
+ return False
+
+        if yum.misc.keyInstalled(ts, gpg['keyid'],
+                                 gpg['timestamp']) != 0:
+ result = ts.pgpImportPubkey(yum.misc.procgpgkey(rawkey))
+ else:
+ self.logger.debug("gpg-pubkey-%s-%s already installed"\
+ % (inst.get('version'),
+ inst.get('release')))
+ return True
+
+ if result != 0:
+ self.logger.debug("Unable to install %s-%s" % \
+ (self.instance_status[inst].get('pkg').get('name'),
+ self.str_evra(inst)))
+ return False
+ else:
+ self.logger.debug("Installed %s-%s-%s" % \
+ (self.instance_status[inst].get('pkg').get('name'),
+ inst.get('version'), inst.get('release')))
+ return True
+
+ def _runYumTransaction(self):
+ rDisplay = RPMDisplay(self.logger)
+ yDisplay = YumDisplay(self.logger)
+ # Run the Yum Transaction
+ rescode, restring = self.yb.buildTransaction()
+ self.logger.debug("Initial Yum buildTransaction() run said:")
+ self.logger.debug(" resultcode: %s, msgs: %s" \
+ % (rescode, restring))
+
+ if rescode != 1:
+ # Transaction built successfully, run it
+ self.yb.processTransaction(callback=yDisplay,
+ rpmDisplay=rDisplay)
+ self.logger.info("Single Pass for Install Succeeded")
+ else:
+ # The yum command failed. No packages installed.
+            # Retry the transaction with skip_broken enabled.
+ self.logger.error("Single Pass Install of Packages Failed")
+ skipBroken = self.yb.conf.skip_broken
+ self.yb.conf.skip_broken = True
+ rescode, restring = self.yb.buildTransaction()
+ if rescode != 1:
+ self.yb.processTransaction(callback=yDisplay,
+ rpmDisplay=rDisplay)
+ self.logger.debug(
+ "Second pass install did not install all packages")
+ else:
+ self.logger.error("Second pass yum install failed.")
+ self.logger.debug(" %s" % restring)
+ self.yb.conf.skip_broken = skipBroken
+
+ self.yb.closeRpmDB()
+ self.RefreshPackages()
+
+ def Install(self, packages, states):
+ """
+        Try to fix everything that YUMng.VerifyPackage() found wrong for
+ each Package Entry. This can result in individual RPMs being
+ installed (for the first time), deleted, downgraded
+ or upgraded.
+
+        packages is a list of Package Elements for which
+        states[<Package Element>] == False
+
+ The following effects occur:
+ - states{} is conditionally updated for each package.
+ - self.installed{} is rebuilt, possibly multiple times.
+ - self.instance_status{} is conditionally updated for each instance
+ of a package.
+ - Each package will be added to self.modified[] if its states{}
+ entry is set to True.
+
+ """
+ self.logger.debug('Running YUMng.Install()')
+
+ install_pkgs = []
+ gpg_keys = []
+ upgrade_pkgs = []
+ reinstall_pkgs = []
+
+ def queuePkg(pkg, inst, queue):
+ if pkg.get('name') == 'gpg-pubkey':
+ gpg_keys.append(inst)
+ else:
+ queue.append(inst)
+
+ # Remove extra instances.
+        # Cannot reverify because we don't have a package entry.
+ if self.extra_instances is not None and len(self.extra_instances) > 0:
+ if (self.setup.get('remove') == 'all' or \
+ self.setup.get('remove') == 'packages'):
+ self.RemovePackages(self.extra_instances)
+ else:
+                self.logger.info("The following extra package instances would be removed by the '-r' option:")
+ for pkg in self.extra_instances:
+ for inst in pkg:
+ self.logger.info(" %s %s" % \
+ ((pkg.get('name'), self.str_evra(inst))))
+
+ # Figure out which instances of the packages actually need something
+ # doing to them and place in the appropriate work 'queue'.
+ for pkg in packages:
+ insts = [pinst for pinst in pkg \
+ if pinst.tag in ['Instance', 'Package']]
+ if insts:
+ for inst in insts:
+ if inst not in self.instance_status:
+ m = " Asked to install/update package never verified"
+ p = nevraString(build_yname(pkg.get('name'), inst))
+ self.logger.warning("%s: %s" % (m, p))
+ continue
+ status = self.instance_status[inst]
+ if not status.get('installed', False) and self.doInstall:
+ queuePkg(pkg, inst, install_pkgs)
+ elif status.get('version_fail', False) and self.doUpgrade:
+ queuePkg(pkg, inst, upgrade_pkgs)
+ elif status.get('verify_fail', False) and self.doReinst:
+ queuePkg(pkg, inst, reinstall_pkgs)
+ else:
+ # Either there was no Install/Version/Verify
+ # task to be done or the user disabled the actions
+ # in the configuration. XXX Logging for the latter?
+ pass
+ else:
+ msg = "YUMng: Package tag found where Instance expected: %s"
+ self.logger.warning(msg % pkg.get('name'))
+ queuePkg(pkg, pkg, install_pkgs)
+
+ # Install GPG keys.
+ # Alternatively specify the required keys using 'gpgkey' in the
+ # repository definition in yum.conf. YUM will install the keys
+ # automatically.
+ if len(gpg_keys) > 0:
+ self.logger.info("Installing GPG keys.")
+ for inst in gpg_keys:
+ if inst.get('simplefile') is None:
+ self.logger.error("GPG key has no simplefile attribute")
+ continue
+ key_file = os.path.join(self.instance_status[inst].get('pkg').get('uri'), \
+ inst.get('simplefile'))
+ self._installGPGKey(inst, key_file)
+
+ self.RefreshPackages()
+ pkg = self.instance_status[gpg_keys[0]].get('pkg')
+ states[pkg] = self.VerifyPackage(pkg, [])
+
+ # Install packages.
+ if len(install_pkgs) > 0:
+ self.logger.info("Attempting to install packages")
+
+ for inst in install_pkgs:
+ pkg_arg = self.instance_status[inst].get('pkg').get('name')
+ try:
+ self.yb.install(**build_yname(pkg_arg, inst))
+ except yum.Errors.YumBaseError, yume:
+ self.logger.error("Error installing some packages: %s" % yume)
+
+ if len(upgrade_pkgs) > 0:
+ self.logger.info("Attempting to upgrade packages")
+
+ for inst in upgrade_pkgs:
+ pkg_arg = self.instance_status[inst].get('pkg').get('name')
+ try:
+ self.yb.update(**build_yname(pkg_arg, inst))
+ except yum.Errors.YumBaseError, yume:
+ self.logger.error("Error upgrading some packages: %s" % yume)
+
+ if len(reinstall_pkgs) > 0:
+ self.logger.info("Attempting to reinstall packages")
+ for inst in reinstall_pkgs:
+ pkg_arg = self.instance_status[inst].get('pkg').get('name')
+ try:
+ self.yb.reinstall(**build_yname(pkg_arg, inst))
+ except yum.Errors.YumBaseError, yume:
+ self.logger.error("Error upgrading some packages: %s" \
+ % yume)
+
+ self._runYumTransaction()
+
+ if not self.setup['kevlar']:
+ for pkg_entry in [p for p in packages if self.canVerify(p)]:
+ self.logger.debug("Reverifying Failed Package %s" \
+ % (pkg_entry.get('name')))
+ states[pkg_entry] = self.VerifyPackage(pkg_entry,
+ self.modlists.get(pkg_entry, []))
+
+ for entry in [ent for ent in packages if states[ent]]:
+ self.modified.append(entry)
+
+ def RemovePackages(self, packages):
+ """
+ Remove specified entries.
+
+ packages is a list of Package Entries with Instances generated
+ by FindExtraPackages().
+ """
+ self.logger.debug('Running YUMng.RemovePackages()')
+
+ erase_args = []
+ for pkg in packages:
+ for inst in pkg:
+ nevra = build_yname(pkg.get('name'), inst)
+ if pkg.get('name') != 'gpg-pubkey':
+ self.yb.remove(**nevra)
+ self.modified.append(pkg)
+ else:
+ self.logger.info("WARNING: gpg-pubkey package not in configuration %s %s-%s"\
+ % (nevra['name'], nevra['version'], nevra['release']))
+ self.logger.info(" This package will be deleted in a future version of the YUMng driver.")
+
+ self._runYumTransaction()
+ self.extra = self.FindExtraPackages()
+
+ def VerifyPath(self, entry, _):
+ """Do nothing here since we only verify Path type=ignore"""
+ return True
diff --git a/build/lib/Bcfg2/Client/Tools/__init__.py b/build/lib/Bcfg2/Client/Tools/__init__.py
new file mode 100644
index 000000000..8a90e130c
--- /dev/null
+++ b/build/lib/Bcfg2/Client/Tools/__init__.py
@@ -0,0 +1,370 @@
+"""This contains all Bcfg2 Tool modules"""
+# suppress popen2 warnings for python 2.3
+import warnings
+warnings.filterwarnings("ignore", "The popen2 module is deprecated.*",
+ DeprecationWarning)
+import os
+import popen2
+import stat
+import sys
+import time
+
+import Bcfg2.Client.XML
+__revision__ = '$Revision$'
+
+__all__ = [tool.split('.')[0] \
+ for tool in os.listdir(os.path.dirname(__file__)) \
+ if tool.endswith(".py") and tool != "__init__.py"]
+
+drivers = [item for item in __all__ if item not in ['rpmtools']]
+default = [item for item in drivers if item not in ['RPM', 'Yum']]
+
+
+class toolInstantiationError(Exception):
+ """This error is called if the toolset cannot be instantiated."""
+ pass
+
+
+class readonlypipe(popen2.Popen4):
+ """This pipe sets up stdin --> /dev/null."""
+
+ def __init__(self, cmd, bufsize=-1):
+ popen2._cleanup()
+ c2pread, c2pwrite = os.pipe()
+ null = open('/dev/null', 'w+')
+ self.pid = os.fork()
+ if self.pid == 0:
+ # Child
+ os.dup2(null.fileno(), sys.__stdin__.fileno())
+ #os.dup2(p2cread, 0)
+ os.dup2(c2pwrite, 1)
+ os.dup2(c2pwrite, 2)
+ self._run_child(cmd)
+ os.close(c2pwrite)
+ self.fromchild = os.fdopen(c2pread, 'r', bufsize)
+ popen2._active.append(self)
+
+
+class executor:
+ """This class runs stuff for us"""
+
+ def __init__(self, logger):
+ self.logger = logger
+
+ def run(self, command):
+ """Run a command in a pipe dealing with stdout buffer overloads."""
+ self.logger.debug('> %s' % command)
+
+ runpipe = readonlypipe(command, bufsize=16384)
+ output = []
+        try:  # macosx doesn't like this
+ runpipe.fromchild.flush()
+ except IOError:
+ pass
+ line = runpipe.fromchild.readline()
+ cmdstat = -1
+ while cmdstat == -1:
+ while line:
+ if len(line) > 0:
+ self.logger.debug('< %s' % line[:-1])
+ output.append(line[:-1])
+ line = runpipe.fromchild.readline()
+ time.sleep(0.1)
+ cmdstat = runpipe.poll()
+ output += [line[:-1] for line in runpipe.fromchild.readlines() \
+ if line]
+ # The exit code from the program is in the upper byte of the
+ # value returned by cmdstat. Shift it down for tools looking at
+ # the value.
+ return ((cmdstat >> 8), output)
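+        # Illustrative decoding: a child that calls exit(2) makes poll()
+        # return 512 (0x0200); 512 >> 8 == 2 is what callers see.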
+
+
+class Tool:
+ """
+    All tools subclass this. It defines the interfaces a tool must provide.
+ """
+ name = 'Tool'
+ __execs__ = []
+ __handles__ = []
+ __req__ = {}
+ __important__ = []
+
+ def __init__(self, logger, setup, config):
+ self.__important__ = [entry.get('name') \
+ for struct in config for entry in struct \
+ if entry.tag == 'Path' and \
+ entry.get('important') in ['true', 'True']]
+ self.setup = setup
+ self.logger = logger
+ if not hasattr(self, '__ireq__'):
+ self.__ireq__ = self.__req__
+ self.config = config
+ self.cmd = executor(logger)
+ self.modified = []
+ self.extra = []
+ self.handled = [entry for struct in self.config for entry in struct \
+ if self.handlesEntry(entry)]
+ for filename in self.__execs__:
+ try:
+ mode = stat.S_IMODE(os.stat(filename)[stat.ST_MODE])
+ if mode & stat.S_IEXEC != stat.S_IEXEC:
+ self.logger.debug("%s: %s not executable" % \
+ (self.name, filename))
+ raise toolInstantiationError
+ except OSError:
+ raise toolInstantiationError
+ except:
+ self.logger.debug("%s failed" % filename, exc_info=1)
+ raise toolInstantiationError
+
+ def BundleUpdated(self, _, states):
+ """This callback is used when bundle updates occur."""
+ return
+
+ def BundleNotUpdated(self, _, states):
+ """This callback is used when a bundle is not updated."""
+ return
+
+ def Inventory(self, states, structures=[]):
+ """Dispatch verify calls to underlying methods."""
+ if not structures:
+ structures = self.config.getchildren()
+ mods = self.buildModlist()
+ for (struct, entry) in [(struct, entry) for struct in structures \
+ for entry in struct.getchildren() \
+ if self.canVerify(entry)]:
+ try:
+ func = getattr(self, "Verify%s" % (entry.tag))
+ states[entry] = func(entry, mods)
+ except:
+ self.logger.error(
+ "Unexpected failure of verification method for entry type %s" \
+ % (entry.tag), exc_info=1)
+ self.extra = self.FindExtra()
+
+ def Install(self, entries, states):
+ """Install all entries in sublist."""
+ for entry in entries:
+ try:
+ func = getattr(self, "Install%s" % (entry.tag))
+ states[entry] = func(entry)
+ self.modified.append(entry)
+ except:
+ self.logger.error("Unexpected failure of install method for entry type %s" \
+ % (entry.tag), exc_info=1)
+
+ def Remove(self, entries):
+ """Remove specified extra entries"""
+ pass
+
+ def getSupportedEntries(self):
+ """Return a list of supported entries."""
+ return [entry for struct in \
+ self.config.getchildren() for entry in \
+ struct.getchildren() \
+ if self.handlesEntry(entry)]
+
+ def handlesEntry(self, entry):
+ """Return if entry is handled by this tool."""
+ return (entry.tag, entry.get('type')) in self.__handles__
+
+ def buildModlist(self):
+ '''Build a list of potentially modified POSIX paths for this entry'''
+ return [entry.get('name') for struct in self.config.getchildren() \
+ for entry in struct.getchildren() \
+ if entry.tag in ['Ignore', 'Path']]
+
+ def gatherCurrentData(self, entry):
+ """Default implementation of the information gathering routines."""
+ pass
+
+ def canVerify(self, entry):
+ """Test if entry has enough information to be verified."""
+ if not self.handlesEntry(entry):
+ return False
+
+ if 'failure' in entry.attrib:
+ self.logger.error("Entry %s:%s reports bind failure: %s" % \
+ (entry.tag, entry.get('name'), entry.get('failure')))
+ return False
+
+ missing = [attr for attr in self.__req__[entry.tag] \
+ if attr not in entry.attrib]
+        if missing:
+            self.logger.error("Incomplete information for entry %s:%s; cannot verify" \
+                % (entry.tag, entry.get('name')))
+            self.logger.error("\t... due to absence of %s attribute(s)" % \
+                (":".join(missing)))
+            try:
+                self.gatherCurrentData(entry)
+            except:
+                self.logger.error("Unexpected error in gatherCurrentData", exc_info=1)
+            return False
+        return True
+
+ def FindExtra(self):
+ """Return a list of extra entries."""
+ return []
+
+ def canInstall(self, entry):
+ """Test if entry has enough information to be installed."""
+ if not self.handlesEntry(entry):
+ return False
+
+ if 'failure' in entry.attrib:
+ self.logger.error("Cannot install entry %s:%s with bind failure" % \
+ (entry.tag, entry.get('name')))
+ return False
+
+ missing = [attr for attr in self.__ireq__[entry.tag] \
+ if attr not in entry.attrib or not entry.attrib[attr]]
+ if missing:
+ self.logger.error("Incomplete information for entry %s:%s; cannot install" \
+ % (entry.tag, entry.get('name')))
+ self.logger.error("\t... due to absence of %s attribute" % \
+ (":".join(missing)))
+ return False
+ return True
+
+
+class PkgTool(Tool):
+ """
+ PkgTool provides a one-pass install with
+ fallback for use with packaging systems
+ """
+ pkgtool = ('echo %s', ('%s', ['name']))
+ pkgtype = 'echo'
+ name = 'PkgTool'
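+    # pkgtool format: (command template, (per-package format, [attr names])).
+    # The echo defaults above expand to "echo <name> <name> ..." in Install().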
+
+ def __init__(self, logger, setup, config):
+ Tool.__init__(self, logger, setup, config)
+ self.installed = {}
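+        # Rebind the generic Tool hooks so callers of Remove()/FindExtra()
+        # reach the package-specific implementations defined below.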
+ self.Remove = self.RemovePackages
+ self.FindExtra = self.FindExtraPackages
+ self.RefreshPackages()
+
+ def VerifyPackage(self, dummy, _):
+ """Dummy verification method"""
+ return False
+
+ def Install(self, packages, states):
+ """
+ Run a one-pass install, followed by
+ single pkg installs in case of failure.
+ """
+ self.logger.info("Trying single pass package install for pkgtype %s" % \
+ self.pkgtype)
+
+ data = [tuple([pkg.get(field) for field in self.pkgtool[1][1]]) for pkg in packages]
+ pkgargs = " ".join([self.pkgtool[1][0] % datum for datum in data])
+
+ self.logger.debug("Installing packages: :%s:" % pkgargs)
+ self.logger.debug("Running command ::%s::" % (self.pkgtool[0] % pkgargs))
+
+ cmdrc = self.cmd.run(self.pkgtool[0] % pkgargs)[0]
+ if cmdrc == 0:
+ self.logger.info("Single Pass Succeded")
+ # set all package states to true and flush workqueues
+ pkgnames = [pkg.get('name') for pkg in packages]
+ for entry in [entry for entry in list(states.keys())
+ if entry.tag == 'Package'
+ and entry.get('type') == self.pkgtype
+ and entry.get('name') in pkgnames]:
+ self.logger.debug('Setting state to true for pkg %s' % \
+ (entry.get('name')))
+ states[entry] = True
+ self.RefreshPackages()
+ else:
+ self.logger.error("Single Pass Failed")
+ # do single pass installs
+ self.RefreshPackages()
+ for pkg in packages:
+ # handle state tracking updates
+ if self.VerifyPackage(pkg, []):
+ self.logger.info("Forcing state to true for pkg %s" % \
+ (pkg.get('name')))
+ states[pkg] = True
+ else:
+ self.logger.info("Installing pkg %s version %s" %
+ (pkg.get('name'), pkg.get('version')))
+ cmdrc = self.cmd.run(self.pkgtool[0] %
+ (self.pkgtool[1][0] %
+ tuple([pkg.get(field) for field in self.pkgtool[1][1]])))
+ if cmdrc[0] == 0:
+ states[pkg] = True
+ else:
+ self.logger.error("Failed to install package %s" % \
+ (pkg.get('name')))
+ self.RefreshPackages()
+ for entry in [ent for ent in packages if states[ent]]:
+ self.modified.append(entry)
+
+ def RefreshPackages(self):
+ """Dummy state refresh method."""
+ pass
+
+ def RemovePackages(self, packages):
+ """Dummy implementation of package removal method."""
+ pass
+
+ def FindExtraPackages(self):
+ """Find extra packages."""
+ packages = [entry.get('name') for entry in self.getSupportedEntries()]
+ extras = [data for data in list(self.installed.items()) \
+ if data[0] not in packages]
+ return [Bcfg2.Client.XML.Element('Package', name=name, \
+ type=self.pkgtype, version=version) \
+ for (name, version) in extras]
+
+
+class SvcTool(Tool):
+ """This class defines basic Service behavior"""
+ name = 'SvcTool'
+
+ def get_svc_command(self, service, action):
+ """Return the basename of the command used to start/stop services."""
+ return '/etc/init.d/%s %s' % (service.get('name'), action)
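+    # For example, a Service named 'sshd' with action 'restart' yields
+    # '/etc/init.d/sshd restart'; drivers like SMF and Upstart override this.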
+
+ def start_service(self, service):
+ self.logger.debug('Starting service %s' % service.get('name'))
+ return self.cmd.run(self.get_svc_command(service, 'start'))[0]
+
+ def stop_service(self, service):
+ self.logger.debug('Stopping service %s' % service.get('name'))
+ return self.cmd.run(self.get_svc_command(service, 'stop'))[0]
+
+ def restart_service(self, service):
+ self.logger.debug('Restarting service %s' % service.get('name'))
+ restart_target = 'restart'
+ if service.get('mode', 'default') == 'custom':
+ restart_target = service.get('target', 'restart')
+ return self.cmd.run(self.get_svc_command(service, restart_target))[0]
+
+ def check_service(self, service):
+ # not supported for this driver
+ return 0
+
+ def BundleUpdated(self, bundle, states):
+ """The Bundle has been updated."""
+ if self.setup['servicemode'] == 'disabled':
+ return
+
+ for entry in [ent for ent in bundle if self.handlesEntry(ent)]:
+ if entry.get('mode', 'default') == 'manual':
+ continue
+ # need to handle servicemode = (build|default)
+ # need to handle mode = (default|supervised|custom)
+ if entry.get('status') == 'on':
+ if self.setup['servicemode'] == 'build':
+ rc = self.stop_service(entry)
+ else:
+ if self.setup['interactive']:
+ prompt = 'Restart service %s?: (y/N): ' % entry.get('name')
+ if raw_input(prompt) not in ['y', 'Y']:
+ continue
+ rc = self.restart_service(entry)
+ else:
+ rc = self.stop_service(entry)
+ if rc:
+ self.logger.error("Failed to manipulate service %s" % \
+ (entry.get('name')))
diff --git a/build/lib/Bcfg2/Client/Tools/launchd.py b/build/lib/Bcfg2/Client/Tools/launchd.py
new file mode 100644
index 000000000..db6d94c1b
--- /dev/null
+++ b/build/lib/Bcfg2/Client/Tools/launchd.py
@@ -0,0 +1,131 @@
+"""launchd support for Bcfg2."""
+__revision__ = '$Revision$'
+
+import os
+import popen2
+
+import Bcfg2.Client.Tools
+import Bcfg2.Client.XML
+
+
+class launchd(Bcfg2.Client.Tools.Tool):
+ """Support for Mac OS X launchd services."""
+ __handles__ = [('Service', 'launchd')]
+ __execs__ = ['/bin/launchctl', '/usr/bin/defaults']
+ name = 'launchd'
+ __req__ = {'Service': ['name', 'status']}
+
+ '''
+ Currently requires the path to the plist to load/unload,
+ and name is actually a reverse FQDN (i.e. the label).
+ '''
+
+ def __init__(self, logger, setup, config):
+ Bcfg2.Client.Tools.Tool.__init__(self, logger, setup, config)
+
+ '''Locate plist file that provides given reverse-fqdn name
+ /Library/LaunchAgents Per-user agents provided by the administrator.
+ /Library/LaunchDaemons System wide daemons provided by the administrator.
+ /System/Library/LaunchAgents Mac OS X Per-user agents.
+ /System/Library/LaunchDaemons Mac OS X System wide daemons.'''
+ plistLocations = ["/Library/LaunchDaemons", "/System/Library/LaunchDaemons"]
+ self.plistMapping = {}
+ for directory in plistLocations:
+ for daemon in os.listdir(directory):
+ try:
+ if daemon.endswith(".plist"):
+ d = daemon[:-6]
+ else:
+ d = daemon
+ (stdout, _) = popen2.popen2('defaults read %s/%s Label' % (directory, d))
+ label = stdout.read().strip()
+ self.plistMapping[label] = "%s/%s" % (directory, daemon)
+ except KeyError: # perhaps this could be more robust
+ pass
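+
+ # After this scan, plistMapping maps launchd labels to plist paths,
+ # e.g. a hypothetical entry:
+ # {'org.example.foo': '/Library/LaunchDaemons/org.example.foo.plist'}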
+
+ def FindPlist(self, entry):
+ return self.plistMapping.get(entry.get('name'), None)
+
+ def os_version(self):
+ version = ""
+ try:
+ vers = self.cmd.run('sw_vers')[1]
+ except:
+ return version
+
+ for line in vers:
+ if line.startswith("ProductVersion"):
+ version = line.split()[-1]
+ return version
+
+ def VerifyService(self, entry, _):
+ """Verify launchd service entry."""
+ try:
+ services = self.cmd.run("/bin/launchctl list")[1]
+ except IndexError: # happens when no services are running (which should never be the case)
+ services = []
+ # launchctl output changed in 10.5; it now has three columns, with
+ # the last column being the name of the service
+ version = self.os_version()
+ if version.startswith('10.5') or version.startswith('10.6'):
+ services = [s.split()[-1] for s in services]
+ if entry.get('name') in services: # does not check whether non-spawning services have been started
+ return entry.get('status') == 'on'
+ else:
+ self.logger.debug("Didn't find service Loaded (launchd running under same user as bcfg)")
+ return entry.get('status') == 'off'
+
+ try: #Perhaps add the "-w" flag to load and unload to modify the file itself!
+ self.cmd.run("/bin/launchctl load -w %s" % self.FindPlist(entry))
+ except IndexError:
+ return 'on'
+ return False
+
+ def InstallService(self, entry):
+ """Enable or disable launchd item."""
+ name = entry.get('name')
+ if entry.get('status') == 'on':
+ self.logger.error("Installing service %s" % name)
+ cmdrc = self.cmd.run("/bin/launchctl load -w %s" % self.FindPlist(entry))
+ cmdrc = self.cmd.run("/bin/launchctl start %s" % name)
+ else:
+ self.logger.error("Uninstalling service %s" % name)
+ cmdrc = self.cmd.run("/bin/launchctl stop %s" % name)
+ cmdrc = self.cmd.run("/bin/launchctl unload -w %s" % self.FindPlist(entry))
+ return cmdrc[0] == 0
+
+ def Remove(self, svcs):
+ """Remove Extra launchd entries."""
+ pass
+
+ def FindExtra(self):
+ """Find Extra launchd services."""
+ try:
+ allsrv = self.cmd.run("/bin/launchctl list")[1]
+ except IndexError:
+ allsrv = []
+
+ [allsrv.remove(svc) for svc in [entry.get("name") for entry
+ in self.getSupportedEntries()] if svc in allsrv]
+ return [Bcfg2.Client.XML.Element("Service",
+ type='launchd',
+ name=name,
+ status='on') for name in allsrv]
+
+ def BundleUpdated(self, bundle, states):
+ """Reload launchd plist."""
+ for entry in [entry for entry in bundle if self.handlesEntry(entry)]:
+ if not self.canInstall(entry):
+ self.logger.error("Insufficient information to restart service %s" % (entry.get('name')))
+ else:
+ name = entry.get('name')
+ if entry.get('status') == 'on' and self.FindPlist(entry):
+ self.logger.info("Reloading launchd service %s" % name)
+ #stop?
+ self.cmd.run("/bin/launchctl stop %s" % name)
+ self.cmd.run("/bin/launchctl unload -w %s" % (self.FindPlist(entry)))#what if it disappeared? how do we stop services that are currently running but the plist disappeared?!
+ self.cmd.run("/bin/launchctl load -w %s" % (self.FindPlist(entry)))
+ self.cmd.run("/bin/launchctl start %s" % name)
+ else:
+ #only if necessary....
+ self.cmd.run("/bin/launchctl stop %s" % name)
+ self.cmd.run("/bin/launchctl unload -w %s" % (self.FindPlist(entry)))
diff --git a/build/lib/Bcfg2/Client/Tools/rpmtools.py b/build/lib/Bcfg2/Client/Tools/rpmtools.py
new file mode 100644
index 000000000..3cd2b7014
--- /dev/null
+++ b/build/lib/Bcfg2/Client/Tools/rpmtools.py
@@ -0,0 +1,1115 @@
+#!/usr/bin/env python
+"""
+ Module that uses rpm-python to implement the following rpm
+ functionality for the bcfg2 RPM and YUM client drivers:
+
+ rpm -qa
+ rpm --verify
+ rpm --erase
+
+ The code closely follows the rpm C code.
+
+ The code was written to be used in the bcfg2 RPM/YUM drivers.
+
+ Some command line options have been provided to assist with
+ testing and development, but the output isn't pretty and looks
+ nothing like rpm output.
+
+ Run 'rpmtools' -h for the options.
+
+"""
+__revision__ = '$Revision$'
+
+import grp
+import optparse
+import os
+import pwd
+import rpm
+import stat
+import sys
+if sys.version_info >= (2, 5):
+ import hashlib
+ py24compat = False
+else:
+ # FIXME: Remove when client python dep is 2.5 or greater
+ py24compat = True
+ import md5
+
+# Determine what prelink tools we have available.
+# The isprelink module is a python extension that examines the ELF headers
+# to see if the file has been prelinked. If it is not present a lot of files
+# are unnecessarily run through the prelink command.
+try:
+ from isprelink import *
+ isprelink_imported = True
+except ImportError:
+ isprelink_imported = False
+ #print '*********************** isprelink not loaded ***********************'
+
+# If the prelink command is installed on the system then we need to do
+# prelink -y on files.
+if os.access('/usr/sbin/prelink', os.X_OK):
+ prelink_exists = True
+else:
+ prelink_exists = False
+
+# If we don't have isprelink then we will use the prelink configuration file to
+# filter what we have to put through prelink -y.
+import re
+blacklist = []
+whitelist = []
+try:
+ f = open('/etc/prelink.conf', mode='r')
+ for line in f:
+ if line.startswith('#'):
+ continue
+ option, pattern = line.split()
+ if pattern.startswith('*.'):
+ pattern = pattern.replace('*.', '\.')
+ pattern += '$'
+ elif pattern.startswith('/'):
+ pattern = '^' + pattern
+ if option == '-b':
+ blacklist.append(pattern)
+ elif option == '-l':
+ whitelist.append(pattern)
+ f.close()
+except IOError:
+ pass
+
+blacklist_re = re.compile('|'.join(blacklist))
+whitelist_re = re.compile('|'.join(whitelist))
+
+# Flags that are not defined in rpm-python.
+# They are defined in lib/rpmcli.h
+# Bit(s) for verifyFile() attributes.
+#
+RPMVERIFY_NONE = 0 # /*!< */
+RPMVERIFY_MD5 = 1 # 1 << 0 # /*!< from %verify(md5) */
+RPMVERIFY_FILESIZE = 2 # 1 << 1 # /*!< from %verify(size) */
+RPMVERIFY_LINKTO = 4 # 1 << 2 # /*!< from %verify(link) */
+RPMVERIFY_USER = 8 # 1 << 3 # /*!< from %verify(user) */
+RPMVERIFY_GROUP = 16 # 1 << 4 # /*!< from %verify(group) */
+RPMVERIFY_MTIME = 32 # 1 << 5 # /*!< from %verify(mtime) */
+RPMVERIFY_MODE = 64 # 1 << 6 # /*!< from %verify(mode) */
+RPMVERIFY_RDEV = 128 # 1 << 7 # /*!< from %verify(rdev) */
+RPMVERIFY_CONTEXTS = 32768 # (1 << 15) # /*!< from --nocontexts */
+RPMVERIFY_READLINKFAIL = 268435456 # (1 << 28) # /*!< readlink failed */
+RPMVERIFY_READFAIL = 536870912 # (1 << 29) # /*!< file read failed */
+RPMVERIFY_LSTATFAIL = 1073741824 # (1 << 30) # /*!< lstat failed */
+RPMVERIFY_LGETFILECONFAIL = 2147483648 # (1 << 31) # /*!< lgetfilecon failed */
+
+RPMVERIFY_FAILURES = \
+ (RPMVERIFY_LSTATFAIL|RPMVERIFY_READFAIL|RPMVERIFY_READLINKFAIL| \
+ RPMVERIFY_LGETFILECONFAIL)
+
+# Bit(s) to control rpm_verify() operation.
+#
+VERIFY_DEFAULT = 0 # /*!< */ (no trailing comma; that would make this a tuple)
+VERIFY_MD5 = 1 << 0 # /*!< from --nomd5 */
+VERIFY_SIZE = 1 << 1 # /*!< from --nosize */
+VERIFY_LINKTO = 1 << 2 # /*!< from --nolinkto */
+VERIFY_USER = 1 << 3 # /*!< from --nouser */
+VERIFY_GROUP = 1 << 4 # /*!< from --nogroup */
+VERIFY_MTIME = 1 << 5 # /*!< from --nomtime */
+VERIFY_MODE = 1 << 6 # /*!< from --nomode */
+VERIFY_RDEV = 1 << 7 # /*!< from --nodev */
+# /* bits 8-14 unused, reserved for rpmVerifyAttrs */
+VERIFY_CONTEXTS = 1 << 15 # /*!< verify: from --nocontexts */
+VERIFY_FILES = 1 << 16 # /*!< verify: from --nofiles */
+VERIFY_DEPS = 1 << 17 # /*!< verify: from --nodeps */
+VERIFY_SCRIPT = 1 << 18 # /*!< verify: from --noscripts */
+VERIFY_DIGEST = 1 << 19 # /*!< verify: from --nodigest */
+VERIFY_SIGNATURE = 1 << 20 # /*!< verify: from --nosignature */
+VERIFY_PATCHES = 1 << 21 # /*!< verify: from --nopatches */
+VERIFY_HDRCHK = 1 << 22 # /*!< verify: from --nohdrchk */
+VERIFY_FOR_LIST = 1 << 23 # /*!< query: from --list */
+VERIFY_FOR_STATE = 1 << 24 # /*!< query: from --state */
+VERIFY_FOR_DOCS = 1 << 25 # /*!< query: from --docfiles */
+VERIFY_FOR_CONFIG = 1 << 26 # /*!< query: from --configfiles */
+VERIFY_FOR_DUMPFILES = 1 << 27 # /*!< query: from --dump */
+# /* bits 28-31 used in rpmVerifyAttrs */
+
+# Comes from C source: lib/rpmcli.h
+VERIFY_ATTRS = \
+ (VERIFY_MD5 | VERIFY_SIZE | VERIFY_LINKTO | VERIFY_USER | VERIFY_GROUP | \
+ VERIFY_MTIME | VERIFY_MODE | VERIFY_RDEV | VERIFY_CONTEXTS)
+
+VERIFY_ALL = \
+ (VERIFY_ATTRS | VERIFY_FILES | VERIFY_DEPS | VERIFY_SCRIPT | VERIFY_DIGEST |\
+ VERIFY_SIGNATURE | VERIFY_HDRCHK)
+
+
+# Some masks for which checks NOT to do on these file types.
+# The C code actually sets these up for every file.
+DIR_FLAGS = ~(RPMVERIFY_MD5 | RPMVERIFY_FILESIZE | RPMVERIFY_MTIME | \
+ RPMVERIFY_LINKTO)
+
+# These file types all have the same mask, but hopefully this will make the
+# code more readable.
+FIFO_FLAGS = CHR_FLAGS = BLK_FLAGS = GHOST_FLAGS = DIR_FLAGS
+
+LINK_FLAGS = ~(RPMVERIFY_MD5 | RPMVERIFY_FILESIZE | RPMVERIFY_MTIME | \
+ RPMVERIFY_MODE | RPMVERIFY_USER | RPMVERIFY_GROUP)
+
+REG_FLAGS = ~(RPMVERIFY_LINKTO)
+
+
+def s_isdev(mode):
+ """
+ Check to see if a file is a device.
+
+ """
+ return stat.S_ISBLK(mode) | stat.S_ISCHR(mode)
+
+def rpmpackagelist(rts):
+ """
+ Equivalent of rpm -qa. Intended for RefreshPackages() in the RPM Driver.
+ Requires rpmtransactionset() to be run first to get a ts.
+ Returns a list of pkgspec dicts.
+
+ e.g. [ {'name':'foo', 'epoch':'20', 'version':'1.2', 'release':'5', 'arch':'x86_64' },
+ {'name':'bar', 'epoch':'10', 'version':'5.2', 'release':'2', 'arch':'x86_64' } ]
+
+ """
+ return [{'name':header[rpm.RPMTAG_NAME],
+ 'epoch':header[rpm.RPMTAG_EPOCH],
+ 'version':header[rpm.RPMTAG_VERSION],
+ 'release':header[rpm.RPMTAG_RELEASE],
+ 'arch':header[rpm.RPMTAG_ARCH],
+ 'gpgkeyid':header.sprintf("%|SIGGPG?{%{SIGGPG:pgpsig}}:{None}|").split()[-1]}
+ for header in rts.dbMatch()]
+
+def getindexbykeyword(index_ts, **kwargs):
+ """
+ Return a list of indexes from the rpmdb matching keywords.
+ ex: getindexbykeyword(name='foo', version='1', release='1')
+
+ Can be passed any structure that can be indexed by the pkgspec
+ keywords, as other keys are filtered out.
+
+ """
+ lst = []
+ name = kwargs.get('name')
+ if name:
+ index_mi = index_ts.dbMatch(rpm.RPMTAG_NAME, name)
+ else:
+ index_mi = index_ts.dbMatch()
+
+ if 'epoch' in kwargs:
+ if kwargs['epoch'] != None and kwargs['epoch'] != 'None':
+ kwargs['epoch'] = int(kwargs['epoch'])
+ else:
+ del(kwargs['epoch'])
+
+ keywords = [key for key in list(kwargs.keys()) \
+ if key in ('name', 'epoch', 'version', 'release', 'arch')]
+ keywords_len = len(keywords)
+ for hdr in index_mi:
+ match = 0
+ for keyword in keywords:
+ if hdr[keyword] == kwargs[keyword]:
+ match += 1
+ if match == keywords_len:
+ lst.append(index_mi.instance())
+ del index_mi
+ return lst
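+
+# A minimal usage sketch (hypothetical package spec). The returned rpmdb
+# instance indexes are suitable for ts.addErase(), as in rpm_erase() below:
+# ts = rpmtransactionset()
+# idx_list = getindexbykeyword(ts, name='foo', version='1.2', release='5')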
+
+def getheadersbykeyword(header_ts, **kwargs):
+ """
+ Borrowed parts of this from Yum. Needs fixing, though:
+ epoch is not handled right.
+
+ Return a list of headers from the rpmdb matching keywords.
+ ex: getheadersbykeyword(name='foo', version='1', release='1')
+
+ Can be passed any structure that can be indexed by the pkgspec
+ keywords, as other keys are filtered out.
+
+ """
+ lst = []
+ name = kwargs.get('name')
+ if name:
+ header_mi = header_ts.dbMatch(rpm.RPMTAG_NAME, name)
+ else:
+ header_mi = header_ts.dbMatch()
+
+ if 'epoch' in kwargs:
+ if kwargs['epoch'] != None and kwargs['epoch'] != 'None':
+ kwargs['epoch'] = int(kwargs['epoch'])
+ else:
+ del(kwargs['epoch'])
+
+ keywords = [key for key in list(kwargs.keys()) \
+ if key in ('name', 'epoch', 'version', 'release', 'arch')]
+ keywords_len = len(keywords)
+ for hdr in header_mi:
+ match = 0
+ for keyword in keywords:
+ if hdr[keyword] == kwargs[keyword]:
+ match += 1
+ if match == keywords_len:
+ lst.append(hdr)
+ del header_mi
+ return lst
+
+def prelink_md5_check(filename):
+ """
+ Checks if a file is prelinked. If it is, run it through prelink -y
+ to get the unprelinked md5 and file size.
+
+ Return 0 if the file was not prelinked, otherwise return the file size.
+ Always return the md5.
+
+ """
+ prelink = False
+ try:
+ plf = open(filename, "rb")
+ except IOError:
+ return False, 0
+
+ if prelink_exists:
+ if isprelink_imported:
+ plfd = plf.fileno()
+ if isprelink(plfd):
+ plf.close()
+ cmd = '/usr/sbin/prelink -y %s 2> /dev/null' \
+ % (re.escape(filename))
+ plf = os.popen(cmd, 'rb')
+ prelink = True
+ elif whitelist_re.search(filename) and not blacklist_re.search(filename):
+ plf.close()
+ cmd = '/usr/sbin/prelink -y %s 2> /dev/null' \
+ % (re.escape(filename))
+ plf = os.popen(cmd, 'rb')
+ prelink = True
+
+ fsize = 0
+ if py24compat:
+ chksum = md5.new()
+ else:
+ chksum = hashlib.md5()
+ while 1:
+ data = plf.read()
+ if not data:
+ break
+ fsize += len(data)
+ chksum.update(data)
+ plf.close()
+ file_md5 = chksum.hexdigest()
+ if prelink:
+ return file_md5, fsize
+ else:
+ return file_md5, 0
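+
+# A minimal usage sketch (hypothetical filename). The second value is 0
+# unless the file was prelinked, in which case it is the unprelinked size:
+# file_md5, fsize = prelink_md5_check('/bin/ls')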
+
+def prelink_size_check(filename):
+ """
+ This check is only done if prelink_md5_check() has not been done first.
+
+ Checks if a file is prelinked. If it is, run it through prelink -y
+ to get the unprelinked file size.
+
+ Return 0 if the file was not prelinked, otherwise return the file size.
+
+ """
+ fsize = 0
+ try:
+ plf = open(filename, "rb")
+ except IOError:
+ return False
+
+ if prelink_exists:
+ if isprelink_imported:
+ plfd = plf.fileno()
+ if isprelink(plfd):
+ plf.close()
+ cmd = '/usr/sbin/prelink -y %s 2> /dev/null' \
+ % (re.escape(filename))
+ plf = os.popen(cmd, 'rb')
+
+ while 1:
+ data = plf.read()
+ if not data:
+ break
+ fsize += len(data)
+
+ elif whitelist_re.search(filename) and not blacklist_re.search(filename):
+ # print "***** Warning isprelink extension failed to import ******"
+ plf.close()
+ cmd = '/usr/sbin/prelink -y %s 2> /dev/null' \
+ % (re.escape(filename))
+ plf = os.popen(cmd, 'rb')
+
+ while 1:
+ data = plf.read()
+ if not data:
+ break
+ fsize += len(data)
+
+ plf.close()
+
+ return fsize
+
+def debug_verify_flags(vflags):
+ """
+ Decodes the verify flags bits.
+ """
+ if vflags & RPMVERIFY_MD5:
+ print('RPMVERIFY_MD5')
+ if vflags & RPMVERIFY_FILESIZE:
+ print('RPMVERIFY_FILESIZE')
+ if vflags & RPMVERIFY_LINKTO:
+ print('RPMVERIFY_LINKTO')
+ if vflags & RPMVERIFY_USER:
+ print('RPMVERIFY_USER')
+ if vflags & RPMVERIFY_GROUP:
+ print('RPMVERIFY_GROUP')
+ if vflags & RPMVERIFY_MTIME:
+ print('RPMVERIFY_MTIME')
+ if vflags & RPMVERIFY_MODE:
+ print('RPMVERIFY_MODE')
+ if vflags & RPMVERIFY_RDEV:
+ print('RPMVERIFY_RDEV')
+ if vflags & RPMVERIFY_CONTEXTS:
+ print('RPMVERIFY_CONTEXTS')
+ if vflags & RPMVERIFY_READLINKFAIL:
+ print('RPMVERIFY_READLINKFAIL')
+ if vflags & RPMVERIFY_READFAIL:
+ print('RPMVERIFY_READFAIL')
+ if vflags & RPMVERIFY_LSTATFAIL:
+ print('RPMVERIFY_LSTATFAIL')
+ if vflags & RPMVERIFY_LGETFILECONFAIL:
+ print('RPMVERIFY_LGETFILECONFAIL')
+
+def debug_file_flags(fflags):
+ """
+ Decodes the file flags bits.
+ """
+ if fflags & rpm.RPMFILE_CONFIG:
+ print('rpm.RPMFILE_CONFIG')
+
+ if fflags & rpm.RPMFILE_DOC:
+ print('rpm.RPMFILE_DOC')
+
+ if fflags & rpm.RPMFILE_ICON:
+ print('rpm.RPMFILE_ICON')
+
+ if fflags & rpm.RPMFILE_MISSINGOK:
+ print('rpm.RPMFILE_MISSINGOK')
+
+ if fflags & rpm.RPMFILE_NOREPLACE:
+ print('rpm.RPMFILE_NOREPLACE')
+
+ if fflags & rpm.RPMFILE_GHOST:
+ print('rpm.RPMFILE_GHOST')
+
+ if fflags & rpm.RPMFILE_LICENSE:
+ print('rpm.RPMFILE_LICENSE')
+
+ if fflags & rpm.RPMFILE_README:
+ print('rpm.RPMFILE_README')
+
+ if fflags & rpm.RPMFILE_EXCLUDE:
+ print('rpm.RPMFILE_EXCLUDE')
+
+ if fflags & rpm.RPMFILE_UNPATCHED:
+ print('rpm.RPMFILE_UNPATCHED')
+
+ if fflags & rpm.RPMFILE_PUBKEY:
+ print('rpm.RPMFILE_PUBKEY')
+
+def rpm_verify_file(fileinfo, rpmlinktos, omitmask):
+ """
+ Verify a single file from a package.
+
+ Returns a list of error flags. The list entries are strings that are
+ the same as the labels for the bitwise flags used in the C code.
+ (The caller appends the file type character and the file name.)
+
+ """
+ (fname, fsize, fmode, fmtime, fflags, frdev, finode, fnlink, fstate, \
+ vflags, fuser, fgroup, fmd5) = fileinfo
+
+ # 1. rpmtsRootDir stuff. What does it do and where do I get it from?
+
+ file_results = []
+ flags = vflags
+
+ # Check to see if the file was installed - if not pretend all is ok.
+ # This is what the rpm C code does!
+ if fstate != rpm.RPMFILE_STATE_NORMAL:
+ return file_results
+
+ # Get the installed files stats
+ try:
+ lstat = os.lstat(fname)
+ except OSError:
+ if not (fflags & (rpm.RPMFILE_MISSINGOK|rpm.RPMFILE_GHOST)):
+ file_results.append('RPMVERIFY_LSTATFAIL')
+ #file_results.append(fname)
+ return file_results
+
+ # 5. Contexts? SELinux stuff?
+
+ # Setup what checks to do. This is straight out of the C code.
+ if stat.S_ISDIR(lstat.st_mode):
+ flags &= DIR_FLAGS
+ elif stat.S_ISLNK(lstat.st_mode):
+ flags &= LINK_FLAGS
+ elif stat.S_ISFIFO(lstat.st_mode):
+ flags &= FIFO_FLAGS
+ elif stat.S_ISCHR(lstat.st_mode):
+ flags &= CHR_FLAGS
+ elif stat.S_ISBLK(lstat.st_mode):
+ flags &= BLK_FLAGS
+ else:
+ flags &= REG_FLAGS
+
+ if (fflags & rpm.RPMFILE_GHOST):
+ flags &= GHOST_FLAGS
+
+ flags &= ~(omitmask | RPMVERIFY_FAILURES)
+
+ # 8. SELinux stuff.
+
+ prelink_size = 0
+ if flags & RPMVERIFY_MD5:
+ prelink_md5, prelink_size = prelink_md5_check(fname)
+ if prelink_md5 == False:
+ file_results.append('RPMVERIFY_MD5')
+ file_results.append('RPMVERIFY_READFAIL')
+ elif prelink_md5 != fmd5:
+ file_results.append('RPMVERIFY_MD5')
+
+ if flags & RPMVERIFY_LINKTO:
+ linkto = os.readlink(fname)
+ if not linkto:
+ file_results.append('RPMVERIFY_READLINKFAIL')
+ file_results.append('RPMVERIFY_LINKTO')
+ else:
+ if len(rpmlinktos) == 0 or linkto != rpmlinktos:
+ file_results.append('RPMVERIFY_LINKTO')
+
+ if flags & RPMVERIFY_FILESIZE:
+ if not (flags & RPMVERIFY_MD5): # prelink check hasn't been done.
+ prelink_size = prelink_size_check(fname)
+ if (prelink_size != 0): # This is a prelinked file.
+ if (prelink_size != fsize):
+ file_results.append('RPMVERIFY_FILESIZE')
+ elif lstat.st_size != fsize: # It wasn't a prelinked file.
+ file_results.append('RPMVERIFY_FILESIZE')
+
+ if flags & RPMVERIFY_MODE:
+ metamode = fmode
+ filemode = lstat.st_mode
+
+ # Comparing the type of %ghost files is meaningless, but perms are ok.
+ if fflags & rpm.RPMFILE_GHOST:
+ metamode &= ~0xf000
+ filemode &= ~0xf000
+
+ if (stat.S_IFMT(metamode) != stat.S_IFMT(filemode)) or \
+ (stat.S_IMODE(metamode) != stat.S_IMODE(filemode)):
+ file_results.append('RPMVERIFY_MODE')
+
+ if flags & RPMVERIFY_RDEV:
+ if (stat.S_ISCHR(fmode) != stat.S_ISCHR(lstat.st_mode) or
+ stat.S_ISBLK(fmode) != stat.S_ISBLK(lstat.st_mode)):
+ file_results.append('RPMVERIFY_RDEV')
+ elif (s_isdev(fmode) & s_isdev(lstat.st_mode)):
+ st_rdev = lstat.st_rdev
+ if frdev != st_rdev:
+ file_results.append('RPMVERIFY_RDEV')
+
+ if flags & RPMVERIFY_MTIME:
+ if lstat.st_mtime != fmtime:
+ file_results.append('RPMVERIFY_MTIME')
+
+ if flags & RPMVERIFY_USER:
+ try:
+ user = pwd.getpwuid(lstat.st_uid)[0]
+ except KeyError:
+ user = None
+ if not user or not fuser or (user != fuser):
+ file_results.append('RPMVERIFY_USER')
+
+ if flags & RPMVERIFY_GROUP:
+ try:
+ group = grp.getgrgid(lstat.st_gid)[0]
+ except KeyError:
+ group = None
+ if not group or not fgroup or (group != fgroup):
+ file_results.append('RPMVERIFY_GROUP')
+
+ return file_results
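+
+# A sketch of a typical non-empty return value for a file whose contents
+# and mtime changed (hypothetical): ['RPMVERIFY_MD5', 'RPMVERIFY_MTIME']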
+
+def rpm_verify_dependencies(header):
+ """
+ Check package dependencies. Header is an rpm.hdr.
+
+ Don't like opening another ts to do this, but
+ it was the only way I could find of clearing the ts
+ out.
+
+ Have asked on the rpm-maint list on how to do
+ this the right way (28 Feb 2007).
+
+ ts.check() returns:
+
+ ((name, version, release), (reqname, reqversion), \
+ flags, suggest, sense)
+
+ """
+ _ts1 = rpmtransactionset()
+ _ts1.addInstall(header, 'Dep Check', 'i')
+ dep_errors = _ts1.check()
+ _ts1.closeDB()
+ return dep_errors
+
+def rpm_verify_package(vp_ts, header, verify_options):
+ """
+ Verify a single package specified by header. Header is an rpm.hdr.
+
+ If errors are found it returns a dictionary of errors.
+
+ """
+ # Set some transaction level flags.
+ vsflags = 0
+ if 'nodigest' in verify_options:
+ vsflags |= rpm._RPMVSF_NODIGESTS
+ if 'nosignature' in verify_options:
+ vsflags |= rpm._RPMVSF_NOSIGNATURES
+ ovsflags = vp_ts.setVSFlags(vsflags)
+
+ # Map from the Python options to the rpm bitwise flags.
+ omitmask = 0
+
+ if 'nolinkto' in verify_options:
+ omitmask |= VERIFY_LINKTO
+ if 'nomd5' in verify_options:
+ omitmask |= VERIFY_MD5
+ if 'nosize' in verify_options:
+ omitmask |= VERIFY_SIZE
+ if 'nouser' in verify_options:
+ omitmask |= VERIFY_USER
+ if 'nogroup' in verify_options:
+ omitmask |= VERIFY_GROUP
+ if 'nomtime' in verify_options:
+ omitmask |= VERIFY_MTIME
+ if 'nomode' in verify_options:
+ omitmask |= VERIFY_MODE
+ if 'nordev' in verify_options:
+ omitmask |= VERIFY_RDEV
+
+ omitmask = ((~omitmask & VERIFY_ATTRS) ^ VERIFY_ATTRS)
+ #print 'omitmask =', omitmask
+
+ package_results = {}
+
+ # Check Signatures and Digests.
+ # No idea what this might return. Need to break something to see.
+ # Setting the vsflags above determines what gets checked in the header.
+ hdr_stat = vp_ts.hdrCheck(header.unload())
+ if hdr_stat:
+ package_results['hdr'] = hdr_stat
+
+ # Check package dependencies.
+ if 'nodeps' not in verify_options:
+ dep_stat = rpm_verify_dependencies(header)
+ if dep_stat:
+ package_results['deps'] = dep_stat
+
+ # Check all the package files.
+ if 'nofiles' not in verify_options:
+ vp_fi = header.fiFromHeader()
+ for fileinfo in vp_fi:
+ # Do not bother doing anything with ghost files.
+ # This is what RPM does.
+ if fileinfo[4] & rpm.RPMFILE_GHOST:
+ continue
+
+ # This is only needed because of an inconsistency in the
+ # rpm.fi interface.
+ linktos = vp_fi.FLink()
+
+ file_stat = rpm_verify_file(fileinfo, linktos, omitmask)
+
+ #if len(file_stat) > 0 or options.verbose:
+ if len(file_stat) > 0:
+ fflags = fileinfo[4]
+ if fflags & rpm.RPMFILE_CONFIG:
+ file_stat.append('c')
+ elif fflags & rpm.RPMFILE_DOC:
+ file_stat.append('d')
+ elif fflags & rpm.RPMFILE_GHOST:
+ file_stat.append('g')
+ elif fflags & rpm.RPMFILE_LICENSE:
+ file_stat.append('l')
+ elif fflags & rpm.RPMFILE_PUBKEY:
+ file_stat.append('P')
+ elif fflags & rpm.RPMFILE_README:
+ file_stat.append('r')
+ else:
+ file_stat.append(' ')
+
+ file_stat.append(fileinfo[0]) # The filename.
+ package_results.setdefault('files', []).append(file_stat)
+
+ # Run the verify script if there is one.
+ # Do we want this?
+ #if 'noscripts' not in verify_options:
+ # script_stat = rpmVerifyscript()
+ # if script_stat:
+ # package_results['script'] = script_stat
+
+ # If there have been any errors, add the package nevra to the result.
+ if len(package_results) > 0:
+ package_results.setdefault('nevra', (header[rpm.RPMTAG_NAME], \
+ header[rpm.RPMTAG_EPOCH], \
+ header[rpm.RPMTAG_VERSION], \
+ header[rpm.RPMTAG_RELEASE], \
+ header[rpm.RPMTAG_ARCH]))
+ else:
+ package_results = None
+
+ # Put things back the way we found them.
+ vsflags = vp_ts.setVSFlags(ovsflags)
+
+ return package_results
+
+def rpm_verify(verify_ts, verify_pkgspec, verify_options=[]):
+ """
+ Requires rpmtransactionset() to be run first to get a ts.
+
+ pkgspec is a dict specifying the package
+ e.g.:
+ For a single package
+ { name='foo', epoch='20', version='1', release='1', arch='x86_64'}
+
+ For all packages
+ {}
+
+ Or any combination of keywords to select one or more packages to verify.
+
+ options is a list of 'rpm --verify' options. Default is to check everything.
+ e.g.:
+ [ 'nodeps', 'nodigest', 'nofiles', 'noscripts', 'nosignature',
+ 'nolinkto', 'nomd5', 'nosize', 'nouser', 'nogroup', 'nomtime',
+ 'nomode', 'nordev' ]
+
+ Returns a list. One list entry per package. Each list entry is a
+ dictionary. Dict keys are 'files', 'deps', 'nevra' and 'hdr'.
+ Entries only get added for failures. If nothing failed, an empty
+ list is returned.
+
+ It's all a bit messy and probably needs reviewing.
+
+ [ { 'hdr': [???],
+ 'deps': [((name, version, release), (reqname, reqversion),
+ flags, suggest, sense), .... ]
+ 'files': [ ['filename1', 'RPMVERIFY_GROUP', 'RPMVERIFY_USER' ],
+ ['filename2', 'RPMVERIFY_LSTATFAIL']]
+ 'nevra': ['name1', 'epoch1', 'version1', 'release1', 'arch1'] }
+ { 'hdr': [???],
+ 'deps': [((name, version, release), (reqname, reqversion),
+ flags, suggest, sense), .... ]
+ 'files': [ ['filename', 'RPMVERIFY_GROUP', 'RPMVERIFY_USER' ],
+ ['filename2', 'RPMVERIFY_LSTATFAIL']]
+ 'nevra': ['name2', 'epoch2', 'version2', 'release2', 'arch2'] } ]
+
+ """
+ verify_results = []
+ headers = getheadersbykeyword(verify_ts, **verify_pkgspec)
+ for header in headers:
+ result = rpm_verify_package(verify_ts, header, verify_options)
+ if result:
+ verify_results.append(result)
+
+ return verify_results
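+
+# A minimal usage sketch (hypothetical package name):
+# vts = rpmtransactionset()
+# for failure in rpm_verify(vts, {'name': 'foo'}, ['nodeps', 'nomtime']):
+# print(failure['nevra'])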
+
+def rpmtransactionset():
+ """
+ A simple wrapper for rpm.TransactionSet() to keep everything together.
+ Might use it to set some ts level flags later.
+
+ """
+ ts = rpm.TransactionSet()
+ return ts
+
+class Rpmtscallback(object):
+ """
+ Callback for ts.run(). Used for adding, upgrading and removing packages.
+ Starting with all possible reason codes, but bcfg2 will probably only
+ make use of a few of them.
+
+ Mostly just printing stuff at the moment to understand how the callback
+ is used.
+
+ """
+ def __init__(self):
+ self.fdnos = {}
+
+ def callback(self, reason, amount, total, key, client_data):
+ """
+ Generic rpmts call back.
+ """
+ if reason == rpm.RPMCALLBACK_INST_OPEN_FILE:
+ pass
+ #print 'rpm.RPMCALLBACK_INST_OPEN_FILE'
+ elif reason == rpm.RPMCALLBACK_INST_CLOSE_FILE:
+ pass
+ #print 'rpm.RPMCALLBACK_INST_CLOSE_FILE'
+ elif reason == rpm.RPMCALLBACK_INST_START:
+ pass
+ #print 'rpm.RPMCALLBACK_INST_START'
+ elif reason == rpm.RPMCALLBACK_TRANS_PROGRESS or \
+ reason == rpm.RPMCALLBACK_INST_PROGRESS:
+ pass
+ #print 'rpm.RPMCALLBACK_TRANS_PROGRESS or \
+ # rpm.RPMCALLBACK_INST_PROGRESS'
+ elif reason == rpm.RPMCALLBACK_TRANS_START:
+ pass
+ #print 'rpm.RPMCALLBACK_TRANS_START'
+ elif reason == rpm.RPMCALLBACK_TRANS_STOP:
+ pass
+ #print 'rpm.RPMCALLBACK_TRANS_STOP'
+ elif reason == rpm.RPMCALLBACK_REPACKAGE_START:
+ pass
+ #print 'rpm.RPMCALLBACK_REPACKAGE_START'
+ elif reason == rpm.RPMCALLBACK_REPACKAGE_PROGRESS:
+ pass
+ #print 'rpm.RPMCALLBACK_REPACKAGE_PROGRESS'
+ elif reason == rpm.RPMCALLBACK_REPACKAGE_STOP:
+ pass
+ #print 'rpm.RPMCALLBACK_REPACKAGE_STOP'
+ elif reason == rpm.RPMCALLBACK_UNINST_PROGRESS:
+ pass
+ #print 'rpm.RPMCALLBACK_UNINST_PROGRESS'
+ elif reason == rpm.RPMCALLBACK_UNINST_START:
+ pass
+ #print 'rpm.RPMCALLBACK_UNINST_START'
+ elif reason == rpm.RPMCALLBACK_UNINST_STOP:
+ pass
+ #print 'rpm.RPMCALLBACK_UNINST_STOP'
+ #print '***Package ', key, ' deleted ***'
+ # How do we get at this?
+ # RPM.modified += key
+ elif reason == rpm.RPMCALLBACK_UNPACK_ERROR:
+ pass
+ #print 'rpm.RPMCALLBACK_UNPACK_ERROR'
+ elif reason == rpm.RPMCALLBACK_CPIO_ERROR:
+ pass
+ #print 'rpm.RPMCALLBACK_CPIO_ERROR'
+ elif reason == rpm.RPMCALLBACK_UNKNOWN:
+ pass
+ #print 'rpm.RPMCALLBACK_UNKNOWN'
+ else:
+ print('ERROR - Fell through callback')
+
+ #print reason, amount, total, key, client_data
+
+def rpm_erase(erase_pkgspecs, erase_flags):
+ """
+ pkgspecs is a list of pkgspec dicts specifying packages
+ e.g.:
+ For a single package
+ { name='foo', epoch='20', version='1', release='1', arch='x86_64'}
+
+ """
+ erase_ts_flags = 0
+ if 'noscripts' in erase_flags:
+ erase_ts_flags |= rpm.RPMTRANS_FLAG_NOSCRIPTS
+ if 'notriggers' in erase_flags:
+ erase_ts_flags |= rpm.RPMTRANS_FLAG_NOTRIGGERS
+ if 'repackage' in erase_flags:
+ erase_ts_flags |= rpm.RPMTRANS_FLAG_REPACKAGE
+
+ erase_ts = rpmtransactionset()
+ erase_ts.setFlags(erase_ts_flags)
+
+ for pkgspec in erase_pkgspecs:
+ idx_list = getindexbykeyword(erase_ts, **pkgspec)
+ if len(idx_list) > 1 and 'allmatches' not in erase_flags:
+ #pass
+ print('ERROR - Multiple package match for erase', pkgspec)
+ else:
+ for idx in idx_list:
+ erase_ts.addErase(idx)
+
+ #for te in erase_ts:
+ # print "%s %s:%s-%s.%s" % (te.N(), te.E(), te.V(), te.R(), te.A())
+
+ erase_problems = []
+ if 'nodeps' not in erase_flags:
+ erase_problems = erase_ts.check()
+
+ if erase_problems == []:
+ erase_ts.order()
+ erase_callback = Rpmtscallback()
+ erase_ts.run(erase_callback.callback, 'Erase')
+ #else:
+ # print 'ERROR - Dependency failures on package erase'
+ # print erase_problems
+
+ erase_ts.closeDB()
+ del erase_ts
+ return erase_problems
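+
+# A minimal usage sketch (hypothetical package spec). Dependency problems,
+# if any, are returned rather than raised:
+# problems = rpm_erase([{'name': 'foo', 'arch': 'x86_64'}], ['notriggers'])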
+
+def display_verify_file(file_results):
+ '''
+ Display file results similar to rpm --verify.
+ '''
+ filename = file_results[-1]
+ filetype = file_results[-2]
+
+ result_string = ''
+
+ if 'RPMVERIFY_LSTATFAIL' in file_results:
+ result_string = 'missing '
+ else:
+ if 'RPMVERIFY_FILESIZE' in file_results:
+ result_string = result_string + 'S'
+ else:
+ result_string = result_string + '.'
+
+ if 'RPMVERIFY_MODE' in file_results:
+ result_string = result_string + 'M'
+ else:
+ result_string = result_string + '.'
+
+ if 'RPMVERIFY_MD5' in file_results:
+ if 'RPMVERIFY_READFAIL' in file_results:
+ result_string = result_string + '?'
+ else:
+ result_string = result_string + '5'
+ else:
+ result_string = result_string + '.'
+
+ if 'RPMVERIFY_RDEV' in file_results:
+ result_string = result_string + 'D'
+ else:
+ result_string = result_string + '.'
+
+ if 'RPMVERIFY_LINKTO' in file_results:
+ if 'RPMVERIFY_READLINKFAIL' in file_results:
+ result_string = result_string + '?'
+ else:
+ result_string = result_string + 'L'
+ else:
+ result_string = result_string + '.'
+
+ if 'RPMVERIFY_USER' in file_results:
+ result_string = result_string + 'U'
+ else:
+ result_string = result_string + '.'
+
+ if 'RPMVERIFY_GROUP' in file_results:
+ result_string = result_string + 'G'
+ else:
+ result_string = result_string + '.'
+
+ if 'RPMVERIFY_MTIME' in file_results:
+ result_string = result_string + 'T'
+ else:
+ result_string = result_string + '.'
+
+ print(result_string + ' ' + filetype + ' ' + filename)
+ sys.stdout.flush()
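+
+# Example of the rpm --verify style line this prints for a config file
+# with size, md5 and mtime failures (hypothetical filename):
+# S.5....T c /etc/foo.conf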
+
+#===============================================================================
+# Some options and output to assist with development and testing.
+# These are not intended for normal use.
+if __name__ == "__main__":
+
+ p = optparse.OptionParser()
+
+ p.add_option('--name', action='store', \
+ default=None, \
+ help='''Package name to verify.
+
+ ******************************************
+ NOT SPECIFYING A NAME MEANS 'ALL' PACKAGES.
+ ******************************************
+
+ The specified operation will be carried out on all
+ instances of packages that match the package specification
+ (name, epoch, version, release, arch).''')
+
+ p.add_option('--epoch', action='store', \
+ default=None, \
+ help='''Package epoch.''')
+
+ p.add_option('--version', action='store', \
+ default=None, \
+ help='''Package version.''')
+
+ p.add_option('--release', action='store', \
+ default=None, \
+ help='''Package release.''')
+
+ p.add_option('--arch', action='store', \
+ default=None, \
+ help='''Package arch.''')
+
+ p.add_option('--erase', '-e', action='store_true', \
+ default=None, \
+ help='''****************************************************
+ REMOVE PACKAGES. THERE ARE NO WARNINGS. MULTIPLE
+ PACKAGES WILL BE REMOVED IF A FULL PACKAGE SPEC IS NOT
+ GIVEN. E.G. IF JUST A NAME IS GIVEN ALL INSTALLED
+ INSTANCES OF THAT PACKAGE WILL BE REMOVED PROVIDED
+ DEPENDENCY CHECKS PASS. IF JUST AN EPOCH IS GIVEN
+ ALL PACKAGE INSTANCES WITH THAT EPOCH WILL BE REMOVED.
+ ****************************************************''')
+
+ p.add_option('--list', '-l', action='store_true', \
+ help='''List package identity info. rpm -qa ish equivalent
+ intended for use in RefreshPackages().''')
+
+ p.add_option('--verify', action='store_true', \
+ help='''Verify Package(s). Output is only produced after all
+ packages have been verified. Be patient.''')
+
+ p.add_option('--verbose', '-v', action='store_true', \
+ help='''Verbose output for --verify option. Output is the
+ same as rpm -v --verify.''')
+
+ p.add_option('--nodeps', action='store_true', \
+ default=False, \
+ help='Do not do dependency testing.')
+
+ p.add_option('--nodigest', action='store_true', \
+ help='Do not check package digests.')
+
+ p.add_option('--nofiles', action='store_true', \
+ help='Do not do file checks.')
+
+ p.add_option('--noscripts', action='store_true', \
+ help='Do not run verification scripts.')
+
+ p.add_option('--nosignature', action='store_true', \
+ help='Do not do package signature verification.')
+
+ p.add_option('--nolinkto', action='store_true', \
+ help='Do not do symlink tests.')
+
+ p.add_option('--nomd5', action='store_true', \
+ help='''Do not do MD5 checksums on files. Note that this does
+ not work for prelink files yet.''')
+
+ p.add_option('--nosize', action='store_true', \
+ help='''Do not do file size tests. Note that this does not work
+ for prelink files yet.''')
+
+ p.add_option('--nouser', action='store_true', \
+ help='Do not check file user ownership.')
+
+ p.add_option('--nogroup', action='store_true', \
+ help='Do not check file group ownership.')
+
+ p.add_option('--nomtime', action='store_true', \
+ help='Do not check file modification times.')
+
+ p.add_option('--nomode', action='store_true', \
+ help='Do not check file modes (permissions).')
+
+ p.add_option('--nordev', action='store_true', \
+ help='Do not check device node.')
+
+ p.add_option('--notriggers', action='store_true', \
+ help='Do not generate triggers on erase.')
+
+ p.add_option('--repackage', action='store_true', \
+ help='''Do repackage on erase. Packages are put
+ in /var/spool/repackage.''')
+
+ p.add_option('--allmatches', action='store_true', \
+ help='''Remove all package instances that match the
+ pkgspec.
+
+ ***************************************************
+ NO WARNINGS ARE GIVEN. IF THERE IS NO PACKAGE SPEC
+ THAT MEANS ALL PACKAGES!!!!
+ ***************************************************''')
+
+ options, arguments = p.parse_args()
+
+ pkgspec = {}
+ rpm_options = []
+
+ if options.nodeps:
+ rpm_options.append('nodeps')
+
+ if options.nodigest:
+ rpm_options.append('nodigest')
+
+ if options.nofiles:
+ rpm_options.append('nofiles')
+
+ if options.noscripts:
+ rpm_options.append('noscripts')
+
+ if options.nosignature:
+ rpm_options.append('nosignature')
+
+ if options.nolinkto:
+ rpm_options.append('nolinkto')
+
+ if options.nomd5:
+ rpm_options.append('nomd5')
+
+ if options.nosize:
+ rpm_options.append('nosize')
+
+ if options.nouser:
+ rpm_options.append('nouser')
+
+ if options.nogroup:
+ rpm_options.append('nogroup')
+
+ if options.nomtime:
+ rpm_options.append('nomtime')
+
+ if options.nomode:
+ rpm_options.append('nomode')
+
+ if options.nordev:
+ rpm_options.append('nordev')
+
+ if options.repackage:
+ rpm_options.append('repackage')
+
+ if options.allmatches:
+ rpm_options.append('allmatches')
+
+ main_ts = rpmtransactionset()
+
+ cmdline_pkgspec = {}
+ if options.name != 'all':
+ if options.name:
+ cmdline_pkgspec['name'] = str(options.name)
+ if options.epoch:
+ cmdline_pkgspec['epoch'] = str(options.epoch)
+ if options.version:
+ cmdline_pkgspec['version'] = str(options.version)
+ if options.release:
+ cmdline_pkgspec['release'] = str(options.release)
+ if options.arch:
+ cmdline_pkgspec['arch'] = str(options.arch)
+
+ if options.verify:
+ results = rpm_verify(main_ts, cmdline_pkgspec, rpm_options)
+ for r in results:
+ files = r.get('files', '')
+ for f in files:
+ display_verify_file(f)
+
+ elif options.list:
+ for p in rpmpackagelist(main_ts):
+ print(p)
+
+ elif options.erase:
+ if options.name:
+ rpm_erase([cmdline_pkgspec], rpm_options)
+ else:
+ print('You must specify the "--name" option')
diff --git a/build/lib/Bcfg2/Client/XML.py b/build/lib/Bcfg2/Client/XML.py
new file mode 100644
index 000000000..42b1017ac
--- /dev/null
+++ b/build/lib/Bcfg2/Client/XML.py
@@ -0,0 +1,37 @@
+'''XML lib compatibility layer for the Bcfg2 client'''
+__revision__ = '$Revision$'
+
+# library will use lxml, then builtin xml.etree, then ElementTree
+
+try:
+ from lxml.etree import Element, SubElement, XML, tostring
+ from lxml.etree import XMLSyntaxError as ParseError
+ driver = 'lxml'
+except ImportError:
+ # lxml not available
+ from xml.parsers.expat import ExpatError as ParseError
+ try:
+ import xml.etree.ElementTree
+ Element = xml.etree.ElementTree.Element
+ SubElement = xml.etree.ElementTree.SubElement
+ XML = xml.etree.ElementTree.XML
+ def tostring(e, encoding=None, xml_declaration=None):
+ return xml.etree.ElementTree.tostring(e, encoding=encoding)
+ driver = 'etree-py'
+ except ImportError:
+ try:
+ import elementtree.ElementTree
+ Element = elementtree.ElementTree.Element
+ SubElement = elementtree.ElementTree.SubElement
+ XML = elementtree.ElementTree.XML
+ def tostring(e, encoding=None, xml_declaration=None):
+ return elementtree.ElementTree.tostring(e)
+ driver = 'etree'
+
+ except ImportError:
+ print("Failed to load lxml, xml.etree and elementtree.ElementTree")
+ print("Cannot continue")
+ raise SystemExit(1)
+
+# Reference the imported names so static checkers see them as used,
+# whichever driver provided them.
+len([Element, SubElement, XML, tostring, ParseError])
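+
+# A minimal usage sketch; the same calls work whichever driver was
+# selected above:
+# from Bcfg2.Client import XML
+# elt = XML.Element('Package', name='foo', type='rpm')
+# print(XML.tostring(elt))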
diff --git a/build/lib/Bcfg2/Client/__init__.py b/build/lib/Bcfg2/Client/__init__.py
new file mode 100644
index 000000000..ea60a4259
--- /dev/null
+++ b/build/lib/Bcfg2/Client/__init__.py
@@ -0,0 +1,4 @@
+"""This contains all Bcfg2 Client modules"""
+__revision__ = '$Revision$'
+
+__all__ = ["Frame", "Tools", "XML"]
diff --git a/build/lib/Bcfg2/Component.py b/build/lib/Bcfg2/Component.py
new file mode 100644
index 000000000..2a3ef20e1
--- /dev/null
+++ b/build/lib/Bcfg2/Component.py
@@ -0,0 +1,277 @@
+"""Cobalt component base."""
+
+__revision__ = '$Revision$'
+
+__all__ = ["Component", "exposed", "automatic", "run_component"]
+
+import inspect
+import logging
+import os
+import pydoc
+import sys
+import time
+import threading
+import urlparse
+import xmlrpclib
+
+import Bcfg2.Logger
+from Bcfg2.Statistics import Statistics
+from Bcfg2.SSLServer import XMLRPCServer
+
+logger = logging.getLogger()
+
+class NoExposedMethod (Exception):
+ """There is no method exposed with the given name."""
+
+def run_component(component_cls, location, daemon, pidfile_name, to_file,
+ cfile, argv=None, register=True,
+ state_name=False, cls_kwargs={}, extra_getopt='', time_out=10,
+ protocol='xmlrpc/ssl', certfile=None, keyfile=None, ca=None):
+
+ # default settings
+ level = logging.INFO
+
+ logging.getLogger().setLevel(level)
+ Bcfg2.Logger.setup_logging(component_cls.implementation,
+ True,
+ True,
+ to_file=to_file)
+
+ if daemon:
+ child_pid = os.fork()
+ if child_pid != 0:
+ return
+
+ os.setsid()
+
+ child_pid = os.fork()
+ if child_pid != 0:
+ os._exit(0)
+
+ redirect_file = open("/dev/null", "w+")
+ os.dup2(redirect_file.fileno(), sys.__stdin__.fileno())
+ os.dup2(redirect_file.fileno(), sys.__stdout__.fileno())
+ os.dup2(redirect_file.fileno(), sys.__stderr__.fileno())
+
+ os.chdir(os.sep)
+
+ pidfile = open(pidfile_name or "/dev/null", "w")
+ print >> pidfile, os.getpid()
+ pidfile.close()
+
+ component = component_cls(cfile=cfile, **cls_kwargs)
+ up = urlparse.urlparse(location)
+ port = tuple(up[1].split(':'))
+ port = (port[0], int(port[1]))
+ try:
+ server = XMLRPCServer(port, keyfile=keyfile, certfile=certfile,
+ register=register, timeout=time_out, ca=ca,
+ protocol=protocol)
+ except:
+ logger.error("Server startup failed")
+ os._exit(1)
+ server.register_instance(component)
+
+ try:
+ server.serve_forever()
+ finally:
+ server.server_close()
+ component.shutdown()
+
+def exposed(func):
+ """Mark a method to be exposed publically.
+
+ Examples:
+ class MyComponent (Component):
+ @exposed
+ def my_method (self, param1, param2):
+ do_stuff()
+
+ class MyComponent (Component):
+ def my_method (self, param1, param2):
+ do_stuff()
+ my_method = exposed(my_method)
+
+ """
+ func.exposed = True
+ return func
+
+def automatic(func, period=10):
+ """Mark a method to be run periodically."""
+ func.automatic = True
+ func.automatic_period = period
+ func.automatic_ts = -1
+ return func
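+
+# A minimal sketch of marking a component method to run every 60 seconds
+# (hypothetical component; do_tasks() below drives the schedule):
+# class MyComponent(Component):
+# def expire_sessions(self):
+# ...
+# expire_sessions = automatic(expire_sessions, 60)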
+
+def locking(func):
+ """Mark a function as being internally thread safe"""
+ func.locking = True
+ return func
+
+def readonly(func):
+ """Mark a function as read-only -- no data effects in component inst"""
+ func.readonly = True
+ return func
+
+class Component (object):
+ """Base component.
+
+ Intended to be served as an instance by Cobalt.Component.XMLRPCServer
+ >>> server = Cobalt.Component.XMLRPCServer(location, keyfile)
+ >>> component = Cobalt.Component.Component()
+ >>> server.serve_instance(component)
+
+ Class attributes:
+ name -- logical component name (e.g., "queue-manager", "process-manager")
+ implementation -- implementation identifier (e.g., "BlueGene/L", "BlueGene/P")
+
+ Methods:
+ save -- pickle the component to a file
+ do_tasks -- perform automatic tasks for the component
+
+ """
+
+ name = "component"
+ implementation = "generic"
+
+ def __init__(self, **kwargs):
+ """Initialize a new component.
+
+ Keyword arguments:
+ statefile -- file in which to save state automatically
+
+ """
+ self.statefile = kwargs.get("statefile", None)
+ self.logger = logging.getLogger("%s %s" % (self.implementation, self.name))
+ self.lock = threading.Lock()
+ self.instance_statistics = Statistics()
+
+ def do_tasks(self):
+ """Perform automatic tasks for the component.
+
+ Automatic tasks are member callables with an attribute
+ automatic == True.
+
+ """
+ for name, func in inspect.getmembers(self, callable):
+ if getattr(func, "automatic", False):
+ need_to_lock = not getattr(func, 'locking', False)
+ if (time.time() - func.automatic_ts) > \
+ func.automatic_period:
+ if need_to_lock:
+ t1 = time.time()
+ self.lock.acquire()
+ t2 = time.time()
+ self.instance_statistics.add_value('component_lock', t2-t1)
+ try:
+ mt1 = time.time()
+ try:
+ func()
+ except:
+ self.logger.error("Automatic method %s failed" \
+ % (name), exc_info=1)
+ finally:
+ mt2 = time.time()
+
+ if need_to_lock:
+ self.lock.release()
+ self.instance_statistics.add_value(name, mt2-mt1)
+ func.__dict__['automatic_ts'] = time.time()
+
+ def _resolve_exposed_method(self, method_name):
+ """Resolve an exposed method.
+
+ Arguments:
+ method_name -- name of the method to resolve
+
+ """
+ try:
+ func = getattr(self, method_name)
+ except AttributeError:
+ raise NoExposedMethod(method_name)
+ if not getattr(func, "exposed", False):
+ raise NoExposedMethod(method_name)
+ return func
+
+ def _dispatch(self, method, args, dispatch_dict):
+ """Custom XML-RPC dispatcher for components.
+
+ method -- XML-RPC method name
+ args -- tuple of parameters to method
+
+ """
+ need_to_lock = True
+ if method in dispatch_dict:
+ method_func = dispatch_dict[method]
+ else:
+ try:
+ method_func = self._resolve_exposed_method(method)
+ except NoExposedMethod:
+ self.logger.error("Unknown method %s" % (method))
+ raise xmlrpclib.Fault(7, "Unknown method %s" % method)
+ except Exception, e:
+ if getattr(e, "log", True):
+ self.logger.error(e, exc_info=True)
+ raise xmlrpclib.Fault(getattr(e, "fault_code", 1), str(e))
+
+ if getattr(method_func, 'locking', False):
+ need_to_lock = False
+ if need_to_lock:
+ lock_start = time.time()
+ self.lock.acquire()
+ lock_done = time.time()
+ try:
+ method_start = time.time()
+ try:
+ result = method_func(*args)
+ finally:
+ method_done = time.time()
+ if need_to_lock:
+ self.lock.release()
+ self.instance_statistics.add_value('component_lock',
+ lock_done - lock_start)
+ self.instance_statistics.add_value(method, method_done - method_start)
+ except xmlrpclib.Fault:
+ raise
+ except Exception, e:
+ if getattr(e, "log", True):
+ self.logger.error(e, exc_info=True)
+ raise xmlrpclib.Fault(getattr(e, "fault_code", 1), str(e))
+ return result
+
+ @exposed
+ def listMethods(self):
+ """Custom XML-RPC introspective method list."""
+ return [
+ name for name, func in inspect.getmembers(self, callable)
+ if getattr(func, "exposed", False)
+ ]
+
+ @exposed
+ def methodHelp(self, method_name):
+ """Custom XML-RPC introspective method help.
+
+ Arguments:
+ method_name -- name of method to get help on
+
+ """
+ try:
+ func = self._resolve_exposed_method(method_name)
+ except NoExposedMethod:
+ return ""
+ return pydoc.getdoc(func)
+
+ def get_name(self):
+ """The name of the component."""
+ return self.name
+ get_name = exposed(get_name)
+
+ def get_implementation(self):
+ """The implementation of the component."""
+ return self.implementation
+ get_implementation = exposed(get_implementation)
+
+ def get_statistics(self, _):
+ """Get current statistics about component execution"""
+ return self.instance_statistics.display()
+ get_statistics = exposed(get_statistics)
diff --git a/build/lib/Bcfg2/Logger.py b/build/lib/Bcfg2/Logger.py
new file mode 100644
index 000000000..e8cdd492d
--- /dev/null
+++ b/build/lib/Bcfg2/Logger.py
@@ -0,0 +1,233 @@
+"""Bcfg2 logging support"""
+__revision__ = '$Revision$'
+
+import copy
+import fcntl
+import linecache  # used by trace_process()
+import logging
+import logging.handlers
+import math
+import socket
+import struct
+import sys
+import termios
+
+logging.raiseExceptions = 0
+
+def print_attributes(attrib):
+ """Add the attributes for an element."""
+ return ' '.join(['%s="%s"' % data for data in list(attrib.items())])
+
+def print_text(text):
+ """Add text to the output (which will need normalising."""
+ charmap = {'<':'&lt;', '>':'&gt;', '&':'&amp;'}
+ return ''.join([charmap.get(char, char) for char in text]) + '\n'
+
+def xml_print(element, running_indent=0, indent=4):
+ """Add an element and its children to the return string."""
+ if (len(element.getchildren()) == 0) and (not element.text):
+ ret = (' ' * running_indent)
+ ret += '<%s %s/>\n' % (element.tag, print_attributes(element.attrib))
+ else:
+ child_indent = running_indent + indent
+ ret = (' ' * running_indent)
+ ret += '<%s %s>\n' % (element.tag, print_attributes(element.attrib))
+ if element.text:
+ ret += (' '* child_indent) + print_text(element.text)
+ for child in element.getchildren():
+ ret += xml_print(child, child_indent, indent)
+ ret += (' ' * running_indent) + '</%s>\n' % (element.tag)
+ if element.tail:
+ ret += (' ' * child_indent) + print_text(element.tail)
+ return ret
+
+class TermiosFormatter(logging.Formatter):
+ """The termios formatter displays output in a terminal-sensitive fashion."""
+
+ def __init__(self, fmt=None, datefmt=None):
+ logging.Formatter.__init__(self, fmt, datefmt)
+ if sys.stdout.isatty():
+ # now get termios info
+ try:
+ self.width = struct.unpack('hhhh', fcntl.ioctl(0, termios.TIOCGWINSZ,
+ "\000"*8))[1]
+ if self.width == 0:
+ self.width = 80
+ except:
+ self.width = 80
+ else:
+ # output to a pipe
+ self.width = 32768
+
+ def format(self, record):
+ '''format a record for display'''
+ returns = []
+ line_len = self.width
+ if isinstance(record.msg, str):
+ for line in record.msg.split('\n'):
+ if len(line) <= line_len:
+ returns.append(line)
+ else:
+ inner_lines = int(math.floor(float(len(line)) / line_len))+1
+ for i in range(inner_lines):
+ returns.append("%s" % (line[i*line_len:(i+1)*line_len]))
+ elif isinstance(record.msg, list):
+ if not record.msg:
+ return ''
+ record.msg.sort()
+ msgwidth = self.width
+ columnWidth = max([len(item) for item in record.msg])
+ columns = int(math.floor(float(msgwidth) / (columnWidth+2)))
+ lines = int(math.ceil(float(len(record.msg)) / columns))
+ for lineNumber in range(lines):
+ indices = [idx for idx in [(colNum * lines) + lineNumber
+ for colNum in range(columns)] if idx < len(record.msg)]
+ format = (len(indices) * (" %%-%ds " % columnWidth))
+ returns.append(format % tuple([record.msg[idx] for idx in indices]))
+ #elif type(record.msg) == lxml.etree._Element:
+ # returns.append(str(xml_print(record.msg)))
+ else:
+ returns.append(str(record.msg))
+ if record.exc_info:
+ returns.append(self.formatException(record.exc_info))
+ return '\n'.join(returns)
+
+class FragmentingSysLogHandler(logging.handlers.SysLogHandler):
+ """
+ This handler fragments messages into
+ chunks smaller than 250 characters
+ """
+
+ def __init__(self, procname, path, facility):
+ self.procname = procname
+ self.unixsocket = False
+ logging.handlers.SysLogHandler.__init__(self, path, facility)
+
+ def emit(self, record):
+ """Chunk and deliver records."""
+ record.name = self.procname
+ if len(str(record.msg)) > 250:
+ msgs = []
+ error = record.exc_info
+ record.exc_info = None
+ msgdata = record.msg
+ while msgdata:
+ newrec = copy.deepcopy(record)
+ newrec.msg = msgdata[:250]
+ msgs.append(newrec)
+ msgdata = msgdata[250:]
+ msgs[0].exc_info = error
+ else:
+ msgs = [record]
+ for newrec in msgs:
+ msg = self.log_format_string % (self.encodePriority(self.facility,
+ newrec.levelname.lower()), self.format(newrec))
+ try:
+ self.socket.send(msg)
+ except socket.error:
+ for i in xrange(10):
+ try:
+ if isinstance(self.address, tuple):
+ self.socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
+ self.socket.connect(self.address)
+ else:
+ self._connect_unixsocket(self.address)
+ break
+ except socket.error:
+ continue
+ try:
+ self.socket.send("Reconnected to syslog")
+ self.socket.send(msg)
+ except:
+ """
+ If we still fail then drop it. Running bcfg2-server as non-root can
+ trigger permission denied exceptions.
+ """
+ pass
+
+def setup_logging(procname, to_console=True, to_syslog=True, syslog_facility='daemon', level=0, to_file=None):
+ """Setup logging for Bcfg2 software."""
+ if hasattr(logging, 'already_setup'):
+ return
+ # add the handler to the root logger
+ if to_console:
+ console = logging.StreamHandler(sys.stdout)
+ console.setLevel(logging.DEBUG)
+ # tell the handler to use this format
+ console.setFormatter(TermiosFormatter())
+ logging.root.addHandler(console)
+ if to_syslog:
+ try:
+ try:
+ syslog = FragmentingSysLogHandler(procname, '/dev/log', syslog_facility)
+ except socket.error:
+ syslog = FragmentingSysLogHandler(procname, ('localhost', 514), syslog_facility)
+ syslog.setLevel(logging.DEBUG)
+ syslog.setFormatter(logging.Formatter('%(name)s[%(process)d]: %(message)s'))
+ logging.root.addHandler(syslog)
+ except socket.error:
+ logging.root.error("failed to activate syslogging")
+ except:
+ print("Failed to activate syslogging")
+ if to_file is not None:
+ filelog = logging.FileHandler(to_file)
+ filelog.setLevel(logging.DEBUG)
+ filelog.setFormatter(logging.Formatter('%(asctime)s %(name)s[%(process)d]: %(message)s'))
+ logging.root.addHandler(filelog)
+ logging.root.setLevel(level)
+ logging.already_setup = True
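+
+# A minimal usage sketch (hypothetical process name and log file); later
+# calls are no-ops thanks to the already_setup guard:
+# setup_logging('bcfg2', to_console=True, to_syslog=False,
+# level=logging.INFO, to_file='/var/log/bcfg2.log')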
+
+def trace_process(**kwargs):
+ """Literally log every line of python code as it runs.
+
+ Keyword arguments:
+ log -- file (name) to log to (default stderr)
+ scope -- base scope to log to (default Cobalt)
+
+ """
+
+ file_name = kwargs.get("log", None)
+ if file_name is not None:
+ log_file = open(file_name, "w")
+ else:
+ log_file = sys.stderr
+
+ scope = kwargs.get("scope", "Cobalt")
+
+ def traceit(frame, event, arg):
+ if event == "line":
+ lineno = frame.f_lineno
+ filename = frame.f_globals["__file__"]
+ if (filename.endswith(".pyc") or
+ filename.endswith(".pyo")):
+ filename = filename[:-1]
+ name = frame.f_globals["__name__"]
+ line = linecache.getline(filename, lineno)
+ print >> log_file, "%s:%s: %s" % (name, lineno, line.rstrip())
+ return traceit
+
+ sys.settrace(traceit)
+
+def log_to_stderr(logger_name, level=logging.INFO):
+ """Set up console logging."""
+ try:
+ logger = logging.getLogger(logger_name)
+ except:
+ # assume logger_name is already a logger
+ logger = logger_name
+ handler = logging.StreamHandler() # sys.stderr is the default stream
+ handler.setLevel(level)
+ handler.setFormatter(TermiosFormatter()) # investigate this formatter
+ logger.addHandler(handler)
+
+def log_to_syslog(logger_name, level=logging.INFO, format='%(name)s[%(process)d]: %(message)s'):
+ """Set up syslog logging."""
+ try:
+ logger = logging.getLogger(logger_name)
+ except:
+ # assume logger_name is already a logger
+ logger = logger_name
+ # anticipate an exception somewhere below
+ handler = logging.handlers.SysLogHandler() # investigate FragmentingSysLogHandler
+ handler.setLevel(level)
+ handler.setFormatter(logging.Formatter(format))
+ logger.addHandler(handler)
diff --git a/build/lib/Bcfg2/Options.py b/build/lib/Bcfg2/Options.py
new file mode 100644
index 000000000..1dcad6427
--- /dev/null
+++ b/build/lib/Bcfg2/Options.py
@@ -0,0 +1,334 @@
+"""Option parsing library for utilities."""
+__revision__ = '$Revision$'
+
+import ConfigParser
+import getopt
+import os
+import sys
+import Bcfg2.Client.Tools
+
+def bool_cook(x):
+ if x:
+ return True
+ else:
+ return False
+
+class OptionFailure(Exception):
+ pass
+
+DEFAULT_CONFIG_LOCATION = '/etc/bcfg2.conf'
+DEFAULT_INSTALL_PREFIX = '/usr'
+
+class Option(object):
+ cfpath = DEFAULT_CONFIG_LOCATION
+ __cfp = False
+
+ def getCFP(self):
+ if not self.__cfp:
+ self.__cfp = ConfigParser.ConfigParser()
+ self.__cfp.readfp(open(self.cfpath))
+ return self.__cfp
+ cfp = property(getCFP)
+
+ def get_cooked_value(self, value):
+ if self.boolean:
+ return True
+ if self.cook:
+ return self.cook(value)
+ else:
+ return value
+
+ def __init__(self, desc, default, cmd=False, odesc=False,
+ env=False, cf=False, cook=False, long_arg=False):
+ self.desc = desc
+ self.default = default
+ self.cmd = cmd
+ self.long = long_arg
+ if not self.long:
+ if cmd and (cmd[0] != '-' or len(cmd) != 2):
+ raise OptionFailure("Poorly formed command %s" % cmd)
+ else:
+ if cmd and (not cmd.startswith('--')):
+ raise OptionFailure("Poorly formed command %s" % cmd)
+ self.odesc = odesc
+ self.env = env
+ self.cf = cf
+ self.boolean = False
+ if not odesc and not cook:
+ self.boolean = True
+ self.cook = cook
+
+ def buildHelpMessage(self):
+ msg = ''
+ if self.cmd:
+ if not self.long:
+ msg = self.cmd.ljust(3)
+ else:
+ msg = self.cmd
+ if self.odesc:
+ if self.long:
+ msg = "%-28s" % ("%s=%s" % (self.cmd, self.odesc))
+ else:
+ msg += '%-25s' % (self.odesc)
+ else:
+ msg += '%-25s' % ('')
+ msg += "%s\n" % self.desc
+ return msg
+
+ def buildGetopt(self):
+ gstr = ''
+ if self.long:
+ return gstr
+ if self.cmd:
+ gstr = self.cmd[1]
+ if self.odesc:
+ gstr += ':'
+ return gstr
+
+ def buildLongGetopt(self):
+ if self.odesc:
+ return self.cmd[2:]+'='
+ else:
+ return self.cmd[2:]
+
+ def parse(self, opts, rawopts):
+ if self.cmd and opts:
+ # processing getopted data
+ optinfo = [opt[1] for opt in opts if opt[0] == self.cmd]
+ if optinfo:
+ if optinfo[0]:
+ self.value = self.get_cooked_value(optinfo[0])
+ else:
+ self.value = True
+ return
+ if self.cmd and self.cmd in rawopts:
+ data = rawopts[rawopts.index(self.cmd) + 1]
+ self.value = self.get_cooked_value(data)
+ return
+ # no command line option found
+ if self.env and self.env in os.environ:
+ self.value = self.get_cooked_value(os.environ[self.env])
+ return
+ if self.cf:
+ try:
+ self.value = self.get_cooked_value(self.cfp.get(*self.cf))
+ return
+ except:
+ pass
+ # default value not cooked
+ self.value = self.default
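+
+ # A minimal sketch of a standalone Option (hypothetical flag). The value
+ # is resolved from getopt data, then raw argv, the environment,
+ # bcfg2.conf, and finally the default, in that order:
+ # opt = Option('Enable the frobnicator', False, cmd='-f')
+ # opt.parse([('-f', '')], [])
+ # assert opt.value is True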
+
+class OptionSet(dict):
+ def __init__(self, *args):
+ dict.__init__(self, *args)
+ self.hm = self.buildHelpMessage()
+
+ def buildGetopt(self):
+ return ''.join([opt.buildGetopt() for opt in list(self.values())])
+
+ def buildLongGetopt(self):
+ return [opt.buildLongGetopt() for opt in list(self.values()) if opt.long]
+
+ def buildHelpMessage(self):
+ if hasattr(self, 'hm'):
+ return self.hm
+ return ' '.join([opt.buildHelpMessage() for opt in list(self.values())])
+
+ def helpExit(self, msg='', code=1):
+ if msg:
+ print(msg)
+ print("Usage:\n %s" % self.buildHelpMessage())
+ raise SystemExit(code)
+
+ def parse(self, argv, do_getopt=True):
+ '''Parse options'''
+ if do_getopt:
+ try:
+ opts, args = getopt.getopt(argv, self.buildGetopt(),
+ self.buildLongGetopt())
+ except getopt.GetoptError, err:
+ self.helpExit(err)
+ if '-h' in argv:
+ self.helpExit('', 0)
+ self['args'] = args
+ for key in list(self.keys()):
+ if key == 'args':
+ continue
+ option = self[key]
+ if do_getopt:
+ option.parse(opts, [])
+ else:
+ option.parse([], argv)
+ if hasattr(option, 'value'):
+ val = option.value
+ self[key] = val
+
+list_split = lambda x:x.replace(' ','').split(',')
+flist_split = lambda x:list_split(x.replace(':', '').lower())
+
+def colon_split(c_string):
+ if c_string:
+ return c_string.split(':')
+ return []
+
+#General options
+CFILE = Option('Specify configuration file', DEFAULT_CONFIG_LOCATION, cmd='-C',
+ odesc='<conffile>')
+LOCKFILE = Option('Specify lockfile',
+ "/var/lock/bcfg2.run",
+ cf=('components', 'lockfile'),
+ odesc='<Path to lockfile>')
+HELP = Option('Print this usage message', False, cmd='-h')
+DEBUG = Option("Enable debugging output", False, cmd='-d')
+VERBOSE = Option("Enable verbose output", False, cmd='-v')
+DAEMON = Option("Daemonize process, storing pid", False,
+ cmd='-D', odesc="<pidfile>")
+INSTALL_PREFIX = Option('Installation location', cf=('server', 'prefix'),
+ default=DEFAULT_INSTALL_PREFIX, odesc='</path>')
+SENDMAIL_PATH = Option('Path to sendmail', cf=('reports', 'sendmailpath'),
+ default='/usr/lib/sendmail')
+INTERACTIVE = Option('prompt the user for each change', default=False,
+ cmd='-I', )
+ENCODING = Option('Encoding of cfg files', default=sys.getdefaultencoding(),
+ cmd='-E', odesc='<encoding>',
+ cf=('components', 'encoding'))
+PARANOID_PATH = Option('Specify path for paranoid file backups',
+ default='/var/cache/bcfg2', cf=('paranoid', 'path'),
+ odesc='<paranoid backup path>')
+PARANOID_MAX_COPIES = Option('Specify the number of paranoid copies you want',
+ default=1, cf=('paranoid', 'max_copies'),
+ odesc='<max paranoid copies>')
+OMIT_LOCK_CHECK = Option('Omit lock check', default=False, cmd='-O')
+CORE_PROFILE = Option('profile',
+ default=False, cmd='-p', )
+
+#Metadata options
+MDATA_OWNER = Option('Default Path owner',
+ default='root', cf=('mdata', 'owner'),
+ odesc='owner permissions')
+MDATA_GROUP = Option('Default Path group',
+ default='root', cf=('mdata', 'group'),
+ odesc='group permissions')
+MDATA_IMPORTANT = Option('Default Path priority (importance)',
+ default='False', cf=('mdata', 'important'),
+ odesc='Important entries are installed first')
+MDATA_PERMS = Option('Default Path permissions',
+ '644', cf=('mdata', 'perms'),
+ odesc='octal permissions')
+MDATA_PARANOID = Option('Default Path paranoid setting',
+ 'false', cf=('mdata', 'paranoid'),
+ odesc='Path paranoid setting')
+
+#Server options
+SERVER_REPOSITORY = Option('Server repository path', '/var/lib/bcfg2',
+ cf=('server', 'repository'), cmd='-Q',
+ odesc='<repository path>')
+SERVER_PLUGINS = Option('Server plugin list', cf=('server', 'plugins'),
+ # default server plugins
+ default=[
+ 'Base',
+ 'Bundler',
+ 'Cfg',
+ 'Metadata',
+ 'Pkgmgr',
+ 'Rules',
+ 'SSHbase',
+ ],
+ cook=list_split)
+SERVER_MCONNECT = Option('Server Metadata Connector list', cook=list_split,
+ cf=('server', 'connectors'), default=['Probes'], )
+SERVER_FILEMONITOR = Option('Server file monitor', cf=('server', 'filemonitor'),
+ default='default', odesc='File monitoring driver')
+SERVER_LOCATION = Option('Server Location', cf=('components', 'bcfg2'),
+ default='https://localhost:6789', cmd='-S',
+ odesc='https://server:port')
+SERVER_STATIC = Option('Server runs on static port', cf=('components', 'bcfg2'),
+ default=False, cook=bool_cook)
+SERVER_KEY = Option('Path to SSL key', cf=('communication', 'key'),
+ default=False, cmd='--ssl-key', odesc='<ssl key>',
+ long_arg=True)
+SERVER_CERT = Option('Path to SSL certificate', default='/etc/bcfg2.key',
+ cf=('communication', 'certificate'), odesc='<ssl cert>')
+SERVER_CA = Option('Path to SSL CA Cert', default=None,
+ cf=('communication', 'ca'), odesc='<ca cert>')
+SERVER_PASSWORD = Option('Communication Password', cmd='-x', odesc='<password>',
+ cf=('communication', 'password'), default=False)
+SERVER_PROTOCOL = Option('Server Protocol', cf=('communication', 'protocol'),
+ default='xmlrpc/ssl')
+#Client options
+CLIENT_KEY = Option('Path to SSL key', cf=('communication', 'key'),
+ default=None, cmd="--ssl-key", odesc='<ssl key>',
+ long_arg=True)
+CLIENT_CERT = Option('Path to SSL certificate', default=None, cmd="--ssl-cert",
+ cf=('communication', 'certificate'), odesc='<ssl cert>',
+ long_arg=True)
+CLIENT_CA = Option('Path to SSL CA Cert', default=None, cmd="--ca-cert",
+ cf=('communication', 'ca'), odesc='<ca cert>',
+ long_arg=True)
+CLIENT_SCNS = Option('list of server commonNames', default=None, cmd="--ssl-cns",
+ cf=('communication', 'serverCommonNames'),
+ odesc='<commonName1:commonName2>', cook=list_split,
+ long_arg=True)
+CLIENT_PROFILE = Option('assert the given profile for the host',
+ default=False, cmd='-p', odesc="<profile>")
+CLIENT_RETRIES = Option('the number of times to retry network communication',
+ default='3', cmd='-R', cf=('communication', 'retries'),
+ odesc="<retry count>")
+CLIENT_DRYRUN = Option('do not actually change the system',
+ default=False, cmd='-n', )
+CLIENT_EXTRA_DISPLAY = Option('enable extra entry output',
+ default=False, cmd='-e', )
+CLIENT_PARANOID = Option('make automatic backups of config files',
+ default=False, cmd='-P', cf=('client', 'paranoid'))
+CLIENT_DRIVERS = Option('Specify tool driver set', cmd='-D',
+ cf=('client', 'drivers'),
+ odesc="<driver1,driver2>", cook=list_split,
+ default=Bcfg2.Client.Tools.default)
+CLIENT_CACHE = Option('store the configuration in a file',
+ default=False, cmd='-c', odesc="<cache path>")
+CLIENT_REMOVE = Option('force removal of additional configuration items',
+ default=False, cmd='-r', odesc="<entry type|all>")
+CLIENT_BUNDLE = Option('only configure the given bundle(s)', default=[],
+ cmd='-b', odesc='<bundle:bundle>', cook=colon_split)
+CLIENT_BUNDLEQUICK = Option('only verify/configure the given bundle(s)', default=False,
+ cmd='-Q')
+CLIENT_INDEP = Option('only configure independent entries, ignore bundles',
+                      default=False, cmd='-z')
+CLIENT_KEVLAR = Option('run in kevlar (bulletproof) mode', default=False,
+ cmd='-k', )
+CLIENT_DLIST = Option('run client in server decision list mode', default=False,
+ cmd='-l', odesc='<whitelist|blacklist>')
+CLIENT_FILE = Option('configure from a file rather than querying the server',
+ default=False, cmd='-f', odesc='<specification path>')
+CLIENT_QUICK = Option('disable some checksum verification', default=False,
+ cmd='-q', )
+CLIENT_USER = Option('the user to provide for authentication', default='root',
+ cmd='-u', cf=('communication', 'user'), odesc='<user>')
+CLIENT_SERVICE_MODE = Option('Set client service mode', default='default',
+ cmd='-s', odesc='<default|disabled|build>')
+
+# APT client tool options
+CLIENT_APT_TOOLS_INSTALL_PATH = Option('Apt tools install path',
+ cf=('APT', 'install_path'),
+ default='/usr')
+CLIENT_APT_TOOLS_VAR_PATH = Option('Apt tools var path',
+ cf=('APT', 'var_path'), default='/var')
+CLIENT_SYSTEM_ETC_PATH = Option('System etc path', cf=('APT', 'etc_path'),
+ default='/etc')
+
+#Logging options
+LOGGING_FILE_PATH = Option('Set path of file log', default=None,
+ cmd='-o', odesc='<path>', cf=('logging', 'path'))
+
+class OptionParser(OptionSet):
+ """
+ OptionParser bootstraps option parsing,
+ getting the value of the config file
+ """
+ def __init__(self, args):
+ self.Bootstrap = OptionSet([('configfile', CFILE)])
+ self.Bootstrap.parse(sys.argv[1:], do_getopt=False)
+ if self.Bootstrap['configfile'] != Option.cfpath:
+ Option.cfpath = self.Bootstrap['configfile']
+            # "__cfp" written here would be name-mangled to
+            # _OptionParser__cfp, so reset Option's private cache explicitly
+            Option._Option__cfp = False
+ OptionSet.__init__(self, args)
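+
+# Minimal usage sketch (the option names chosen here are illustrative):
+# callers build an OptionParser from a dict of Option objects, parse argv,
+# and read the resolved values back out of the mapping, e.g.
+#
+#     setup = OptionParser({'repo': SERVER_REPOSITORY, 'verbose': VERBOSE})
+#     setup.parse(sys.argv[1:])
+#     repo = setup['repo']        # value from -Q, bcfg2.conf, or the default
+#     leftover = setup['args']    # non-option arguments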
diff --git a/build/lib/Bcfg2/Proxy.py b/build/lib/Bcfg2/Proxy.py
new file mode 100644
index 000000000..275405faf
--- /dev/null
+++ b/build/lib/Bcfg2/Proxy.py
@@ -0,0 +1,316 @@
+"""RPC client access to cobalt components.
+
+Classes:
+ComponentProxy -- an RPC client proxy to Cobalt components
+
+Functions:
+load_config -- read configuration files
+
+"""
+
+__revision__ = '$Revision: $'
+
+
+from xmlrpclib import _Method
+
+import httplib
+import logging
+import re
+import socket
+
+# The ssl module is provided by either Python 2.6 or a separate ssl
+# package that works on older versions of Python (see
+# http://pypi.python.org/pypi/ssl). If neither can be found, look for
+# M2Crypto instead.
+try:
+ import ssl
+ SSL_LIB = 'py26_ssl'
+except ImportError, e:
+ from M2Crypto import SSL
+ import M2Crypto.SSL.Checker
+ SSL_LIB = 'm2crypto'
+
+
+import string
+import sys
+import time
+import urlparse
+import xmlrpclib
+
+version = sys.version_info[:2]
+has_py23 = map(int, version) >= [2, 3]
+has_py26 = map(int, version) >= [2, 6]
+
+__all__ = ["ComponentProxy", "RetryMethod", "SSLHTTPConnection", "XMLRPCTransport"]
+
+class CertificateError(Exception):
+ def __init__(self, commonName):
+ self.commonName = commonName
+
+class RetryMethod(_Method):
+ """Method with error handling and retries built in."""
+ log = logging.getLogger('xmlrpc')
+ max_retries = 4
+ def __call__(self, *args):
+ for retry in range(self.max_retries):
+ try:
+ return _Method.__call__(self, *args)
+ except xmlrpclib.ProtocolError, err:
+ self.log.error("Server failure: Protocol Error: %s %s" % \
+ (err.errcode, err.errmsg))
+ raise xmlrpclib.Fault(20, "Server Failure")
+ except xmlrpclib.Fault:
+ raise
+ except socket.error, err:
+ if hasattr(err, 'errno') and err.errno == 336265218:
+ self.log.error("SSL Key error")
+ break
+ if retry == 3:
+ self.log.error("Server failure: %s" % err)
+ raise xmlrpclib.Fault(20, err)
+ except CertificateError, ce:
+ self.log.error("Got unallowed commonName %s from server" \
+ % ce.commonName)
+ break
+ except KeyError:
+ self.log.error("Server disallowed connection")
+ break
+ except:
+ self.log.error("Unknown failure", exc_info=1)
+ break
+ time.sleep(0.5)
+ raise xmlrpclib.Fault(20, "Server Failure")
+
+# sorry jon
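+# Monkey-patch xmlrpclib so every method invoked through a ServerProxy goes
+# through RetryMethod's retry and error handling.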
+xmlrpclib._Method = RetryMethod
+
+class SSLHTTPConnection(httplib.HTTPConnection):
+ """Extension of HTTPConnection that implements SSL and related behaviors."""
+
+ logger = logging.getLogger('Bcfg2.Proxy.SSLHTTPConnection')
+
+ def __init__(self, host, port=None, strict=None, timeout=90, key=None,
+ cert=None, ca=None, scns=None, protocol='xmlrpc/ssl'):
+ """Initializes the `httplib.HTTPConnection` object and stores security
+ parameters
+
+ Parameters
+ ----------
+ host : string
+ Name of host to contact
+ port : int, optional
+ Port on which to contact the host. If none is specified,
+ the default port of 80 will be used unless the `host`
+ string has a port embedded in the form host:port.
+ strict : Boolean, optional
+ Passed to the `httplib.HTTPConnection` constructor and if
+ True, causes the `BadStatusLine` exception to be raised if
+ the status line cannot be parsed as a valid HTTP 1.0 or
+ 1.1 status.
+ timeout : int, optional
+ Causes blocking operations to timeout after `timeout`
+ seconds.
+ key : string, optional
+ The file system path to the local endpoint's SSL key. May
+ specify the same file as `cert` if using a file that
+ contains both. See
+ http://docs.python.org/library/ssl.html#ssl-certificates
+ for details. Required if using xmlrpc/ssl with client
+ certificate authentication.
+ cert : string, optional
+ The file system path to the local endpoint's SSL
+            certificate. May specify the same file as `key` if using
+ a file that contains both. See
+ http://docs.python.org/library/ssl.html#ssl-certificates
+ for details. Required if using xmlrpc/ssl with client
+ certificate authentication.
+ ca : string, optional
+ The file system path to a set of concatenated certificate
+ authority certs, which are used to validate certificates
+ passed from the other end of the connection.
+ scns : array-like, optional
+ List of acceptable server commonNames. The peer cert's
+ common name must appear in this list, otherwise the
+ connect() call will throw a `CertificateError`.
+ protocol : {'xmlrpc/ssl', 'xmlrpc/tlsv1'}, optional
+ Communication protocol to use.
+
+ """
+ if not has_py26:
+ httplib.HTTPConnection.__init__(self, host, port, strict)
+ else:
+ httplib.HTTPConnection.__init__(self, host, port, strict, timeout)
+ self.key = key
+ self.cert = cert
+ self.ca = ca
+ self.scns = scns
+ self.protocol = protocol
+ self.timeout = timeout
+
+ def connect(self):
+ """Initiates a connection using previously set attributes."""
+ if SSL_LIB == 'py26_ssl':
+ self._connect_py26ssl()
+ elif SSL_LIB == 'm2crypto':
+ self._connect_m2crypto()
+ else:
+ raise Exception, "No SSL module support"
+
+
+ def _connect_py26ssl(self):
+ """Initiates a connection using the ssl module."""
+ rawsock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+ if self.protocol == 'xmlrpc/ssl':
+ ssl_protocol_ver = ssl.PROTOCOL_SSLv23
+ elif self.protocol == 'xmlrpc/tlsv1':
+ ssl_protocol_ver = ssl.PROTOCOL_TLSv1
+ else:
+ self.logger.error("Unknown protocol %s" % (self.protocol))
+ raise Exception, "unknown protocol %s" % self.protocol
+ if self.ca:
+ other_side_required = ssl.CERT_REQUIRED
+ else:
+ other_side_required = ssl.CERT_NONE
+ self.logger.warning("No ca is specified. Cannot authenticate the server with SSL.")
+ if self.cert and not self.key:
+ self.logger.warning("SSL cert specfied, but key. Cannot authenticate this client with SSL.")
+ self.cert = None
+ if self.key and not self.cert:
+ self.logger.warning("SSL key specfied, but no cert. Cannot authenticate this client with SSL.")
+ self.key = None
+
+ if has_py23:
+ rawsock.settimeout(self.timeout)
+ self.sock = ssl.SSLSocket(rawsock, cert_reqs=other_side_required,
+ ca_certs=self.ca, suppress_ragged_eofs=True,
+ keyfile=self.key, certfile=self.cert,
+ ssl_version=ssl_protocol_ver)
+ self.sock.connect((self.host, self.port))
+ peer_cert = self.sock.getpeercert()
+ if peer_cert and self.scns:
+ scn = [x[0][1] for x in peer_cert['subject'] if x[0][0] == 'commonName'][0]
+ if scn not in self.scns:
+ raise CertificateError, scn
+ self.sock.closeSocket = True
+
+ def _connect_m2crypto(self):
+ """Initiates a connection using the M2Crypto module."""
+
+ if self.protocol == 'xmlrpc/ssl':
+ ctx = SSL.Context('sslv23')
+ elif self.protocol == 'xmlrpc/tlsv1':
+ ctx = SSL.Context('tlsv1')
+ else:
+ self.logger.error("Unknown protocol %s" % (self.protocol))
+ raise Exception, "unknown protocol %s" % self.protocol
+
+ if self.ca:
+ # Use the certificate authority to validate the cert
+ # presented by the server
+ ctx.set_verify(SSL.verify_peer | SSL.verify_fail_if_no_peer_cert, depth=9)
+ if ctx.load_verify_locations(self.ca) != 1:
+ raise Exception('No CA certs')
+ else:
+ self.logger.warning("No ca is specified. Cannot authenticate the server with SSL.")
+
+ if self.cert and self.key:
+ # A cert/key is defined, use them to support client
+ # authentication to the server
+ ctx.load_cert(self.cert, self.key)
+ elif self.cert:
+ self.logger.warning("SSL cert specfied, but key. Cannot authenticate this client with SSL.")
+ elif self.key:
+ self.logger.warning("SSL key specfied, but no cert. Cannot authenticate this client with SSL.")
+
+ self.sock = SSL.Connection(ctx)
+ if re.match('\\d+\\.\\d+\\.\\d+\\.\\d+', self.host):
+ # host is ip address
+ try:
+ hostname = socket.gethostbyaddr(self.host)[0]
+ except:
+ # fall back to ip address
+ hostname = self.host
+ else:
+ hostname = self.host
+ try:
+ self.sock.connect((hostname, self.port))
+ # automatically checks cert matches host
+ except M2Crypto.SSL.Checker.WrongHost, wr:
+ raise CertificateError, wr
+
+
+class XMLRPCTransport(xmlrpclib.Transport):
+ def __init__(self, key=None, cert=None, ca=None, scns=None, use_datetime=0, timeout=90):
+ if hasattr(xmlrpclib.Transport, '__init__'):
+ xmlrpclib.Transport.__init__(self, use_datetime)
+ self.key = key
+ self.cert = cert
+ self.ca = ca
+ self.scns = scns
+ self.timeout = timeout
+
+ def make_connection(self, host):
+ host = self.get_host_info(host)[0]
+ http = SSLHTTPConnection(host, key=self.key, cert=self.cert, ca=self.ca,
+ scns=self.scns, timeout=self.timeout)
+ https = httplib.HTTP()
+ https._setup(http)
+ return https
+
+ def request(self, host, handler, request_body, verbose=0):
+ """Send request to server and return response."""
+ h = self.make_connection(host)
+ self.send_request(h, handler, request_body)
+ self.send_host(h, host)
+ self.send_user_agent(h)
+ self.send_content(h, request_body)
+
+ errcode, errmsg, headers = h.getreply()
+
+ if errcode != 200:
+ raise xmlrpclib.ProtocolError(host + handler, errcode, errmsg, headers)
+
+ self.verbose = verbose
+ msglen = int(headers.dict['content-length'])
+ return self._get_response(h.getfile(), msglen)
+
+ def _get_response(self, fd, length):
+ # read response from input file/socket, and parse it
+ recvd = 0
+
+ p, u = self.getparser()
+
+ while recvd < length:
+ rlen = min(length - recvd, 1024)
+ response = fd.read(rlen)
+ recvd += len(response)
+ if not response:
+ break
+ if self.verbose:
+ print "body:", repr(response), len(response)
+ p.feed(response)
+
+ fd.close()
+ p.close()
+
+ return u.close()
+
+def ComponentProxy(url, user=None, password=None, key=None, cert=None, ca=None,
+ allowedServerCNs=None, timeout=90):
+
+ """Constructs proxies to components.
+
+ Arguments:
+ component_name -- name of the component to connect to
+
+ Additional arguments are passed to the ServerProxy constructor.
+
+ """
+
+ if user and password:
+ method, path = urlparse.urlparse(url)[:2]
+ newurl = "%s://%s:%s@%s" % (method, user, password, path)
+ else:
+ newurl = url
+ ssl_trans = XMLRPCTransport(key, cert, ca, allowedServerCNs, timeout=timeout)
+ return xmlrpclib.ServerProxy(newurl, allow_none=True, transport=ssl_trans)
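+
+# Minimal usage sketch (server URL and credential paths are illustrative);
+# this mirrors how the bcfg2-admin tools construct their proxies:
+#
+#     proxy = ComponentProxy('https://localhost:6789', user='root',
+#                            password='secret', key='/etc/bcfg2.key',
+#                            cert='/etc/bcfg2.crt', ca='/etc/bcfg2.crt')
+#     stats = proxy.get_statistics()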
diff --git a/build/lib/Bcfg2/SSLServer.py b/build/lib/Bcfg2/SSLServer.py
new file mode 100644
index 000000000..7c5b76664
--- /dev/null
+++ b/build/lib/Bcfg2/SSLServer.py
@@ -0,0 +1,416 @@
+"""Bcfg2 SSL server."""
+
+__revision__ = '$Revision$'
+
+__all__ = [
+ "SSLServer", "XMLRPCRequestHandler", "XMLRPCServer",
+]
+
+import os
+import sys
+import xmlrpclib
+import socket
+import SocketServer
+import SimpleXMLRPCServer
+import base64
+import select
+import signal
+import logging
+import ssl
+import threading
+import time
+
+class ForkedChild(Exception):
+ pass
+
+class XMLRPCDispatcher (SimpleXMLRPCServer.SimpleXMLRPCDispatcher):
+ logger = logging.getLogger("Cobalt.Server.XMLRPCDispatcher")
+ def __init__(self, allow_none, encoding):
+ try:
+ SimpleXMLRPCServer.SimpleXMLRPCDispatcher.__init__(self,
+ allow_none,
+ encoding)
+ except:
+ # Python 2.4?
+ SimpleXMLRPCServer.SimpleXMLRPCDispatcher.__init__(self)
+
+ self.allow_none = allow_none
+ self.encoding = encoding
+
+ def _marshaled_dispatch(self, address, data):
+ method_func = None
+ params, method = xmlrpclib.loads(data)
+ try:
+ if '.' not in method:
+ params = (address, ) + params
+ response = self.instance._dispatch(method, params, self.funcs)
+ response = (response, )
+ raw_response = xmlrpclib.dumps(response, methodresponse=1,
+ allow_none=self.allow_none,
+ encoding=self.encoding)
+ except xmlrpclib.Fault, fault:
+ raw_response = xmlrpclib.dumps(fault,
+ allow_none=self.allow_none,
+ encoding=self.encoding)
+ except:
+ self.logger.error("Unexpected handler error", exc_info=1)
+ # report exception back to server
+ raw_response = xmlrpclib.dumps(
+ xmlrpclib.Fault(1, "%s:%s" % (sys.exc_type, sys.exc_value)),
+ allow_none=self.allow_none, encoding=self.encoding)
+ return raw_response
+
+class SSLServer (SocketServer.TCPServer, object):
+
+ """TCP server supporting SSL encryption.
+
+ Methods:
+ handshake -- perform a SSL/TLS handshake
+
+ Properties:
+ url -- A url pointing to this server.
+
+ """
+
+ allow_reuse_address = True
+ logger = logging.getLogger("Cobalt.Server.TCPServer")
+
+ def __init__(self, server_address, RequestHandlerClass, keyfile=None,
+ certfile=None, reqCert=False, ca=None, timeout=None, protocol='xmlrpc/ssl'):
+
+ """Initialize the SSL-TCP server.
+
+ Arguments:
+ server_address -- address to bind to the server
+ RequestHandlerClass -- class to handle requests
+
+ Keyword arguments:
+ keyfile -- private encryption key filename (enables ssl encryption)
+ certfile -- certificate file (enables ssl encryption)
+ reqCert -- client must present certificate
+ timeout -- timeout for non-blocking request handling
+
+ """
+
+ all_iface_address = ('', server_address[1])
+ try:
+ SocketServer.TCPServer.__init__(self, all_iface_address,
+ RequestHandlerClass)
+ except socket.error:
+ self.logger.error("Failed to bind to socket")
+ raise
+
+ self.timeout = timeout
+ self.socket.settimeout(timeout)
+ self.keyfile = keyfile
+ if keyfile != None:
+ if keyfile == False or not os.path.exists(keyfile):
+ self.logger.error("Keyfile %s does not exist" % keyfile)
+ raise Exception, "keyfile doesn't exist"
+ self.certfile = certfile
+ if certfile != None:
+ if certfile == False or not os.path.exists(certfile):
+ self.logger.error("Certfile %s does not exist" % certfile)
+ raise Exception, "certfile doesn't exist"
+ self.ca = ca
+ if ca != None:
+ if ca == False or not os.path.exists(ca):
+ self.logger.error("CA %s does not exist" % ca)
+ raise Exception, "ca doesn't exist"
+ self.reqCert = reqCert
+ if ca and certfile:
+ self.mode = ssl.CERT_OPTIONAL
+ else:
+ self.mode = ssl.CERT_NONE
+ if protocol == 'xmlrpc/ssl':
+ self.ssl_protocol = ssl.PROTOCOL_SSLv23
+ elif protocol == 'xmlrpc/tlsv1':
+ self.ssl_protocol = ssl.PROTOCOL_TLSv1
+ else:
+ self.logger.error("Unknown protocol %s" % (protocol))
+ raise Exception, "unknown protocol %s" % protocol
+
+ def get_request(self):
+ (sock, sockinfo) = self.socket.accept()
+ sock.settimeout(self.timeout)
+ sslsock = ssl.wrap_socket(sock, server_side=True, certfile=self.certfile,
+ keyfile=self.keyfile, cert_reqs=self.mode,
+ ca_certs=self.ca, ssl_version=self.ssl_protocol)
+ return sslsock, sockinfo
+
+ def close_request(self, request):
+ try:
+ request.unwrap()
+ except:
+ pass
+ try:
+ request.close()
+ except:
+ pass
+
+ def _get_url(self):
+ port = self.socket.getsockname()[1]
+ hostname = socket.gethostname()
+ protocol = "https"
+ return "%s://%s:%i" % (protocol, hostname, port)
+ url = property(_get_url)
+
+
+class XMLRPCRequestHandler (SimpleXMLRPCServer.SimpleXMLRPCRequestHandler):
+
+ """Component XML-RPC request handler.
+
+ Adds support for HTTP authentication.
+
+ Exceptions:
+ CouldNotAuthenticate -- client did not present acceptable authentication information
+
+ Methods:
+ authenticate -- prompt a check of a client's provided username and password
+ handle_one_request -- handle a single rpc (optionally authenticating)
+
+ """
+ logger = logging.getLogger("Cobalt.Server.XMLRPCRequestHandler")
+
+ def authenticate(self):
+ try:
+ header = self.headers['Authorization']
+ except KeyError:
+ self.logger.error("No authentication data presented")
+ return False
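+        # The client sends HTTP basic auth: "Basic <base64('user:password')>";
+        # e.g. "Basic cm9vdDpzZWNyZXQ=" decodes to "root:secret".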
+ auth_type, auth_content = header.split()
+ auth_content = base64.standard_b64decode(auth_content)
+ try:
+ username, password = auth_content.split(":")
+ except ValueError:
+ username = auth_content
+ password = ""
+ cert = self.request.getpeercert()
+ client_address = self.request.getpeername()
+ return self.server.instance.authenticate(cert, username,
+ password, client_address)
+
+ def parse_request(self):
+ """Extends parse_request.
+
+ Optionally check HTTP authentication when parsing.
+
+ """
+ if not SimpleXMLRPCServer.SimpleXMLRPCRequestHandler.parse_request(self):
+ return False
+ try:
+ if not self.authenticate():
+ self.logger.error("Authentication Failure")
+ self.send_error(401, self.responses[401][0])
+ return False
+ except:
+ self.logger.error("Unexpected Authentication Failure", exc_info=1)
+ self.send_error(401, self.responses[401][0])
+ return False
+ return True
+
+ ### need to override do_POST here
+ def do_POST(self):
+ try:
+ max_chunk_size = 10*1024*1024
+ size_remaining = int(self.headers["content-length"])
+ L = []
+ while size_remaining:
+ try:
+ select.select([self.rfile.fileno()], [], [], 3)
+ except select.error:
+ print "got select timeout"
+ raise
+ chunk_size = min(size_remaining, max_chunk_size)
+ L.append(self.rfile.read(chunk_size))
+ size_remaining -= len(L[-1])
+ data = ''.join(L)
+ response = self.server._marshaled_dispatch(self.client_address, data)
+ except:
+ try:
+ self.send_response(500)
+ self.end_headers()
+ except:
+ (type, msg) = sys.exc_info()[:2]
+ self.logger.error("Error sending 500 response (%s): %s" % \
+ (type, msg))
+ raise
+ else:
+ # got a valid XML RPC response
+ try:
+ self.send_response(200)
+ self.send_header("Content-type", "text/xml")
+ self.send_header("Content-length", str(len(response)))
+ self.end_headers()
+ failcount = 0
+ while True:
+ try:
+ # If we hit SSL3_WRITE_PENDING here try to resend.
+ self.wfile.write(response)
+ break
+ except ssl.SSLError, e:
+ if str(e).find("SSL3_WRITE_PENDING") < 0:
+ raise
+ self.logger.error("SSL3_WRITE_PENDING")
+ failcount += 1
+ if failcount < 5:
+ continue
+ raise
+ except:
+ (type, msg) = sys.exc_info()[:2]
+ if str(type) == 'socket.error' and msg[0] == 32:
+ self.logger.warning("Connection dropped from %s" % self.client_address[0])
+ elif str(type) == 'socket.error' and msg[0] == 104:
+ self.logger.warning("Connection reset by peer: %s" % self.client_address[0])
+ else:
+ self.logger.error("Error sending response (%s): %s" % \
+ (type, msg))
+ raise
+
+ def finish(self):
+ # shut down the connection
+ if not self.wfile.closed:
+ self.wfile.flush()
+ self.wfile.close()
+ self.rfile.close()
+
+class XMLRPCServer (SocketServer.ThreadingMixIn, SSLServer,
+ XMLRPCDispatcher, object):
+
+ """Component XMLRPCServer.
+
+ Methods:
+ serve_daemon -- serve_forever in a daemonized process
+ serve_forever -- handle_one_request until not self.serve
+ shutdown -- stop serve_forever (by setting self.serve = False)
+ ping -- return all arguments received
+
+ RPC methods:
+ ping
+
+ (additional system.* methods are inherited from base dispatcher)
+
+ Properties:
+ require_auth -- the request handler is requiring authorization
+ credentials -- valid credentials being used for authentication
+
+ """
+
+ def __init__(self, server_address, RequestHandlerClass=None,
+ keyfile=None, certfile=None, ca=None, protocol='xmlrpc/ssl',
+ timeout=10,
+ logRequests=False,
+ register=True, allow_none=True, encoding=None):
+
+ """Initialize the XML-RPC server.
+
+ Arguments:
+ server_address -- address to bind to the server
+ RequestHandlerClass -- request handler used by TCP server (optional)
+
+ Keyword arguments:
+ keyfile -- private encryption key filename
+ certfile -- certificate file
+ logRequests -- log all requests (default False)
+ register -- presence should be reported to service-location (default True)
+ allow_none -- allow None values in xml-rpc
+ encoding -- encoding to use for xml-rpc (default UTF-8)
+
+ """
+
+ XMLRPCDispatcher.__init__(self, allow_none, encoding)
+
+ if not RequestHandlerClass:
+ class RequestHandlerClass (XMLRPCRequestHandler):
+ """A subclassed request handler to prevent class-attribute conflicts."""
+
+ SSLServer.__init__(self,
+ server_address, RequestHandlerClass, ca=ca,
+ timeout=timeout, keyfile=keyfile, certfile=certfile, protocol=protocol)
+ self.logRequests = logRequests
+ self.serve = False
+ self.register = register
+ self.register_introspection_functions()
+ self.register_function(self.ping)
+ self.logger.info("service available at %s" % self.url)
+ self.timeout = timeout
+
+ def _tasks_thread(self):
+ try:
+ while self.serve:
+ try:
+ if self.instance and hasattr(self.instance, 'do_tasks'):
+ self.instance.do_tasks()
+ except:
+ self.logger.error("Unexpected task failure", exc_info=1)
+ time.sleep(self.timeout)
+ except:
+ self.logger.error("tasks_thread failed", exc_info=1)
+
+ def server_close(self):
+ SSLServer.server_close(self)
+ self.logger.info("server_close()")
+
+ def _get_require_auth(self):
+ return getattr(self.RequestHandlerClass, "require_auth", False)
+ def _set_require_auth(self, value):
+ self.RequestHandlerClass.require_auth = value
+ require_auth = property(_get_require_auth, _set_require_auth)
+
+ def _get_credentials(self):
+ try:
+ return self.RequestHandlerClass.credentials
+ except AttributeError:
+ return dict()
+ def _set_credentials(self, value):
+ self.RequestHandlerClass.credentials = value
+ credentials = property(_get_credentials, _set_credentials)
+
+ def register_instance(self, instance, *args, **kwargs):
+ XMLRPCDispatcher.register_instance(self, instance, *args, **kwargs)
+ try:
+ name = instance.name
+ except AttributeError:
+ name = "unknown"
+ if hasattr(instance, 'plugins'):
+ for pname, pinst in instance.plugins.iteritems():
+ for mname in pinst.__rmi__:
+ xmname = "%s.%s" % (pname, mname)
+ fn = getattr(pinst, mname)
+ self.register_function(fn, name=xmname)
+ self.logger.info("serving %s at %s" % (name, self.url))
+
+ def serve_forever(self):
+ """Serve single requests until (self.serve == False)."""
+ self.serve = True
+ self.task_thread = threading.Thread(target=self._tasks_thread)
+ self.task_thread.start()
+ self.logger.info("serve_forever() [start]")
+ signal.signal(signal.SIGINT, self._handle_shutdown_signal)
+ signal.signal(signal.SIGTERM, self._handle_shutdown_signal)
+
+ try:
+ while self.serve:
+ try:
+ self.handle_request()
+ except socket.timeout:
+ pass
+ except select.error:
+ pass
+ except:
+ self.logger.error("Got unexpected error in handle_request",
+ exc_info=1)
+ finally:
+ self.logger.info("serve_forever() [stop]")
+
+ def shutdown(self):
+ """Signal that automatic service should stop."""
+ self.serve = False
+
+ def _handle_shutdown_signal(self, *_):
+ self.shutdown()
+
+ def ping(self, *args):
+ """Echo response."""
+ self.logger.info("ping(%s)" % (", ".join([repr(arg) for arg in args])))
+ return args
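+
+# Minimal usage sketch (key/cert paths and the `instance` object, which must
+# provide authenticate() and _dispatch(), are illustrative):
+#
+#     server = XMLRPCServer(('', 6789), keyfile='/etc/bcfg2.key',
+#                           certfile='/etc/bcfg2.crt', ca='/etc/bcfg2.crt')
+#     server.register_instance(instance)
+#     server.serve_forever()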
diff --git a/build/lib/Bcfg2/Server/Admin/Backup.py b/build/lib/Bcfg2/Server/Admin/Backup.py
new file mode 100644
index 000000000..d6458f97d
--- /dev/null
+++ b/build/lib/Bcfg2/Server/Admin/Backup.py
@@ -0,0 +1,33 @@
+import glob
+import os
+import sys
+import time
+import tarfile
+import Bcfg2.Server.Admin
+import Bcfg2.Options
+
+class Backup(Bcfg2.Server.Admin.MetadataCore):
+ __shorthelp__ = "Make a backup of the Bcfg2 repository."
+ __longhelp__ = (__shorthelp__ + "\n\nbcfg2-admin backup")
+ #"\n\nbcfg2-admin backup restore")
+ __usage__ = ("bcfg2-admin backup")
+
+ def __init__(self, configfile):
+ Bcfg2.Server.Admin.MetadataCore.__init__(self, configfile,
+ self.__usage__)
+
+ def __call__(self, args):
+ Bcfg2.Server.Admin.MetadataCore.__call__(self, args)
+ # Get Bcfg2 repo directory
+ opts = {'repo': Bcfg2.Options.SERVER_REPOSITORY}
+ setup = Bcfg2.Options.OptionParser(opts)
+ setup.parse(sys.argv[1:])
+ self.datastore = setup['repo']
+ timestamp = time.strftime('%Y%m%d%H%M%S')
+ format = 'gz'
+ mode = 'w:' + format
+ filename = timestamp + '.tar' + '.' + format
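+        # e.g. "20101231235959.tar.gz", written into the repository directory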
+ out = tarfile.open(self.datastore + '/' + filename, mode=mode)
+ out.add(self.datastore, os.path.basename(self.datastore))
+ out.close()
+ print "Archive %s was stored under %s" % (filename, self.datastore)
diff --git a/build/lib/Bcfg2/Server/Admin/Bundle.py b/build/lib/Bcfg2/Server/Admin/Bundle.py
new file mode 100644
index 000000000..41cd5727e
--- /dev/null
+++ b/build/lib/Bcfg2/Server/Admin/Bundle.py
@@ -0,0 +1,100 @@
+import lxml.etree
+import glob
+import sys
+import re
+import Bcfg2.Server.Admin
+import Bcfg2.Options
+from Bcfg2.Server.Plugins.Metadata import MetadataConsistencyError
+
+class Bundle(Bcfg2.Server.Admin.MetadataCore):
+ __shorthelp__ = "Create or delete bundle entries"
+ __longhelp__ = (__shorthelp__ + #"\n\nbcfg2-admin bundle add <bundle> "
+ #"\n\nbcfg2-admin bundle del <bundle>"
+ "\n\nbcfg2-admin bundle list-xml"
+ "\n\nbcfg2-admin bundle list-genshi"
+ "\n\nbcfg2-admin bundle show")
+ __usage__ = ("bcfg2-admin bundle [options] [add|del] [group]")
+
+ def __init__(self, configfile):
+ Bcfg2.Server.Admin.MetadataCore.__init__(self, configfile,
+ self.__usage__)
+
+ def __call__(self, args):
+ Bcfg2.Server.Admin.MetadataCore.__call__(self, args)
+ reg='((?:[a-z][a-z\\.\\d\\-]+)\\.(?:[a-z][a-z\\-]+))(?![\\w\\.])'
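+        # e.g. for ".../Bundler/motd.xml" this captures "motd.xml"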
+
+ #Get all bundles out of the Bundle/ directory
+ opts = {'repo': Bcfg2.Options.SERVER_REPOSITORY}
+ setup = Bcfg2.Options.OptionParser(opts)
+ setup.parse(sys.argv[1:])
+ repo = setup['repo']
+ xml_list = glob.glob("%s/Bundler/*.xml" % repo)
+ genshi_list = glob.glob("%s/Bundler/*.genshi" % repo)
+
+ if len(args) == 0:
+ self.errExit("No argument specified.\n"
+ "Please see bcfg2-admin bundle help for usage.")
+# if args[0] == 'add':
+# try:
+# self.metadata.add_bundle(args[1])
+# except MetadataConsistencyError:
+# print "Error in adding bundle."
+# raise SystemExit(1)
+# elif args[0] in ['delete', 'remove', 'del', 'rm']:
+# try:
+# self.metadata.remove_bundle(args[1])
+# except MetadataConsistencyError:
+# print "Error in deleting bundle."
+# raise SystemExit(1)
+ #Lists all available xml bundles
+ elif args[0] in ['list-xml', 'ls-xml']:
+ bundle_name = []
+ for bundle_path in xml_list:
+ rg = re.compile(reg,re.IGNORECASE|re.DOTALL)
+ bundle_name.append(rg.search(bundle_path).group(1))
+ for bundle in bundle_name:
+ print bundle.split('.')[0]
+ #Lists all available genshi bundles
+ elif args[0] in ['list-genshi', 'ls-gen']:
+ bundle_name = []
+ for bundle_path in genshi_list:
+ rg = re.compile(reg,re.IGNORECASE|re.DOTALL)
+ bundle_name.append(rg.search(bundle_path).group(1))
+ for bundle in bundle_name:
+ print bundle.split('.')[0]
+ #Shows a list of all available bundles and prints bundle
+ #details after the user choose one bundle.
+ #FIXME: Add support for detailed output of genshi bundles
+ elif args[0] in ['show']:
+ bundle_name = []
+ bundle_list = xml_list + genshi_list
+ for bundle_path in bundle_list:
+ rg = re.compile(reg,re.IGNORECASE|re.DOTALL)
+ bundle_name.append(rg.search(bundle_path).group(1))
+ text = "Available bundles (Number of bundles: %s)" % \
+ (len(bundle_list))
+ print text
+ print "%s" % (len(text) * "-")
+ for i in range(len(bundle_list)):
+ print "[%i]\t%s" % (i, bundle_name[i])
+ print "Enter the line number of a bundle for details:",
+ lineno = raw_input()
+ if int(lineno) >= int(len(bundle_list)):
+ print "No line with this number."
+ else:
+ if '%s/Bundler/%s' % \
+ (repo, bundle_name[int(lineno)]) in genshi_list:
+ print "Detailed output for *.genshi bundle is not supported."
+ else:
+ print 'Details for the "%s" bundle:' % \
+ (bundle_name[int(lineno)].split('.')[0])
+ tree = lxml.etree.parse(bundle_list[int(lineno)])
+ #Prints bundle content
+ #print lxml.etree.tostring(tree)
+ names = ['Action', 'Package', 'Path', 'Service']
+ for name in names:
+ for node in tree.findall("//" + name):
+ print "%s:\t%s" % (name, node.attrib["name"])
+ else:
+ print "No command specified"
+ raise SystemExit(1)
diff --git a/build/lib/Bcfg2/Server/Admin/Client.py b/build/lib/Bcfg2/Server/Admin/Client.py
new file mode 100644
index 000000000..0eee22ae4
--- /dev/null
+++ b/build/lib/Bcfg2/Server/Admin/Client.py
@@ -0,0 +1,64 @@
+import lxml.etree
+import Bcfg2.Server.Admin
+from Bcfg2.Server.Plugins.Metadata import MetadataConsistencyError
+
+class Client(Bcfg2.Server.Admin.MetadataCore):
+ __shorthelp__ = "Create, delete, or modify client entries"
+ __longhelp__ = (__shorthelp__ + "\n\nbcfg2-admin client add <client> "
+ "attr1=val1 attr2=val2\n"
+ "\n\nbcfg2-admin client update <client> "
+ "attr1=val1 attr2=val2\n"
+ "\n\nbcfg2-admin client list\n"
+ "bcfg2-admin client del <client>")
+ __usage__ = ("bcfg2-admin client [options] [add|del|update|list] [attr=val]")
+
+ def __init__(self, configfile):
+ Bcfg2.Server.Admin.MetadataCore.__init__(self, configfile,
+ self.__usage__)
+
+ def __call__(self, args):
+ Bcfg2.Server.Admin.MetadataCore.__call__(self, args)
+ if len(args) == 0:
+ self.errExit("No argument specified.\n"
+ "Please see bcfg2-admin client help for usage.")
+ if args[0] == 'add':
+ attr_d = {}
+ for i in args[2:]:
+ attr, val = i.split('=', 1)
+ if attr not in ['profile', 'uuid', 'password',
+ 'location', 'secure', 'address']:
+ print "Attribute %s unknown" % attr
+ raise SystemExit(1)
+ attr_d[attr] = val
+ try:
+ self.metadata.add_client(args[1], attr_d)
+ except MetadataConsistencyError:
+ print "Error in adding client"
+ raise SystemExit(1)
+ elif args[0] in ['update', 'up']:
+ attr_d = {}
+ for i in args[2:]:
+ attr, val = i.split('=', 1)
+ if attr not in ['profile', 'uuid', 'password',
+ 'location', 'secure', 'address']:
+ print "Attribute %s unknown" % attr
+ raise SystemExit(1)
+ attr_d[attr] = val
+ try:
+ self.metadata.update_client(args[1], attr_d)
+ except MetadataConsistencyError:
+ print "Error in updating client"
+ raise SystemExit(1)
+ elif args[0] in ['delete', 'remove', 'del', 'rm']:
+ try:
+ self.metadata.remove_client(args[1])
+ except MetadataConsistencyError:
+ print "Error in deleting client"
+ raise SystemExit(1)
+ elif args[0] in ['list', 'ls']:
+ tree = lxml.etree.parse(self.metadata.data + "/clients.xml")
+ for node in tree.findall("//Client"):
+ print node.attrib["name"]
+ else:
+ print "No command specified"
+ raise SystemExit(1)
diff --git a/build/lib/Bcfg2/Server/Admin/Compare.py b/build/lib/Bcfg2/Server/Admin/Compare.py
new file mode 100644
index 000000000..f97233b0e
--- /dev/null
+++ b/build/lib/Bcfg2/Server/Admin/Compare.py
@@ -0,0 +1,137 @@
+import lxml.etree, os
+import Bcfg2.Server.Admin
+
+class Compare(Bcfg2.Server.Admin.Mode):
+ __shorthelp__ = ("Determine differences between files or "
+ "directories of client specification instances")
+ __longhelp__ = (__shorthelp__ + "\n\nbcfg2-admin compare <file1> <file2>"
+ "\nbcfg2-admin compare -r <dir1> <dir2>")
+ __usage__ = ("bcfg2-admin compare <old> <new>\n\n"
+ " -r\trecursive")
+
+ def __init__(self, configfile):
+ Bcfg2.Server.Admin.Mode.__init__(self, configfile)
+ self.important = {'Package':['name', 'version'],
+ 'Service':['name', 'status'],
+ 'Directory':['name', 'owner', 'group', 'perms'],
+ 'SymLink':['name', 'to'],
+ 'ConfigFile':['name', 'owner', 'group', 'perms'],
+ 'Permissions':['name', 'perms'],
+ 'PostInstall':['name']}
+
+ def compareStructures(self, new, old):
+ for child in new.getchildren():
+ equiv = old.xpath('%s[@name="%s"]' %
+ (child.tag, child.get('name')))
+            if child.tag not in self.important:
+ print "tag type %s not handled" % (child.tag)
+ continue
+ if len(equiv) == 0:
+ print ("didn't find matching %s %s" %
+ (child.tag, child.get('name')))
+ continue
+ elif len(equiv) >= 1:
+ if child.tag == 'ConfigFile':
+ if child.text != equiv[0].text:
+ print " %s %s contents differ" \
+ % (child.tag, child.get('name'))
+ continue
+ noattrmatch = [field for field in self.important[child.tag] if \
+ child.get(field) != equiv[0].get(field)]
+ if not noattrmatch:
+ new.remove(child)
+ old.remove(equiv[0])
+ else:
+ print " %s %s attributes %s do not match" % \
+ (child.tag, child.get('name'), noattrmatch)
+ if len(old.getchildren()) == 0 and len(new.getchildren()) == 0:
+ return True
+ if new.tag == 'Independent':
+ name = 'Base'
+ else:
+ name = new.get('name')
+ both = []
+ oldl = ["%s %s" % (entry.tag, entry.get('name')) for entry in old]
+ newl = ["%s %s" % (entry.tag, entry.get('name')) for entry in new]
+ for entry in newl:
+ if entry in oldl:
+ both.append(entry)
+ newl.remove(entry)
+ oldl.remove(entry)
+ for entry in both:
+ print " %s differs (in bundle %s)" % (entry, name)
+ for entry in oldl:
+ print " %s only in old configuration (in bundle %s)" % (entry, name)
+ for entry in newl:
+ print " %s only in new configuration (in bundle %s)" % (entry, name)
+ return False
+
+ def compareSpecifications(self, path1, path2):
+ try:
+ new = lxml.etree.parse(path1).getroot()
+ except IOError:
+ print "Failed to read %s" % (path1)
+ raise SystemExit(1)
+
+ try:
+ old = lxml.etree.parse(path2).getroot()
+ except IOError:
+ print "Failed to read %s" % (path2)
+ raise SystemExit(1)
+
+ for src in [new, old]:
+ for bundle in src.findall('./Bundle'):
+ if bundle.get('name')[-4:] == '.xml':
+ bundle.set('name', bundle.get('name')[:-4])
+
+ rcs = []
+ for bundle in new.findall('./Bundle'):
+ equiv = old.xpath('Bundle[@name="%s"]' % (bundle.get('name')))
+ if len(equiv) == 0:
+ print "couldnt find matching bundle for %s" % bundle.get('name')
+ continue
+ if len(equiv) == 1:
+ if self.compareStructures(bundle, equiv[0]):
+ new.remove(bundle)
+ old.remove(equiv[0])
+ rcs.append(True)
+ else:
+ rcs.append(False)
+ else:
+ print "Unmatched bundle %s" % (bundle.get('name'))
+ rcs.append(False)
+ i1 = new.find('./Independent')
+ i2 = old.find('./Independent')
+ if self.compareStructures(i1, i2):
+ new.remove(i1)
+ old.remove(i2)
+ else:
+ rcs.append(False)
+ return False not in rcs
+
+ def __call__(self, args):
+ Bcfg2.Server.Admin.Mode.__call__(self, args)
+ if len(args) == 0:
+ self.errExit("No argument specified.\n"
+ "Please see bcfg2-admin compare help for usage.")
+ if '-r' in args:
+ args = list(args)
+ args.remove('-r')
+ (oldd, newd) = args
+ (old, new) = [os.listdir(spot) for spot in args]
+ for item in old:
+ print "Entry:", item
+ state = self.__call__([oldd + '/' + item, newd + '/' + item])
+ new.remove(item)
+ if state:
+ print "Entry:", item, "good"
+ else:
+ print "Entry:", item, "bad"
+ if new:
+ print "new has extra entries", new
+ return
+ try:
+ (old, new) = args
+        except ValueError:
+ print self.__call__.__doc__
+ raise SystemExit(1)
diff --git a/build/lib/Bcfg2/Server/Admin/Examples.py b/build/lib/Bcfg2/Server/Admin/Examples.py
new file mode 100644
index 000000000..3335c5e10
--- /dev/null
+++ b/build/lib/Bcfg2/Server/Admin/Examples.py
@@ -0,0 +1,71 @@
+import os
+import sys
+import time
+import tarfile
+from subprocess import Popen
+import Bcfg2.Options
+import Bcfg2.Server.Admin
+
+class Examples(Bcfg2.Server.Admin.MetadataCore):
+ __shorthelp__ = "Pulls in the data from the Bcfg2 sample repository"
+    __longhelp__ = (__shorthelp__ + "\n\nbcfg2-admin examples pull"
+                                    "\nbcfg2-admin examples backup")
+    __usage__ = ("bcfg2-admin examples [pull|backup]")
+
+ def __init__(self, configfile):
+ Bcfg2.Server.Admin.MetadataCore.__init__(self, configfile,
+ self.__usage__)
+
+    def __call__(self, args):
+        Bcfg2.Server.Admin.MetadataCore.__call__(self, args)
+
+        # Locate the repository the same way the other admin modes do
+        opts = {'repo': Bcfg2.Options.SERVER_REPOSITORY}
+        setup = Bcfg2.Options.OptionParser(opts)
+        setup.parse(sys.argv[1:])
+        self.datastore = setup['repo']
+
+        if len(args) == 0:
+            self.errExit("No argument specified.\n"
+                         "Please see bcfg2-admin examples help for usage.")
+
+        if args[0] == 'pull':
+            # Note: git refuses to clone if the destination path already
+            # exists and is not an empty directory.
+            try:
+                # FIXME: Repo URL is hardcoded for now
+                Popen(['git', 'clone',
+                       'https://github.com/solj/bcfg2-repo.git',
+                       self.datastore])
+            except OSError:
+                print "Error in pulling examples."
+                raise SystemExit(1)
+        elif args[0] == 'backup':
+            self.repobackup(self.datastore)
+        else:
+            print "No command specified"
+            raise SystemExit(1)
+
+    def repobackup(self, datastore):
+        """Make a backup of the existing files in the Bcfg2 repo directory."""
+        if os.path.isdir(datastore):
+            print 'Backup in progress...'
+            timestamp = time.strftime('%Y%m%d%H%M%S')
+            filename = timestamp + '.tar.gz'
+            out = tarfile.open(datastore + '/' + filename, mode='w:gz')
+            out.add(datastore, os.path.basename(datastore))
+            out.close()
+            print "Archive %s was stored under %s" % (filename, datastore)
+        else:
+            print "%s doesn't exist." % datastore
+            #raise Bcfg2.Server.Plugin.PluginInitError
diff --git a/build/lib/Bcfg2/Server/Admin/Group.py b/build/lib/Bcfg2/Server/Admin/Group.py
new file mode 100644
index 000000000..6a1c13775
--- /dev/null
+++ b/build/lib/Bcfg2/Server/Admin/Group.py
@@ -0,0 +1,66 @@
+import lxml.etree
+import Bcfg2.Server.Admin
+from Bcfg2.Server.Plugins.Metadata import MetadataConsistencyError
+
+class Group(Bcfg2.Server.Admin.MetadataCore):
+ __shorthelp__ = "Create, delete, or modify group entries"
+ __longhelp__ = (__shorthelp__ + "\n\nbcfg2-admin group add <group> "
+ "attr1=val1 attr2=val2\n"
+ "\n\nbcfg2-admin group update <group> "
+ "attr1=val1 attr2=val2\n"
+ "\n\nbcfg2-admin group list\n"
+ "bcfg2-admin group del <group>")
+ __usage__ = ("bcfg2-admin group [options] [add|del|update|list] [attr=val]")
+
+ def __init__(self, configfile):
+ Bcfg2.Server.Admin.MetadataCore.__init__(self, configfile,
+ self.__usage__)
+
+ def __call__(self, args):
+ Bcfg2.Server.Admin.MetadataCore.__call__(self, args)
+ if len(args) == 0:
+ self.errExit("No argument specified.\n"
+ "Please see bcfg2-admin group help for usage.")
+ if args[0] == 'add':
+ attr_d = {}
+ for i in args[2:]:
+ attr, val = i.split('=', 1)
+ if attr not in ['profile', 'public', 'default',
+ 'name', 'auth', 'toolset', 'category',
+ 'comment']:
+ print "Attribute %s unknown" % attr
+ raise SystemExit(1)
+ attr_d[attr] = val
+ try:
+ self.metadata.add_group(args[1], attr_d)
+ except MetadataConsistencyError:
+ print "Error in adding group"
+ raise SystemExit(1)
+ elif args[0] in ['update', 'up']:
+ attr_d = {}
+ for i in args[2:]:
+ attr, val = i.split('=', 1)
+ if attr not in ['profile', 'public', 'default',
+ 'name', 'auth', 'toolset', 'category',
+ 'comment']:
+ print "Attribute %s unknown" % attr
+ raise SystemExit(1)
+ attr_d[attr] = val
+ try:
+ self.metadata.update_group(args[1], attr_d)
+ except MetadataConsistencyError:
+ print "Error in updating group"
+ raise SystemExit(1)
+ elif args[0] in ['delete', 'remove', 'del', 'rm']:
+ try:
+ self.metadata.remove_group(args[1])
+ except MetadataConsistencyError:
+ print "Error in deleting group"
+ raise SystemExit(1)
+ elif args[0] in ['list', 'ls']:
+ tree = lxml.etree.parse(self.metadata.data + "/groups.xml")
+ for node in tree.findall("//Group"):
+ print node.attrib["name"]
+ else:
+ print "No command specified"
+ raise SystemExit(1)
diff --git a/build/lib/Bcfg2/Server/Admin/Init.py b/build/lib/Bcfg2/Server/Admin/Init.py
new file mode 100644
index 000000000..c6d1f9e3d
--- /dev/null
+++ b/build/lib/Bcfg2/Server/Admin/Init.py
@@ -0,0 +1,280 @@
+import getpass
+import os
+import random
+import socket
+import string
+import subprocess
+import Bcfg2.Server.Admin
+import Bcfg2.Server.Plugin
+import Bcfg2.Server.Plugins.Metadata
+import Bcfg2.Options
+
+# default config file
+config = '''
+[server]
+repository = %s
+plugins = %s
+
+[statistics]
+sendmailpath = %s
+database_engine = sqlite3
+# 'postgresql', 'mysql', 'mysql_old', 'sqlite3' or 'ado_mssql'.
+database_name =
+# Or path to database file if using sqlite3.
+#<repository>/etc/brpt.sqlite is default path if left empty
+database_user =
+# Not used with sqlite3.
+database_password =
+# Not used with sqlite3.
+database_host =
+# Not used with sqlite3.
+database_port =
+# Set to empty string for default. Not used with sqlite3.
+web_debug = True
+
+[communication]
+protocol = %s
+password = %s
+certificate = %s/%s
+key = %s/%s
+ca = %s/%s
+
+[components]
+bcfg2 = %s
+'''
+
+# Default groups
+groups = '''<Groups version='3.0'>
+ <Group profile='true' public='true' default='true' name='basic'>
+ <Group name='%s'/>
+ </Group>
+ <Group name='ubuntu'/>
+ <Group name='debian'/>
+ <Group name='freebsd'/>
+ <Group name='gentoo'/>
+ <Group name='redhat'/>
+ <Group name='suse'/>
+ <Group name='mandrake'/>
+ <Group name='solaris'/>
+</Groups>
+'''
+
+# Default contents of clients.xml
+clients = '''<Clients version="3.0">
+ <Client profile="basic" pingable="Y" pingtime="0" name="%s"/>
+</Clients>
+'''
+
+# Mapping of operating system names to groups
+os_list = [
+ ('Red Hat/Fedora/RHEL/RHAS/Centos', 'redhat'),
+ ('SUSE/SLES', 'suse'),
+ ('Mandrake', 'mandrake'),
+ ('Debian', 'debian'),
+ ('Ubuntu', 'ubuntu'),
+ ('Gentoo', 'gentoo'),
+ ('FreeBSD', 'freebsd')
+ ]
+
+# Complete list of plugins
+plugin_list = ['Account', 'Base', 'Bundler', 'Cfg',
+ 'Decisions', 'Deps', 'Metadata', 'Packages',
+ 'Pkgmgr', 'Probes', 'Properties', 'Rules',
+ 'Snapshots', 'SSHbase', 'Statistics', 'Svcmgr',
+ 'TCheetah', 'TGenshi']
+
+# Default list of plugins to use
+default_plugins = ['SSHbase', 'Cfg', 'Pkgmgr', 'Rules',
+ 'Metadata', 'Base', 'Bundler']
+
+def gen_password(length):
+ """Generates a random alphanumeric password with length characters."""
+ chars = string.letters + string.digits
+ newpasswd = ''
+ for i in range(length):
+ newpasswd = newpasswd + random.choice(chars)
+ return newpasswd
+
+def create_key(hostname, keypath, certpath):
+ """Creates a bcfg2.key at the directory specifed by keypath."""
+ kcstr = "openssl req -batch -x509 -nodes -subj '/C=US/ST=Illinois/L=Argonne/CN=%s' -days 1000 -newkey rsa:2048 -keyout %s -noout" % (hostname, keypath)
+ subprocess.call((kcstr), shell=True)
+ ccstr = "openssl req -batch -new -subj '/C=US/ST=Illinois/L=Argonne/CN=%s' -key %s | openssl x509 -req -days 1000 -signkey %s -out %s" % (hostname, keypath, keypath, certpath)
+ subprocess.call((ccstr), shell=True)
+ os.chmod(keypath, 0600)
+
+def create_conf(confpath, confdata):
+ # don't overwrite existing bcfg2.conf file
+ if os.path.exists(confpath):
+ result = raw_input("\nWarning: %s already exists. "
+ "Overwrite? [y/N]: " % confpath)
+ if result not in ['Y', 'y']:
+ print("Leaving %s unchanged" % confpath)
+ return
+ try:
+ open(confpath, "w").write(confdata)
+ os.chmod(confpath, 0600)
+ except Exception, e:
+ print("Error %s occured while trying to write configuration "
+ "file to '%s'\n" %
+ (e, confpath))
+ raise SystemExit(1)
+
+
+class Init(Bcfg2.Server.Admin.Mode):
+ __shorthelp__ = ("Interactively initialize a new repository.")
+ __longhelp__ = __shorthelp__ + "\n\nbcfg2-admin init"
+ __usage__ = "bcfg2-admin init"
+ options = {
+ 'configfile': Bcfg2.Options.CFILE,
+ 'plugins' : Bcfg2.Options.SERVER_PLUGINS,
+ 'proto' : Bcfg2.Options.SERVER_PROTOCOL,
+ 'repo' : Bcfg2.Options.SERVER_REPOSITORY,
+ 'sendmail' : Bcfg2.Options.SENDMAIL_PATH,
+ }
+ repopath = ""
+ response = ""
+ def __init__(self, configfile):
+ Bcfg2.Server.Admin.Mode.__init__(self, configfile)
+
+ def _set_defaults(self):
+ """Set default parameters."""
+ self.configfile = self.opts['configfile']
+ self.repopath = self.opts['repo']
+ self.password = gen_password(8)
+ self.server_uri = "https://%s:6789" % socket.getfqdn()
+ self.plugins = default_plugins
+
+ def __call__(self, args):
+ Bcfg2.Server.Admin.Mode.__call__(self, args)
+
+ # Parse options
+ self.opts = Bcfg2.Options.OptionParser(self.options)
+ self.opts.parse(args)
+ self._set_defaults()
+
+ # Prompt the user for input
+ self._prompt_config()
+ self._prompt_repopath()
+ self._prompt_password()
+ self._prompt_hostname()
+ self._prompt_server()
+ self._prompt_groups()
+
+ # Initialize the repository
+ self.init_repo()
+
+ def _prompt_hostname(self):
+ """Ask for the server hostname."""
+ data = raw_input("What is the server's hostname [%s]: " % socket.getfqdn())
+ if data != '':
+ self.shostname = data
+ else:
+ self.shostname = socket.getfqdn()
+
+ def _prompt_config(self):
+ """Ask for the configuration file path."""
+ newconfig = raw_input("Store bcfg2 configuration in [%s]: " %
+ self.configfile)
+ if newconfig != '':
+ self.configfile = newconfig
+
+ def _prompt_repopath(self):
+ """Ask for the repository path."""
+ while True:
+ newrepo = raw_input("Location of bcfg2 repository [%s]: " %
+ self.repopath)
+ if newrepo != '':
+ self.repopath = newrepo
+ if os.path.isdir(self.repopath):
+ response = raw_input("Directory %s exists. Overwrite? [y/N]:"\
+ % self.repopath)
+ if response.lower().strip() == 'y':
+ break
+ else:
+ break
+
+ def _prompt_password(self):
+ """Ask for a password or generate one if none is provided."""
+ newpassword = getpass.getpass(
+ "Input password used for communication verification "
+ "(without echoing; leave blank for a random): ").strip()
+ if len(newpassword) != 0:
+ self.password = newpassword
+
+ def _prompt_server(self):
+ """Ask for the server name."""
+ newserver = raw_input("Input the server location [%s]: " % self.server_uri)
+ if newserver != '':
+ self.server_uri = newserver
+
+ def _prompt_groups(self):
+ """Create the groups.xml file."""
+ prompt = '''Input base Operating System for clients:\n'''
+ for entry in os_list:
+ prompt += "%d: %s\n" % (os_list.index(entry) + 1, entry[0])
+ prompt += ': '
+ while True:
+ try:
+ self.os_sel = os_list[int(raw_input(prompt))-1][1]
+ break
+ except ValueError:
+ continue
+
+ def _prompt_plugins(self):
+ default = raw_input("Use default plugins? (%s) [Y/n]: " % ''.join(default_plugins)).lower()
+ if default != 'y' or default != '':
+ while True:
+ plugins_are_valid = True
+ plug_str = raw_input("Specify plugins: ")
+ plugins = plug_str.split(',')
+ for plugin in plugins:
+ plugin = plugin.strip()
+ if not plugin in plugin_list:
+ plugins_are_valid = False
+ print "ERROR: plugin %s not recognized" % plugin
+ if plugins_are_valid:
+ break
+
+ def _init_plugins(self):
+ """Initialize each plugin-specific portion of the repository."""
+ for plugin in self.plugins:
+ if plugin == 'Metadata':
+ Bcfg2.Server.Plugins.Metadata.Metadata.init_repo(self.repopath, groups, self.os_sel, clients)
+ else:
+ try:
+ module = __import__("Bcfg2.Server.Plugins.%s" % plugin, '',
+ '', ["Bcfg2.Server.Plugins"])
+ cls = getattr(module, plugin)
+ cls.init_repo(self.repopath)
+ except Exception, e:
+ print 'Plugin setup for %s failed: %s\n Check that dependencies are installed?' % (plugin, e)
+
+ def init_repo(self):
+ """Setup a new repo and create the content of the configuration file."""
+ keypath = os.path.dirname(os.path.abspath(self.configfile))
+ confdata = config % (
+ self.repopath,
+ ','.join(self.opts['plugins']),
+ self.opts['sendmail'],
+ self.opts['proto'],
+ self.password,
+ keypath, 'bcfg2.crt',
+ keypath, 'bcfg2.key',
+ keypath, 'bcfg2.crt',
+ self.server_uri
+ )
+
+ # Create the configuration file and SSL key
+ create_conf(self.configfile, confdata)
+ kpath = keypath + '/bcfg2.key'
+ cpath = keypath + '/bcfg2.crt'
+ create_key(self.shostname, kpath, cpath)
+
+ # Create the repository
+ path = "%s/%s" % (self.repopath, 'etc')
+ try:
+ os.makedirs(path)
+ self._init_plugins()
+ print "Repository created successfuly in %s" % (self.repopath)
+ except OSError:
+ print("Failed to create %s." % path)
diff --git a/build/lib/Bcfg2/Server/Admin/Minestruct.py b/build/lib/Bcfg2/Server/Admin/Minestruct.py
new file mode 100644
index 000000000..02edf2b75
--- /dev/null
+++ b/build/lib/Bcfg2/Server/Admin/Minestruct.py
@@ -0,0 +1,69 @@
+import getopt
+import lxml.etree
+import sys
+
+import Bcfg2.Server.Admin
+
+class Minestruct(Bcfg2.Server.Admin.StructureMode):
+ """Pull extra entries out of statistics."""
+ __shorthelp__ = "Extract extra entry lists from statistics"
+ __longhelp__ = (__shorthelp__ +
+ "\n\nbcfg2-admin minestruct [-f filename] "
+ "[-g groups] client")
+ __usage__ = ("bcfg2-admin minestruct [options] <client>\n\n"
+ " %-25s%s\n"
+ " %-25s%s\n" %
+ ("-f <filename>",
+ "build a particular file",
+ "-g <groups>",
+ "only build config for groups"))
+
+ def __init__(self, configfile):
+ Bcfg2.Server.Admin.StructureMode.__init__(self, configfile,
+ self.__usage__)
+
+ def __call__(self, args):
+ Bcfg2.Server.Admin.Mode.__call__(self, args)
+ if len(args) == 0:
+ self.errExit("No argument specified.\n"
+ "Please see bcfg2-admin minestruct help for usage.")
+ try:
+ (opts, args) = getopt.getopt(args, 'f:g:h')
+ except:
+ self.log.error(self.__shorthelp__)
+ raise SystemExit(1)
+
+ client = args[0]
+ output = sys.stdout
+ groups = []
+
+ for (opt, optarg) in opts:
+ if opt == '-f':
+ try:
+ output = open(optarg, 'w')
+ except IOError:
+ self.log.error("Failed to open file: %s" % (optarg))
+ raise SystemExit(1)
+ elif opt == '-g':
+ groups = optarg.split(':')
+
+ try:
+ extra = set()
+ for source in self.bcore.pull_sources:
+ for item in source.GetExtra(client):
+ extra.add(item)
+ except:
+ self.log.error("Failed to find extra entry info for client %s" %
+ client)
+ raise SystemExit(1)
+ root = lxml.etree.Element("Base")
+ self.log.info("Found %d extra entries" % (len(extra)))
+ add_point = root
+ for g in groups:
+ add_point = lxml.etree.SubElement(add_point, "Group", name=g)
+ for tag, name in extra:
+ self.log.info("%s: %s" % (tag, name))
+ lxml.etree.SubElement(add_point, tag, name=name)
+
+ tree = lxml.etree.ElementTree(root)
+ tree.write(output, pretty_print=True)
diff --git a/build/lib/Bcfg2/Server/Admin/Perf.py b/build/lib/Bcfg2/Server/Admin/Perf.py
new file mode 100644
index 000000000..6f1cb8dbb
--- /dev/null
+++ b/build/lib/Bcfg2/Server/Admin/Perf.py
@@ -0,0 +1,37 @@
+import Bcfg2.Options
+import Bcfg2.Proxy
+import Bcfg2.Server.Admin
+
+import sys
+
+class Perf(Bcfg2.Server.Admin.Mode):
+ __shorthelp__ = ("Query server for performance data")
+ __longhelp__ = (__shorthelp__ + "\n\nbcfg2-admin perf")
+ __usage__ = ("bcfg2-admin perf")
+
+ def __init__(self, configfile):
+ Bcfg2.Server.Admin.Mode.__init__(self, configfile)
+
+ def __call__(self, args):
+ output = [('Name', 'Min', 'Max', 'Mean', 'Count')]
+ optinfo = {
+ 'ca': Bcfg2.Options.CLIENT_CA,
+ 'certificate': Bcfg2.Options.CLIENT_CERT,
+ 'key': Bcfg2.Options.SERVER_KEY,
+ 'password': Bcfg2.Options.SERVER_PASSWORD,
+ 'server': Bcfg2.Options.SERVER_LOCATION,
+ 'user': Bcfg2.Options.CLIENT_USER,
+ }
+ setup = Bcfg2.Options.OptionParser(optinfo)
+ setup.parse(sys.argv[2:])
+ proxy = Bcfg2.Proxy.ComponentProxy(setup['server'],
+ setup['user'],
+ setup['password'],
+ key = setup['key'],
+ cert = setup['certificate'],
+ ca = setup['ca'])
+ data = proxy.get_statistics()
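+ # Each statistics value is assumed to be (min, max, mean, count); the
+ # float fields get six decimal places, the trailing count is kept as-is.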
+ for key, value in data.iteritems():
+ row = tuple(["%.06f" % (item) for item in value[:-1]] + [value[-1]])
+ output.append((key, ) + row)
+ self.print_table(output)
diff --git a/build/lib/Bcfg2/Server/Admin/Pull.py b/build/lib/Bcfg2/Server/Admin/Pull.py
new file mode 100644
index 000000000..aa732c67f
--- /dev/null
+++ b/build/lib/Bcfg2/Server/Admin/Pull.py
@@ -0,0 +1,138 @@
+import getopt
+import sys
+import Bcfg2.Server.Admin
+import Bcfg2.Server.Plugin
+
+class Pull(Bcfg2.Server.Admin.MetadataCore):
+ """Pull mode retrieves entries from clients and
+ integrates the information into the repository.
+ """
+ __shorthelp__ = ("Integrate configuration information "
+ "from clients into the server repository")
+ __longhelp__ = (__shorthelp__ + "\n\nbcfg2-admin pull [-v] [-f][-I] [-s] "
+ "<client> <entry type> <entry name>")
+ __usage__ = ("bcfg2-admin pull [options] <client> <entry type> "
+ "<entry name>\n\n"
+ " %-25s%s\n"
+ " %-25s%s\n"
+ " %-25s%s\n"
+ " %-25s%s\n" %
+ ("-v",
+ "be verbose",
+ "-f",
+ "force",
+ "-I",
+ "interactive",
+ "-s",
+ "stdin"))
+ allowed = ['Metadata', 'BB', 'DBStats', 'Statistics', 'Cfg', 'SSHbase']
+
+ def __init__(self, configfile):
+ Bcfg2.Server.Admin.MetadataCore.__init__(self, configfile,
+ self.__usage__)
+ self.log = False
+ self.mode = 'interactive'
+
+ def __call__(self, args):
+ Bcfg2.Server.Admin.Mode.__call__(self, args)
+ use_stdin = False
+ try:
+ opts, gargs = getopt.getopt(args, 'vfIs')
+ except getopt.GetoptError:
+ print self.__shorthelp__
+ raise SystemExit(1)
+ for opt in opts:
+ if opt[0] == '-v':
+ self.log = True
+ elif opt[0] == '-f':
+ self.mode = 'force'
+ elif opt[0] == '-I':
+ self.mode = 'interactive'
+ elif opt[0] == '-s':
+ use_stdin = True
+
+ if use_stdin:
+ for line in sys.stdin:
+ try:
+ self.PullEntry(*line.split(None, 3))
+ except SystemExit:
+ print " for %s" % line
+ except:
+ print "Bad entry: %s" % line.strip()
+ elif len(gargs) < 3:
+ print self.__longhelp__
+ raise SystemExit(1)
+ else:
+ self.PullEntry(gargs[0], gargs[1], gargs[2])
+
+ def BuildNewEntry(self, client, etype, ename):
+ """Construct a new full entry for given client/entry from statistics."""
+ new_entry = {'type':etype, 'name':ename}
+ for plugin in self.bcore.pull_sources:
+ try:
+ (owner, group, perms, contents) = \
+ plugin.GetCurrentEntry(client, etype, ename)
+ break
+ except Bcfg2.Server.Plugin.PluginExecutionError:
+ if plugin == self.bcore.pull_sources[-1]:
+ print "Pull Source failure; could not fetch current state"
+ raise SystemExit(1)
+
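+ # owner/group/perms/contents are bound only if a pull source succeeded
+ # above; the UnboundLocalError handler below turns that into a clear error.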
+ try:
+ data = {'owner':owner, 'group':group, 'perms':perms, 'text':contents}
+ except UnboundLocalError:
+ print("Unable to build entry. "
+ "Do you have a statistics plugin enabled?")
+ raise SystemExit(1)
+ for k, v in data.iteritems():
+ if v:
+ new_entry[k] = v
+ #print new_entry
+ return new_entry
+
+ def Choose(self, choices):
+ """Determine where to put pull data."""
+ if self.mode == 'interactive':
+ for choice in choices:
+ print "Plugin returned choice:"
+ if choice is choices[0]:
+ print "(current entry)",
+ if choice.all:
+ print " => global entry"
+ elif choice.group:
+ print (" => group entry: %s (prio %d)" %
+ (choice.group, choice.prio))
+ else:
+ print " => host entry: %s" % (choice.hostname)
+ if raw_input("Use this entry? [yN]: ") in ['y', 'Y']:
+ return choice
+ return False
+ else:
+ # mode == 'force'
+ if not choices:
+ return False
+ return choices[0]
+
+ def PullEntry(self, client, etype, ename):
+ """Make currently recorded client state correct for entry."""
+ new_entry = self.BuildNewEntry(client, etype, ename)
+
+ meta = self.bcore.build_metadata(client)
+ # find appropriate plugin in bcore
+ glist = [gen for gen in self.bcore.generators if
+ ename in gen.Entries.get(etype, {})]
+ if len(glist) != 1:
+ self.errExit("Got wrong numbers of matching generators for entry:" \
+ + "%s" % ([g.name for g in glist]))
+ plugin = glist[0]
+ if not isinstance(plugin, Bcfg2.Server.Plugin.PullTarget):
+ self.errExit("Configuration upload not supported by plugin %s" \
+ % (plugin.name))
+ try:
+ choices = plugin.AcceptChoices(new_entry, meta)
+ specific = self.Choose(choices)
+ if specific:
+ plugin.AcceptPullData(specific, new_entry, self.log)
+ except Bcfg2.Server.Plugin.PluginExecutionError:
+ self.errExit("Configuration upload not supported by plugin %s" \
+ % (plugin.name))
+ # FIXME svn commit if running under svn
diff --git a/build/lib/Bcfg2/Server/Admin/Query.py b/build/lib/Bcfg2/Server/Admin/Query.py
new file mode 100644
index 000000000..b5af9bad2
--- /dev/null
+++ b/build/lib/Bcfg2/Server/Admin/Query.py
@@ -0,0 +1,78 @@
+import logging
+import Bcfg2.Logger
+import Bcfg2.Server.Admin
+import Bcfg2.Server.Core
+import Bcfg2.Server.Plugins.Probes
+
+class Query(Bcfg2.Server.Admin.Mode):
+ __shorthelp__ = "Query clients"
+ __longhelp__ = (__shorthelp__ + "\n\nbcfg2-admin query [-n] [-c] "
+ "[-f filename] g=group p=profile")
+ __usage__ = ("bcfg2-admin query [options] <g=group> <p=profile>\n\n"
+ " %-25s%s\n"
+ " %-25s%s\n"
+ " %-25s%s\n" %
+ ("-n",
+ "query results delimited with newlines",
+ "-c",
+ "query results delimited with commas",
+ "-f filename",
+ "write query to file"))
+
+ def __init__(self, cfile):
+ logging.root.setLevel(100)
+ Bcfg2.Logger.setup_logging(100, to_console=False, to_syslog=False)
+ Bcfg2.Server.Admin.Mode.__init__(self, cfile)
+ try:
+ self.bcore = Bcfg2.Server.Core.Core(self.get_repo_path(),
+ ['Metadata', 'Probes'],
+ 'foo', False, 'UTF-8')
+ except Bcfg2.Server.Core.CoreInitError, msg:
+ self.errExit("Core load failed because %s" % msg)
+ self.bcore.fam.handle_events_in_interval(1)
+ self.meta = self.bcore.metadata
+
+ def __call__(self, args):
+ Bcfg2.Server.Admin.Mode.__call__(self, args)
+ clients = self.meta.clients.keys()
+ filename_arg = False
+ filename = None
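+ # Arguments are parsed by hand rather than with getopt, so the g=/p=
+ # selectors can be mixed freely with the -n/-c/-f flags.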
+ for arg in args:
+ if filename_arg:
+ filename = arg
+ filename_arg = False
+ continue
+ if arg in ['-n', '-c']:
+ continue
+ if arg in ['-f']:
+ filename_arg = True
+ continue
+ try:
+ k, v = arg.split('=')
+ except ValueError:
+ print "Unknown argument %s" % arg
+ continue
+ if k == 'p':
+ nc = self.meta.get_client_names_by_profiles(v.split(','))
+ elif k == 'g':
+ nc = self.meta.get_client_names_by_groups(v.split(','))
+ # add probed groups (if present)
+ for conn in self.bcore.connectors:
+ if isinstance(conn, Bcfg2.Server.Plugins.Probes.Probes):
+ for c, glist in conn.cgroups.items():
+ for g in glist:
+ if g in v.split(','):
+ nc.append(c)
+ else:
+ print "One of g= or p= must be specified"
+ raise SystemExit(1)
+ clients = [c for c in clients if c in nc]
+ if '-n' in args:
+ for client in clients:
+ print client
+ else:
+ print ','.join(clients)
+ if '-f' in args:
+ f = open(filename, "w")
+ for client in clients:
+ f.write(client + "\n")
+ f.close()
+ print "Wrote results to %s" % (filename)
diff --git a/build/lib/Bcfg2/Server/Admin/Reports.py b/build/lib/Bcfg2/Server/Admin/Reports.py
new file mode 100644
index 000000000..a4dd19064
--- /dev/null
+++ b/build/lib/Bcfg2/Server/Admin/Reports.py
@@ -0,0 +1,357 @@
+'''Admin interface for dynamic reports'''
+import Bcfg2.Logger
+import Bcfg2.Server.Admin
+import ConfigParser
+import datetime
+import os
+import logging
+import pickle
+import platform
+import sys
+import traceback
+from Bcfg2.Server.Reports.importscript import load_stats
+from Bcfg2.Server.Reports.updatefix import update_database
+from Bcfg2.Server.Reports.utils import *
+from lxml.etree import XML, XMLSyntaxError
+
+# FIXME: Remove when server python dep is 2.5 or greater
+if sys.version_info >= (2, 5):
+ from hashlib import md5
+else:
+ from md5 import md5
+
+# Load django
+import django.core.management
+
+# FIXME - settings file uses a hardcoded path for /etc/bcfg2.conf
+try:
+ import Bcfg2.Server.Reports.settings
+except Exception, e:
+ sys.stderr.write("Failed to load configuration settings. %s\n" % e)
+ sys.exit(1)
+
+project_directory = os.path.dirname(Bcfg2.Server.Reports.settings.__file__)
+project_name = os.path.basename(project_directory)
+sys.path.append(os.path.join(project_directory, '..'))
+project_module = __import__(project_name, '', '', [''])
+sys.path.pop()
+
+# Set DJANGO_SETTINGS_MODULE appropriately.
+os.environ['DJANGO_SETTINGS_MODULE'] = '%s.settings' % project_name
+from django.db import connection, transaction
+
+from Bcfg2.Server.Reports.reports.models import Client, Interaction, Entries, \
+ Entries_interactions, Performance, \
+ Reason, Ping, TYPE_CHOICES, InternalDatabaseVersion
+
+def printStats(fn):
+ """
+ Print db stats.
+
+ Decorator for purging. Prints database statistics after a run.
+ """
+ def print_stats(*data):
+ start_client = Client.objects.count()
+ start_i = Interaction.objects.count()
+ start_ei = Entries_interactions.objects.count()
+ start_perf = Performance.objects.count()
+ start_ping = Ping.objects.count()
+
+ fn(*data)
+
+ print "Clients removed: %s" % (start_client - Client.objects.count())
+ print "Interactions removed: %s" % (start_i - Interaction.objects.count())
+ print "Interactions->Entries removed: %s" % \
+ (start_ei - Entries_interactions.objects.count())
+ print "Metrics removed: %s" % (start_perf - Performance.objects.count())
+ print "Ping metrics removed: %s" % (start_ping - Ping.objects.count())
+
+ return print_stats
+
+class Reports(Bcfg2.Server.Admin.Mode):
+ '''Admin interface for dynamic reports'''
+ __shorthelp__ = "Manage dynamic reports"
+ __longhelp__ = (__shorthelp__)
+ __usage__ = ("bcfg2-admin reports [command] [options]\n"
+ " -v|--verbose Be verbose\n"
+ " -q|--quiet Print only errors\n"
+ "\n"
+ " Commands:\n"
+ " init Initialize the database\n"
+ " load_stats Load statistics data\n"
+ " -s|--stats Path to statistics.xml file\n"
+ " -c|--clients-file Path to clients.xml file\n"
+ " -O3 Fast mode. Duplicates data!\n"
+ " purge Purge records\n"
+ " --client [n] Client to operate on\n"
+ " --days [n] Records older then n days\n"
+ " --expired Expired clients only\n"
+ " scrub Scrub the database for duplicate reasons and orphaned entries\n"
+ " update Apply any updates to the reporting database\n"
+ "\n")
+
+ def __init__(self, cfile):
+ Bcfg2.Server.Admin.Mode.__init__(self, cfile)
+ self.log.setLevel(logging.INFO)
+ self.django_commands = [ 'syncdb', 'sqlall', 'validate' ]
+ self.__usage__ = self.__usage__ + " Django commands:\n " + \
+ "\n ".join(self.django_commands)
+
+ def __call__(self, args):
+ Bcfg2.Server.Admin.Mode.__call__(self, args)
+ if len(args) == 0 or args[0] == '-h':
+ print(self.__usage__)
+ raise SystemExit(0)
+
+ verb = 0
+
+ if '-v' in args or '--verbose' in args:
+ self.log.setLevel(logging.DEBUG)
+ verb = 1
+ if '-q' in args or '--quiet' in args:
+ self.log.setLevel(logging.WARNING)
+
+ # FIXME - dry run
+
+ if args[0] in self.django_commands:
+ self.django_command_proxy(args[0])
+ elif args[0] == 'scrub':
+ self.scrub()
+ elif args[0] == 'init':
+ update_database()
+ elif args[0] == 'update':
+ update_database()
+ elif args[0] == 'load_stats':
+ quick = '-O3' in args
+ stats_file = None
+ clients_file = None
+ i = 1
+ while i < len(args):
+ if args[i] == '-s' or args[i] == '--stats':
+ stats_file = args[i+1]
+ if stats_file[0] == '-':
+ self.errExit("Invalid statistics file: %s" % stats_file)
+ elif args[i] == '-c' or args[i] == '--clients-file':
+ clients_file = args[i+1]
+ if clients_file[0] == '-':
+ self.errExit("Invalid clients file: %s" % clients_file)
+ i = i + 1
+ self.load_stats(stats_file, clients_file, verb, quick)
+ elif args[0] == 'purge':
+ expired = False
+ client = None
+ maxdate = None
+ state = None
+ i = 1
+ while i < len(args):
+ if args[i] == '-c' or args[i] == '--client':
+ if client:
+ self.errExit("Only one client per run")
+ client = args[i+1]
+ i = i + 1
+ elif args[i] == '--days':
+ if maxdate:
+ self.errExit("Max date specified multiple times")
+ try:
+ maxdate = datetime.datetime.now() - datetime.timedelta(days=int(args[i+1]))
+ except:
+ self.log.error("Invalid number of days: %s" % args[i+1])
+ raise SystemExit, -1
+ i = i + 1
+ elif args[i] == '--expired':
+ expired = True
+ i = i + 1
+ if expired:
+ if state:
+ self.log.error("--state is not valid with --expired")
+ raise SystemExit, -1
+ self.purge_expired(maxdate)
+ else:
+ self.purge(client, maxdate, state)
+ else:
+ print "Unknown command: %s" % args[0]
+
+ @transaction.commit_on_success
+ def scrub(self):
+ ''' Perform a thorough scrub and cleanup of the database '''
+
+ # Currently only reasons are a problem
+ try:
+ start_count = Reason.objects.count()
+ except Exception, e:
+ self.log.error("Failed to load reason objects: %s" % e)
+ return
+ dup_reasons = []
+
+ cmp_reasons = dict()
+ batch_update = []
+ for reason in BatchFetch(Reason.objects):
+ # Loop through each reason and create a key out of the data. This
+ # lets us take advantage of a fast hash lookup for comparisons.
+ rid = reason.id
+ reason.id = None
+ key = md5(pickle.dumps(reason)).hexdigest()
+ reason.id = rid
+
+ if key in cmp_reasons:
+ self.log.debug("Update interactions from %d to %d" \
+ % (reason.id, cmp_reasons[key]))
+ dup_reasons.append([reason.id])
+ batch_update.append([cmp_reasons[key], reason.id])
+ else:
+ cmp_reasons[key] = reason.id
+ self.log.debug("key %d" % reason.id)
+
+ self.log.debug("Done with updates, deleting dupes")
+ try:
+ cursor = connection.cursor()
+ cursor.executemany('update reports_entries_interactions set reason_id=%s where reason_id=%s', batch_update)
+ cursor.executemany('delete from reports_reason where id = %s', dup_reasons)
+ transaction.set_dirty()
+ except Exception, ex:
+ self.log.error("Failed to delete reasons: %s" % ex)
+ raise
+
+ self.log.info("Found %d dupes out of %d" % (len(dup_reasons), start_count))
+
+ # Cleanup orphans
+ start_count = Reason.objects.count()
+ Reason.prune_orphans()
+ self.log.info("Pruned %d Reason records" % (start_count - Reason.objects.count()))
+
+ start_count = Entries.objects.count()
+ Entries.prune_orphans()
+ self.log.info("Pruned %d Entries records" % (start_count - Entries.objects.count()))
+
+ def django_command_proxy(self, command):
+ '''Call a django command'''
+ if command == 'sqlall':
+ django.core.management.call_command(command, 'reports')
+ else:
+ django.core.management.call_command(command)
+
+ def load_stats(self, stats_file=None, clientspath=None, verb=0, quick=False):
+ '''Load statistics data into the database'''
+ location = ''
+
+ if not stats_file:
+ try:
+ stats_file = "%s/etc/statistics.xml" % self.cfp.get('server', 'repository')
+ except (ConfigParser.NoSectionError, ConfigParser.NoOptionError):
+ self.errExit("Could not read bcfg2.conf; exiting")
+ try:
+ statsdata = XML(open(stats_file).read())
+ except (IOError, XMLSyntaxError):
+ self.errExit("StatReports: Failed to parse %s"%(stats_file))
+
+ if not clientspath:
+ try:
+ clientspath = "%s/Metadata/clients.xml" % \
+ self.cfp.get('server', 'repository')
+ except (ConfigParser.NoSectionError, ConfigParser.NoOptionError):
+ self.errExit("Could not read bcfg2.conf; exiting")
+ try:
+ clientsdata = XML(open(clientspath).read())
+ except (IOError, XMLSyntaxError):
+ self.errExit("StatReports: Failed to parse %s"%(clientspath))
+
+ try:
+ load_stats(clientsdata, statsdata, verb, self.log, quick=quick, location=platform.node())
+ except Exception, e:
+ self.log.error("Failed to load statistics data: %s" % e)
+
+ @printStats
+ def purge(self, client=None, maxdate=None, state=None):
+ '''Purge historical data from the database'''
+
+ filtered = False # set when a filter narrows the purge; the client record is only deleted if no filters applied
+
+ if not client and not maxdate and not state:
+ self.errExit("Reports.prune: Refusing to prune all data")
+
+ ipurge = Interaction.objects
+ if client:
+ try:
+ cobj = Client.objects.get(name=client)
+ ipurge = ipurge.filter(client=cobj)
+ except Client.DoesNotExist:
+ self.log.error("Client %s not in database" % client)
+ raise SystemExit, -1
+ self.log.debug("Filtering by client: %s" % client)
+
+ if maxdate:
+ filtered = True
+ if not isinstance(maxdate, datetime.datetime):
+ raise TypeError, "maxdate is not a DateTime object"
+ self.log.debug("Filtering by maxdate: %s" % maxdate)
+ ipurge = ipurge.filter(timestamp__lt=maxdate)
+
+ # Handle ping data as well
+ ping = Ping.objects.filter(endtime__lt=maxdate)
+ if client:
+ ping = ping.filter(client=cobj)
+ ping.delete()
+
+ if state:
+ filtered = True
+ if state not in ('dirty','clean','modified'):
+ raise TypeError, "state is not one of the following values " + \
+ "('dirty','clean','modified')"
+ self.log.debug("Filtering by state: %s" % state)
+ ipurge = ipurge.filter(state=state)
+
+ count = ipurge.count()
+ rnum = 0
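+ # Delete interactions in id batches of 1000 to keep each generated
+ # SQL statement and its transaction footprint bounded.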
+ try:
+ while rnum < count:
+ grp = list(ipurge[:1000].values("id"))
+ # just in case...
+ if not grp:
+ break
+ Interaction.objects.filter(id__in=[x['id'] for x in grp]).delete()
+ rnum += len(grp)
+ self.log.debug("Deleted %s of %s" % (rnum, count))
+ except:
+ self.log.error("Failed to remove interactions")
+ (a, b, c) = sys.exc_info()
+ msg = traceback.format_exception(a, b, c, limit=2)[-1][:-1]
+ del a, b, c
+ self.log.error(msg)
+
+ # bulk operations bypass the Interaction.delete method
+ self.log.debug("Pruning orphan Performance objects")
+ Performance.prune_orphans()
+
+ if client and not filtered:
+ # Delete the client; ping data is deleted automatically
+ try:
+ self.log.debug("Purging client %s" % client)
+ cobj.delete()
+ except:
+ self.log.error("Failed to delete client %s" % client)
+ (a, b, c) = sys.exc_info()
+ msg = traceback.format_exception(a, b, c, limit=2)[-1][:-1]
+ del a, b, c
+ self.log.error(msg)
+
+ @printStats
+ def purge_expired(self, maxdate=None):
+ '''Purge expired clients from the database'''
+
+ if maxdate:
+ if not isinstance(maxdate, datetime.datetime):
+ raise TypeError, "maxdate is not a DateTime object"
+ self.log.debug("Filtering by maxdate: %s" % maxdate)
+ clients = Client.objects.filter(expiration__lt=maxdate)
+ else:
+ clients = Client.objects.filter(expiration__isnull=False)
+
+ for client in clients:
+ self.log.debug("Purging client %s" % client)
+ Interaction.objects.filter(client=client).delete()
+ client.delete()
+ self.log.debug("Pruning orphan Performance objects")
+ Performance.prune_orphans()
+
diff --git a/build/lib/Bcfg2/Server/Admin/Snapshots.py b/build/lib/Bcfg2/Server/Admin/Snapshots.py
new file mode 100644
index 000000000..004de0ddb
--- /dev/null
+++ b/build/lib/Bcfg2/Server/Admin/Snapshots.py
@@ -0,0 +1,163 @@
+from datetime import date
+import sys
+
+# prereq issues can be signaled with ImportError, so no try needed
+import sqlalchemy, sqlalchemy.orm
+import Bcfg2.Server.Admin
+import Bcfg2.Server.Snapshots
+import Bcfg2.Server.Snapshots.model
+from Bcfg2.Server.Snapshots.model import Snapshot, Client, Metadata, Base, \
+ File, Group, Package, Service
+
+class Snapshots(Bcfg2.Server.Admin.Mode):
+ __shorthelp__ = "Interact with the Snapshots system"
+ __longhelp__ = (__shorthelp__)
+ __usage__ = ("bcfg2-admin snapshots [init|query qtype]")
+
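+ # Map each CLI query type to its SQLAlchemy model class.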
+ q_dispatch = {'client':Client,
+ 'group':Group,
+ 'metadata':Metadata,
+ 'package':Package,
+ 'snapshot':Snapshot}
+
+ def __init__(self, configfile):
+ Bcfg2.Server.Admin.Mode.__init__(self, configfile)
+ #self.session = Bcfg2.Server.Snapshots.setup_session(debug=True)
+ self.session = Bcfg2.Server.Snapshots.setup_session(configfile)
+ self.cfile = configfile
+
+ def __call__(self, args):
+ Bcfg2.Server.Admin.Mode.__call__(self, args)
+ if len(args) == 0 or args[0] == '-h':
+ print(self.__usage__)
+ raise SystemExit(0)
+
+ if args[0] == 'query':
+ if args[1] in self.q_dispatch:
+ q_obj = self.q_dispatch[args[1]]
+ if q_obj == Client:
+ rows = []
+ labels = ('Client', 'Active')
+ for host in \
+ self.session.query(q_obj).filter(q_obj.active == False):
+ rows.append([host.name, 'No'])
+ for host in \
+ self.session.query(q_obj).filter(q_obj.active == True):
+ rows.append([host.name, 'Yes'])
+ self.print_table([labels]+rows,
+ justify='left',
+ hdr=True,
+ vdelim=" ",
+ padding=1)
+ elif q_obj == Group:
+ print("Groups:")
+ for group in self.session.query(q_obj).all():
+ print(" %s" % group.name)
+ else:
+ for item in self.session.query(q_obj).all():
+ print(item)
+ else:
+ print('Unknown query type: %s' % args[1])
+ raise SystemExit(1)
+ elif args[0] == 'init':
+ # Initialize the Snapshots database
+ dbpath = Bcfg2.Server.Snapshots.db_from_config(self.cfile)
+ engine = sqlalchemy.create_engine(dbpath, echo=True)
+ metadata = Base.metadata
+ metadata.create_all(engine)
+ Session = sqlalchemy.orm.sessionmaker()
+ Session.configure(bind=engine)
+ session = Session()
+ session.commit()
+ elif args[0] == 'dump':
+ client = args[1]
+ snap = Snapshot.get_current(self.session, unicode(client))
+ if not snap:
+ print("Current snapshot for %s not found" % client)
+ sys.exit(1)
+ print("Client %s last run at %s" % (client, snap.timestamp))
+ for pkg in snap.packages:
+ print("C:", pkg.correct, 'M:', pkg.modified)
+ print("start", pkg.start.name, pkg.start.version)
+ print("end", pkg.end.name, pkg.end.version)
+ elif args[0] == 'reports':
+ # bcfg2-admin reporting interface for Snapshots
+ if '-a' in args[1:]:
+ # Query all hosts for Name, Status, Revision, Timestamp
+ q = self.session.query(Client.name,
+ Snapshot.correct,
+ Snapshot.revision,
+ Snapshot.timestamp)\
+ .filter(Client.id==Snapshot.client_id)\
+ .group_by(Client.id)
+ rows = []
+ labels = ('Client', 'Correct', 'Revision', 'Time')
+ for item in q.all():
+ cli, cor, rev, time = item
+ rows.append([cli, cor, rev, time])
+ self.print_table([labels]+rows,
+ justify='left',
+ hdr=True, vdelim=" ",
+ padding=1)
+ elif '-b' in args[1:]:
+ # Query a single host for bad entries
+ if len(args) < 3:
+ print("Usage: bcfg2-admin snapshots -b <client>")
+ return
+ client = args[2]
+ snap = Snapshot.get_current(self.session, unicode(client))
+ if not snap:
+ print("Current snapshot for %s not found" % client)
+ sys.exit(1)
+ print("Bad entries:")
+ bad_pkgs = [self.session.query(Package)
+ .filter(Package.id==p.start_id).one().name \
+ for p in snap.packages if p.correct == False]
+ for p in bad_pkgs:
+ print(" Package:%s" % p)
+ bad_files = [self.session.query(File)
+ .filter(File.id==f.start_id).one().name \
+ for f in snap.files if f.correct == False]
+ for filename in bad_files:
+ print(" File:%s" % filename)
+ bad_svcs = [self.session.query(Service)
+ .filter(Service.id==s.start_id).one().name \
+ for s in snap.services if s.correct == False]
+ for svc in bad_svcs:
+ print(" Service:%s" % svc)
+ elif '-e' in args[1:]:
+ # Query a single host for extra entries
+ client = args[2]
+ snap = Snapshot.get_current(self.session, unicode(client))
+ if not snap:
+ print("Current snapshot for %s not found" % client)
+ sys.exit(1)
+ print("Extra entries:")
+ for pkg in snap.extra_packages:
+ print(" Package:%s" % pkg.name)
+ # FIXME: Do we know about extra files yet?
+ for f in snap.extra_files:
+ print(" File:%s" % f.name)
+ for svc in snap.extra_services:
+ print(" Service:%s" % svc.name)
+ elif '--date' in args[1:]:
+ year, month, day = args[2:]
+ timestamp = date(int(year), int(month), int(day))
+ snaps = []
+ for client in self.session.query(Client).filter(Client.active == True):
+ snaps.append(Snapshot.get_by_date(self.session,
+ client.name,
+ timestamp))
+ rows = []
+ labels = ('Client', 'Correct', 'Revision', 'Time')
+ for snap in snaps:
+ rows.append([snap.client.name,
+ snap.correct,
+ snap.revision,
+ snap.timestamp])
+ self.print_table([labels]+rows,
+ justify='left',
+ hdr=True,
+ vdelim=" ",
+ padding=1)
+ else:
+ print("Unknown options: ", args[1:])
diff --git a/build/lib/Bcfg2/Server/Admin/Tidy.py b/build/lib/Bcfg2/Server/Admin/Tidy.py
new file mode 100644
index 000000000..c02ddf110
--- /dev/null
+++ b/build/lib/Bcfg2/Server/Admin/Tidy.py
@@ -0,0 +1,66 @@
+import os
+import re
+import socket
+
+import Bcfg2.Server.Admin
+
+class Tidy(Bcfg2.Server.Admin.Mode):
+ __shorthelp__ = "Clean up useless files in the repo"
+ __longhelp__ = __shorthelp__ + "\n\nbcfg2-admin tidy [-f] [-I]"
+ __usage__ = ("bcfg2-admin tidy [options]\n\n"
+ " %-25s%s\n"
+ " %-25s%s\n" %
+ ("-f",
+ "force",
+ "-I",
+ "interactive"))
+
+ def __init__(self, cfile):
+ Bcfg2.Server.Admin.Mode.__init__(self, cfile)
+
+ def __call__(self, args):
+ Bcfg2.Server.Admin.Mode.__call__(self, args)
+ badfiles = self.buildTidyList()
+ if '-f' in args or '-I' in args:
+ if '-I' in args:
+ for name in badfiles[:]:
+ answer = raw_input("Unlink file %s? [yN] " % name)
+ if answer not in ['y', 'Y']:
+ badfiles.remove(name)
+ for name in badfiles:
+ try:
+ os.unlink(name)
+ except OSError:
+ print "Failed to unlink %s" % name
+ else:
+ for name in badfiles:
+ print name
+
+ def buildTidyList(self):
+ """Clean up unused or unusable files from the repository."""
+ hostmatcher = re.compile(r'.*\.H_(\S+)$')
+ to_remove = []
+ good = []
+ bad = []
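+ # good/bad cache DNS results so each hostname is resolved at most once.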
+
+ # clean up unresolvable hosts in SSHbase
+ for name in os.listdir("%s/SSHbase" % (self.get_repo_path())):
+ if hostmatcher.match(name):
+ hostname = hostmatcher.match(name).group(1)
+ if hostname in good + bad:
+ continue
+ try:
+ socket.gethostbyname(hostname)
+ good.append(hostname)
+ except socket.error:
+ bad.append(hostname)
+ for name in os.listdir("%s/SSHbase" % (self.get_repo_path())):
+ if not hostmatcher.match(name):
+ to_remove.append("%s/SSHbase/%s" % (self.get_repo_path(), name))
+ else:
+ if hostmatcher.match(name).group(1) in bad:
+ to_remove.append("%s/SSHbase/%s" %
+ (self.get_repo_path(), name))
+ # clean up file~
+ # clean up files without parsable names in Cfg
+ return to_remove
diff --git a/build/lib/Bcfg2/Server/Admin/Viz.py b/build/lib/Bcfg2/Server/Admin/Viz.py
new file mode 100644
index 000000000..245ca8398
--- /dev/null
+++ b/build/lib/Bcfg2/Server/Admin/Viz.py
@@ -0,0 +1,101 @@
+import getopt
+from subprocess import Popen, PIPE
+import Bcfg2.Server.Admin
+
+class Viz(Bcfg2.Server.Admin.MetadataCore):
+ __shorthelp__ = "Produce graphviz diagrams of metadata structures"
+ __longhelp__ = (__shorthelp__ + "\n\nbcfg2-admin viz [--includehosts] "
+ "[--includebundles] [--includekey] "
+ "[-o output.png] [--raw]")
+ __usage__ = ("bcfg2-admin viz [options]\n\n"
+ " %-25s%s\n"
+ " %-25s%s\n"
+ " %-25s%s\n"
+ " %-25s%s\n" %
+ ("-H, --includehosts",
+ "include hosts in the viz output",
+ "-b, --includebundles",
+ "include bundles in the viz output",
+ "-k, --includekey",
+ "show a key for different digraph shapes",
+ "-o, --outfile <file>",
+ "write viz output to an output file"))
+
+ colors = ['steelblue1', 'chartreuse', 'gold', 'magenta',
+ 'indianred1', 'limegreen', 'orange1', 'lightblue2',
+ 'green1', 'blue1', 'yellow1', 'darkturquoise', 'gray66']
+ plugin_blacklist = ['DBStats', 'Snapshots', 'Cfg', 'Pkgmgr', 'Packages',
+ 'Rules', 'Account', 'Decisions', 'Deps', 'Git', 'Svn',
+ 'Fossil', 'Bzr', 'Bundler', 'TGenshi', 'SGenshi', 'Base']
+
+ def __init__(self, cfile):
+
+ Bcfg2.Server.Admin.MetadataCore.__init__(self, cfile,
+ self.__usage__,
+ pblacklist=self.plugin_blacklist)
+
+ def __call__(self, args):
+ Bcfg2.Server.Admin.MetadataCore.__call__(self, args)
+ # First get options to the 'viz' subcommand
+ try:
+ opts, args = getopt.getopt(args, 'Hbko:',
+ ['includehosts', 'includebundles',
+ 'includekey', 'outfile='])
+ except getopt.GetoptError, msg:
+ print msg
+ raise SystemExit(1)
+
+ #FIXME: is this for --raw?
+ #rset = False
+ hset = False
+ bset = False
+ kset = False
+ outputfile = False
+ for opt, arg in opts:
+ if opt in ("-H", "--includehosts"):
+ hset = True
+ elif opt in ("-b", "--includebundles"):
+ bset = True
+ elif opt in ("-k", "--includekey"):
+ kset = True
+ elif opt in ("-o", "--outfile"):
+ outputfile = arg
+
+ data = self.Visualize(self.get_repo_path(), hset, bset,
+ kset, outputfile)
+ print data
+ raise SystemExit, 0
+
+ def Visualize(self, repopath, hosts=False,
+ bundles=False, key=False, output=False):
+ """Build visualization of groups file."""
+ if output:
+ format = output.split('.')[-1]
+ else:
+ format = 'png'
+
+ cmd = "dot -T%s" % (format)
+ if output:
+ cmd += " -o %s" % output
+ dotpipe = Popen(cmd, shell=True, stdin=PIPE,
+ stdout=PIPE, close_fds=True)
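+ # Stream the dot-language graph over stdin; dot renders to stdout, or to
+ # the -o file when given, in the format implied by the output extension.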
+ try:
+ dotpipe.stdin.write("digraph groups {\n")
+ except:
+ print "write to dot process failed. Is graphviz installed?"
+ raise SystemExit(1)
+ dotpipe.stdin.write('\trankdir="LR";\n')
+ dotpipe.stdin.write(self.metadata.viz(hosts, bundles,
+ key, self.colors))
+ if key:
+ dotpipe.stdin.write("\tsubgraph cluster_key {\n")
+ dotpipe.stdin.write('''\tstyle="filled";\n''')
+ dotpipe.stdin.write('''\tcolor="lightblue";\n''')
+ dotpipe.stdin.write('''\tBundle [ shape="septagon" ];\n''')
+ dotpipe.stdin.write('''\tGroup [shape="ellipse"];\n''')
+ dotpipe.stdin.write('''\tProfile [style="bold", shape="ellipse"];\n''')
+ dotpipe.stdin.write('''\tHblock [label="Host1|Host2|Host3", shape="record"];\n''')
+ dotpipe.stdin.write('''\tlabel="Key";\n''')
+ dotpipe.stdin.write("\t}\n")
+ dotpipe.stdin.write("}\n")
+ dotpipe.stdin.close()
+ return dotpipe.stdout.read()
diff --git a/build/lib/Bcfg2/Server/Admin/Web.py b/build/lib/Bcfg2/Server/Admin/Web.py
new file mode 100644
index 000000000..5ad14f2b9
--- /dev/null
+++ b/build/lib/Bcfg2/Server/Admin/Web.py
@@ -0,0 +1,79 @@
+import os
+import sys
+import BaseHTTPServer
+import SimpleHTTPServer
+import daemon
+import Bcfg2.Server.Admin
+import Bcfg2.Options
+
+# For debugging output only
+import logging
+logger = logging.getLogger('Bcfg2.Server.Admin.Web')
+
+class Web(Bcfg2.Server.Admin.Mode):
+ __shorthelp__ = "A simple webserver to display the content of the Bcfg2 repos."
+ __longhelp__ = (__shorthelp__ + "\n\nbcfg2-admin web start\n"
+ "\n\nbcfg2-admin web stop")
+ __usage__ = ("bcfg2-admin web [start|stop]")
+
+ def __init__(self, configfile):
+ Bcfg2.Server.Admin.Mode.__init__(self, configfile)
+
+ def __call__(self, args):
+ Bcfg2.Server.Admin.Mode.__call__(self, args)
+ opts = {'repo': Bcfg2.Options.SERVER_REPOSITORY}
+ setup = Bcfg2.Options.OptionParser(opts)
+ setup.parse(sys.argv[1:])
+ repo = setup['repo']
+
+ if len(args) == 0 or args[0] == '-h':
+ print(self.__usage__)
+ raise SystemExit(0)
+
+ if args[0] in ['start', 'up']:
+ # Change directory to the Bcfg2 repo
+ if not os.path.exists(repo):
+ #print "Path '%s' doesn't exisit" % repo
+ logger.error("%s doesn't exist" % repo)
+ else:
+ os.chdir(repo)
+ self.start_web()
+
+ elif args[0] in ['stop', 'down']:
+ self.stop_web()
+
+ else:
+ print "No command specified"
+ raise SystemExit(1)
+
+ # The web server part with hardcoded port number
+ def start_web(self, port=6788):
+ """Starts the webserver for directory listing of the Bcfg2 repo."""
+ try:
+ server_class = BaseHTTPServer.HTTPServer
+ handler_class = SimpleHTTPServer.SimpleHTTPRequestHandler
+ server_address = ('', port)
+ server = server_class(server_address, handler_class)
+ #server.serve_forever()
+ # Make the context manager for becoming a daemon process
+ daemon_context = daemon.DaemonContext()
+ daemon_context.files_preserve = [server.fileno()]
+
+ # Become a daemon process
+ with daemon_context:
+ server.serve_forever()
+ except:
+ logger.error("Failed to start webserver", exc_info=1)
+ #raise Bcfg2.Server.Admin.AdminInitError
+
+ def stop_web(self):
+ """Stops the webserver."""
+# self.shutdown = 1
+ self.shutdown()
+ # self.stopped = True
+# self.serve_forever()
+
diff --git a/build/lib/Bcfg2/Server/Admin/Xcmd.py b/build/lib/Bcfg2/Server/Admin/Xcmd.py
new file mode 100644
index 000000000..80d5cfb25
--- /dev/null
+++ b/build/lib/Bcfg2/Server/Admin/Xcmd.py
@@ -0,0 +1,49 @@
+import Bcfg2.Options
+import Bcfg2.Proxy
+import Bcfg2.Server.Admin
+
+import sys
+import xmlrpclib
+
+class Xcmd(Bcfg2.Server.Admin.Mode):
+ __shorthelp__ = ("XML-RPC Command Interface")
+ __longhelp__ = (__shorthelp__ + "\n\nbcfg2-admin xcmd command")
+ __usage__ = ("bcfg2-admin xcmd <command>")
+
+ def __call__(self, args):
+ optinfo = {
+ 'server': Bcfg2.Options.SERVER_LOCATION,
+ 'user': Bcfg2.Options.CLIENT_USER,
+ 'password': Bcfg2.Options.SERVER_PASSWORD,
+ 'key': Bcfg2.Options.SERVER_KEY,
+ 'certificate' : Bcfg2.Options.CLIENT_CERT,
+ 'ca' : Bcfg2.Options.CLIENT_CA
+ }
+ setup = Bcfg2.Options.OptionParser(optinfo)
+ setup.parse(sys.argv[2:])
+ Bcfg2.Proxy.RetryMethod.max_retries = 1
+ proxy = Bcfg2.Proxy.ComponentProxy(setup['server'],
+ setup['user'],
+ setup['password'],
+ key = setup['key'],
+ cert = setup['certificate'],
+ ca = setup['ca'], timeout=180)
+ if len(setup['args']) == 0:
+ print("Usage: xcmd <xmlrpc method> <optional arguments>")
+ return
+ cmd = setup['args'][0]
+ args = ()
+ if len(setup['args']) > 1:
+ args = tuple(setup['args'][1:])
+ try:
+ data = getattr(proxy, cmd)(*args)
+ except xmlrpclib.Fault, flt:
+ if flt.faultCode == 7:
+ print("Unknown method %s" % cmd)
+ return
+ elif flt.faultCode == 20:
+ return
+ else:
+ raise
+ if data is not None:
+ print data
diff --git a/build/lib/Bcfg2/Server/Admin/__init__.py b/build/lib/Bcfg2/Server/Admin/__init__.py
new file mode 100644
index 000000000..bb5c41895
--- /dev/null
+++ b/build/lib/Bcfg2/Server/Admin/__init__.py
@@ -0,0 +1,114 @@
+__revision__ = '$Revision$'
+
+__all__ = ['Mode', 'Client', 'Compare', 'Init', 'Minestruct', 'Perf',
+ 'Pull', 'Query', 'Reports', 'Snapshots', 'Tidy', 'Viz',
+ 'Xcmd', 'Group', 'Backup']
+
+import ConfigParser
+import logging
+import lxml.etree
+import sys
+
+import Bcfg2.Server.Core
+import Bcfg2.Options
+
+class ModeOperationError(Exception):
+ pass
+
+class Mode(object):
+ """Help message has not yet been added for mode."""
+ __shorthelp__ = 'Shorthelp not defined yet'
+ __longhelp__ = 'Longhelp not defined yet'
+ __args__ = []
+ def __init__(self, configfile):
+ self.configfile = configfile
+ self.__cfp = False
+ self.log = logging.getLogger('Bcfg2.Server.Admin.Mode')
+
+ def getCFP(self):
+ if not self.__cfp:
+ self.__cfp = ConfigParser.ConfigParser()
+ self.__cfp.read(self.configfile)
+ return self.__cfp
+
+ cfp = property(getCFP)
+
+ def __call__(self, args):
+ if len(args) > 0 and args[0] == 'help':
+ print self.__longhelp__
+ raise SystemExit(0)
+
+ def errExit(self, emsg):
+ print emsg
+ raise SystemExit(1)
+
+ def get_repo_path(self):
+ """Return repository path"""
+ return self.cfp.get('server', 'repository')
+
+ def load_stats(self, client):
+ stats = lxml.etree.parse("%s/etc/statistics.xml" %
+ (self.get_repo_path()))
+ hostent = stats.xpath('//Node[@name="%s"]' % client)
+ if not hostent:
+ self.errExit("Could not find stats for client %s" % (client))
+ return hostent[0]
+
+ def print_table(self, rows, justify='left', hdr=True, vdelim=" ", padding=1):
+ """Pretty print a table
+
+ rows - list of rows ([[row 1], [row 2], ..., [row n]])
+ hdr - if True the first row is treated as a table header
+ vdelim - vertical delimiter between columns
+ padding - # of spaces around the longest element in the column
+ justify - may be left,center,right
+
+ """
+ hdelim = "="
+ justify = {'left':str.ljust,
+ 'center':str.center,
+ 'right':str.rjust}[justify.lower()]
+
+ """
+ Calculate column widths (longest item in each column
+ plus padding on both sides)
+
+ """
+ cols = list(zip(*rows))
+ colWidths = [max([len(str(item))+2*padding for \
+ item in col]) for col in cols]
+ borderline = vdelim.join([w*hdelim for w in colWidths])
+
+ # print out the table
+ print(borderline)
+ for row in rows:
+ print(vdelim.join([justify(str(item), width) for \
+ (item, width) in zip(row, colWidths)]))
+ if hdr:
+ print(borderline)
+ hdr = False
+
+class MetadataCore(Mode):
+ """Base class for admin-modes that handle metadata."""
+ def __init__(self, configfile, usage, pwhitelist=None, pblacklist=None):
+ Mode.__init__(self, configfile)
+ options = {'plugins': Bcfg2.Options.SERVER_PLUGINS,
+ 'configfile': Bcfg2.Options.CFILE}
+ setup = Bcfg2.Options.OptionParser(options)
+ setup.hm = usage
+ setup.parse(sys.argv[1:])
+ if pwhitelist is not None:
+ setup['plugins'] = [x for x in setup['plugins'] if x in pwhitelist]
+ elif pblacklist is not None:
+ setup['plugins'] = [x for x in setup['plugins'] if x not in pblacklist]
+ try:
+ self.bcore = Bcfg2.Server.Core.Core(self.get_repo_path(),
+ setup['plugins'],
+ 'foo', 'UTF-8')
+ except Bcfg2.Server.Core.CoreInitError, msg:
+ self.errExit("Core load failed because %s" % msg)
+ self.bcore.fam.handle_events_in_interval(5)
+ self.metadata = self.bcore.metadata
+
+class StructureMode(MetadataCore):
+ pass
diff --git a/build/lib/Bcfg2/Server/Admin/test.py b/build/lib/Bcfg2/Server/Admin/test.py
new file mode 100644
index 000000000..06271b186
--- /dev/null
+++ b/build/lib/Bcfg2/Server/Admin/test.py
@@ -0,0 +1,73 @@
+import os
+import time
+import tarfile
+import sys
+datastore = '/var/lib/bcfg2'
+
+#Popen(['git', 'clone', 'https://github.com/solj/bcfg2-repo.git', datastore])
+#timestamp = time.strftime('%Y%m%d%H%M%S')
+#format = 'gz'
+#mode = 'w:' + format
+#filename = timestamp + '.tar' + '.' + format
+#out = tarfile.open('/home/fab/' + filename, mode=mode)
+
+
+#content = os.listdir(os.getcwd())
+#for item in content:
+# out.add(item)
+#out.close()
+#print "Archive %s was stored.\nLocation: %s" % (filename, datastore)
+
+#print os.getcwd()
+#print os.listdir(os.getcwd())
+
+#import shlex
+#args = shlex.split('env LC_ALL=C git clone https://github.com/solj/bcfg2-repo.git datastore')
+#print args
+
+#Popen("env LC_ALL=C git clone https://github.com/solj/bcfg2-repo.git datastore")
+
+#timestamp = time.strftime('%Y%m%d%H%M%S')
+#format = 'gz'
+#mode = 'w:' + format
+#filename = timestamp + '.tar' + '.' + format
+#out = tarfile.open(name = filename, mode = mode)
+##content = os.listdir(datastore)
+##for item in content:
+## out.add(item)
+##out.close()
+
+###t = tarfile.open(name = destination, mode = 'w:gz')
+#out.add(datastore, os.path.basename(datastore))
+#out.close()
+
+#print datastore, os.path.basename(datastore)
+
+#content = os.listdir(datastore)
+#for item in content:
+# #out.add(item)
+# print item
+
+#timestamp = time.strftime('%Y%m%d%H%M%S')
+#format = 'gz'
+#mode = 'w:' + format
+#filename = timestamp + '.tar' + '.' + format
+
+if len(sys.argv) < 2:
+ destination = datastore + '/'
+else:
+ destination = sys.argv[1]
+
+print destination
+#out = tarfile.open(destination + filename, mode=mode)
+#out.add(self.datastore, os.path.basename(self.datastore))
+#out.close()
+#print "Archive %s was stored at %s" % (filename, destination)
+
+#print 'The command line parameters are:'
+##for i in sys.argv:
+## print i
+
+#print sys.argv[0]
+#print sys.argv[1]
+##print sys.argv[2]
diff --git a/build/lib/Bcfg2/Server/Core.py b/build/lib/Bcfg2/Server/Core.py
new file mode 100644
index 000000000..ac67b8a69
--- /dev/null
+++ b/build/lib/Bcfg2/Server/Core.py
@@ -0,0 +1,439 @@
+"""Bcfg2.Server.Core provides the runtime support for Bcfg2 modules."""
+__revision__ = '$Revision$'
+
+import atexit
+import logging
+import lxml.etree
+import select
+import threading
+import time
+import xmlrpclib
+
+from Bcfg2.Component import Component, exposed
+from Bcfg2.Server.Plugin import PluginInitError, PluginExecutionError
+import Bcfg2.Server.FileMonitor
+import Bcfg2.Server.Plugins.Metadata
+
+logger = logging.getLogger('Bcfg2.Server.Core')
+
+def critical_error(operation):
+ """Log and err, traceback and return an xmlrpc fault to client."""
+ logger.error(operation, exc_info=1)
+ raise xmlrpclib.Fault(7, "Critical unexpected failure: %s" % (operation))
+
+try:
+ import psyco
+ psyco.full()
+except:
+ pass
+
+class CoreInitError(Exception):
+ """This error is raised when the core cannot be initialized."""
+ pass
+
+class Core(Component):
+ """The Core object is the container for all Bcfg2 Server logic and modules."""
+ name = 'bcfg2-server'
+ implementation = 'bcfg2-server'
+
+ def __init__(self, repo, plugins, password, encoding,
+ cfile='/etc/bcfg2.conf', ca=None,
+ filemonitor='default', start_fam_thread=False):
+ Component.__init__(self)
+ self.datastore = repo
+ if filemonitor not in Bcfg2.Server.FileMonitor.available:
+ logger.error("File monitor driver %s not available; forcing to default" % filemonitor)
+ filemonitor = 'default'
+ try:
+ self.fam = Bcfg2.Server.FileMonitor.available[filemonitor]()
+ except IOError:
+ logger.error("Failed to instantiate fam driver %s" % filemonitor,
+ exc_info=1)
+ raise CoreInitError, "failed to instantiate fam driver (used %s)" % \
+ filemonitor
+ self.pubspace = {}
+ self.cfile = cfile
+ self.cron = {}
+ self.plugins = {}
+ self.plugin_blacklist = {}
+ self.revision = '-1'
+ self.password = password
+ self.encoding = encoding
+ atexit.register(self.shutdown)
+ # Create an event to signal worker threads to shutdown
+ self.terminate = threading.Event()
+
+ if '' in plugins:
+ plugins.remove('')
+
+ for plugin in plugins:
+ if not plugin in self.plugins:
+ self.init_plugins(plugin)
+ # Remove blacklisted plugins
+ for p, bl in self.plugin_blacklist.items():
+ if len(bl) > 0:
+ logger.error("The following plugins conflict with %s;"
+ "Unloading %s" % (p, bl))
+ for plug in bl:
+ del self.plugins[plug]
+ # This section loads the experimental plugins
+ expl = [plug for (name, plug) in self.plugins.iteritems()
+ if plug.experimental]
+ if expl:
+ logger.info("Loading experimental plugin(s): %s" % \
+ (" ".join([x.name for x in expl])))
+ logger.info("NOTE: Interfaces subject to change")
+ depr = [plug for (name, plug) in self.plugins.iteritems()
+ if plug.deprecated]
+ # This section loads the deprecated plugins
+ if depr:
+ logger.info("Loading deprecated plugin(s): %s" % \
+ (" ".join([x.name for x in depr])))
+
+
+ mlist = [p for p in self.plugins.values() if \
+ isinstance(p, Bcfg2.Server.Plugin.Metadata)]
+ if len(mlist) == 1:
+ self.metadata = mlist[0]
+ else:
+ logger.error("No Metadata Plugin loaded; failed to instantiate Core")
+ raise CoreInitError, "No Metadata Plugin"
+ self.statistics = [plugin for plugin in self.plugins.values() if \
+ isinstance(plugin, Bcfg2.Server.Plugin.Statistics)]
+ self.pull_sources = [plugin for plugin in self.statistics if \
+ isinstance(plugin, Bcfg2.Server.Plugin.PullSource)]
+ self.generators = [plugin for plugin in self.plugins.values() if \
+ isinstance(plugin, Bcfg2.Server.Plugin.Generator)]
+ self.structures = [plugin for plugin in self.plugins.values() if \
+ isinstance(plugin, Bcfg2.Server.Plugin.Structure)]
+ self.connectors = [plugin for plugin in self.plugins.values() if \
+ isinstance(plugin, Bcfg2.Server.Plugin.Connector)]
+ self.ca = ca
+ self.fam_thread = threading.Thread(target=self._file_monitor_thread)
+ if start_fam_thread:
+ self.fam_thread.start()
+
+ def _file_monitor_thread(self):
+ """The thread for monitor the files."""
+ famfd = self.fam.fileno()
+ terminate = self.terminate
+ while not terminate.isSet():
+ try:
+ if famfd:
+ select.select([famfd], [], [], 2)
+ else:
+ if not self.fam.pending():
+ terminate.wait(15)
+ self.fam.handle_event_set(self.lock)
+ except:
+ continue
+ # VCS plugin periodic updates
+ for plugin in self.plugins.values():
+ if isinstance(plugin, Bcfg2.Server.Plugin.Version):
+ self.revision = plugin.get_revision()
+
+ def init_plugins(self, plugin):
+ """Handling for the plugins."""
+ try:
+ mod = getattr(__import__("Bcfg2.Server.Plugins.%s" %
+ (plugin)).Server.Plugins, plugin)
+ except ImportError, e:
+ try:
+ mod = __import__(plugin)
+ except:
+ logger.error("Failed to load plugin %s" % (plugin))
+ return
+ plug = getattr(mod, plugin)
+ # Blacklist conflicting plugins
+ cplugs = [conflict for conflict in plug.conflicts
+ if conflict in self.plugins]
+ self.plugin_blacklist[plug.name] = cplugs
+ try:
+ self.plugins[plugin] = plug(self, self.datastore)
+ except PluginInitError:
+ logger.error("Failed to instantiate plugin %s" % (plugin))
+ except:
+ logger.error("Unexpected instantiation failure for plugin %s" %
+ (plugin), exc_info=1)
+
+ def shutdown(self):
+ """Shuting down the plugins."""
+ if not self.terminate.isSet():
+ self.terminate.set()
+ for plugin in self.plugins.values():
+ plugin.shutdown()
+
+ def validate_data(self, metadata, data, base_cls):
+ """Checks the data structure."""
+ for plugin in self.plugins.values():
+ if isinstance(plugin, base_cls):
+ try:
+ if base_cls == Bcfg2.Server.Plugin.StructureValidator:
+ plugin.validate_structures(metadata, data)
+ elif base_cls == Bcfg2.Server.Plugin.GoalValidator:
+ plugin.validate_goals(metadata, data)
+ except Bcfg2.Server.Plugin.ValidationError, err:
+ logger.error("Plugin %s structure validation failed: %s" \
+ % (plugin.name, err.message))
+ raise
+ except:
+ logger.error("Plugin %s: unexpected structure validation failure" \
+ % (plugin.name), exc_info=1)
+
+ def GetStructures(self, metadata):
+ """Get all structures for client specified by metadata."""
+ structures = reduce(lambda x, y:x+y,
+ [struct.BuildStructures(metadata) for struct \
+ in self.structures], [])
+ sbundles = [b.get('name') for b in structures if b.tag == 'Bundle']
+ missing = [b for b in metadata.bundles if b not in sbundles]
+ if missing:
+ logger.error("Client %s configuration missing bundles: %s" \
+ % (metadata.hostname, ':'.join(missing)))
+ return structures
+
+ def BindStructure(self, structure, metadata):
+ """Bind a complete structure."""
+ for entry in structure.getchildren():
+ if entry.tag.startswith("Bound"):
+ entry.tag = entry.tag[5:]
+ continue
+ try:
+ self.Bind(entry, metadata)
+ except PluginExecutionError:
+ if 'failure' not in entry.attrib:
+ entry.set('failure', 'bind error')
+ logger.error("Failed to bind entry: %s %s" % \
+ (entry.tag, entry.get('name')))
+ except:
+ logger.error("Unexpected failure in BindStructure: %s %s" \
+ % (entry.tag, entry.get('name')), exc_info=1)
+
+ def Bind(self, entry, metadata):
+ """Bind an entry using the appropriate generator."""
+ if 'altsrc' in entry.attrib:
+ oldname = entry.get('name')
+ entry.set('name', entry.get('altsrc'))
+ entry.set('realname', oldname)
+ del entry.attrib['altsrc']
+ try:
+ ret = self.Bind(entry, metadata)
+ entry.set('name', oldname)
+ del entry.attrib['realname']
+ return ret
+ except:
+ entry.set('name', oldname)
+ logger.error("Failed binding entry %s:%s with altsrc %s" \
+ % (entry.tag, entry.get('name'),
+ entry.get('altsrc')))
+ logger.error("Falling back to %s:%s" % (entry.tag,
+ entry.get('name')))
+
+ glist = [gen for gen in self.generators if
+ entry.get('name') in gen.Entries.get(entry.tag, {})]
+ if len(glist) == 1:
+ return glist[0].Entries[entry.tag][entry.get('name')](entry, metadata)
+ elif len(glist) > 1:
+ generators = ", ".join([gen.name for gen in glist])
+ logger.error("%s %s served by multiple generators: %s" % \
+ (entry.tag, entry.get('name'), generators))
+ g2list = [gen for gen in self.generators if
+ gen.HandlesEntry(entry, metadata)]
+ if len(g2list) == 1:
+ return g2list[0].HandleEntry(entry, metadata)
+ entry.set('failure', 'no matching generator')
+ raise PluginExecutionError, (entry.tag, entry.get('name'))
+
+ def BuildConfiguration(self, client):
+ """Build configuration for clients."""
+ start = time.time()
+ config = lxml.etree.Element("Configuration", version='2.0', \
+ revision=self.revision)
+ try:
+ meta = self.build_metadata(client)
+ except Bcfg2.Server.Plugins.Metadata.MetadataConsistencyError:
+ logger.error("Metadata consistency error for client %s" % client)
+ return lxml.etree.Element("error", type='metadata error')
+
+ try:
+ structures = self.GetStructures(meta)
+ except:
+ logger.error("error in GetStructures", exc_info=1)
+ return lxml.etree.Element("error", type='structure error')
+
+ self.validate_data(meta, structures,
+ Bcfg2.Server.Plugin.StructureValidator)
+
+ # Perform altsrc consistency checking
+ esrcs = {}
+ for struct in structures:
+ for entry in struct:
+ key = (entry.tag, entry.get('name'))
+ if key in esrcs:
+ if esrcs[key] != entry.get('altsrc'):
+ logger.error("Found inconsistent altsrc mapping for entry %s:%s" % key)
+ else:
+ esrcs[key] = entry.get('altsrc', None)
+ del esrcs
+
+ for astruct in structures:
+ try:
+ self.BindStructure(astruct, meta)
+ config.append(astruct)
+ except:
+ logger.error("error in BindStructure", exc_info=1)
+ self.validate_data(meta, config, Bcfg2.Server.Plugin.GoalValidator)
+ logger.info("Generated config for %s in %.03fs" % \
+ (client, time.time() - start))
+ return config
+
+ def GetDecisions(self, metadata, mode):
+ """Get data for the decision list."""
+ result = []
+ for plugin in self.plugins.values():
+ try:
+ if isinstance(plugin, Bcfg2.Server.Plugin.Decision):
+ result += plugin.GetDecisions(metadata, mode)
+ except:
+ logger.error("Plugin: %s failed to generate decision list" \
+ % plugin.name, exc_info=1)
+ return result
+
+ def build_metadata(self, client_name):
+ """Build the metadata structure."""
+ if not hasattr(self, 'metadata'):
+ # some threads start before metadata is even loaded
+ raise Bcfg2.Server.Plugins.Metadata.MetadataRuntimeError
+ imd = self.metadata.get_initial_metadata(client_name)
+ for conn in self.connectors:
+ grps = conn.get_additional_groups(imd)
+ self.metadata.merge_additional_groups(imd, grps)
+ for conn in self.connectors:
+ data = conn.get_additional_data(imd)
+ self.metadata.merge_additional_data(imd, conn.name, data)
+ imd.query.by_name = self.build_metadata
+ return imd
+
+ def process_statistics(self, client_name, statistics):
+ """Proceed statistics for client."""
+ meta = self.build_metadata(client_name)
+ state = statistics.find(".//Statistics")
+ if state.get('version') >= '2.0':
+ for plugin in self.statistics:
+ try:
+ plugin.process_statistics(meta, statistics)
+ except:
+ logger.error("Plugin %s failed to process stats from %s" \
+ % (plugin.name, meta.hostname),
+ exc_info=1)
+
+ logger.info("Client %s reported state %s" % (client_name,
+ state.get('state')))
+ # XMLRPC handlers start here
+
+ @exposed
+ def GetProbes(self, address):
+ """Fetch probes for a particular client."""
+ resp = lxml.etree.Element('probes')
+ try:
+ name = self.metadata.resolve_client(address)
+ meta = self.build_metadata(name)
+
+ for plugin in [p for p in list(self.plugins.values()) \
+ if isinstance(p, Bcfg2.Server.Plugin.Probing)]:
+ for probe in plugin.GetProbes(meta):
+ resp.append(probe)
+ return lxml.etree.tostring(resp, encoding='UTF-8',
+ xml_declaration=True)
+ except Bcfg2.Server.Plugins.Metadata.MetadataConsistencyError:
+ warning = 'Client metadata resolution error for %s; check server log' % address[0]
+ self.logger.warning(warning)
+ raise xmlrpclib.Fault(6, warning)
+ except Bcfg2.Server.Plugins.Metadata.MetadataRuntimeError:
+ err_msg = 'Metadata system runtime failure'
+ self.logger.error(err_msg)
+ raise xmlrpclib.Fault(6, err_msg)
+ except:
+ critical_error("Error determining client probes")
+
+ @exposed
+ def RecvProbeData(self, address, probedata):
+ """Receive probe data from clients."""
+ try:
+ name = self.metadata.resolve_client(address)
+ meta = self.build_metadata(name)
+ except Bcfg2.Server.Plugins.Metadata.MetadataConsistencyError:
+ warning = 'Metadata consistency error'
+ self.logger.warning(warning)
+ raise xmlrpclib.Fault(6, warning)
+ # clear dynamic groups
+ self.metadata.cgroups[meta.hostname] = []
+ try:
+ xpdata = lxml.etree.XML(probedata)
+ except:
+ self.logger.error("Failed to parse probe data from client %s" % \
+ (address[0]))
+ return False
+
+ sources = []
+ for data in xpdata:
+ if data.get('source') not in sources:
+ sources.append(data.get('source'))
+ for source in sources:
+ if source not in self.plugins:
+ self.logger.warning("Failed to locate plugin %s" % (source))
+ continue
+ dl = [data for data in xpdata if data.get('source') == source]
+ try:
+ self.plugins[source].ReceiveData(meta, dl)
+ except:
+ logger.error("Failed to process probe data from client %s" % \
+ (address[0]), exc_info=1)
+ return True
+
+ @exposed
+ def AssertProfile(self, address, profile):
+ """Set profile for a client."""
+ try:
+ client = self.metadata.resolve_client(address)
+ self.metadata.set_profile(client, profile, address)
+ except (Bcfg2.Server.Plugins.Metadata.MetadataConsistencyError,
+ Bcfg2.Server.Plugins.Metadata.MetadataRuntimeError):
+ warning = 'Metadata consistency error'
+ self.logger.warning(warning)
+ raise xmlrpclib.Fault(6, warning)
+ return True
+
+ @exposed
+ def GetConfig(self, address, checksum=False):
+ """Build config for a client."""
+ try:
+ client = self.metadata.resolve_client(address)
+ config = self.BuildConfiguration(client)
+ return lxml.etree.tostring(config, encoding='UTF-8',
+ xml_declaration=True)
+ except Bcfg2.Server.Plugins.Metadata.MetadataConsistencyError:
+ self.logger.warning("Metadata consistency failure for %s" % (address))
+ raise xmlrpclib.Fault(6, "Metadata consistency failure")
+
+ @exposed
+ def RecvStats(self, address, stats):
+ """Act on statistics upload."""
+ sdata = lxml.etree.XML(stats)
+ client = self.metadata.resolve_client(address)
+ self.process_statistics(client, sdata)
+ return "<ok/>"
+
+ def authenticate(self, cert, user, password, address):
+ if self.ca:
+ acert = cert
+ else:
+ # No ca, so no cert validation can be done
+ acert = None
+ return self.metadata.AuthenticateConnection(acert, user, password, address)
+
+ @exposed
+ def GetDecisionList(self, address, mode):
+ """Get the data of the decision list."""
+ client = self.metadata.resolve_client(address)
+ meta = self.build_metadata(client)
+ return self.GetDecisions(meta, mode)
diff --git a/build/lib/Bcfg2/Server/FileMonitor.py b/build/lib/Bcfg2/Server/FileMonitor.py
new file mode 100644
index 000000000..0f09f7751
--- /dev/null
+++ b/build/lib/Bcfg2/Server/FileMonitor.py
@@ -0,0 +1,307 @@
+"""Bcfg2.Server.FileMonitor provides the support for monitorung files."""
+
+import logging
+import os
+import stat
+from time import sleep, time
+
+logger = logging.getLogger('Bcfg2.Server.FileMonitor')
+
+def ShouldIgnore(event):
+ """Test if the event should be suppresed."""
+ # FIXME should move event suppression out of the core
+ if event.filename.split('/')[-1] == '.svn':
+ return True
+ if event.filename.endswith('~') or \
+ event.filename.startswith('#') or event.filename.startswith('.#'):
+ #logger.error("Suppressing event for file %s" % (event.filename))
+ return True
+ return False
+
+class Event(object):
+ def __init__(self, request_id, filename, code):
+ self.requestID = request_id
+ self.filename = filename
+ self.action = code
+
+ def code2str(self):
+ """return static code for event"""
+ return self.action
+
+available = {}
+class FileMonitor(object):
+ """File Monitor baseclass."""
+ def __init__(self, debug=False):
+ object.__init__(self)
+ self.debug = debug
+ self.handles = dict()
+
+ def get_event(self):
+ return None
+
+ def pending(self):
+ return False
+
+ def fileno(self):
+ return 0
+
+ def handle_one_event(self, event):
+ if ShouldIgnore(event):
+ return
+ if event.requestID not in self.handles:
+ logger.info("Got event for unexpected id %s, file %s" %
+ (event.requestID, event.filename))
+ return
+ if self.debug:
+ logger.info("Dispatching event %s %s to obj %s" \
+ % (event.code2str(), event.filename,
+ self.handles[event.requestID]))
+ try:
+ self.handles[event.requestID].HandleEvent(event)
+ except:
+ logger.error("error in handling of gamin event for %s" % \
+ (event.filename), exc_info=1)
+
+ def handle_event_set(self, lock=None):
+ count = 1
+ event = self.get_event()
+ start = time()
+ if lock:
+ lock.acquire()
+ try:
+ self.handle_one_event(event)
+ while self.pending():
+ self.handle_one_event(self.get_event())
+ count += 1
+ except:
+ pass
+ if lock:
+ lock.release()
+ end = time()
+ logger.info("Handled %d events in %.03fs" % (count, (end-start)))
+
+ def handle_events_in_interval(self, interval):
+ end = time() + interval
+ while time() < end:
+ if self.pending():
+ self.handle_event_set()
+ end = time() + interval
+ else:
+ sleep(0.5)
+
+
+class FamFam(object):
+ """The fam object is a set of callbacks for file alteration events (FAM support)."""
+
+ def __init__(self):
+ object.__init__(self)
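+ # _fam comes from the python-fam bindings; its (guarded) import is
+ # assumed to live elsewhere in this module, outside this hunk.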
+ self.fm = _fam.open()
+ self.users = {}
+ self.handles = {}
+ self.debug = False
+
+ def fileno(self):
+ """Return fam file handle number."""
+ return self.fm.fileno()
+
+ def handle_event_set(self, _):
+ self.Service()
+
+ def handle_events_in_interval(self, interval):
+ now = time()
+ while (time() - now) < interval:
+ if self.Service():
+ now = time()
+
+ def AddMonitor(self, path, obj):
+ """Add a monitor to path, installing a callback to obj.HandleEvent."""
+ mode = os.stat(path)[stat.ST_MODE]
+ if stat.S_ISDIR(mode):
+ handle = self.fm.monitorDirectory(path, None)
+ else:
+ handle = self.fm.monitorFile(path, None)
+ self.handles[handle.requestID()] = handle
+ if obj != None:
+ self.users[handle.requestID()] = obj
+ return handle.requestID()
+
+ def Service(self, interval=0.50):
+ """Handle all fam work."""
+ count = 0
+ collapsed = 0
+ rawevents = []
+ start = time()
+ now = time()
+ while (time() - now) < interval:
+ if self.fm.pending():
+ while self.fm.pending():
+ count += 1
+ rawevents.append(self.fm.nextEvent())
+ now = time()
+ unique = []
+ bookkeeping = []
+ for event in rawevents:
+ if ShouldIgnore(event):
+ continue
+ if event.code2str() != 'changed':
+ # process all non-change events
+ unique.append(event)
+ else:
+ if (event.filename, event.requestID) not in bookkeeping:
+ bookkeeping.append((event.filename, event.requestID))
+ unique.append(event)
+ else:
+ collapsed += 1
+ for event in unique:
+ if event.requestID in self.users:
+ try:
+ self.users[event.requestID].HandleEvent(event)
+ except:
+                    logger.error("error handling event for file %s" % event.filename, exc_info=1)
+ end = time()
+ logger.info("Processed %s fam events in %03.03f seconds. %s coalesced" %
+ (count, (end - start), collapsed))
+ return count
+
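+# Coalescing example: if a file is written three times within one Service()
+# window, the duplicate 'changed' events for that (filename, requestID) pair
+# are delivered once; events other than 'changed' are never coalesced.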
+
+
+class Fam(FileMonitor):
+ """
+ The fam object is a set of callbacks for
+ file alteration events (FAM support).
+ """
+
+ def __init__(self, debug=False):
+ FileMonitor.__init__(self, debug)
+ self.fm = _fam.open()
+
+ def fileno(self):
+ return self.fm.fileno()
+
+ def AddMonitor(self, path, obj):
+ """Add a monitor to path, installing a callback to obj.HandleEvent."""
+ mode = os.stat(path)[stat.ST_MODE]
+ if stat.S_ISDIR(mode):
+ handle = self.fm.monitorDirectory(path, None)
+ else:
+ handle = self.fm.monitorFile(path, None)
+        if obj is not None:
+ self.handles[handle.requestID()] = obj
+ return handle.requestID()
+
+ def pending(self):
+ return self.fm.pending()
+
+ def get_event(self):
+ return self.fm.nextEvent()
+
+class Pseudo(FileMonitor):
+    """
+    The Pseudo monitor only generates the initial 'exists'/'endExist'
+    events at startup (static monitor support); it never reports later
+    file alterations.
+    """
+
+ def __init__(self, debug=False):
+        FileMonitor.__init__(self, debug)
+ self.pending_events = []
+
+ def pending(self):
+ return len(self.pending_events) != 0
+
+    def get_event(self):
+        # deliver events FIFO so 'endExist' arrives after the 'exists' events
+        return self.pending_events.pop(0)
+
+    def AddMonitor(self, path, obj):
+        """Add a monitor to path, installing a callback to obj.HandleEvent."""
+        handleID = len(self.handles)
+        mode = os.stat(path)[stat.ST_MODE]
+        self.pending_events.append(Event(handleID, path, 'exists'))
+        if stat.S_ISDIR(mode):
+            for includedFile in os.listdir(path):
+                self.pending_events.append(Event(handleID, includedFile, 'exists'))
+            self.pending_events.append(Event(handleID, path, 'endExist'))
+        if obj is not None:
+            self.handles[handleID] = obj
+        return handleID
+
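+# Event order produced by Pseudo for a directory: 'exists' for the directory,
+# 'exists' for each entry returned by os.listdir(), then 'endExist' for the
+# directory; a plain file gets a single 'exists' event. No events are
+# generated after startup.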
+
+try:
+ from gamin import WatchMonitor, GAMCreated, GAMExists, GAMEndExist, \
+ GAMChanged, GAMDeleted, GAMMoved
+
+ class GaminEvent(Event):
+ """
+ This class provides an event analogous to
+ python-fam events based on gamin sources.
+ """
+ def __init__(self, request_id, filename, code):
+ Event.__init__(self, request_id, filename, code)
+ action_map = {GAMCreated: 'created', GAMExists: 'exists',
+ GAMChanged: 'changed', GAMDeleted: 'deleted',
+ GAMEndExist: 'endExist', GAMMoved: 'moved'}
+ if code in action_map:
+ self.action = action_map[code]
+
+ class Gamin(FileMonitor):
+        """
+        The Gamin monitor uses the gamin library (a FAM work-alike) to
+        deliver file alteration events.
+        """
+ def __init__(self, debug=False):
+ FileMonitor.__init__(self, debug)
+ self.mon = WatchMonitor()
+ self.counter = 0
+ self.events = []
+
+ def fileno(self):
+ return self.mon.get_fd()
+
+ def queue(self, path, action, request_id):
+ """queue up the event for later handling"""
+ self.events.append(GaminEvent(request_id, path, action))
+
+ def AddMonitor(self, path, obj):
+ """Add a monitor to path, installing a callback to obj.HandleEvent."""
+ handle = self.counter
+ self.counter += 1
+ mode = os.stat(path)[stat.ST_MODE]
+
+ # Flush queued gamin events
+ while self.mon.event_pending():
+ self.mon.handle_one_event()
+
+ if stat.S_ISDIR(mode):
+ self.mon.watch_directory(path, self.queue, handle)
+ else:
+ self.mon.watch_file(path, self.queue, handle)
+ self.handles[handle] = obj
+ return handle
+
+ def pending(self):
+ return len(self.events) > 0 or self.mon.event_pending()
+
+ def get_event(self):
+ if self.mon.event_pending():
+ self.mon.handle_one_event()
+ return self.events.pop(0)
+
+ available['gamin'] = Gamin
+except ImportError:
+ # fall back to _fam
+ pass
+
+try:
+ import _fam
+ available['fam'] = FamFam
+except ImportError:
+ pass
+available['pseudo'] = Pseudo
+
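+# Prefer the richer drivers whose bindings imported successfully; Pseudo is
+# always present as a last resort.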
+for fdrv in ['gamin', 'fam', 'pseudo']:
+ if fdrv in available:
+ available['default'] = available[fdrv]
+ break
diff --git a/build/lib/Bcfg2/Server/Hostbase/__init__.py b/build/lib/Bcfg2/Server/Hostbase/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/build/lib/Bcfg2/Server/Hostbase/__init__.py
diff --git a/build/lib/Bcfg2/Server/Hostbase/backends.py b/build/lib/Bcfg2/Server/Hostbase/backends.py
new file mode 100644
index 000000000..aa822409c
--- /dev/null
+++ b/build/lib/Bcfg2/Server/Hostbase/backends.py
@@ -0,0 +1,68 @@
+from django.contrib.auth.models import User
+#from ldapauth import *
+from nisauth import *
+
+__revision__ = '$Revision$'
+
+## class LDAPBackend(object):
+
+## def authenticate(self,username=None,password=None):
+## try:
+
+## l = ldapauth(username,password)
+## temp_pass = User.objects.make_random_password(100)
+## ldap_user = dict(username=l.sAMAccountName,
+## )
+## user_session_obj = dict(
+## email=l.email,
+## first_name=l.name_f,
+## last_name=l.name_l,
+## uid=l.badge_no
+## )
+## #fixme: need to add this user session obj to session
+## #print str(ldap_user)
+## user,created = User.objects.get_or_create(username=username)
+## #print user
+## #print "created " + str(created)
+## return user
+
+## except LDAPAUTHError,e:
+## #print str(e)
+## return None
+
+## def get_user(self,user_id):
+## try:
+## return User.objects.get(pk=user_id)
+## except User.DoesNotExist, e:
+## print str(e)
+## return None
+
+
+class NISBackend(object):
+
+ def authenticate(self, username=None, password=None):
+ try:
+ n = nisauth(username, password)
+ temp_pass = User.objects.make_random_password(100)
+ nis_user = dict(username=username,
+ )
+
+ user_session_obj = dict(
+ email = username + "@mcs.anl.gov",
+ first_name = None,
+ last_name = None,
+ uid = n.uid
+ )
+ user, created = User.objects.get_or_create(username=username)
+
+ return user
+
+ except NISAUTHError, e:
+ return None
+
+
+ def get_user(self, user_id):
+ try:
+ return User.objects.get(pk=user_id)
+ except User.DoesNotExist, e:
+ return None
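+
+# To activate this backend, a deployment's Django settings are assumed to
+# include something like:
+#   AUTHENTICATION_BACKENDS = ('Bcfg2.Server.Hostbase.backends.NISBackend',
+#                              'django.contrib.auth.backends.ModelBackend')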
diff --git a/build/lib/Bcfg2/Server/Hostbase/hostbase/__init__.py b/build/lib/Bcfg2/Server/Hostbase/hostbase/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/build/lib/Bcfg2/Server/Hostbase/hostbase/__init__.py
diff --git a/build/lib/Bcfg2/Server/Hostbase/hostbase/admin.py b/build/lib/Bcfg2/Server/Hostbase/hostbase/admin.py
new file mode 100644
index 000000000..70a2233cc
--- /dev/null
+++ b/build/lib/Bcfg2/Server/Hostbase/hostbase/admin.py
@@ -0,0 +1,15 @@
+from django.contrib import admin
+
+from models import Host, Interface, IP, MX, Name, CName, Nameserver, ZoneAddress, Zone, Log, ZoneLog
+
+admin.site.register(Host)
+admin.site.register(Interface)
+admin.site.register(IP)
+admin.site.register(MX)
+admin.site.register(Name)
+admin.site.register(CName)
+admin.site.register(Nameserver)
+admin.site.register(ZoneAddress)
+admin.site.register(Zone)
+admin.site.register(Log)
+admin.site.register(ZoneLog)
diff --git a/build/lib/Bcfg2/Server/Hostbase/hostbase/models.py b/build/lib/Bcfg2/Server/Hostbase/hostbase/models.py
new file mode 100644
index 000000000..3f08a09a0
--- /dev/null
+++ b/build/lib/Bcfg2/Server/Hostbase/hostbase/models.py
@@ -0,0 +1,210 @@
+from django.db import models
+
+# Create your models here.
+class Host(models.Model):
+ NETGROUP_CHOICES = (
+ ('none', 'none'),('cave', 'cave'),('ccst', 'ccst'),('mcs', 'mcs'),
+ ('mmlab', 'mmlab'),('sp', 'sp'),('red', 'red'),('virtual', 'virtual'),
+ ('win', 'win'),('xterm', 'xterm'),('lcrc', 'lcrc'),('anlext', 'anlext'),
+ ('teragrid', 'teragrid')
+ )
+ STATUS_CHOICES = (
+ ('active','active'),('dormant','dormant')
+ )
+ SUPPORT_CHOICES = (
+ ('green','green'),('yellow','yellow'),('red','red')
+ )
+ CLASS_CHOICES = (
+ ('scientific','scientific'),
+ ('operations','operations'),('guest','guest'),
+ ('confidential','confidential'),('public','public')
+ )
+ WHATAMI_CHOICES = (
+ ('aix-3', 'aix-3'), ('aix-4', 'aix-4'),
+ ('aix-5', 'aix-5'), ('baytech', 'baytech'),
+ ('decserver', 'decserver'), ('dialup', 'dialup'),
+ ('dos', 'dos'), ('freebsd', 'freebsd'),
+ ('hpux', 'hpux'), ('irix-5', 'irix-5'),
+ ('irix-6', 'irix-6'), ('linux', 'linux'),
+ ('linux-2', 'linux-2'), ('linux-rh73', 'linux-rh73'),
+ ('linux-rh8', 'linux-rh8'), ('linux-sles8', 'linux-sles8'),
+ ('linux-sles8-64', 'linux-sles8-64'), ('linux-sles8-ia32', 'linux-sles8-ia32'),
+ ('linux-sles8-ia64', 'linux-sles8-ia64'), ('mac', 'mac'),
+ ('network', 'network'), ('next', 'next'),
+ ('none', 'none'), ('osf', 'osf'), ('printer', 'printer'),
+ ('robot', 'robot'), ('solaris-2', 'solaris-2'),
+ ('sun4', 'sun4'), ('unknown', 'unknown'), ('virtual', 'virtual'),
+ ('win31', 'win31'), ('win95', 'win95'),
+ ('winNTs', 'winNTs'), ('winNTw', 'winNTw'),
+ ('win2k', 'win2k'), ('winXP', 'winXP'), ('xterm', 'xterm')
+ )
+ hostname = models.CharField(max_length=64)
+ whatami = models.CharField(max_length=16)
+ netgroup = models.CharField(max_length=32, choices=NETGROUP_CHOICES)
+ security_class = models.CharField('class', max_length=16)
+ support = models.CharField(max_length=8, choices=SUPPORT_CHOICES)
+ csi = models.CharField(max_length=32, blank=True)
+ printq = models.CharField(max_length=32, blank=True)
+ outbound_smtp = models.BooleanField()
+ primary_user = models.EmailField()
+ administrator = models.EmailField(blank=True)
+ location = models.CharField(max_length=16)
+ comments = models.TextField(blank=True)
+ expiration_date = models.DateField(null=True, blank=True)
+ last = models.DateField(auto_now=True, auto_now_add=True)
+ status = models.CharField(max_length=7, choices=STATUS_CHOICES)
+ dirty = models.BooleanField()
+
+ class Admin:
+ list_display = ('hostname', 'last')
+ search_fields = ['hostname']
+
+ def __str__(self):
+ return self.hostname
+
+ def get_logs(self):
+ """
+ Get host's log.
+ """
+ return Log.objects.filter(hostname=self.hostname)
+
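+# e.g. Host.objects.get(hostname='foo.mcs.anl.gov').get_logs() returns the Log
+# rows for that host (model defined below); note that Log.hostname is a plain
+# string rather than a foreign key (hostname illustrative).
+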
+class Interface(models.Model):
+ TYPE_CHOICES = (
+ ('eth', 'ethernet'), ('wl', 'wireless'), ('virtual', 'virtual'), ('myr', 'myr'),
+ ('mgmt', 'mgmt'), ('tape', 'tape'), ('fe', 'fe'), ('ge', 'ge'),
+ )
+    # FIXME: The new admin interface has changed a lot.
+ #host = models.ForeignKey(Host, edit_inline=models.TABULAR, num_in_admin=2)
+ host = models.ForeignKey(Host)
+    # FIXME: The new admin interface has changed a lot.
+ #mac_addr = models.CharField(max_length=32, core=True)
+ mac_addr = models.CharField(max_length=32)
+ hdwr_type = models.CharField('type', max_length=16, choices=TYPE_CHOICES, blank=True)
+    # FIXME: The new admin interface has changed a lot.
+ # radio_admin=True, blank=True)
+ dhcp = models.BooleanField()
+
+ def __str__(self):
+ return self.mac_addr
+
+ class Admin:
+ list_display = ('mac_addr', 'host')
+ search_fields = ['mac_addr']
+
+class IP(models.Model):
+ interface = models.ForeignKey(Interface)
+    # FIXME: The new admin interface has changed a lot.
+ # edit_inline=models.TABULAR, num_in_admin=1)
+ #ip_addr = models.IPAddressField(core=True)
+ ip_addr = models.IPAddressField()
+
+ def __str__(self):
+ return self.ip_addr
+
+ class Admin:
+ pass
+
+ class Meta:
+ ordering = ('ip_addr', )
+
+class MX(models.Model):
+ priority = models.IntegerField(blank=True)
+    # FIXME: The new admin interface has changed a lot.
+ #mx = models.CharField(max_length=64, blank=True, core=True)
+ mx = models.CharField(max_length=64, blank=True)
+
+ def __str__(self):
+ return (" ".join([str(self.priority), self.mx]))
+
+ class Admin:
+ pass
+
+class Name(models.Model):
+ DNS_CHOICES = (
+ ('global','global'),('internal','ANL internal'),
+ ('private','private')
+ )
+    # FIXME: The new admin interface has changed a lot.
+ #ip = models.ForeignKey(IP, edit_inline=models.TABULAR, num_in_admin=1)
+ ip = models.ForeignKey(IP)
+    # FIXME: The new admin interface has changed a lot.
+ #name = models.CharField(max_length=64, core=True)
+ name = models.CharField(max_length=64)
+ dns_view = models.CharField(max_length=16, choices=DNS_CHOICES)
+ only = models.BooleanField(blank=True)
+ mxs = models.ManyToManyField(MX)
+
+ def __str__(self):
+ return self.name
+
+ class Admin:
+ pass
+
+class CName(models.Model):
+    # FIXME: The new admin interface has changed a lot.
+ #name = models.ForeignKey(Name, edit_inline=models.TABULAR, num_in_admin=1)
+ name = models.ForeignKey(Name)
+    # FIXME: The new admin interface has changed a lot.
+ #cname = models.CharField(max_length=64, core=True)
+ cname = models.CharField(max_length=64)
+
+ def __str__(self):
+ return self.cname
+
+ class Admin:
+ pass
+
+class Nameserver(models.Model):
+ name = models.CharField(max_length=64, blank=True)
+
+ def __str__(self):
+ return self.name
+
+ class Admin:
+ pass
+
+class ZoneAddress(models.Model):
+ ip_addr = models.IPAddressField(blank=True)
+
+ def __str__(self):
+ return self.ip_addr
+
+ class Admin:
+ pass
+
+class Zone(models.Model):
+ zone = models.CharField(max_length=64)
+ serial = models.IntegerField()
+ admin = models.CharField(max_length=64)
+ primary_master = models.CharField(max_length=64)
+ expire = models.IntegerField()
+ retry = models.IntegerField()
+ refresh = models.IntegerField()
+ ttl = models.IntegerField()
+ nameservers = models.ManyToManyField(Nameserver, blank=True)
+ mxs = models.ManyToManyField(MX, blank=True)
+ addresses = models.ManyToManyField(ZoneAddress, blank=True)
+ aux = models.TextField(blank=True)
+
+ def __str__(self):
+ return self.zone
+
+ class Admin:
+ pass
+
+class Log(models.Model):
+ # FIXME: Proposal hostname = models.ForeignKey(Host)
+ hostname = models.CharField(max_length=64)
+ date = models.DateTimeField(auto_now=True, auto_now_add=True)
+ log = models.TextField()
+
+ def __str__(self):
+ return self.hostname
+
+class ZoneLog(models.Model):
+ zone = models.CharField(max_length=64)
+ date = models.DateTimeField(auto_now=True, auto_now_add=True)
+ log = models.TextField()
+
+ def __str__(self):
+ return self.zone
diff --git a/build/lib/Bcfg2/Server/Hostbase/hostbase/urls.py b/build/lib/Bcfg2/Server/Hostbase/hostbase/urls.py
new file mode 100644
index 000000000..0ee204abe
--- /dev/null
+++ b/build/lib/Bcfg2/Server/Hostbase/hostbase/urls.py
@@ -0,0 +1,68 @@
+# -*- coding: utf-8 -*-
+from django.conf.urls.defaults import *
+from django.contrib.auth.decorators import login_required
+from django.core.urlresolvers import reverse
+from django.views.generic.create_update import create_object, update_object, delete_object
+from django.views.generic.list_detail import object_detail, object_list
+
+from models import Host, Zone, Log
+
+host_detail_dict = {
+ 'queryset':Host.objects.all(),
+ 'template_name':'host.html',
+ 'template_object_name':'host',
+}
+
+host_delete_dict = {
+ 'model':Host,
+ 'post_delete_redirect':'/',
+}
+
+host_log_detail_dict = host_detail_dict.copy()
+host_log_detail_dict['template_name'] = 'logviewer.html'
+
+host_dns_detail_dict = host_detail_dict.copy()
+host_dns_detail_dict['template_name'] = 'dns.html'
+
+zone_new_dict = {
+ 'model':Zone,
+ 'template_name':'zonenew.html',
+ 'post_save_redirect':'../%(id)s',
+}
+
+zones_list_dict = {
+ 'queryset':Zone.objects.all(),
+ 'template_name':'zones.html',
+ 'template_object_name':'zone',
+}
+
+zone_detail_dict = {
+ 'queryset':Zone.objects.all(),
+ 'template_name':'zoneview.html',
+ 'template_object_name':'zone',
+}
+
+urlpatterns = patterns('',
+ (r'^(?P<object_id>\d+)/$', object_detail, host_detail_dict, 'host_detail'),
+ (r'^zones/new/$', login_required(create_object), zone_new_dict, 'zone_new'),
+ (r'^zones/(?P<object_id>\d+)/edit', login_required(update_object), zone_new_dict, 'zone_edit'),
+ (r'^zones/$', object_list, zones_list_dict, 'zone_list'),
+    (r'^zones/(?P<object_id>\d+)/$', object_detail, zone_detail_dict, 'zone_detail'),
+ (r'^\d+/logs/(?P<object_id>\d+)/', object_detail, { 'queryset':Log.objects.all() }, 'log_detail'),
+ (r'^(?P<object_id>\d+)/logs/', object_detail, host_log_detail_dict, 'host_log_list'),
+ (r'^(?P<object_id>\d+)/dns', object_detail, host_dns_detail_dict, 'host_dns_list'),
+ (r'^(?P<object_id>\d+)/remove', login_required(delete_object), host_delete_dict, 'host_delete'),
+)
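+
+# e.g. GET /hostbase/42/ resolves to the generic object_detail view with
+# host_detail_dict and renders host.html for Host id 42, while
+# /hostbase/42/remove requires login and dispatches to delete_object
+# (id 42 is illustrative).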
+
+urlpatterns += patterns('Bcfg2.Server.Hostbase.hostbase.views',
+ (r'^$', 'search'),
+ (r'^(?P<host_id>\d+)/edit', 'edit'),
+ (r'^(?P<host_id>\d+)/(?P<item>\D+)/(?P<item_id>\d+)/confirm', 'confirm'),
+ (r'^(?P<host_id>\d+)/(?P<item>\D+)/(?P<item_id>\d+)/(?P<name_id>\d+)/confirm', 'confirm'),
+ (r'^(?P<host_id>\d+)/dns/edit', 'dnsedit'),
+ (r'^new', 'new'),
+ (r'^(?P<host_id>\d+)/copy', 'copy'),
+# (r'^hostinfo', 'hostinfo'),
+ (r'^zones/(?P<zone_id>\d+)/(?P<item>\D+)/(?P<item_id>\d+)/confirm', 'confirm'),
+)
diff --git a/build/lib/Bcfg2/Server/Hostbase/hostbase/views.py b/build/lib/Bcfg2/Server/Hostbase/hostbase/views.py
new file mode 100644
index 000000000..ff1d4710d
--- /dev/null
+++ b/build/lib/Bcfg2/Server/Hostbase/hostbase/views.py
@@ -0,0 +1,972 @@
+"""Views.py
+Contains all the views associated with the hostbase app
+Also has does form validation
+"""
+__revision__ = "$Revision: $"
+
+from django.http import HttpResponse, HttpResponseRedirect
+
+from django.contrib.auth.decorators import login_required
+from django.contrib.auth import logout
+from django.template import RequestContext
+from Bcfg2.Server.Hostbase.hostbase.models import *
+from datetime import date
+from django.db import connection
+from django.shortcuts import render_to_response
+from django import forms
+from Bcfg2.Server.Hostbase import settings, regex
+import re, copy
+
+attribs = ['hostname', 'whatami', 'netgroup', 'security_class', 'support',
+ 'csi', 'printq', 'primary_user', 'administrator', 'location',
+ 'status', 'comments']
+
+zoneattribs = ['zone', 'admin', 'primary_master', 'expire', 'retry',
+ 'refresh', 'ttl', 'aux']
+
+dispatch = {'mac_addr':'i.mac_addr LIKE \'%%%%%s%%%%\'',
+ 'ip_addr':'p.ip_addr LIKE \'%%%%%s%%%%\'',
+ 'name':'n.name LIKE \'%%%%%s%%%%\'',
+## 'hostname':'n.name LIKE \'%%%%%s%%%%\'',
+## 'cname':'n.name LIKE \'%%%%%s%%%%\'',
+ 'mx':'m.mx LIKE \'%%%%%s%%%%\'',
+ 'dns_view':'n.dns_view = \'%s\'',
+ 'hdwr_type':'i.hdwr_type = \'%s\'',
+ 'dhcp':'i.dhcp = \'%s\''}
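+
+# e.g. a search POST with mac_addr='aa:bb' contributes the clause
+# i.mac_addr LIKE '%%aa:bb%%' (a doubled-% wildcard pattern); filled-in
+# fields are ANDed together by search() below.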
+
+def search(request):
+ """Search for hosts in the database
+ If more than one field is entered, logical AND is used
+ """
+ if 'sub' in request.GET:
+ querystring = """SELECT DISTINCT h.hostname, h.id, h.status
+ FROM (((((hostbase_host h
+ INNER JOIN hostbase_interface i ON h.id = i.host_id)
+ INNER JOIN hostbase_ip p ON i.id = p.interface_id)
+ INNER JOIN hostbase_name n ON p.id = n.ip_id)
+ INNER JOIN hostbase_name_mxs x ON n.id = x.name_id)
+ INNER JOIN hostbase_mx m ON m.id = x.mx_id)
+ LEFT JOIN hostbase_cname c ON n.id = c.name_id
+ WHERE """
+
+ _and = False
+ for field in request.POST:
+ if request.POST[field] and field == 'hostname':
+ if _and:
+ querystring += ' AND '
+ querystring += 'n.name LIKE \'%%%%%s%%%%\' or c.cname LIKE \'%%%%%s%%%%\'' % (request.POST[field], request.POST[field])
+ _and = True
+ elif request.POST[field] and field in dispatch:
+ if _and:
+ querystring += ' AND '
+ querystring += dispatch[field] % request.POST[field]
+ _and = True
+ elif request.POST[field]:
+ if _and:
+ querystring += ' AND '
+ querystring += "h.%s LIKE \'%%%%%s%%%%\'" % (field, request.POST[field])
+ _and = True
+
+ if not _and:
+ cursor = connection.cursor()
+ cursor.execute("""SELECT hostname, id, status
+ FROM hostbase_host ORDER BY hostname""")
+ results = cursor.fetchall()
+ else:
+ querystring += " ORDER BY h.hostname"
+ cursor = connection.cursor()
+ cursor.execute(querystring)
+ results = cursor.fetchall()
+
+ return render_to_response('results.html',
+ {'hosts': results,
+ 'logged_in': request.session.get('_auth_user_id', False)},
+ context_instance = RequestContext(request))
+ else:
+ return render_to_response('search.html',
+ {'TYPE_CHOICES': Interface.TYPE_CHOICES,
+ 'DNS_CHOICES': Name.DNS_CHOICES,
+ 'yesno': [(1, 'yes'), (0, 'no')],
+ 'logged_in': request.session.get('_auth_user_id', False)},
+ context_instance = RequestContext(request))
+
+
+def gethostdata(host_id, dnsdata=False):
+ """Grabs the necessary data about a host
+ Replaces a lot of repeated code"""
+ hostdata = {}
+ hostdata['ips'] = {}
+ hostdata['names'] = {}
+ hostdata['cnames'] = {}
+ hostdata['mxs'] = {}
+ hostdata['host'] = Host.objects.get(id=host_id)
+ hostdata['interfaces'] = hostdata['host'].interface_set.all()
+ for interface in hostdata['interfaces']:
+ hostdata['ips'][interface.id] = interface.ip_set.all()
+ if dnsdata:
+ for ip in hostdata['ips'][interface.id]:
+ hostdata['names'][ip.id] = ip.name_set.all()
+ for name in hostdata['names'][ip.id]:
+ hostdata['cnames'][name.id] = name.cname_set.all()
+ hostdata['mxs'][name.id] = name.mxs.all()
+ return hostdata
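+
+# The returned dict is keyed by 'host', 'interfaces' and 'ips' (per interface
+# id); 'names', 'cnames' and 'mxs' are only populated when dnsdata is True.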
+
+def fill(template, hostdata, dnsdata=False):
+ """Fills a generic template
+ Replaces a lot of repeated code"""
+ if dnsdata:
+ template.names = hostdata['names']
+ template.cnames = hostdata['cnames']
+ template.mxs = hostdata['mxs']
+ template.host = hostdata['host']
+ template.interfaces = hostdata['interfaces']
+ template.ips = hostdata['ips']
+ return template
+
+def edit(request, host_id):
+ """edit general host information"""
+ manipulator = Host.ChangeManipulator(host_id)
+ changename = False
+ if request.method == 'POST':
+ host = Host.objects.get(id=host_id)
+ before = host.__dict__.copy()
+ if request.POST['hostname'] != host.hostname:
+ oldhostname = host.hostname.split(".")[0]
+ changename = True
+ interfaces = host.interface_set.all()
+ old_interfaces = [interface.__dict__.copy() for interface in interfaces]
+
+ new_data = request.POST.copy()
+
+ errors = manipulator.get_validation_errors(new_data)
+ if not errors:
+
+ # somehow keep track of multiple interface change manipulators
+            # as well as multiple ip change manipulators??? (add manipulators???)
+ # change to many-to-many??????
+
+ # dynamically look up mx records?
+ text = ''
+
+ for attrib in attribs:
+ if host.__dict__[attrib] != request.POST[attrib]:
+ text = do_log(text, attrib, host.__dict__[attrib], request.POST[attrib])
+ host.__dict__[attrib] = request.POST[attrib]
+
+ if 'expiration_date' in request.POST:
+ ymd = request.POST['expiration_date'].split("-")
+ if date(int(ymd[0]), int(ymd[1]), int(ymd[2])) != host.__dict__['expiration_date']:
+ text = do_log(text, 'expiration_date', host.__dict__['expiration_date'],
+ request.POST['expiration_date'])
+ host.__dict__['expiration_date'] = date(int(ymd[0]), int(ymd[1]), int(ymd[2]))
+
+ for inter in interfaces:
+ changetype = False
+ ips = IP.objects.filter(interface=inter.id)
+ if inter.mac_addr != request.POST['mac_addr%d' % inter.id]:
+ text = do_log(text, 'mac_addr', inter.mac_addr, request.POST['mac_addr%d' % inter.id])
+ inter.mac_addr = request.POST['mac_addr%d' % inter.id].lower().replace('-',':')
+ if inter.hdwr_type != request.POST['hdwr_type%d' % inter.id]:
+ oldtype = inter.hdwr_type
+ text = do_log(text, 'hdwr_type', oldtype, request.POST['hdwr_type%d' % inter.id])
+ inter.hdwr_type = request.POST['hdwr_type%d' % inter.id]
+ changetype = True
+ if (('dhcp%d' % inter.id) in request.POST and not inter.dhcp or
+ not ('dhcp%d' % inter.id) in request.POST and inter.dhcp):
+ text = do_log(text, 'dhcp', inter.dhcp, int(not inter.dhcp))
+ inter.dhcp = not inter.dhcp
+ for ip in ips:
+ names = ip.name_set.all()
+ if not ip.ip_addr == request.POST['ip_addr%d' % ip.id]:
+ oldip = ip.ip_addr
+ oldsubnet = oldip.split(".")[2]
+ ip.ip_addr = request.POST['ip_addr%d' % ip.id]
+ ip.save()
+ text = do_log(text, 'ip_addr', oldip, ip.ip_addr)
+ for name in names:
+ if name.name.split(".")[0].endswith('-%s' % oldsubnet):
+ name.name = name.name.replace('-%s' % oldsubnet, '-%s' % ip.ip_addr.split(".")[2])
+ name.save()
+ if changetype:
+ for name in names:
+ if name.name.split(".")[0].endswith('-%s' % oldtype):
+ name.name = name.name.replace('-%s' % oldtype, '-%s' % inter.hdwr_type)
+ name.save()
+ if changename:
+ for name in names:
+ if name.name.startswith(oldhostname):
+ name.name = name.name.replace(oldhostname, host.hostname.split(".")[0])
+ name.save()
+ if request.POST['%dip_addr' % inter.id]:
+ mx, created = MX.objects.get_or_create(priority=settings.PRIORITY, mx=settings.DEFAULT_MX)
+ if created:
+ mx.save()
+ new_ip = IP(interface=inter, ip_addr=request.POST['%dip_addr' % inter.id])
+ new_ip.save()
+ text = do_log(text, '*new*', 'ip_addr', new_ip.ip_addr)
+ new_name = "-".join([host.hostname.split(".")[0],
+ new_ip.ip_addr.split(".")[2]])
+ new_name += "." + host.hostname.split(".", 1)[1]
+ name = Name(ip=new_ip, name=new_name,
+ dns_view='global', only=False)
+ name.save()
+ name.mxs.add(mx)
+ new_name = "-".join([host.hostname.split(".")[0],
+ inter.hdwr_type])
+ new_name += "." + host.hostname.split(".", 1)[1]
+ name = Name(ip=new_ip, name=new_name,
+ dns_view='global', only=False)
+ name.save()
+ name.mxs.add(mx)
+ name = Name(ip=new_ip, name=host.hostname,
+ dns_view='global', only=False)
+ name.save()
+ name.mxs.add(mx)
+ inter.save()
+ if request.POST['mac_addr_new']:
+ new_inter = Interface(host=host,
+ mac_addr=request.POST['mac_addr_new'].lower().replace('-',':'),
+ hdwr_type=request.POST['hdwr_type_new'],
+ dhcp=request.POST['dhcp_new'])
+ text = do_log(text, '*new*', 'mac_addr', new_inter.mac_addr)
+ new_inter.save()
+ if request.POST['mac_addr_new'] and request.POST['ip_addr_new']:
+ mx, created = MX.objects.get_or_create(priority=settings.PRIORITY, mx=settings.DEFAULT_MX)
+ if created:
+ mx.save()
+ new_ip = IP(interface=new_inter, ip_addr=request.POST['ip_addr_new'])
+ new_ip.save()
+ text = do_log(text, '*new*', 'ip_addr', new_ip.ip_addr)
+ new_name = "-".join([host.hostname.split(".")[0],
+ new_ip.ip_addr.split(".")[2]])
+ new_name += "." + host.hostname.split(".", 1)[1]
+ name = Name(ip=new_ip, name=new_name,
+ dns_view='global', only=False)
+ name.save()
+ name.mxs.add(mx)
+ new_name = "-".join([host.hostname.split(".")[0],
+ new_inter.hdwr_type])
+ new_name += "." + host.hostname.split(".", 1)[1]
+ name = Name(ip=new_ip, name=new_name,
+ dns_view='global', only=False)
+ name.save()
+ name.mxs.add(mx)
+ name = Name(ip=new_ip, name=host.hostname,
+ dns_view='global', only=False)
+ name.save()
+ name.mxs.add(mx)
+ if request.POST['ip_addr_new'] and not request.POST['mac_addr_new']:
+ mx, created = MX.objects.get_or_create(priority=settings.PRIORITY, mx=settings.DEFAULT_MX)
+ if created:
+ mx.save()
+ new_inter = Interface(host=host, mac_addr="",
+ hdwr_type=request.POST['hdwr_type_new'],
+ dhcp=False)
+ new_inter.save()
+ new_ip = IP(interface=new_inter, ip_addr=request.POST['ip_addr_new'])
+ new_ip.save()
+ text = do_log(text, '*new*', 'ip_addr', new_ip.ip_addr)
+ new_name = "-".join([host.hostname.split(".")[0],
+ new_ip.ip_addr.split(".")[2]])
+ new_name += "." + host.hostname.split(".", 1)[1]
+ name = Name(ip=new_ip, name=new_name,
+ dns_view='global', only=False)
+ name.save()
+ name.mxs.add(mx)
+ new_name = "-".join([host.hostname.split(".")[0],
+ new_inter.hdwr_type])
+ new_name += "." + host.hostname.split(".", 1)[1]
+ name = Name(ip=new_ip, name=new_name,
+ dns_view='global', only=False)
+ name.save()
+ name.mxs.add(mx)
+ name = Name(ip=new_ip, name=host.hostname,
+ dns_view='global', only=False)
+ name.save()
+ name.mxs.add(mx)
+ if text:
+ log = Log(hostname=host.hostname, log=text)
+ log.save()
+ host.save()
+ return HttpResponseRedirect('/hostbase/%s/' % host.id)
+ else:
+ return render_to_response('errors.html',
+ {'failures': errors,
+ 'logged_in': request.session.get('_auth_user_id', False)},
+ context_instance = RequestContext(request))
+ else:
+ host = Host.objects.get(id=host_id)
+ interfaces = []
+ for interface in host.interface_set.all():
+ interfaces.append([interface, interface.ip_set.all()])
+ return render_to_response('edit.html',
+ {'host': host,
+ 'interfaces': interfaces,
+ 'TYPE_CHOICES': Interface.TYPE_CHOICES,
+ 'logged_in': request.session.get('_auth_user_id', False)},
+ context_instance = RequestContext(request))
+
+def confirm(request, item, item_id, host_id=None, name_id=None, zone_id=None):
+ """Asks if the user is sure he/she wants to remove an item"""
+ if 'sub' in request.GET:
+ if item == 'interface':
+ for ip in Interface.objects.get(id=item_id).ip_set.all():
+ for name in ip.name_set.all():
+ name.cname_set.all().delete()
+ ip.name_set.all().delete()
+ Interface.objects.get(id=item_id).ip_set.all().delete()
+ Interface.objects.get(id=item_id).delete()
+ elif item=='ip':
+ for name in IP.objects.get(id=item_id).name_set.all():
+ name.cname_set.all().delete()
+ IP.objects.get(id=item_id).name_set.all().delete()
+ IP.objects.get(id=item_id).delete()
+ elif item=='cname':
+ CName.objects.get(id=item_id).delete()
+ elif item=='mx':
+ mx = MX.objects.get(id=item_id)
+ Name.objects.get(id=name_id).mxs.remove(mx)
+ elif item=='name':
+ Name.objects.get(id=item_id).cname_set.all().delete()
+ Name.objects.get(id=item_id).delete()
+ elif item=='nameserver':
+ nameserver = Nameserver.objects.get(id=item_id)
+ Zone.objects.get(id=zone_id).nameservers.remove(nameserver)
+ elif item=='zonemx':
+ mx = MX.objects.get(id=item_id)
+ Zone.objects.get(id=zone_id).mxs.remove(mx)
+ elif item=='address':
+ address = ZoneAddress.objects.get(id=item_id)
+ Zone.objects.get(id=zone_id).addresses.remove(address)
+ if item == 'cname' or item == 'mx' or item == 'name':
+ return HttpResponseRedirect('/hostbase/%s/dns/edit' % host_id)
+ elif item == 'nameserver' or item == 'zonemx' or item == 'address':
+ return HttpResponseRedirect('/hostbase/zones/%s/edit' % zone_id)
+ else:
+ return HttpResponseRedirect('/hostbase/%s/edit' % host_id)
+ else:
+ interface = None
+ ips = []
+ names = []
+ cnames = []
+ mxs = []
+ zonemx = None
+ nameserver = None
+ address = None
+ if item == 'interface':
+ interface = Interface.objects.get(id=item_id)
+ ips = interface.ip_set.all()
+ for ip in ips:
+ for name in ip.name_set.all():
+ names.append((ip.id, name))
+ for cname in name.cname_set.all():
+ cnames.append((name.id, cname))
+ for mx in name.mxs.all():
+ mxs.append((name.id, mx))
+ elif item=='ip':
+ ips = [IP.objects.get(id=item_id)]
+ for name in ips[0].name_set.all():
+ names.append((ips[0].id, name))
+ for cname in name.cname_set.all():
+ cnames.append((name.id, cname))
+ for mx in name.mxs.all():
+ mxs.append((name.id, mx))
+ elif item=='name':
+ names = [Name.objects.get(id=item_id)]
+ for cname in names[0].cname_set.all():
+ cnames.append((names[0].id, cname))
+ for mx in names[0].mxs.all():
+ mxs.append((names[0].id, mx))
+ elif item=='cname':
+ cnames = [CName.objects.get(id=item_id)]
+ elif item=='mx':
+ mxs = [MX.objects.get(id=item_id)]
+ elif item=='zonemx':
+ zonemx = MX.objects.get(id=item_id)
+ elif item=='nameserver':
+ nameserver = Nameserver.objects.get(id=item_id)
+ elif item=='address':
+ address = ZoneAddress.objects.get(id=item_id)
+ return render_to_response('confirm.html',
+ {'interface': interface,
+ 'ips': ips,
+ 'names': names,
+ 'cnames': cnames,
+ 'id': item_id,
+ 'type': item,
+ 'host_id': host_id,
+ 'mxs': mxs,
+ 'zonemx': zonemx,
+ 'nameserver': nameserver,
+ 'address': address,
+ 'zone_id': zone_id,
+ 'logged_in': request.session.get('_auth_user_id', False)},
+ context_instance = RequestContext(request))
+
+def dnsedit(request, host_id):
+ """Edits specific DNS information
+ Data is validated before committed to the database"""
+ text = ''
+ if 'sub' in request.GET:
+ hostdata = gethostdata(host_id, True)
+ for ip in hostdata['names']:
+ ipaddr = IP.objects.get(id=ip)
+            ipaddrstr = str(ipaddr)
+ for name in hostdata['cnames']:
+ for cname in hostdata['cnames'][name]:
+ if regex.host.match(request.POST['cname%d' % cname.id]):
+ text = do_log(text, 'cname', cname.cname, request.POST['cname%d' % cname.id])
+ cname.cname = request.POST['cname%d' % cname.id]
+ cname.save()
+ for name in hostdata['mxs']:
+ for mx in hostdata['mxs'][name]:
+ if (mx.priority != request.POST['priority%d' % mx.id] and mx.mx != request.POST['mx%d' % mx.id]):
+ text = do_log(text, 'mx', ' '.join([str(mx.priority), str(mx.mx)]),
+ ' '.join([request.POST['priority%d' % mx.id], request.POST['mx%d' % mx.id]]))
+ nameobject = Name.objects.get(id=name)
+ nameobject.mxs.remove(mx)
+ newmx, created = MX.objects.get_or_create(priority=request.POST['priority%d' % mx.id], mx=request.POST['mx%d' % mx.id])
+ if created:
+ newmx.save()
+ nameobject.mxs.add(newmx)
+ nameobject.save()
+ for name in hostdata['names'][ip]:
+ name.name = request.POST['name%d' % name.id]
+ name.dns_view = request.POST['dns_view%d' % name.id]
+ if (request.POST['%dcname' % name.id] and
+ regex.host.match(request.POST['%dcname' % name.id])):
+ cname = CName(name=name,
+ cname=request.POST['%dcname' % name.id])
+ text = do_log(text, '*new*', 'cname', cname.cname)
+ cname.save()
+ if (request.POST['%dpriority' % name.id] and
+ request.POST['%dmx' % name.id]):
+ mx, created = MX.objects.get_or_create(priority=request.POST['%dpriority' % name.id],
+ mx=request.POST['%dmx' % name.id])
+ if created:
+ mx.save()
+ text = do_log(text, '*new*', 'mx',
+ ' '.join([request.POST['%dpriority' % name.id],
+ request.POST['%dmx' % name.id]]))
+ name.mxs.add(mx)
+ name.save()
+ if request.POST['%sname' % ipaddrstr]:
+ name = Name(ip=ipaddr,
+ dns_view=request.POST['%sdns_view' % ipaddrstr],
+ name=request.POST['%sname' % ipaddrstr], only=False)
+ text = do_log(text, '*new*', 'name', name.name)
+ name.save()
+ if (request.POST['%scname' % ipaddrstr] and
+ regex.host.match(request.POST['%scname' % ipaddrstr])):
+ cname = CName(name=name,
+ cname=request.POST['%scname' % ipaddrstr])
+ text = do_log(text, '*new*', 'cname', cname.cname)
+ cname.save()
+ if (request.POST['%smx' % ipaddrstr] and
+ request.POST['%spriority' % ipaddrstr]):
+ mx, created = MX.objects.get_or_create(priority=request.POST['%spriority' % ipaddrstr],
+ mx=request.POST['%smx' % ipaddrstr])
+ if created:
+ mx.save()
+ text = do_log(text, '*new*', 'mx',
+ ' '.join([request.POST['%spriority' % ipaddrstr], request.POST['%smx' % ipaddrstr]]))
+ name.mxs.add(mx)
+ if text:
+ log = Log(hostname=hostdata['host'].hostname, log=text)
+ log.save()
+ return HttpResponseRedirect('/hostbase/%s/dns' % host_id)
+ else:
+ host = Host.objects.get(id=host_id)
+ ips = []
+ info = []
+ cnames = []
+ mxs = []
+ interfaces = host.interface_set.all()
+ for interface in host.interface_set.all():
+ ips.extend(interface.ip_set.all())
+ for ip in ips:
+ info.append([ip, ip.name_set.all()])
+ for name in ip.name_set.all():
+ cnames.extend(name.cname_set.all())
+ mxs.append((name.id, name.mxs.all()))
+ return render_to_response('dnsedit.html',
+ {'host': host,
+ 'info': info,
+ 'cnames': cnames,
+ 'mxs': mxs,
+ 'request': request,
+ 'interfaces': interfaces,
+ 'DNS_CHOICES': Name.DNS_CHOICES,
+ 'logged_in': request.session.get('_auth_user_id', False)},
+ context_instance = RequestContext(request))
+
+def new(request):
+ """Function for creating a new host in hostbase
+ Data is validated before committed to the database"""
+ if 'sub' in request.GET:
+ try:
+ Host.objects.get(hostname=request.POST['hostname'].lower())
+ return render_to_response('errors.html',
+ {'failures': ['%s already exists in hostbase' % request.POST['hostname']],
+ 'logged_in': request.session.get('_auth_user_id', False)},
+ context_instance = RequestContext(request))
+        except Host.DoesNotExist:
+            pass
+ if not validate(request, True):
+ if not request.POST['ip_addr_new'] and not request.POST['ip_addr_new2']:
+ return render_to_response('errors.html',
+ {'failures': ['ip_addr: You must enter an ip address'],
+ 'logged_in': request.session.get('_auth_user_id', False)},
+ context_instance = RequestContext(request))
+ host = Host()
+ # this is the stuff that validate() should take care of
+ # examine the check boxes for any changes
+ host.outbound_smtp = 'outbound_smtp' in request.POST
+ for attrib in attribs:
+ if attrib in request.POST:
+ host.__dict__[attrib] = request.POST[attrib].lower()
+ if 'comments' in request.POST:
+ host.comments = request.POST['comments']
+ if 'expiration_date' in request.POST:
+# ymd = request.POST['expiration_date'].split("-")
+# host.__dict__['expiration_date'] = date(int(ymd[0]), int(ymd[1]), int(ymd[2]))
+ host.__dict__['expiration_date'] = date(2000, 1, 1)
+ host.status = 'active'
+ host.save()
+ else:
+ return render_to_response('errors.html',
+ {'failures': validate(request, True),
+ 'logged_in': request.session.get('_auth_user_id', False)},
+ context_instance = RequestContext(request))
+
+ if request.POST['mac_addr_new']:
+ new_inter = Interface(host=host,
+ mac_addr = request.POST['mac_addr_new'].lower().replace('-',':'),
+ hdwr_type = request.POST['hdwr_type_new'],
+ dhcp = 'dhcp_new' in request.POST)
+ new_inter.save()
+ if request.POST['mac_addr_new'] and request.POST['ip_addr_new']:
+ new_ip = IP(interface=new_inter, ip_addr=request.POST['ip_addr_new'])
+# FIXME: change all these things; use a "post_save" signal handler for model
+# Host to create all associated models, and use a generic view.
+ new_ip.save()
+ mx, created = MX.objects.get_or_create(priority=settings.PRIORITY, mx=settings.DEFAULT_MX)
+ if created:
+ mx.save()
+ new_name = "-".join([host.hostname.split(".")[0],
+ new_ip.ip_addr.split(".")[2]])
+ new_name += "." + host.hostname.split(".", 1)[1]
+ name = Name(ip=new_ip, name=new_name, dns_view='global', only=False)
+ name.save()
+ name.mxs.add(mx)
+ new_name = "-".join([host.hostname.split(".")[0],
+ new_inter.hdwr_type])
+ new_name += "." + host.hostname.split(".", 1)[1]
+ name = Name(ip=new_ip, name=new_name,
+ dns_view='global', only=False)
+ name.save()
+ name.mxs.add(mx)
+ name = Name(ip=new_ip, name=host.hostname,
+ dns_view='global', only=False)
+ name.save()
+ name.mxs.add(mx)
+ if request.POST['ip_addr_new'] and not request.POST['mac_addr_new']:
+ new_inter = Interface(host=host,
+ mac_addr="",
+ hdwr_type=request.POST['hdwr_type_new'],
+ dhcp=False)
+ new_inter.save()
+ new_ip = IP(interface=new_inter, ip_addr=request.POST['ip_addr_new'])
+ new_ip.save()
+ mx, created = MX.objects.get_or_create(priority=settings.PRIORITY, mx=settings.DEFAULT_MX)
+ if created:
+ mx.save()
+ new_name = "-".join([host.hostname.split(".")[0],
+ new_ip.ip_addr.split(".")[2]])
+ new_name += "." + host.hostname.split(".", 1)[1]
+ name = Name(ip=new_ip, name=new_name,
+ dns_view='global', only=False)
+ name.save()
+ name.mxs.add(mx)
+ new_name = "-".join([host.hostname.split(".")[0],
+ new_inter.hdwr_type])
+ new_name += "." + host.hostname.split(".", 1)[1]
+ name = Name(ip=new_ip, name=new_name,
+ dns_view='global', only=False)
+ name.save()
+ name.mxs.add(mx)
+ name = Name(ip=new_ip, name=host.hostname,
+ dns_view='global', only=False)
+ name.save()
+ name.mxs.add(mx)
+ if request.POST['mac_addr_new2']:
+ new_inter = Interface(host=host,
+ mac_addr = request.POST['mac_addr_new2'].lower().replace('-',':'),
+ hdwr_type = request.POST['hdwr_type_new2'],
+ dhcp = 'dhcp_new2' in request.POST)
+ new_inter.save()
+ if request.POST['mac_addr_new2'] and request.POST['ip_addr_new2']:
+ new_ip = IP(interface=new_inter, ip_addr=request.POST['ip_addr_new2'])
+ new_ip.save()
+ mx, created = MX.objects.get_or_create(priority=settings.PRIORITY, mx=settings.DEFAULT_MX)
+ if created:
+ mx.save()
+ new_name = "-".join([host.hostname.split(".")[0],
+ new_ip.ip_addr.split(".")[2]])
+ new_name += "." + host.hostname.split(".", 1)[1]
+ name = Name(ip=new_ip, name=new_name,
+ dns_view='global', only=False)
+ name.save()
+ name.mxs.add(mx)
+ new_name = "-".join([host.hostname.split(".")[0],
+ new_inter.hdwr_type])
+ new_name += "." + host.hostname.split(".", 1)[1]
+ name = Name(ip=new_ip, name=new_name,
+ dns_view='global', only=False)
+ name.save()
+ name.mxs.add(mx)
+ name = Name(ip=new_ip, name=host.hostname,
+ dns_view='global', only=False)
+ name.save()
+ name.mxs.add(mx)
+ if request.POST['ip_addr_new2'] and not request.POST['mac_addr_new2']:
+ new_inter = Interface(host=host,
+ mac_addr="",
+ hdwr_type=request.POST['hdwr_type_new2'],
+ dhcp=False)
+ new_inter.save()
+ new_ip = IP(interface=new_inter, ip_addr=request.POST['ip_addr_new2'])
+ new_ip.save()
+ mx, created = MX.objects.get_or_create(priority=settings.PRIORITY, mx=settings.DEFAULT_MX)
+ if created:
+ mx.save()
+ new_name = "-".join([host.hostname.split(".")[0],
+ new_ip.ip_addr.split(".")[2]])
+ new_name += "." + host.hostname.split(".", 1)[1]
+ name = Name(ip=new_ip, name=new_name,
+ dns_view='global', only=False)
+ name.save()
+ name.mxs.add(mx)
+ new_name = "-".join([host.hostname.split(".")[0],
+ new_inter.hdwr_type])
+ new_name += "." + host.hostname.split(".", 1)[1]
+ name = Name(ip=new_ip, name=new_name,
+ dns_view='global', only=False)
+ name.save()
+ name.mxs.add(mx)
+ name = Name(ip=new_ip, name=host.hostname,
+ dns_view='global', only=False)
+ name.save()
+ name.mxs.add(mx)
+ host.save()
+ return HttpResponseRedirect('/hostbase/%s/' % host.id)
+ else:
+ return render_to_response('new.html',
+ {'TYPE_CHOICES': Interface.TYPE_CHOICES,
+ 'NETGROUP_CHOICES': Host.NETGROUP_CHOICES,
+ 'CLASS_CHOICES': Host.CLASS_CHOICES,
+ 'SUPPORT_CHOICES': Host.SUPPORT_CHOICES,
+ 'WHATAMI_CHOICES': Host.WHATAMI_CHOICES,
+ 'logged_in': request.session.get('_auth_user_id', False)},
+ context_instance = RequestContext(request))
+
+def copy(request, host_id):
+ """Function for creating a new host in hostbase
+ Data is validated before committed to the database"""
+ if 'sub' in request.GET:
+ try:
+ Host.objects.get(hostname=request.POST['hostname'].lower())
+ return render_to_response('errors.html',
+ {'failures': ['%s already exists in hostbase' % request.POST['hostname']],
+ 'logged_in': request.session.get('_auth_user_id', False)},
+ context_instance = RequestContext(request))
+        except Host.DoesNotExist:
+            pass
+ if not validate(request, True):
+ if not request.POST['ip_addr_new'] and not request.POST['ip_addr_new2']:
+ return render_to_response('errors.html',
+ {'failures': ['ip_addr: You must enter an ip address'],
+ 'logged_in': request.session.get('_auth_user_id', False)},
+ context_instance = RequestContext(request))
+ host = Host()
+ # this is the stuff that validate() should take care of
+ # examine the check boxes for any changes
+ host.outbound_smtp = 'outbound_smtp' in request.POST
+ for attrib in attribs:
+ if attrib in request.POST:
+ host.__dict__[attrib] = request.POST[attrib].lower()
+ if 'comments' in request.POST:
+ host.comments = request.POST['comments']
+ if 'expiration_date' in request.POST:
+# ymd = request.POST['expiration_date'].split("-")
+# host.__dict__['expiration_date'] = date(int(ymd[0]), int(ymd[1]), int(ymd[2]))
+ host.__dict__['expiration_date'] = date(2000, 1, 1)
+ host.status = 'active'
+ host.save()
+ else:
+ return render_to_response('errors.html',
+ {'failures': validate(request, True),
+ 'logged_in': request.session.get('_auth_user_id', False)},
+ context_instance = RequestContext(request))
+
+ if request.POST['mac_addr_new']:
+ new_inter = Interface(host=host,
+ mac_addr = request.POST['mac_addr_new'].lower().replace('-',':'),
+ hdwr_type = request.POST['hdwr_type_new'],
+ dhcp = 'dhcp_new' in request.POST)
+ new_inter.save()
+ if request.POST['mac_addr_new'] and request.POST['ip_addr_new']:
+ new_ip = IP(interface=new_inter, ip_addr=request.POST['ip_addr_new'])
+ new_ip.save()
+ mx, created = MX.objects.get_or_create(priority=settings.PRIORITY, mx=settings.DEFAULT_MX)
+ if created:
+ mx.save()
+ new_name = "-".join([host.hostname.split(".")[0],
+ new_ip.ip_addr.split(".")[2]])
+ new_name += "." + host.hostname.split(".", 1)[1]
+ name = Name(ip=new_ip, name=new_name, dns_view='global', only=False)
+ name.save()
+ name.mxs.add(mx)
+ new_name = "-".join([host.hostname.split(".")[0],
+ new_inter.hdwr_type])
+ new_name += "." + host.hostname.split(".", 1)[1]
+ name = Name(ip=new_ip, name=new_name,
+ dns_view='global', only=False)
+ name.save()
+ name.mxs.add(mx)
+ name = Name(ip=new_ip, name=host.hostname,
+ dns_view='global', only=False)
+ name.save()
+ name.mxs.add(mx)
+ if request.POST['ip_addr_new'] and not request.POST['mac_addr_new']:
+ new_inter = Interface(host=host,
+ mac_addr="",
+ hdwr_type=request.POST['hdwr_type_new'],
+ dhcp=False)
+ new_inter.save()
+ new_ip = IP(interface=new_inter, ip_addr=request.POST['ip_addr_new'])
+ new_ip.save()
+ mx, created = MX.objects.get_or_create(priority=settings.PRIORITY, mx=settings.DEFAULT_MX)
+ if created:
+ mx.save()
+ new_name = "-".join([host.hostname.split(".")[0],
+ new_ip.ip_addr.split(".")[2]])
+ new_name += "." + host.hostname.split(".", 1)[1]
+ name = Name(ip=new_ip, name=new_name,
+ dns_view='global', only=False)
+ name.save()
+ name.mxs.add(mx)
+ new_name = "-".join([host.hostname.split(".")[0],
+ new_inter.hdwr_type])
+ new_name += "." + host.hostname.split(".", 1)[1]
+ name = Name(ip=new_ip, name=new_name,
+ dns_view='global', only=False)
+ name.save()
+ name.mxs.add(mx)
+ name = Name(ip=new_ip, name=host.hostname,
+ dns_view='global', only=False)
+ name.save()
+ name.mxs.add(mx)
+ if request.POST['mac_addr_new2']:
+ new_inter = Interface(host=host,
+ mac_addr = request.POST['mac_addr_new2'].lower().replace('-',':'),
+ hdwr_type = request.POST['hdwr_type_new2'],
+ dhcp = 'dhcp_new2' in request.POST)
+ new_inter.save()
+ if request.POST['mac_addr_new2'] and request.POST['ip_addr_new2']:
+ new_ip = IP(interface=new_inter, ip_addr=request.POST['ip_addr_new2'])
+ new_ip.save()
+ mx, created = MX.objects.get_or_create(priority=settings.PRIORITY, mx=settings.DEFAULT_MX)
+ if created:
+ mx.save()
+ new_name = "-".join([host.hostname.split(".")[0],
+ new_ip.ip_addr.split(".")[2]])
+ new_name += "." + host.hostname.split(".", 1)[1]
+ name = Name(ip=new_ip, name=new_name,
+ dns_view='global', only=False)
+ name.save()
+ name.mxs.add(mx)
+ new_name = "-".join([host.hostname.split(".")[0],
+ new_inter.hdwr_type])
+ new_name += "." + host.hostname.split(".", 1)[1]
+ name = Name(ip=new_ip, name=new_name,
+ dns_view='global', only=False)
+ name.save()
+ name.mxs.add(mx)
+ name = Name(ip=new_ip, name=host.hostname,
+ dns_view='global', only=False)
+ name.save()
+ name.mxs.add(mx)
+ if request.POST['ip_addr_new2'] and not request.POST['mac_addr_new2']:
+ new_inter = Interface(host=host,
+ mac_addr="",
+ hdwr_type=request.POST['hdwr_type_new2'],
+ dhcp=False)
+ new_inter.save()
+ new_ip = IP(interface=new_inter, ip_addr=request.POST['ip_addr_new2'])
+ new_ip.save()
+ mx, created = MX.objects.get_or_create(priority=settings.PRIORITY, mx=settings.DEFAULT_MX)
+ if created:
+ mx.save()
+ new_name = "-".join([host.hostname.split(".")[0],
+ new_ip.ip_addr.split(".")[2]])
+ new_name += "." + host.hostname.split(".", 1)[1]
+ name = Name(ip=new_ip, name=new_name,
+ dns_view='global', only=False)
+ name.save()
+ name.mxs.add(mx)
+ new_name = "-".join([host.hostname.split(".")[0],
+ new_inter.hdwr_type])
+ new_name += "." + host.hostname.split(".", 1)[1]
+ name = Name(ip=new_ip, name=new_name,
+ dns_view='global', only=False)
+ name.save()
+ name.mxs.add(mx)
+ name = Name(ip=new_ip, name=host.hostname,
+ dns_view='global', only=False)
+ name.save()
+ name.mxs.add(mx)
+ host.save()
+ return HttpResponseRedirect('/hostbase/%s/' % host.id)
+ else:
+ host = Host.objects.get(id=host_id)
+ return render_to_response('copy.html',
+ {'host': host,
+ 'TYPE_CHOICES': Interface.TYPE_CHOICES,
+ 'NETGROUP_CHOICES': Host.NETGROUP_CHOICES,
+ 'CLASS_CHOICES': Host.CLASS_CHOICES,
+ 'SUPPORT_CHOICES': Host.SUPPORT_CHOICES,
+ 'WHATAMI_CHOICES': Host.WHATAMI_CHOICES,
+ 'logged_in': request.session.get('_auth_user_id', False)},
+ context_instance = RequestContext(request))
+
+# FIXME: delete all these things in a "pre_delete" signal handler
+#def remove(request, host_id):
+# host = Host.objects.get(id=host_id)
+# if 'sub' in request:
+# for interface in host.interface_set.all():
+# for ip in interface.ip_set.all():
+# for name in ip.name_set.all():
+# name.cname_set.all().delete()
+# ip.name_set.all().delete()
+# interface.ip_set.all().delete()
+# interface.delete()
+# host.delete()
+
+def validate(request, new=False, host_id=None):
+ """Function for checking form data"""
+ failures = []
+ if (request.POST['expiration_date']
+ and regex.date.match(request.POST['expiration_date'])):
+ try:
+ (year, month, day) = request.POST['expiration_date'].split("-")
+ date(int(year), int(month), int(day))
+ except (ValueError):
+ failures.append('expiration_date')
+ elif request.POST['expiration_date']:
+ failures.append('expiration_date')
+
+ if not (request.POST['hostname']
+ and regex.host.match(request.POST['hostname'])):
+ failures.append('hostname')
+
+## if not regex.printq.match(request.POST['printq']) and request.POST['printq']:
+## failures.append('printq')
+
+## if not regex.user.match(request.POST['primary_user']):
+## failures.append('primary_user')
+
+## if (not regex.user.match(request.POST['administrator'])
+## and request.POST['administrator']):
+## failures.append('administrator')
+
+## if not (request.POST['location']
+## and regex.location.match(request.POST['location'])):
+## failures.append('location')
+
+ if new:
+ if (not regex.macaddr.match(request.POST['mac_addr_new'])
+ and request.POST['mac_addr_new']):
+ failures.append('mac_addr (#1)')
+ if ((request.POST['mac_addr_new'] or request.POST['ip_addr_new']) and
+ not 'hdwr_type_new' in request.REQUEST):
+ failures.append('hdwr_type (#1)')
+ if ((request.POST['mac_addr_new2'] or request.POST['ip_addr_new2']) and
+ not 'hdwr_type_new2' in request.REQUEST):
+ failures.append('hdwr_type (#2)')
+
+ if (not regex.macaddr.match(request.POST['mac_addr_new2'])
+ and request.POST['mac_addr_new2']):
+ failures.append('mac_addr (#2)')
+
+ if (not regex.ipaddr.match(request.POST['ip_addr_new'])
+ and request.POST['ip_addr_new']):
+ failures.append('ip_addr (#1)')
+        if (not regex.ipaddr.match(request.POST['ip_addr_new2'])
+ and request.POST['ip_addr_new2']):
+ failures.append('ip_addr (#2)')
+
+        for number in request.POST['ip_addr_new'].split("."):
+            if (number.isdigit() and int(number) > 255
+                    and 'ip_addr (#1)' not in failures):
+                failures.append('ip_addr (#1)')
+        for number in request.POST['ip_addr_new2'].split("."):
+            if (number.isdigit() and int(number) > 255
+                    and 'ip_addr (#2)' not in failures):
+                failures.append('ip_addr (#2)')
+
+ elif host_id:
+ interfaces = Interface.objects.filter(host=host_id)
+ for interface in interfaces:
+ if (not regex.macaddr.match(request.POST['mac_addr%d' % interface.id])
+ and request.POST['mac_addr%d' % interface.id]):
+ failures.append('mac_addr (%s)' % request.POST['mac_addr%d' % interface.id])
+ for ip in interface.ip_set.all():
+ if not regex.ipaddr.match(request.POST['ip_addr%d' % ip.id]):
+ failures.append('ip_addr (%s)' % request.POST['ip_addr%d' % ip.id])
+                for number in request.POST['ip_addr%d' % ip.id].split("."):
+                    if (number.isdigit() and int(number) > 255 and
+                            'ip_addr (%s)' % request.POST['ip_addr%d' % ip.id] not in failures):
+                        failures.append('ip_addr (%s)' % request.POST['ip_addr%d' % ip.id])
+ if (request.POST['%dip_addr' % interface.id]
+ and not regex.ipaddr.match(request.POST['%dip_addr' % interface.id])):
+ failures.append('ip_addr (%s)' % request.POST['%dip_addr' % interface.id])
+ if (request.POST['mac_addr_new']
+ and not regex.macaddr.match(request.POST['mac_addr_new'])):
+ failures.append('mac_addr (%s)' % request.POST['mac_addr_new'])
+ if (request.POST['ip_addr_new']
+ and not regex.ipaddr.match(request.POST['ip_addr_new'])):
+ failures.append('ip_addr (%s)' % request.POST['ip_addr_new'])
+
+ if not failures:
+ return 0
+ return failures
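+
+# validate() returns 0 when every check passes, otherwise a list of offending
+# field labels, e.g. ['hostname', 'ip_addr (#1)'] (values illustrative).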
+
+def do_log(text, attribute, previous, new):
+    """Append a single attribute-change record to the running log text."""
+    if previous != new:
+        text += "%-20s%-20s -> %s\n" % (attribute, previous, new)
+ return text
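+
+# e.g. do_log('', 'status', 'active', 'dormant') appends one line with the
+# attribute name and old value left-justified in 20-character columns,
+# followed by "-> dormant"; the edit views accumulate these lines into a
+# single Log entry.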
+
+## login required stuff
+## uncomment the views below that you would like to restrict access to
+
+## uncomment the lines below this point to restrict access to pages that modify the database
+## anonymous users can still view data in Hostbase
+
+edit = login_required(edit)
+confirm = login_required(confirm)
+dnsedit = login_required(dnsedit)
+new = login_required(new)
+copy = login_required(copy)
+#remove = login_required(remove)
+#zoneedit = login_required(zoneedit)
+#zonenew = login_required(zonenew)
+
+## uncomment the lines below this point to restrict access to all of hostbase
+
+## search = login_required(search)
+## look = login_required(look)
+## dns = login_required(dns)
+## zones = login_required(zones)
+## zoneview = login_required(zoneview)
+
diff --git a/build/lib/Bcfg2/Server/Hostbase/ldapauth.py b/build/lib/Bcfg2/Server/Hostbase/ldapauth.py
new file mode 100644
index 000000000..f2148181f
--- /dev/null
+++ b/build/lib/Bcfg2/Server/Hostbase/ldapauth.py
@@ -0,0 +1,172 @@
+"""Checks with LDAP (ActiveDirectory) to see if the current user is an LDAP(AD) user,
+and returns a subset of the user's profile that is needed by Argonne/CIS to
+to set user level privleges in Django"""
+
+__revision__ = '$Revision: 2456 $'
+
+import os
+import ldap
+
+class LDAPAUTHError(Exception):
+    """LDAPAUTHError is raised when something goes boom."""
+ pass
+
+class ldapauth(object):
+ group_test = False
+ check_member_of = os.environ['LDAP_CHECK_MBR_OF_GRP']
+ securitylevel = 0
+ distinguishedName = None
+ sAMAccountName = None
+ telephoneNumber = None
+ title = None
+ memberOf = None
+ department = None #this will be a list
+ mail = None
+ extensionAttribute1 = None #badgenumber
+ badge_no = None
+
+ def __init__(self,login,passwd):
+ """get username (if using ldap as auth the
+ apache env var REMOTE_USER should be used)
+ from username get user profile from AD/LDAP
+ """
+ #p = self.user_profile(login,passwd)
+ d = self.user_dn(login) #success, distname
+ print d[1]
+ if d[0] == 'success':
+ p = self.user_bind(d[1],passwd)
+ if p[0] == 'success':
+ #parse results
+ parsed = self.parse_results(p[2])
+ print self.department
+ self.group_test = self.member_of()
+                self.securitylevel = self.security_level()
+                print "ACCESS LEVEL: " + str(self.securitylevel)
+ else:
+ raise LDAPAUTHError(p[2])
+ else:
+            raise LDAPAUTHError(d[2])
+
+ def user_profile(self,login,passwd=None):
+ """NOT USED RIGHT NOW"""
+ ldap_login = "CN=%s" % login
+ svc_acct = os.environ['LDAP_SVC_ACCT_NAME']
+ svc_pass = os.environ['LDAP_SVC_ACCT_PASS']
+ #svc_acct = 'CN=%s,DC=anl,DC=gov' % login
+ #svc_pass = passwd
+
+ search_pth = os.environ['LDAP_SEARCH_PTH']
+
+ try:
+ conn = ldap.initialize(os.environ['LDAP_URI'])
+ conn.bind(svc_acct,svc_pass,ldap.AUTH_SIMPLE)
+ result_id = conn.search(search_pth,
+ ldap.SCOPE_SUBTREE,
+ ldap_login,None)
+ result_type,result_data = conn.result(result_id,0)
+ return ('success','User profile found',result_data,)
+ except ldap.LDAPError,e:
+ #connection failed
+ return ('error','LDAP connect failed',e,)
+
+ def user_bind(self,distinguishedName,passwd):
+ """Binds to LDAP Server"""
+ search_pth = os.environ['LDAP_SEARCH_PTH']
+ try:
+ conn = ldap.initialize(os.environ['LDAP_URI'])
+ conn.bind(distinguishedName,passwd,ldap.AUTH_SIMPLE)
+ cn = distinguishedName.split(",")
+ result_id = conn.search(search_pth,
+ ldap.SCOPE_SUBTREE,
+ cn[0],None)
+ result_type,result_data = conn.result(result_id,0)
+ return ('success','User profile found',result_data,)
+ except ldap.LDAPError,e:
+ #connection failed
+ return ('error','LDAP connect failed',e,)
+
+ def user_dn(self,cn):
+ """Uses Service Account to get distinguishedName"""
+ ldap_login = "CN=%s" % cn
+ svc_acct = os.environ['LDAP_SVC_ACCT_NAME']
+ svc_pass = os.environ['LDAP_SVC_ACCT_PASS']
+ search_pth = os.environ['LDAP_SEARCH_PTH']
+
+ try:
+ conn = ldap.initialize(os.environ['LDAP_URI'])
+ conn.bind(svc_acct,svc_pass,ldap.AUTH_SIMPLE)
+ result_id = conn.search(search_pth,
+ ldap.SCOPE_SUBTREE,
+ ldap_login,None)
+ result_type,result_data = conn.result(result_id,0)
+ raw_obj = result_data[0][1]
+ distinguishedName = raw_obj['distinguishedName']
+ return ('success',distinguishedName[0],)
+ except ldap.LDAPError,e:
+ #connection failed
+ return ('error','LDAP connect failed',e,)
+
+ def parse_results(self,user_obj):
+ """Clean up the huge ugly object handed to us in the LDAP query"""
+ #user_obj is a list formatted like this:
+ #[('LDAP_DN',{user_dict},),]
+ try:
+ raw_obj = user_obj[0][1]
+ self.memberOf = raw_obj['memberOf']
+ self.sAMAccountName = raw_obj['sAMAccountName'][0]
+ self.distinguishedName = raw_obj['distinguishedName'][0]
+ self.telephoneNumber = raw_obj['telephoneNumber'][0]
+ self.title = raw_obj['title'][0]
+ self.department = raw_obj['department'][0]
+ self.mail = raw_obj['mail'][0]
+ self.badge_no = raw_obj['extensionAttribute1'][0]
+ self.email = raw_obj['extensionAttribute2'][0]
+ display_name = raw_obj['displayName'][0].split(",")
+ self.name_f = raw_obj['givenName'][0]
+ self.name_l = display_name[0]
+ self.is_staff = False
+ self.is_superuser = False
+
+ return
+ except KeyError, e:
+ raise LDAPAUTHError("Portions of the LDAP User profile not present")
+
+ def member_of(self):
+ """See if this user is in our group that is allowed to login"""
+ m = [g for g in self.memberOf if g == self.check_member_of]
+ #print m
+ if len(m) == 1:
+ return True
+ else:
+ return False
+
+ def security_level(self):
+ level = self.securitylevel
+
+ user = os.environ['LDAP_GROUP_USER']
+ m = [g for g in self.memberOf if g == user]
+ if len(m) == 1:
+ if level < 1:
+ level = 1
+
+ cspr = os.environ['LDAP_GROUP_SECURITY_LOW']
+ m = [g for g in self.memberOf if g == cspr]
+ if len(m) == 1:
+ if level < 2:
+ level = 2
+
+ cspo = os.environ['LDAP_GROUP_SECURITY_HIGH']
+ m = [g for g in self.memberOf if g == cspo]
+ if len(m) == 1:
+ if level < 3:
+ level = 3
+
+ admin = os.environ['LDAP_GROUP_ADMIN']
+ m = [g for g in self.memberOf if g == admin]
+ if len(m) == 1:
+ if level < 4:
+ level = 4
+
+ return level
+
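+# A minimal usage sketch (illustrative only; assumes the LDAP_* environment
+# variables referenced above are set and the directory server is reachable;
+# the username and password here are hypothetical):
+#
+#     auth = ldapauth('jdoe', 'secret')
+#     if auth.group_test:
+#         print auth.sAMAccountName, auth.security_level()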
diff --git a/build/lib/Bcfg2/Server/Hostbase/manage.py b/build/lib/Bcfg2/Server/Hostbase/manage.py
new file mode 100644
index 000000000..5e78ea979
--- /dev/null
+++ b/build/lib/Bcfg2/Server/Hostbase/manage.py
@@ -0,0 +1,11 @@
+#!/usr/bin/env python
+from django.core.management import execute_manager
+try:
+ import settings # Assumed to be in the same directory.
+except ImportError:
+ import sys
+ sys.stderr.write("Error: Can't find the file 'settings.py' in the directory containing %r. It appears you've customized things.\nYou'll have to run django-admin.py, passing it your settings module.\n(If the file settings.py does indeed exist, it's causing an ImportError somehow.)\n" % __file__)
+ sys.exit(1)
+
+if __name__ == "__main__":
+ execute_manager(settings)
diff --git a/build/lib/Bcfg2/Server/Hostbase/nisauth.py b/build/lib/Bcfg2/Server/Hostbase/nisauth.py
new file mode 100644
index 000000000..9c7da8c0a
--- /dev/null
+++ b/build/lib/Bcfg2/Server/Hostbase/nisauth.py
@@ -0,0 +1,42 @@
+import os
+import crypt, nis
+from Bcfg2.Server.Hostbase.settings import AUTHORIZED_GROUP
+
+"""Checks with NIS to see if the current user is in the support group"""
+
+__revision__ = "$Revision: $"
+
+class NISAUTHError(Exception):
+ """NISAUTHError is raised when somehting goes boom."""
+ pass
+
+class nisauth(object):
+ group_test = False
+# check_member_of = os.environ['LDAP_CHECK_MBR_OF_GRP']
+ samAcctName = None
+ distinguishedName = None
+ sAMAccountName = None
+ telephoneNumber = None
+ title = None
+ memberOf = None
+ department = None #this will be a list
+ mail = None
+ extensionAttribute1 = None #badgenumber
+ badge_no = None
+ uid = None
+
+ def __init__(self,login,passwd=None):
+ """get user profile from NIS"""
+        try:
+            p = nis.match(login, 'passwd.byname').split(":")
+        except nis.error:
+            raise NISAUTHError('username')
+        # check user password using crypt and the 2-character salt
+        # taken from the passwd entry itself
+        if p[1] == crypt.crypt(passwd, p[1][:2]):
+            # check that the user is in the authorized support group,
+            # either as a listed member or via the primary gid
+            group = nis.match(AUTHORIZED_GROUP, 'group.byname').split(':')
+            if login not in group[-1].split(',') and p[3] != group[2]:
+                raise NISAUTHError('group')
+            self.uid = p[2]
+        else:
+            raise NISAUTHError('password')
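+
+# Illustration of the crypt(3) check above (hypothetical values): the salt is
+# the first two characters of the stored hash itself, so
+#     crypt.crypt('secret', entry[1][:2]) == entry[1]
+# holds exactly when 'secret' is the password that produced the stored hash.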
diff --git a/build/lib/Bcfg2/Server/Hostbase/regex.py b/build/lib/Bcfg2/Server/Hostbase/regex.py
new file mode 100644
index 000000000..41cc0f6f0
--- /dev/null
+++ b/build/lib/Bcfg2/Server/Hostbase/regex.py
@@ -0,0 +1,6 @@
+import re
+
+date = re.compile('^[0-9]{4}-[0-9]{2}-[0-9]{2}$')
+host = re.compile('^[a-z0-9-_]+(\.[a-z0-9-_]+)+$')
+macaddr = re.compile('^[0-9abcdefABCDEF]{2}(:[0-9abcdefABCDEF]{2}){5}$|virtual')
+ipaddr = re.compile('^[0-9]{1,3}(\.[0-9]{1,3}){3}$')
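+
+# Illustrative (hypothetical) values accepted by the patterns above:
+#     date:    '2010-12-08'
+#     host:    'node1.example.com'
+#     macaddr: '00:1a:2b:3c:4d:5e' (or the literal string 'virtual')
+#     ipaddr:  '192.168.0.1'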
diff --git a/build/lib/Bcfg2/Server/Hostbase/settings.py b/build/lib/Bcfg2/Server/Hostbase/settings.py
new file mode 100644
index 000000000..a42fd5b2e
--- /dev/null
+++ b/build/lib/Bcfg2/Server/Hostbase/settings.py
@@ -0,0 +1,142 @@
+from ConfigParser import ConfigParser, NoSectionError, NoOptionError
+import os.path
+
+PROJECT_ROOT = os.path.abspath(os.path.dirname(__file__))
+
+c = ConfigParser()
+#This needs to be configurable one day somehow
+c.read(['./bcfg2.conf'])
+
+defaults = {'database_engine':'sqlite3',
+ 'database_name':'./dev.db',
+ 'database_user':'',
+ 'database_password':'',
+ 'database_host':'',
+ 'database_port':3306,
+ 'default_mx':'localhost',
+ 'priority':10,
+ 'authorized_group':'admins',
+ }
+
+if c.has_section('hostbase'):
+ options = dict(c.items('hostbase'))
+else:
+ options = defaults
+
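+# For reference, a bcfg2.conf [hostbase] section overriding the defaults
+# above might look like this (values are examples only):
+#
+#     [hostbase]
+#     database_engine = mysql
+#     database_name = hostbase
+#     authorized_group = sys-admins
+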
+# Django settings for Hostbase project.
+DEBUG = True
+TEMPLATE_DEBUG = DEBUG
+ADMINS = (
+ # ('Your Name', 'your_email@domain.com'),
+)
+MANAGERS = ADMINS
+
+# 'postgresql', 'mysql', 'sqlite3' or 'ado_mssql'.
+DATABASE_ENGINE = options['database_engine']
+# Or path to database file if using sqlite3.
+DATABASE_NAME = options['database_name']
+# Not used with sqlite3.
+DATABASE_USER = options['database_user']
+# Not used with sqlite3.
+DATABASE_PASSWORD = options['database_password']
+# Set to empty string for localhost. Not used with sqlite3.
+DATABASE_HOST = options['database_host']
+# Set to empty string for default. Not used with sqlite3.
+DATABASE_PORT = int(options['database_port'])
+# Local time zone for this installation. All choices can be found here:
+# http://docs.djangoproject.com/en/dev/ref/settings/#time-zone
+try:
+    TIME_ZONE = c.get('statistics', 'time_zone')
+except (NoSectionError, NoOptionError):
+    TIME_ZONE = None
+
+# enter the default MX record machines will get in Hostbase
+# this setting may move elsewhere eventually
+DEFAULT_MX = options['default_mx']
+PRIORITY = int(options['priority'])
+
+SESSION_EXPIRE_AT_BROWSER_CLOSE = True
+
+# Uncomment a backend below if you would like to use it for authentication
+AUTHENTICATION_BACKENDS = ('django.contrib.auth.backends.ModelBackend',
+ 'Bcfg2.Server.Hostbase.backends.NISBackend',
+                           #'Bcfg2.Server.Hostbase.backends.LDAPBackend',
+ )
+# enter an NIS group name you'd like to give access to edit hostbase records
+AUTHORIZED_GROUP = options['authorized_group']
+
+#create login url area:
+import django.contrib.auth
+django.contrib.auth.LOGIN_URL = '/login'
+# Absolute path to the directory that holds media.
+# Example: "/home/media/media.lawrence.com/"
+MEDIA_ROOT = os.path.join(PROJECT_ROOT, 'media')
+# Just for development
+SERVE_MEDIA = DEBUG
+
+# Language code for this installation. All choices can be found here:
+# http://www.w3.org/TR/REC-html40/struct/dirlang.html#langcodes
+# http://blogs.law.harvard.edu/tech/stories/storyReader$15
+LANGUAGE_CODE = 'en-us'
+SITE_ID = 1
+# URL that handles the media served from MEDIA_ROOT.
+# Example: "http://media.lawrence.com"
+MEDIA_URL = '/site_media/'
+# URL prefix for admin media -- CSS, JavaScript and images. Make sure to use a
+# trailing slash.
+# Examples: "http://foo.com/media/", "/media/".
+ADMIN_MEDIA_PREFIX = '/media/'
+# Make this unique, and don't share it with anybody.
+SECRET_KEY = '*%=fv=yh9zur&gvt4&*d#84o(cy^-*$ox-v1e9%32pzf2*qu#s'
+# List of callables that know how to import templates from various sources.
+TEMPLATE_LOADERS = (
+ 'django.template.loaders.filesystem.load_template_source',
+ 'django.template.loaders.app_directories.load_template_source',
+# 'django.template.loaders.eggs.load_template_source',
+)
+
+TEMPLATE_CONTEXT_PROCESSORS = (
+ "django.core.context_processors.auth",
+ "django.core.context_processors.debug",
+ "django.core.context_processors.i18n",
+ "django.core.context_processors.request",
+ "django.core.context_processors.media",
+# Django development version.
+# "django.core.context_processors.csrf",
+)
+
+
+MIDDLEWARE_CLASSES = (
+ 'django.middleware.common.CommonMiddleware',
+ 'django.contrib.sessions.middleware.SessionMiddleware',
+ 'django.middleware.locale.LocaleMiddleware',
+ 'django.contrib.auth.middleware.AuthenticationMiddleware',
+ 'django.middleware.doc.XViewMiddleware',
+)
+
+ROOT_URLCONF = 'Bcfg2.Server.Hostbase.urls'
+
+TEMPLATE_DIRS = (
+ # Put strings here, like "/home/html/django_templates".
+ # Always use forward slashes, even on Windows.
+ '/usr/lib/python2.3/site-packages/Bcfg2/Server/Hostbase/hostbase/webtemplates',
+ '/usr/lib/python2.4/site-packages/Bcfg2/Server/Hostbase/hostbase/webtemplates',
+ '/usr/lib/python2.3/site-packages/Bcfg2/Server/Hostbase/templates',
+ '/usr/lib/python2.4/site-packages/Bcfg2/Server/Hostbase/templates',
+ '/usr/share/bcfg2/Hostbase/templates',
+ os.path.join(PROJECT_ROOT, 'templates'),
+ os.path.join(PROJECT_ROOT, 'hostbase/webtemplates'),
+)
+
+INSTALLED_APPS = (
+ 'django.contrib.admin',
+ 'django.contrib.admindocs',
+ 'django.contrib.auth',
+ 'django.contrib.contenttypes',
+ 'django.contrib.sessions',
+ 'django.contrib.sites',
+ 'django.contrib.humanize',
+ 'Bcfg2.Server.Hostbase.hostbase',
+)
+
+LOGIN_URL = '/login/'
diff --git a/build/lib/Bcfg2/Server/Hostbase/urls.py b/build/lib/Bcfg2/Server/Hostbase/urls.py
new file mode 100644
index 000000000..01fe97d4f
--- /dev/null
+++ b/build/lib/Bcfg2/Server/Hostbase/urls.py
@@ -0,0 +1,27 @@
+from django.conf.urls.defaults import *
+from django.conf import settings
+from django.views.generic.simple import direct_to_template
+from django.contrib import admin
+
+
+admin.autodiscover()
+
+
+urlpatterns = patterns('',
+    # admin documentation; requires 'django.contrib.admindocs'
+    # in INSTALLED_APPS (enabled above):
+    (r'^admin/doc/', include('django.contrib.admindocs.urls')),
+
+    # the admin site:
+    (r'^admin/', include(admin.site.urls)),
+
+ (r'^$',direct_to_template, {'template':'index.html'}, 'index'),
+ (r'^hostbase/', include('hostbase.urls')),
+ (r'^login/$', 'django.contrib.auth.views.login', {'template_name': 'login.html'}),
+ (r'^logout/$', 'django.contrib.auth.views.logout', {'template_name': 'logout.html'})
+)
+
+if settings.SERVE_MEDIA:
+ urlpatterns += patterns('',
+ (r'^site_media/(?P<path>.*)$', 'django.views.static.serve',
+ dict(document_root=settings.MEDIA_ROOT)),)
diff --git a/build/lib/Bcfg2/Server/Plugin.py b/build/lib/Bcfg2/Server/Plugin.py
new file mode 100644
index 000000000..95569e3ac
--- /dev/null
+++ b/build/lib/Bcfg2/Server/Plugin.py
@@ -0,0 +1,886 @@
+"""This module provides the baseclass for Bcfg2 Server Plugins."""
+__revision__ = '$Revision$'
+
+import copy
+import logging
+import lxml.etree
+import os
+import pickle
+import posixpath
+import re
+import Queue
+import threading
+
+from lxml.etree import XML, XMLSyntaxError
+
+import Bcfg2.Options
+
+# grab default metadata info from bcfg2.conf
+opts = {'owner': Bcfg2.Options.MDATA_OWNER,
+ 'group': Bcfg2.Options.MDATA_GROUP,
+ 'important': Bcfg2.Options.MDATA_IMPORTANT,
+ 'perms': Bcfg2.Options.MDATA_PERMS,
+ 'paranoid': Bcfg2.Options.MDATA_PARANOID}
+mdata_setup = Bcfg2.Options.OptionParser(opts)
+mdata_setup.parse([])
+del mdata_setup['args']
+
+logger = logging.getLogger('Bcfg2.Plugin')
+
+default_file_metadata = mdata_setup
+
+info_regex = re.compile( \
+    'encoding:(\s)*(?P<encoding>\w+)|' +
+    'group:(\s)*(?P<group>\S+)|' +
+    'important:(\s)*(?P<important>\S+)|' +
+    'mtime:(\s)*(?P<mtime>\w+)|' +
+    'owner:(\s)*(?P<owner>\S+)|' +
+    'paranoid:(\s)*(?P<paranoid>\S+)|' +
+    'perms:(\s)*(?P<perms>\w+)')
+
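+# Example lines from an ":info" file that the regex above parses
+# (illustrative values):
+#     owner: root
+#     group: root
+#     perms: 0644
+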
+class PluginInitError(Exception):
+ """Error raised in cases of Plugin initialization errors."""
+ pass
+
+class PluginExecutionError(Exception):
+ """Error raised in case of Plugin execution errors."""
+ pass
+
+class Plugin(object):
+ """This is the base class for all Bcfg2 Server plugins.
+ Several attributes must be defined in the subclass:
+ name : the name of the plugin
+ __version__ : a version string
+ __author__ : the author/contact for the plugin
+
+ Plugins can provide three basic types of functionality:
+ - Structure creation (overloading BuildStructures)
+ - Configuration entry binding (overloading HandlesEntry, or loads the Entries table)
+ - Data collection (overloading GetProbes/ReceiveData)
+ """
+ name = 'Plugin'
+ __version__ = '$Id$'
+ __author__ = 'bcfg-dev@mcs.anl.gov'
+ __rmi__ = ['toggle_debug']
+ experimental = False
+ deprecated = False
+ conflicts = []
+
+ def __init__(self, core, datastore):
+ object.__init__(self)
+ self.Entries = {}
+ self.core = core
+ self.data = "%s/%s" % (datastore, self.name)
+ self.logger = logging.getLogger('Bcfg2.Plugins.%s' % (self.name))
+ self.running = True
+ self.debug_flag = False
+
+ def toggle_debug(self):
+ self.debug_flag = not self.debug_flag
+
+ def debug_log(self, message, flag=None):
+        if (flag is None and self.debug_flag) or flag:
+ self.logger.error(message)
+
+ @classmethod
+ def init_repo(cls, repo):
+ path = "%s/%s" % (repo, cls.name)
+ os.makedirs(path)
+
+ def shutdown(self):
+ self.running = False
+
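+# A minimal (hypothetical) plugin subclass, for illustration only:
+#
+#     class Hello(Plugin, Generator):
+#         name = 'Hello'
+#         __version__ = '1'
+#         __author__ = 'admin@example.com'
+#         def HandlesEntry(self, entry, metadata):
+#             return entry.tag == 'Path' and entry.get('name') == '/etc/hello'
+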
+class Generator(object):
+ """Generator plugins contribute to literal client configurations."""
+ def HandlesEntry(self, entry, metadata):
+ """This is the slow path method for routing configuration binding requests."""
+ return False
+
+ def HandleEntry(self, entry, metadata):
+ """This is the slow-path handler for configuration entry binding."""
+ raise PluginExecutionError
+
+class Structure(object):
+ """Structure Plugins contribute to abstract client configurations."""
+ def BuildStructures(self, metadata):
+ """Return a list of abstract goal structures for client."""
+ raise PluginExecutionError
+
+class Metadata(object):
+ """Signal metadata capabilities for this plugin"""
+ def add_client(self, client_name, attribs):
+ """Add client."""
+ pass
+ def remove_client(self, client_name):
+ """Remove client."""
+ pass
+ def viz(self, hosts, bundles, key, colors):
+ """Create viz str for viz admin mode."""
+ pass
+
+ def get_initial_metadata(self, client_name):
+ raise PluginExecutionError
+
+ def merge_additional_data(self, imd, source, groups, data):
+ raise PluginExecutionError
+
+class Connector(object):
+ """Connector Plugins augment client metadata instances."""
+ def get_additional_groups(self, metadata):
+ """Determine additional groups for metadata."""
+ return list()
+
+ def get_additional_data(self, metadata):
+ """Determine additional data for metadata instances."""
+ return dict()
+
+class Probing(object):
+ """Signal probe capability for this plugin."""
+ def GetProbes(self, _):
+ """Return a set of probes for execution on client."""
+ return []
+
+ def ReceiveData(self, _, dummy):
+ """Receive probe results pertaining to client."""
+ pass
+
+class Statistics(object):
+ """Signal statistics handling capability."""
+ def process_statistics(self, client, xdata):
+ pass
+
+class ThreadedStatistics(Statistics,
+ threading.Thread):
+ """Threaded statistics handling capability."""
+ def __init__(self, core, datastore):
+ Statistics.__init__(self)
+ threading.Thread.__init__(self)
+ # Event from the core signaling an exit
+ self.terminate = core.terminate
+ self.work_queue = Queue.Queue(100000)
+ self.pending_file = "%s/etc/%s.pending" % (datastore, self.__class__.__name__)
+ self.daemon = True
+ self.start()
+
+ def save(self):
+ """Save any pending data to a file."""
+ pending_data = []
+ try:
+ while not self.work_queue.empty():
+ (metadata, data) = self.work_queue.get_nowait()
+ try:
+ pending_data.append( ( metadata.hostname, lxml.etree.tostring(data) ) )
+ except:
+ self.logger.warning("Dropping interaction for %s" % metadata.hostname)
+ except Queue.Empty:
+ pass
+
+ try:
+ savefile = open(self.pending_file, 'w')
+ pickle.dump(pending_data, savefile)
+ savefile.close()
+ self.logger.info("Saved pending %s data" % self.__class__.__name__)
+ except:
+ self.logger.warning("Failed to save pending data")
+
+ def load(self):
+ """Load any pending data to a file."""
+ if not os.path.exists(self.pending_file):
+ return True
+ pending_data = []
+ try:
+ savefile = open(self.pending_file, 'r')
+ pending_data = pickle.load(savefile)
+ savefile.close()
+ except Exception, e:
+ self.logger.warning("Failed to load pending data: %s" % e)
+ for (pmetadata, pdata) in pending_data:
+            # check that shutdown wasn't called early
+ if self.terminate.isSet():
+ return False
+
+ try:
+ while True:
+ try:
+ metadata = self.core.build_metadata(pmetadata)
+ break
+ except Bcfg2.Server.Plugins.Metadata.MetadataRuntimeError:
+ pass
+
+ self.terminate.wait(5)
+ if self.terminate.isSet():
+ return False
+
+ self.work_queue.put_nowait( (metadata, lxml.etree.fromstring(pdata)) )
+ except Queue.Full:
+ self.logger.warning("Queue.Full: Failed to load queue data")
+ break
+ except lxml.etree.LxmlError, lxml_error:
+ self.logger.error("Unable to load save interaction: %s" % lxml_error)
+ except Bcfg2.Server.Plugins.Metadata.MetadataConsistencyError:
+ self.logger.error("Unable to load metadata for save interaction: %s" % pmetadata)
+ try:
+ os.unlink(self.pending_file)
+ except:
+ self.logger.error("Failed to unlink save file: %s" % self.pending_file)
+ self.logger.info("Loaded pending %s data" % self.__class__.__name__)
+ return True
+
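+    # The pending file written by save() above is a pickled list of
+    # (hostname, xml-string) tuples, e.g. (hypothetical):
+    #     [('node1.example.com', '<Statistics state="clean"/>')]
+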
+    def run(self):
+        if not self.load():
+            return
+        while not self.terminate.isSet():
+            try:
+                (metadata, data) = self.work_queue.get(block=True, timeout=2)
+            except Queue.Empty:
+                continue
+            except Exception, e:
+                self.logger.error("ThreadedStatistics: %s" % e)
+                continue
+            self.handle_statistic(metadata, data)
+        if not self.work_queue.empty():
+            self.save()
+
+    def process_statistics(self, metadata, data):
+        try:
+            self.work_queue.put_nowait((metadata, copy.deepcopy(data)))
+        except Queue.Full:
+            self.logger.warning("%s: Queue is full. Dropping interactions." \
+                                % self.__class__.__name__)
+
+    def handle_statistic(self, metadata, data):
+        """Handle a single interaction; subclasses override this."""
+        pass
+
+class PullSource(object):
+ def GetExtra(self, client):
+ return []
+
+ def GetCurrentEntry(self, client, e_type, e_name):
+ raise PluginExecutionError
+
+class PullTarget(object):
+ def AcceptChoices(self, entry, metadata):
+ raise PluginExecutionError
+
+ def AcceptPullData(self, specific, new_entry, verbose):
+ """This is the null per-plugin implementation
+ of bcfg2-admin pull."""
+ raise PluginExecutionError
+
+class Decision(object):
+ """Signal decision handling capability."""
+ def GetDecisions(self, metadata, mode):
+ return []
+
+class ValidationError(Exception):
+ pass
+
+class StructureValidator(object):
+ """Validate/modify goal structures."""
+ def validate_structures(self, metadata, structures):
+ raise ValidationError, "not implemented"
+
+class GoalValidator(object):
+ """Validate/modify configuration goals."""
+ def validate_goals(self, metadata, goals):
+ raise ValidationError, "not implemented"
+
+class Version(object):
+ """Interact with various version control systems."""
+ def get_revision(self):
+ return []
+ def commit_data(self):
+ pass
+
+# the rest of the file contains classes for coherent file caching
+
+class FileBacked(object):
+ """This object caches file data in memory.
+ HandleEvent is called whenever fam registers an event.
+ Index can parse the data into member data as required.
+ This object is meant to be used as a part of DirectoryBacked.
+ """
+
+ def __init__(self, name):
+ object.__init__(self)
+ self.data = ''
+ self.name = name
+
+ def HandleEvent(self, event=None):
+ """Read file upon update."""
+ if event and event.code2str() not in ['exists', 'changed', 'created']:
+ return
+ try:
+ self.data = file(self.name).read()
+ self.Index()
+ except IOError:
+ logger.error("Failed to read file %s" % (self.name))
+
+ def Index(self):
+ """Update local data structures based on current file state"""
+ pass
+
+class DirectoryBacked(object):
+ """This object is a coherent cache for a filesystem hierarchy of files."""
+ __child__ = FileBacked
+ patterns = re.compile('.*')
+
+ def __init__(self, name, fam):
+ object.__init__(self)
+ self.name = name
+ self.fam = fam
+ self.entries = {}
+ self.inventory = False
+ fam.AddMonitor(name, self)
+
+ def __getitem__(self, key):
+ return self.entries[key]
+
+ def __iter__(self):
+ return self.entries.iteritems()
+
+ def AddEntry(self, name):
+ """Add new entry to data structures upon file creation."""
+ if name == '':
+ logger.info("got add for empty name")
+ elif name in self.entries:
+ self.entries[name].HandleEvent()
+ else:
+ if ((name[-1] == '~') or
+ (name[:2] == '.#') or
+ (name[-4:] == '.swp') or
+ (name in ['SCCS', '.svn'])):
+ return
+ if not self.patterns.match(name):
+ return
+ self.entries[name] = self.__child__('%s/%s' % (self.name, name))
+ self.entries[name].HandleEvent()
+
+ def HandleEvent(self, event):
+ """Propagate fam events to underlying objects."""
+ action = event.code2str()
+ if event.filename == '':
+ logger.info("Got event for blank filename")
+ return
+ if action == 'exists':
+ if event.filename != self.name:
+ self.AddEntry(event.filename)
+ elif action == 'created':
+ self.AddEntry(event.filename)
+ elif action == 'changed':
+ if event.filename in self.entries:
+ self.entries[event.filename].HandleEvent(event)
+ elif action == 'deleted':
+ if event.filename in self.entries:
+ del self.entries[event.filename]
+ elif action in ['endExist']:
+ pass
+ else:
+ print "Got unknown event %s %s %s" % (event.requestID,
+ event.code2str(),
+ event.filename)
+
+class XMLFileBacked(FileBacked):
+ """
+ This object is a coherent cache for an XML file to be used as a
+ part of DirectoryBacked.
+ """
+ __identifier__ = 'name'
+
+ def __init__(self, filename):
+ self.label = "dummy"
+ self.entries = []
+ FileBacked.__init__(self, filename)
+
+ def Index(self):
+ """Build local data structures."""
+ try:
+ xdata = XML(self.data)
+ except XMLSyntaxError:
+ logger.error("Failed to parse %s"%(self.name))
+ return
+ self.label = xdata.attrib[self.__identifier__]
+ self.entries = xdata.getchildren()
+
+ def __iter__(self):
+ return iter(self.entries)
+
+class SingleXMLFileBacked(XMLFileBacked):
+ """This object is a coherent cache for an independent XML file."""
+ def __init__(self, filename, fam):
+ XMLFileBacked.__init__(self, filename)
+ fam.AddMonitor(filename, self)
+
+class StructFile(XMLFileBacked):
+ """This file contains a set of structure file formatting logic."""
+ def __init__(self, name):
+ XMLFileBacked.__init__(self, name)
+ self.fragments = {}
+
+ def Index(self):
+ """Build internal data structures."""
+ try:
+ xdata = lxml.etree.XML(self.data)
+ except lxml.etree.XMLSyntaxError:
+ logger.error("Failed to parse file %s" % self.name)
+ return
+ self.fragments = {}
+ work = {lambda x:True: xdata.getchildren()}
+ while work:
+ (predicate, worklist) = work.popitem()
+ self.fragments[predicate] = [item for item in worklist if item.tag != 'Group'
+ and not isinstance(item, lxml.etree._Comment)]
+ for group in [item for item in worklist if item.tag == 'Group']:
+ # if only python had forceable early-binding
+ if group.get('negate', 'false') in ['true', 'True']:
+ cmd = "lambda x:'%s' not in x.groups and predicate(x)"
+ else:
+ cmd = "lambda x:'%s' in x.groups and predicate(x)"
+
+ newpred = eval(cmd % (group.get('name')), {'predicate':predicate})
+ work[newpred] = group.getchildren()
+
+ def Match(self, metadata):
+ """Return matching fragments of independent."""
+ matching = [frag for (pred, frag) in self.fragments.iteritems() if pred(metadata)]
+ if matching:
+ return reduce(lambda x, y:x+y, matching)
+ logger.error("File %s got null match" % (self.name))
+ return []
+
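+# For illustration, a structure file like the following (hypothetical) yields
+# a fragment guarded by the predicate lambda x: 'web' in x.groups, so only
+# clients in group "web" receive the Package entry:
+#
+#     <Bundle name="example">
+#       <Group name="web">
+#         <Package name="httpd"/>
+#       </Group>
+#     </Bundle>
+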
+class INode:
+ """
+    INodes provide lists of things available at a particular
+ group intersection.
+ """
+ raw = {'Client':"lambda x:'%s' == x.hostname and predicate(x)",
+ 'Group':"lambda x:'%s' in x.groups and predicate(x)"}
+ nraw = {'Client':"lambda x:'%s' != x.hostname and predicate(x)",
+ 'Group':"lambda x:'%s' not in x.groups and predicate(x)"}
+ containers = ['Group', 'Client']
+ ignore = []
+
+ def __init__(self, data, idict, parent=None):
+ self.data = data
+ self.contents = {}
+ if parent == None:
+ self.predicate = lambda x:True
+ else:
+ predicate = parent.predicate
+ if data.get('negate', 'false') in ['true', 'True']:
+ psrc = self.nraw
+ else:
+ psrc = self.raw
+ if data.tag in psrc.keys():
+ self.predicate = eval(psrc[data.tag] % (data.get('name')),
+ {'predicate':predicate})
+ else:
+ raise Exception
+ mytype = self.__class__
+ self.children = []
+ for item in data.getchildren():
+ if item.tag in self.ignore:
+ continue
+ elif item.tag in self.containers:
+ self.children.append(mytype(item, idict, self))
+ else:
+ try:
+ self.contents[item.tag][item.get('name')] = item.attrib
+ except KeyError:
+ self.contents[item.tag] = {item.get('name'):item.attrib}
+ if item.text:
+ self.contents[item.tag]['__text__'] = item.text
+ try:
+ idict[item.tag].append(item.get('name'))
+ except KeyError:
+ idict[item.tag] = [item.get('name')]
+
+ def Match(self, metadata, data):
+ """Return a dictionary of package mappings."""
+ if self.predicate(metadata):
+ for key in self.contents:
+ try:
+ data[key].update(self.contents[key])
+ except:
+ data[key] = {}
+ data[key].update(self.contents[key])
+ for child in self.children:
+ child.Match(metadata, data)
+
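+# Illustration (hypothetical): for <Group name='web'><Package name='httpd'/>
+# </Group>, the INode above compiles the predicate
+# lambda x: 'web' in x.groups, and Match() merges
+# {'Package': {'httpd': {...}}} into data for matching clients.
+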
+class XMLSrc(XMLFileBacked):
+ """XMLSrc files contain a LNode hierarchy that returns matching entries."""
+ __node__ = INode
+ __cacheobj__ = dict
+
+ def __init__(self, filename, noprio=False):
+ XMLFileBacked.__init__(self, filename)
+ self.items = {}
+ self.cache = None
+ self.pnode = None
+ self.priority = -1
+ self.noprio = noprio
+
+ def HandleEvent(self, _=None):
+ """Read file upon update."""
+ try:
+ data = file(self.name).read()
+ except IOError:
+ logger.error("Failed to read file %s" % (self.name))
+ return
+ self.items = {}
+ try:
+ xdata = lxml.etree.XML(data)
+ except lxml.etree.XMLSyntaxError:
+ logger.error("Failed to parse file %s" % (self.name))
+ return
+ self.pnode = self.__node__(xdata, self.items)
+ self.cache = None
+ try:
+ self.priority = int(xdata.get('priority'))
+ except (ValueError, TypeError):
+ if not self.noprio:
+ logger.error("Got bogus priority %s for file %s" % (xdata.get('priority'), self.name))
+ del xdata, data
+
+ def Cache(self, metadata):
+ """Build a package dict for a given host."""
+ if self.cache == None or self.cache[0] != metadata:
+ cache = (metadata, self.__cacheobj__())
+ if self.pnode == None:
+ logger.error("Cache method called early for %s; forcing data load" % (self.name))
+ self.HandleEvent()
+ return
+ self.pnode.Match(metadata, cache[1])
+ self.cache = cache
+
+class XMLDirectoryBacked(DirectoryBacked):
+ """Directorybacked for *.xml."""
+ patterns = re.compile('.*\.xml')
+
+class PrioDir(Plugin, Generator, XMLDirectoryBacked):
+ """This is a generator that handles package assignments."""
+ name = 'PrioDir'
+ __child__ = XMLSrc
+
+ def __init__(self, core, datastore):
+ Plugin.__init__(self, core, datastore)
+ Generator.__init__(self)
+ try:
+ XMLDirectoryBacked.__init__(self, self.data, self.core.fam)
+ except OSError:
+ self.logger.error("Failed to load %s indices" % (self.name))
+ raise PluginInitError
+
+ def HandleEvent(self, event):
+ """Handle events and update dispatch table."""
+ XMLDirectoryBacked.HandleEvent(self, event)
+ self.Entries = {}
+ for src in self.entries.values():
+ for itype, children in src.items.iteritems():
+ for child in children:
+ try:
+ self.Entries[itype][child] = self.BindEntry
+ except KeyError:
+ self.Entries[itype] = {child: self.BindEntry}
+
+    def BindEntry(self, entry, metadata):
+        """Check package lists of package entries."""
+        for src in self.entries.values():
+            src.Cache(metadata)
+            if not src.cache:
+                self.logger.error("Called before data loaded")
+                raise PluginExecutionError
+        name = entry.get('name')
+ matching = [src for src in self.entries.values()
+ if src.cache and entry.tag in src.cache[1]
+ and src.cache[1][entry.tag].has_key(name)]
+ if len(matching) == 0:
+ raise PluginExecutionError
+ elif len(matching) == 1:
+ index = 0
+ else:
+ prio = [int(src.priority) for src in matching]
+ if prio.count(max(prio)) > 1:
+ self.logger.error("Found conflicting sources with "
+ "same priority for %s, %s %s" %
+ (metadata.hostname,
+ entry.tag.lower(), entry.get('name')))
+ self.logger.error([item.name for item in matching])
+ self.logger.error("Priority was %s" % max(prio))
+ raise PluginExecutionError
+ index = prio.index(max(prio))
+
+ data = matching[index].cache[1][entry.tag][name]
+ if '__text__' in data:
+ entry.text = data['__text__']
+ if '__children__' in data:
+ [entry.append(copy.deepcopy(item)) for item in data['__children__']]
+ [entry.attrib.__setitem__(key, data[key]) for key in data.keys() \
+ if not key.startswith('__')]
+
+# new unified EntrySet backend
+
+class SpecificityError(Exception):
+ """Thrown in case of filename parse failure."""
+ pass
+
+class Specificity:
+
+ def __init__(self, all=False, group=False, hostname=False, prio=0, delta=False):
+ self.hostname = hostname
+ self.all = all
+ self.group = group
+ self.prio = prio
+ self.delta = delta
+
+ def matches(self, metadata):
+ return self.all or \
+ self.hostname == metadata.hostname or \
+ self.group in metadata.groups
+
+ def __cmp__(self, other):
+ """Sort most to least specific."""
+ if self.all:
+ return 1
+ if self.group:
+ if other.hostname:
+ return 1
+ if other.group and other.prio > self.prio:
+ return 1
+ if other.group and other.prio == self.prio:
+ return 0
+ return -1
+
+ def more_specific(self, other):
+ """Test if self is more specific than other."""
+        if self.all:
+            return True
+ elif self.group:
+ if other.hostname:
+ return True
+ elif other.group and other.prio > self.prio:
+ return True
+ return False
+
+class SpecificData(object):
+ def __init__(self, name, specific, encoding):
+ self.name = name
+ self.specific = specific
+
+ def handle_event(self, event):
+ if event.code2str() == 'deleted':
+ return
+ try:
+ self.data = open(self.name).read()
+ except:
+ logger.error("Failed to read file %s" % self.name)
+
+class EntrySet:
+ """Entry sets deal with the host- and group-specific entries."""
+ ignore = re.compile("^(\.#.*|.*~|\\..*\\.(sw[px]))$")
+ def __init__(self, basename, path, entry_type, encoding):
+ self.path = path
+ self.entry_type = entry_type
+ self.entries = {}
+ self.metadata = default_file_metadata.copy()
+ self.infoxml = None
+ self.encoding = encoding
+ pattern = '(.*/)?%s(\.((H_(?P<hostname>\S+))|' % basename
+ pattern += '(G(?P<prio>\d+)_(?P<group>\S+))))?$'
+ self.specific = re.compile(pattern)
+
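+    # Illustrative filenames matched by the pattern above for basename
+    # "motd" (hypothetical): "motd" (all hosts), "motd.H_node1.example.com"
+    # (host-specific), "motd.G50_webserver" (group-specific, priority 50).
+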
+ def get_matching(self, metadata):
+ return [item for item in self.entries.values() \
+ if item.specific.matches(metadata)]
+
+ def handle_event(self, event):
+ """Handle FAM events for the TemplateSet."""
+ action = event.code2str()
+
+ if event.filename in ['info', 'info.xml', ':info']:
+ if action in ['exists', 'created', 'changed']:
+ self.update_metadata(event)
+ elif action == 'deleted':
+ self.reset_metadata(event)
+ return
+
+ if action in ['exists', 'created']:
+ self.entry_init(event)
+ else:
+ if event.filename not in self.entries:
+ return
+ if action == 'changed':
+ self.entries[event.filename].handle_event(event)
+ elif action == 'deleted':
+ del self.entries[event.filename]
+
+ def entry_init(self, event):
+ """Handle template and info file creation."""
+ if event.filename in self.entries:
+ logger.warn("Got duplicate add for %s" % event.filename)
+ else:
+ fpath = "%s/%s" % (self.path, event.filename)
+ try:
+ spec = self.specificity_from_filename(event.filename)
+ except SpecificityError:
+ if not self.ignore.match(event.filename):
+ logger.error("Could not process filename %s; ignoring" % fpath)
+ return
+ self.entries[event.filename] = self.entry_type(fpath,
+ spec, self.encoding)
+ self.entries[event.filename].handle_event(event)
+
+ def specificity_from_filename(self, fname):
+ """Construct a specificity instance from a filename and regex."""
+ data = self.specific.match(fname)
+ if not data:
+ raise SpecificityError(fname)
+ kwargs = {}
+ if data.group('hostname'):
+ kwargs['hostname'] = data.group('hostname')
+ elif data.group('group'):
+ kwargs['group'] = data.group('group')
+ kwargs['prio'] = int(data.group('prio'))
+ else:
+ kwargs['all'] = True
+ if 'delta' in data.groupdict():
+ kwargs['delta'] = data.group('delta')
+ return Specificity(**kwargs)
+
+ def update_metadata(self, event):
+ """Process info and info.xml files for the templates."""
+ fpath = "%s/%s" % (self.path, event.filename)
+ if event.filename == 'info.xml':
+ if not self.infoxml:
+ self.infoxml = XMLSrc(fpath, True)
+ self.infoxml.HandleEvent(event)
+ elif event.filename in [':info', 'info']:
+ for line in open(fpath).readlines():
+ match = info_regex.match(line)
+ if not match:
+ logger.warning("Failed to match line: %s"%line)
+ continue
+ else:
+ mgd = match.groupdict()
+ for key, value in mgd.iteritems():
+ if value:
+ self.metadata[key] = value
+ if len(self.metadata['perms']) == 3:
+ self.metadata['perms'] = "0%s" % \
+ (self.metadata['perms'])
+
+ def reset_metadata(self, event):
+ """Reset metadata to defaults if info or info.xml removed."""
+ if event.filename == 'info.xml':
+ self.infoxml = None
+ elif event.filename == 'info':
+ self.metadata = default_file_metadata.copy()
+
+ def group_sortfunc(self, x, y):
+ """sort groups by their priority"""
+ return cmp(x.specific.prio, y.specific.prio)
+
+ def bind_info_to_entry(self, entry, metadata):
+ if not self.infoxml:
+ for key in self.metadata:
+ entry.set(key, self.metadata[key])
+ else:
+ mdata = {}
+ self.infoxml.pnode.Match(metadata, mdata)
+ if 'Info' not in mdata:
+ logger.error("Failed to set metadata for file %s" % \
+ (entry.get('name')))
+ raise PluginExecutionError
+ [entry.attrib.__setitem__(key, value) \
+ for (key, value) in mdata['Info'][None].iteritems()]
+
+ def bind_entry(self, entry, metadata):
+ """Return the appropriate interpreted template from the set of available templates."""
+ self.bind_info_to_entry(entry, metadata)
+ matching = self.get_matching(metadata)
+
+ hspec = [ent for ent in matching if ent.specific.hostname]
+ if hspec:
+ return hspec[0].bind_entry(entry, metadata)
+
+ gspec = [ent for ent in matching if ent.specific.group]
+ if gspec:
+ gspec.sort(self.group_sortfunc)
+ return gspec[-1].bind_entry(entry, metadata)
+
+ aspec = [ent for ent in matching if ent.specific.all]
+ if aspec:
+ return aspec[0].bind_entry(entry, metadata)
+
+ raise PluginExecutionError
+
+class GroupSpool(Plugin, Generator):
+ """Unified interface for handling group-specific data (e.g. .G## files)."""
+ name = 'GroupSpool'
+ __version__ = '$Id$'
+ __author__ = 'bcfg-dev@mcs.anl.gov'
+ filename_pattern = ""
+ es_child_cls = object
+ es_cls = EntrySet
+
+ def __init__(self, core, datastore):
+ Plugin.__init__(self, core, datastore)
+ Generator.__init__(self)
+ if self.data[-1] == '/':
+ self.data = self.data[:-1]
+ self.Entries['Path'] = {}
+ self.entries = {}
+ self.handles = {}
+ self.AddDirectoryMonitor('')
+ self.encoding = core.encoding
+
+ def HandleEvent(self, event):
+ """Unified FAM event handler for DirShadow."""
+ action = event.code2str()
+ if event.filename[0] == '/':
+ return
+ epath = "".join([self.data, self.handles[event.requestID],
+ event.filename])
+ if posixpath.isdir(epath):
+ ident = self.handles[event.requestID] + event.filename
+ else:
+ ident = self.handles[event.requestID][:-1]
+
+ if action in ['exists', 'created']:
+ if posixpath.isdir(epath):
+ self.AddDirectoryMonitor(epath[len(self.data):])
+ if ident not in self.entries and posixpath.isfile(epath):
+ dirpath = "".join([self.data, ident])
+ self.entries[ident] = self.es_cls(self.filename_pattern,
+ dirpath,
+ self.es_child_cls,
+ self.encoding)
+ self.Entries['Path'][ident] = self.entries[ident].bind_entry
+ if not posixpath.isdir(epath):
+ # do not pass through directory events
+ self.entries[ident].handle_event(event)
+ if action == 'changed':
+ self.entries[ident].handle_event(event)
+ elif action == 'deleted':
+ fbase = self.handles[event.requestID] + event.filename
+ if fbase in self.entries:
+ # a directory was deleted
+ del self.entries[fbase]
+ del self.Entries['Path'][fbase]
+ else:
+ self.entries[ident].handle_event(event)
+
+ def AddDirectoryMonitor(self, relative):
+ """Add new directory to FAM structures."""
+ if not relative.endswith('/'):
+ relative += '/'
+ name = self.data + relative
+ if relative not in self.handles.values():
+ if not posixpath.isdir(name):
+ print "Failed to open directory %s" % (name)
+ return
+ reqid = self.core.fam.AddMonitor(name, self)
+ self.handles[reqid] = relative
diff --git a/build/lib/Bcfg2/Server/Plugins/Account.py b/build/lib/Bcfg2/Server/Plugins/Account.py
new file mode 100644
index 000000000..e3ea58761
--- /dev/null
+++ b/build/lib/Bcfg2/Server/Plugins/Account.py
@@ -0,0 +1,93 @@
+"""This handles authentication setup."""
+__revision__ = '$Revision$'
+
+import Bcfg2.Server.Plugin
+
+class Account(Bcfg2.Server.Plugin.Plugin,
+ Bcfg2.Server.Plugin.Generator):
+ """This module generates account config files,
+ based on an internal data repo:
+ static.(passwd|group|limits.conf) -> static entries
+ dyn.(passwd|group) -> dynamic entries (usually acquired from yp or somesuch)
+ useraccess -> users to be granted login access on some hosts
+ superusers -> users to be granted root privs on all hosts
+ rootlike -> users to be granted root privs on some hosts
+
+ """
+ name = 'Account'
+ __version__ = '$Id$'
+ __author__ = 'bcfg-dev@mcs.anl.gov'
+
+ def __init__(self, core, datastore):
+ Bcfg2.Server.Plugin.Plugin.__init__(self, core, datastore)
+ Bcfg2.Server.Plugin.Generator.__init__(self)
+ self.Entries = {'ConfigFile':{'/etc/passwd':self.from_yp_cb,
+ '/etc/group':self.from_yp_cb,
+ '/etc/security/limits.conf':self.gen_limits_cb,
+ '/root/.ssh/authorized_keys':self.gen_root_keys_cb,
+ '/etc/sudoers':self.gen_sudoers}}
+ try:
+ self.repository = Bcfg2.Server.Plugin.DirectoryBacked(self.data, self.core.fam)
+ except:
+ self.logger.error("Failed to load repos: %s, %s" % \
+ (self.data, "%s/ssh" % (self.data)))
+ raise Bcfg2.Server.Plugin.PluginInitError
+
+ def from_yp_cb(self, entry, metadata):
+ """Build password file from cached yp data."""
+ fname = entry.attrib['name'].split('/')[-1]
+ entry.text = self.repository.entries["static.%s" % (fname)].data
+ entry.text += self.repository.entries["dyn.%s" % (fname)].data
+ perms = {'owner':'root', 'group':'root', 'perms':'0644'}
+ [entry.attrib.__setitem__(key, value) for (key, value) in \
+ perms.iteritems()]
+
+ def gen_limits_cb(self, entry, metadata):
+ """Build limits entries based on current ACLs."""
+ entry.text = self.repository.entries["static.limits.conf"].data
+ superusers = self.repository.entries["superusers"].data.split()
+ useraccess = [line.split(':') for line in \
+ self.repository.entries["useraccess"].data.split()]
+ users = [user for (user, host) in \
+ useraccess if host == metadata.hostname.split('.')[0]]
+ perms = {'owner':'root', 'group':'root', 'perms':'0600'}
+ [entry.attrib.__setitem__(key, value) for (key, value) in \
+ perms.iteritems()]
+ entry.text += "".join(["%s hard maxlogins 1024\n" % uname for uname in superusers + users])
+ if "*" not in users:
+ entry.text += "* hard maxlogins 0\n"
+
+ def gen_root_keys_cb(self, entry, metadata):
+ """Build root authorized keys file based on current ACLs."""
+ superusers = self.repository.entries['superusers'].data.split()
+ try:
+ rootlike = [line.split(':', 1) for line in \
+ self.repository.entries['rootlike'].data.split()]
+ superusers += [user for (user, host) in rootlike \
+ if host == metadata.hostname.split('.')[0]]
+ except:
+ pass
+ rdata = self.repository.entries
+ entry.text = "".join([rdata["%s.key" % user].data for user \
+ in superusers if \
+ ("%s.key" % user) in rdata])
+ perms = {'owner':'root', 'group':'root', 'perms':'0600'}
+ [entry.attrib.__setitem__(key, value) for (key, value) \
+ in perms.iteritems()]
+
+    def gen_sudoers(self, entry, metadata):
+        """Build sudoers entries based on current ACLs."""
+ superusers = self.repository.entries['superusers'].data.split()
+ try:
+ rootlike = [line.split(':', 1) for line in \
+ self.repository.entries['rootlike'].data.split()]
+ superusers += [user for (user, host) in rootlike \
+ if host == metadata.hostname.split('.')[0]]
+ except:
+ pass
+ entry.text = self.repository.entries['static.sudoers'].data
+ entry.text += "".join(["%s ALL=(ALL) ALL\n" % uname \
+ for uname in superusers])
+ perms = {'owner':'root', 'group':'root', 'perms':'0440'}
+ [entry.attrib.__setitem__(key, value) for (key, value) \
+ in perms.iteritems()]
diff --git a/build/lib/Bcfg2/Server/Plugins/BB.py b/build/lib/Bcfg2/Server/Plugins/BB.py
new file mode 100644
index 000000000..137142b66
--- /dev/null
+++ b/build/lib/Bcfg2/Server/Plugins/BB.py
@@ -0,0 +1,84 @@
+import lxml.etree
+import Bcfg2.Server.Plugin
+import glob
+import os
+import socket
+
+#manage boot symlinks
+ #add statistics check to do build->boot mods
+
+#map profiles: if the first array is not empty, we replace the -p with a determined profile.
+logger = Bcfg2.Server.Plugin.logger
+
+class BBfile(Bcfg2.Server.Plugin.XMLFileBacked):
+ """Class for bb files."""
+ def Index(self):
+ """Build data into an xml object."""
+
+ try:
+ self.data = lxml.etree.XML(self.data)
+ except lxml.etree.XMLSyntaxError:
+ Bcfg2.Server.Plugin.logger.error("Failed to parse %s" % self.name)
+ return
+ self.tftppath = self.data.get('tftp', '/tftpboot')
+ self.macs = {}
+ self.users = {}
+ self.actions = {}
+ self.bootlinks = []
+
+ for node in self.data.findall('Node'):
+ iface = node.find('Interface')
+ if iface != None:
+ mac = "01-%s" % (iface.get('mac'.replace(':','-').lower()))
+ self.actions[node.get('name')] = node.get('action')
+ self.bootlinks.append((mac, node.get('action')))
+                try:
+                    ip = socket.gethostbyname(node.get('name'))
+                except socket.error:
+                    ip = None
+                    logger.error("failed host resolution for %s" % node.get('name'))
+
+ self.macs[node.get('name')] = (iface.get('mac'), ip)
+ else:
+ logger.error("%s" % lxml.etree.tostring(node))
+ self.users[node.get('name')] = node.get('user',"").split(':')
+
+ def enforce_bootlinks(self):
+ for mac, target in self.bootlinks:
+ path = self.tftppath + '/' + mac
+ if not os.path.islink(path):
+ logger.error("Boot file %s not a link" % path)
+ if target != os.readlink(path):
+ try:
+ os.unlink(path)
+ os.symlink(target, path)
+ except:
+ logger.error("Failed to modify link %s" % path)
+
+class BBDirectoryBacked(Bcfg2.Server.Plugin.DirectoryBacked):
+ __child__ = BBfile
+
+
+class BB(Bcfg2.Server.Plugin.Plugin,
+ Bcfg2.Server.Plugin.Connector):
+ """The BB plugin maps users to machines and metadata to machines."""
+ name = 'BB'
+ version = '$Revision$'
+ deprecated = True
+
+ def __init__(self, core, datastore):
+ Bcfg2.Server.Plugin.Plugin.__init__(self, core, datastore)
+ Bcfg2.Server.Plugin.Connector.__init__(self)
+ self.store = BBDirectoryBacked(self.data, core.fam)
+
+ def get_additional_data(self, metadata):
+
+ users = {}
+ for user in self.store.entries['bb.xml'].users.get(metadata.hostname.split(".")[0], []):
+ pubkeys = []
+ for fname in glob.glob('/home/%s/.ssh/*.pub'%user):
+ pubkeys.append(open(fname).read())
+
+ users[user] = pubkeys
+
+ return dict([('users', users),
+ ('macs', self.store.entries['bb.xml'].macs)])
diff --git a/build/lib/Bcfg2/Server/Plugins/Base.py b/build/lib/Bcfg2/Server/Plugins/Base.py
new file mode 100644
index 000000000..8e5ca1cd9
--- /dev/null
+++ b/build/lib/Bcfg2/Server/Plugins/Base.py
@@ -0,0 +1,38 @@
+"""This module sets up a base list of configuration entries."""
+__revision__ = '$Revision$'
+
+import Bcfg2.Server.Plugin
+import copy
+import lxml.etree
+
+class Base(Bcfg2.Server.Plugin.Plugin,
+ Bcfg2.Server.Plugin.Structure,
+ Bcfg2.Server.Plugin.XMLDirectoryBacked):
+ """This Structure is good for the pile of independent configs
+ needed for most actual systems.
+ """
+ name = 'Base'
+ __version__ = '$Id$'
+ __author__ = 'bcfg-dev@mcs.anl.gov'
+ __child__ = Bcfg2.Server.Plugin.StructFile
+
+ """Base creates independent clauses based on client metadata."""
+ def __init__(self, core, datastore):
+ Bcfg2.Server.Plugin.Plugin.__init__(self, core, datastore)
+ Bcfg2.Server.Plugin.Structure.__init__(self)
+ try:
+ Bcfg2.Server.Plugin.XMLDirectoryBacked.__init__(self,
+ self.data,
+ self.core.fam)
+ except OSError:
+ self.logger.error("Failed to load Base repository")
+ raise Bcfg2.Server.Plugin.PluginInitError
+
+ def BuildStructures(self, metadata):
+ """Build structures for client described by metadata."""
+ ret = lxml.etree.Element("Independent", version='2.0')
+ fragments = reduce(lambda x, y: x+y,
+ [base.Match(metadata) for base
+ in self.entries.values()], [])
+ [ret.append(copy.deepcopy(frag)) for frag in fragments]
+ return [ret]
diff --git a/build/lib/Bcfg2/Server/Plugins/Bundler.py b/build/lib/Bcfg2/Server/Plugins/Bundler.py
new file mode 100644
index 000000000..47cd7e2c4
--- /dev/null
+++ b/build/lib/Bcfg2/Server/Plugins/Bundler.py
@@ -0,0 +1,76 @@
+"""This provides bundle clauses with translation functionality."""
+__revision__ = '$Revision$'
+
+import copy
+import lxml.etree
+import re
+
+import Bcfg2.Server.Plugin
+
+try:
+    import genshi.template
+    import genshi.template.base
+    import Bcfg2.Server.Plugins.SGenshi
+    have_genshi = True
+except ImportError:
+    have_genshi = False
+
+class BundleFile(Bcfg2.Server.Plugin.StructFile):
+ def get_xml_value(self, metadata):
+ bundlename = self.name.split('/')[-1][:-4]
+ bundle = lxml.etree.Element('Bundle', name=bundlename)
+ [bundle.append(copy.deepcopy(item)) for item in self.Match(metadata)]
+ return bundle
+
+class Bundler(Bcfg2.Server.Plugin.Plugin,
+ Bcfg2.Server.Plugin.Structure,
+ Bcfg2.Server.Plugin.XMLDirectoryBacked):
+ """The bundler creates dependent clauses based on the bundle/translation scheme from Bcfg1."""
+ name = 'Bundler'
+ __version__ = '$Id$'
+ __author__ = 'bcfg-dev@mcs.anl.gov'
+ patterns = re.compile('^(?P<name>.*)\.(xml|genshi)$')
+
+ def __init__(self, core, datastore):
+ Bcfg2.Server.Plugin.Plugin.__init__(self, core, datastore)
+ Bcfg2.Server.Plugin.Structure.__init__(self)
+ self.encoding = core.encoding
+ self.__child__ = self.template_dispatch
+ try:
+ Bcfg2.Server.Plugin.XMLDirectoryBacked.__init__(self, self.data, self.core.fam)
+ except OSError:
+ self.logger.error("Failed to load Bundle repository")
+ raise Bcfg2.Server.Plugin.PluginInitError
+
+ def template_dispatch(self, name):
+ if name.endswith('.xml'):
+ return BundleFile(name)
+ elif name.endswith('.genshi'):
+ if have_genshi:
+ spec = Bcfg2.Server.Plugin.Specificity()
+ return Bcfg2.Server.Plugins.SGenshi.SGenshiTemplateFile(name,
+ spec,
+ self.encoding)
+
+ def BuildStructures(self, metadata):
+ """Build all structures for client (metadata)."""
+ bundleset = []
+ for bundlename in metadata.bundles:
+ entries = [item for (key, item) in self.entries.iteritems() if \
+ self.patterns.match(key).group('name') == bundlename]
+ if len(entries) == 0:
+ continue
+ elif len(entries) == 1:
+ try:
+ bundleset.append(entries[0].get_xml_value(metadata))
+ except genshi.template.base.TemplateError, t:
+ self.logger.error("Bundler: Failed to template genshi bundle %s" \
+ % (bundlename))
+ self.logger.error(t)
+ except:
+ self.logger.error("Bundler: Unexpected bundler error for %s" \
+ % (bundlename), exc_info=1)
+ else:
+ self.logger.error("Got multiple matches for bundle %s" \
+ % (bundlename))
+ return bundleset
diff --git a/build/lib/Bcfg2/Server/Plugins/Bzr.py b/build/lib/Bcfg2/Server/Plugins/Bzr.py
new file mode 100644
index 000000000..a9a5eb814
--- /dev/null
+++ b/build/lib/Bcfg2/Server/Plugins/Bzr.py
@@ -0,0 +1,36 @@
+import Bcfg2.Server.Plugin
+from bzrlib.workingtree import WorkingTree
+from bzrlib import errors
+
+# for debugging output only
+import logging
+logger = logging.getLogger('Bcfg2.Plugins.Bzr')
+
+class Bzr(Bcfg2.Server.Plugin.Plugin,
+ Bcfg2.Server.Plugin.Version):
+ """Bzr is a version plugin for dealing with Bcfg2 repos."""
+ name = 'Bzr'
+ __version__ = '$Id$'
+ __author__ = 'bcfg-dev@mcs.anl.gov'
+
+ def __init__(self, core, datastore):
+ Bcfg2.Server.Plugin.Plugin.__init__(self, core, datastore)
+ self.core = core
+ self.datastore = datastore
+
+ # Read revision from bcfg2 repo
+ revision = self.get_revision()
+
+ logger.debug("Initialized Bazaar plugin with directory = %(dir)s at revision = %(rev)s" % {'dir': datastore, 'rev': revision})
+
+ def get_revision(self):
+ """Read Bazaar revision information for the Bcfg2 repository."""
+ try:
+ working_tree = WorkingTree.open(self.datastore)
+ revision = str(working_tree.branch.revno())
+ if working_tree.has_changes(working_tree.basis_tree()) or working_tree.unknowns():
+ revision += "+"
+ except errors.NotBranchError:
+ logger.error("Failed to read Bazaar branch; disabling Bazaar support")
+ raise Bcfg2.Server.Plugin.PluginInitError
+ return revision
diff --git a/build/lib/Bcfg2/Server/Plugins/Cfg.py b/build/lib/Bcfg2/Server/Plugins/Cfg.py
new file mode 100644
index 000000000..dd1e792ec
--- /dev/null
+++ b/build/lib/Bcfg2/Server/Plugins/Cfg.py
@@ -0,0 +1,165 @@
+"""This module implements a config file repository."""
+__revision__ = '$Revision$'
+
+import binascii
+import logging
+import lxml
+import os
+import re
+import tempfile
+
+import Bcfg2.Server.Plugin
+
+logger = logging.getLogger('Bcfg2.Plugins.Cfg')
+
+def process_delta(data, delta):
+ if not delta.specific.delta:
+ return data
+ if delta.specific.delta == 'cat':
+ datalines = data.split('\n')
+ for line in delta.data.split('\n'):
+ if not line:
+ continue
+ if line[0] == '+':
+ datalines.append(line[1:])
+ elif line[0] == '-':
+ if line[1:] in datalines:
+ datalines.remove(line[1:])
+ return "\n".join(datalines)
+ elif delta.specific.delta == 'diff':
+ basehandle, basename = tempfile.mkstemp()
+ basefile = open(basename, 'w')
+ basefile.write(data)
+ basefile.close()
+ os.close(basehandle)
+ dhandle, dname = tempfile.mkstemp()
+ dfile = open(dname, 'w')
+ dfile.write(delta.data)
+ dfile.close()
+ os.close(dhandle)
+ ret = os.system("patch -uf %s < %s > /dev/null 2>&1" \
+ % (basefile.name, dfile.name))
+ output = open(basefile.name, 'r').read()
+ [os.unlink(fname) for fname in [basefile.name, dfile.name]]
+ if ret >> 8 != 0:
+ raise Bcfg2.Server.Plugin.PluginExecutionError, ('delta', delta)
+ return output
+
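+# Example "cat" delta (illustrative): each "+" line is appended to the base
+# data and each "-" line is removed from it, so a delta file containing
+#     +PermitRootLogin no
+#     -PermitRootLogin yes
+# rewrites that directive in the rendered file.
+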
+class CfgMatcher:
+ def __init__(self, fname):
+ name = re.escape(fname)
+ self.basefile_reg = re.compile('^(?P<basename>%s)(|\\.H_(?P<hostname>\S+)|.G(?P<prio>\d+)_(?P<group>\S+))$' % name)
+ self.delta_reg = re.compile('^(?P<basename>%s)(|\\.H_(?P<hostname>\S+)|\\.G(?P<prio>\d+)_(?P<group>\S+))\\.(?P<delta>(cat|diff))$' % name)
+ self.cat_count = fname.count(".cat")
+ self.diff_count = fname.count(".diff")
+
+ def match(self, fname):
+ if fname.count(".cat") > self.cat_count \
+ or fname.count('.diff') > self.diff_count:
+ return self.delta_reg.match(fname)
+ return self.basefile_reg.match(fname)
+
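+# For a base file "motd", the matcher above routes (hypothetical examples):
+#     "motd"              -> basefile_reg (global base)
+#     "motd.G50_web"      -> basefile_reg (group-specific base)
+#     "motd.H_node1.cat"  -> delta_reg (host-specific cat delta)
+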
+class CfgEntrySet(Bcfg2.Server.Plugin.EntrySet):
+ def __init__(self, basename, path, entry_type, encoding):
+ Bcfg2.Server.Plugin.EntrySet.__init__(self, basename, path,
+ entry_type, encoding)
+ self.specific = CfgMatcher(path.split('/')[-1])
+
+ def sort_by_specific(self, one, other):
+ return cmp(one.specific, other.specific)
+
+ def get_pertinent_entries(self, metadata):
+ '''return a list of all entries pertinent to a client => [base, delta1, delta2]'''
+ matching = [ent for ent in self.entries.values() if \
+ ent.specific.matches(metadata)]
+ matching.sort(self.sort_by_specific)
+ non_delta = [matching.index(m) for m in matching if not m.specific.delta]
+ if not non_delta:
+ raise Bcfg2.Server.Plugin.PluginExecutionError
+ base = min(non_delta)
+ used = matching[:base+1]
+ used.reverse()
+ return used
+
+ def bind_entry(self, entry, metadata):
+ self.bind_info_to_entry(entry, metadata)
+ used = self.get_pertinent_entries(metadata)
+ basefile = used.pop(0)
+ data = basefile.data
+ if entry.tag == 'Path':
+ entry.set('type', 'file')
+ for delta in used:
+ data = data.strip()
+ data = process_delta(data, delta)
+ if used:
+ data += '\n'
+ if entry.get('encoding') == 'base64':
+ entry.text = binascii.b2a_base64(data)
+ else:
+ entry.text = unicode(data, self.encoding)
+ if entry.text in ['', None]:
+ entry.set('empty', 'true')
+
+ def list_accept_choices(self, metadata):
+ '''return a list of candidate pull locations'''
+ used = self.get_pertinent_entries(metadata)
+ ret = []
+ if used:
+ ret.append(used[0].specific)
+ if not ret[0].hostname:
+ ret.append(Bcfg2.Server.Plugin.Specificity(hostname=metadata.hostname))
+ return ret
+
+ def build_filename(self, specific):
+ bfname = self.path + '/' + self.path.split('/')[-1]
+ if specific.all:
+ return bfname
+ elif specific.group:
+ return "%s.G%d_%s" % (bfname, specific.prio, specific.group)
+ elif specific.hostname:
+ return "%s.H_%s" % (bfname, specific.hostname)
+
+ def write_update(self, specific, new_entry, log):
+ if 'text' in new_entry:
+ name = self.build_filename(specific)
+ open(name, 'w').write(new_entry['text'])
+ if log:
+ logger.info("Wrote file %s" % name)
+ badattr = [attr for attr in ['owner', 'group', 'perms'] if attr in new_entry]
+ if badattr:
+ metadata_updates = {}
+ metadata_updates.update(self.metadata)
+ for attr in badattr:
+ metadata_updates[attr] = new_entry.get(attr)
+ if self.infoxml:
+ infoxml = lxml.etree.Element('FileInfo')
+ infotag = lxml.etree.SubElement(infoxml, 'Info')
+ [infotag.attrib.__setitem__(attr, metadata_updates[attr]) \
+ for attr in metadata_updates]
+ ofile = open(self.path + "/info.xml","w")
+ ofile.write(lxml.etree.tostring(infoxml, pretty_print=True))
+ ofile.close()
+ if log:
+ logger.info("Wrote file %s" % (self.path + "/info.xml"))
+ else:
+ infofile = open(self.path + '/:info', 'w')
+ for x in metadata_updates.iteritems():
+ infofile.write("%s: %s\n" % x)
+ infofile.close()
+ if log:
+ logger.info("Wrote file %s" % infofile.name)
+
+class Cfg(Bcfg2.Server.Plugin.GroupSpool,
+ Bcfg2.Server.Plugin.PullTarget):
+ """This generator in the configuration file repository for Bcfg2."""
+ name = 'Cfg'
+ __version__ = '$Id$'
+ __author__ = 'bcfg-dev@mcs.anl.gov'
+ es_cls = CfgEntrySet
+ es_child_cls = Bcfg2.Server.Plugin.SpecificData
+
+ def AcceptChoices(self, entry, metadata):
+ return self.entries[entry.get('name')].list_accept_choices(metadata)
+
+ def AcceptPullData(self, specific, new_entry, log):
+ return self.entries[new_entry.get('name')].write_update(specific, new_entry, log)
diff --git a/build/lib/Bcfg2/Server/Plugins/Cvs.py b/build/lib/Bcfg2/Server/Plugins/Cvs.py
new file mode 100644
index 000000000..ea898c023
--- /dev/null
+++ b/build/lib/Bcfg2/Server/Plugins/Cvs.py
@@ -0,0 +1,47 @@
+import os
+from subprocess import Popen, PIPE
+import Bcfg2.Server.Plugin
+
+# for debugging output only
+import logging
+logger = logging.getLogger('Bcfg2.Plugins.Cvs')
+
+class Cvs(Bcfg2.Server.Plugin.Plugin,
+ Bcfg2.Server.Plugin.Version):
+ """CVS is a version plugin for dealing with Bcfg2 repository."""
+ name = 'Cvs'
+ __version__ = '$Id$'
+ __author__ = 'bcfg-dev@mcs.anl.gov'
+ experimental = True
+
+ def __init__(self, core, datastore):
+ Bcfg2.Server.Plugin.Plugin.__init__(self, core, datastore)
+ self.core = core
+ self.datastore = datastore
+
+ # path to cvs directory for Bcfg2 repo
+ cvs_dir = "%s/CVSROOT" % datastore
+
+ # Read revision from Bcfg2 repo
+ if os.path.isdir(cvs_dir):
+ self.get_revision()
+ else:
+ logger.error("%s is not a directory" % cvs_dir)
+ raise Bcfg2.Server.Plugin.PluginInitError
+
+ logger.debug("Initialized cvs plugin with cvs directory = %s" % cvs_dir)
+
+ def get_revision(self):
+ """Read cvs revision information for the Bcfg2 repository."""
+ try:
+ data = Popen("env LC_ALL=C cvs log",
+ shell=True,
+ cwd=self.datastore,
+ stdout=PIPE).stdout.readlines()
+            revision = data[3].strip('\n')
+            return revision
+        except IndexError:
+            logger.error("Failed to read cvs log; disabling cvs support")
+            logger.error('''Ran command "cvs log" in directory %s''' % (self.datastore))
+            logger.error("Got output: %s" % data)
+            raise Bcfg2.Server.Plugin.PluginInitError
+
diff --git a/build/lib/Bcfg2/Server/Plugins/DBStats.py b/build/lib/Bcfg2/Server/Plugins/DBStats.py
new file mode 100644
index 000000000..2712cd45f
--- /dev/null
+++ b/build/lib/Bcfg2/Server/Plugins/DBStats.py
@@ -0,0 +1,110 @@
+import binascii
+import difflib
+import logging
+import lxml.etree
+import platform
+import time
+
+try:
+ from django.core.exceptions import MultipleObjectsReturned
+except ImportError:
+ pass
+
+import Bcfg2.Server.Plugin
+import Bcfg2.Server.Reports.importscript
+from Bcfg2.Server.Reports.reports.models import Client
+import Bcfg2.Server.Reports.settings
+from Bcfg2.Server.Reports.updatefix import update_database
+# for debugging output only
+logger = logging.getLogger('Bcfg2.Plugins.DBStats')
+
+class DBStats(Bcfg2.Server.Plugin.Plugin,
+ Bcfg2.Server.Plugin.ThreadedStatistics,
+ Bcfg2.Server.Plugin.PullSource):
+ name = 'DBStats'
+ __version__ = '$Id$'
+
+ def __init__(self, core, datastore):
+ Bcfg2.Server.Plugin.Plugin.__init__(self, core, datastore)
+ Bcfg2.Server.Plugin.ThreadedStatistics.__init__(self, core, datastore)
+ Bcfg2.Server.Plugin.PullSource.__init__(self)
+ self.cpath = "%s/Metadata/clients.xml" % datastore
+ self.core = core
+ logger.debug("Searching for new models to add to the statistics database")
+ try:
+ update_database()
+ except Exception, inst:
+ logger.debug(str(inst))
+ logger.debug(str(type(inst)))
+
+ def handle_statistic(self, metadata, data):
+ newstats = data.find("Statistics")
+ newstats.set('time', time.asctime(time.localtime()))
+ # ick
+ data = lxml.etree.tostring(newstats)
+ ndx = lxml.etree.XML(data)
+ e = lxml.etree.Element('Node', name=metadata.hostname)
+ e.append(ndx)
+ container = lxml.etree.Element("ConfigStatistics")
+ container.append(e)
+
+ # FIXME need to build a metadata interface to expose a list of clients
+ start = time.time()
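+        # retry up to three times; the reports database may be briefly
+        # locked by a concurrent writer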
+ for i in [1, 2, 3]:
+ try:
+ Bcfg2.Server.Reports.importscript.load_stats(self.core.metadata.clientdata,
+ container,
+ 0,
+ logger,
+ True,
+ platform.node())
+ logger.info("Imported data for %s in %s seconds" \
+ % (metadata.hostname, time.time() - start))
+ return
+ except MultipleObjectsReturned, e:
+ logger.error("DBStats: MultipleObjectsReturned while handling %s: %s" % \
+ (metadata.hostname, e))
+ logger.error("DBStats: Data is inconsistent")
+ break
+ except:
+ logger.error("DBStats: Failed to write to db (lock); retrying",
+ exc_info=1)
+ logger.error("DBStats: Retry limit failed for %s; aborting operation" \
+ % metadata.hostname)
+
+ def GetExtra(self, client):
+ c_inst = Client.objects.filter(name=client)[0]
+ return [(a.entry.kind, a.entry.name) for a in
+ c_inst.current_interaction.extra()]
+
+ def GetCurrentEntry(self, client, e_type, e_name):
+ try:
+ c_inst = Client.objects.filter(name=client)[0]
+ except IndexError:
+ self.logger.error("Unknown client: %s" % client)
+ raise Bcfg2.Server.Plugin.PluginExecutionError
+ result = c_inst.current_interaction.bad().filter(entry__kind=e_type,
+ entry__name=e_name)
+ if not result:
+ raise Bcfg2.Server.Plugin.PluginExecutionError
+ entry = result[0]
+ ret = []
+ data = ('owner', 'group', 'perms')
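+        # an empty current_* field means no changed value was recorded,
+        # so fall back to the specified value from the reason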
+ for t in data:
+ if getattr(entry.reason, "current_%s" % t) == '':
+ ret.append(getattr(entry.reason, t))
+ else:
+ ret.append(getattr(entry.reason, "current_%s" % t))
+
+ if entry.reason.current_diff != '':
+ if entry.reason.is_binary:
+ ret.append(binascii.a2b_base64(entry.reason.current_diff))
+ else:
+ ret.append('\n'.join(difflib.restore(\
+ entry.reason.current_diff.split('\n'), 1)))
+ elif entry.reason.is_binary:
+ # If len is zero the object was too large to store
+ raise Bcfg2.Server.Plugin.PluginExecutionError
+ else:
+ ret.append(None)
+ return ret
diff --git a/build/lib/Bcfg2/Server/Plugins/Darcs.py b/build/lib/Bcfg2/Server/Plugins/Darcs.py
new file mode 100644
index 000000000..eb34a52c4
--- /dev/null
+++ b/build/lib/Bcfg2/Server/Plugins/Darcs.py
@@ -0,0 +1,49 @@
+import os
+from subprocess import Popen, PIPE
+import Bcfg2.Server.Plugin
+
+# for debugging output only
+import logging
+logger = logging.getLogger('Bcfg2.Plugins.Darcs')
+
+class Darcs(Bcfg2.Server.Plugin.Plugin,
+ Bcfg2.Server.Plugin.Version):
+ """Darcs is a version plugin for dealing with Bcfg2 repos."""
+ name = 'Darcs'
+ __version__ = '$Id$'
+ __author__ = 'bcfg-dev@mcs.anl.gov'
+ experimental = True
+
+ def __init__(self, core, datastore):
+ Bcfg2.Server.Plugin.Plugin.__init__(self, core, datastore)
+ Bcfg2.Server.Plugin.Version.__init__(self)
+ self.core = core
+ self.datastore = datastore
+
+ # path to darcs directory for bcfg2 repo
+ darcs_dir = "%s/_darcs" % datastore
+
+ # Read changeset from bcfg2 repo
+ if os.path.isdir(darcs_dir):
+ self.get_revision()
+ else:
+ logger.error("%s is not present." % darcs_dir)
+ raise Bcfg2.Server.Plugin.PluginInitError
+
+ logger.debug("Initialized Darcs plugin with darcs directory = %s" % darcs_dir)
+
+ def get_revision(self):
+ """Read Darcs changeset information for the Bcfg2 repository."""
+ try:
+ data = Popen("env LC_ALL=C darcs changes",
+ shell=True,
+ cwd=self.datastore,
+ stdout=PIPE).stdout.readlines()
+ revision = data[0].strip('\n')
+ except:
+ logger.error("Failed to read darcs repository; disabling Darcs support")
+ logger.error('''Ran command "darcs changes" from directory "%s"''' % (self.datastore))
+ logger.error("Got output: %s" % data)
+ raise Bcfg2.Server.Plugin.PluginInitError
+ return revision
+
diff --git a/build/lib/Bcfg2/Server/Plugins/Decisions.py b/build/lib/Bcfg2/Server/Plugins/Decisions.py
new file mode 100644
index 000000000..1f9525a0e
--- /dev/null
+++ b/build/lib/Bcfg2/Server/Plugins/Decisions.py
@@ -0,0 +1,64 @@
+import logging
+import lxml.etree
+import Bcfg2.Server.Plugin
+logger = logging.getLogger('Bcfg2.Plugins.Decisions')
+
+class DecisionFile(Bcfg2.Server.Plugin.SpecificData):
+ def handle_event(self, event):
+ Bcfg2.Server.Plugin.SpecificData.handle_event(self, event)
+ self.contents = lxml.etree.XML(self.data)
+
+ def get_decisions(self):
+ return [(x.get('type'), x.get('name')) for x in self.contents.xpath('.//Decision')]
+
+class DecisionSet(Bcfg2.Server.Plugin.EntrySet):
+ def __init__(self, path, fam, encoding):
+ """Container for decision specification files.
+
+ Arguments:
+ - `path`: repository path
+ - `fam`: reference to the file monitor
+ - `encoding`: XML character encoding
+
+ """
+ pattern = '(white|black)list'
+ Bcfg2.Server.Plugin.EntrySet.__init__(self, pattern, path, \
+ DecisionFile, encoding)
+ try:
+ fam.AddMonitor(path, self)
+ except OSError, e:
+            logger.error('Adding filemonitor for %s failed. '
+                         'Make sure the directory exists.' % path)
+ raise Bcfg2.Server.Plugin.PluginInitError(e)
+
+ def HandleEvent(self, event):
+ if event.filename != self.path:
+ return self.handle_event(event)
+
+ def GetDecisions(self, metadata, mode):
+ ret = []
+ candidates = [c for c in self.get_matching(metadata)
+ if c.name.split('/')[-1].startswith(mode)]
+ for c in candidates:
+ ret += c.get_decisions()
+ return ret
+
+class Decisions(DecisionSet,
+ Bcfg2.Server.Plugin.Plugin,
+ Bcfg2.Server.Plugin.Decision):
+ name = 'Decisions'
+ __version__ = '$Id$'
+ __author__ = 'bcfg-dev@mcs.anl.gov'
+
+ def __init__(self, core, datastore):
+ """Decisions plugins
+
+ Arguments:
+ - `core`: Bcfg2.Core instance
+ - `datastore`: File repository location
+
+ """
+ Bcfg2.Server.Plugin.Plugin.__init__(self, core, datastore)
+ Bcfg2.Server.Plugin.Decision.__init__(self)
+ DecisionSet.__init__(self, self.data, core.fam, core.encoding)
+
diff --git a/build/lib/Bcfg2/Server/Plugins/Deps.py b/build/lib/Bcfg2/Server/Plugins/Deps.py
new file mode 100644
index 000000000..088f8cdad
--- /dev/null
+++ b/build/lib/Bcfg2/Server/Plugins/Deps.py
@@ -0,0 +1,103 @@
+"""This plugin provides automatic dependency handling."""
+__revision__ = '$Revision$'
+
+import lxml.etree
+
+import Bcfg2.Server.Plugin
+
+class DNode(Bcfg2.Server.Plugin.INode):
+ """DNode provides supports for single predicate types for dependencies."""
+ raw = {'Group':"lambda x:'%s' in x.groups and predicate(x)"}
+ containers = ['Group']
+
+ def __init__(self, data, idict, parent=None):
+ self.data = data
+ self.contents = {}
+ if parent == None:
+ self.predicate = lambda x:True
+ else:
+ predicate = parent.predicate
+ if data.tag in self.raw.keys():
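+                # builds e.g. lambda x: 'somegroup' in x.groups and predicate(x)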
+ self.predicate = eval(self.raw[data.tag] % (data.get('name')), {'predicate':predicate})
+ else:
+ raise Exception
+ mytype = self.__class__
+ self.children = []
+ for item in data.getchildren():
+ if item.tag in self.containers:
+ self.children.append(mytype(item, idict, self))
+ else:
+ data = [(child.tag, child.get('name')) for child in item.getchildren()]
+ try:
+ self.contents[item.tag][item.get('name')] = data
+ except KeyError:
+ self.contents[item.tag] = {item.get('name'):data}
+
+class DepXMLSrc(Bcfg2.Server.Plugin.XMLSrc):
+ __node__ = DNode
+
+class Deps(Bcfg2.Server.Plugin.PrioDir,
+ Bcfg2.Server.Plugin.StructureValidator):
+ name = 'Deps'
+ __version__ = '$Id$'
+ __author__ = 'bcfg-dev@mcs.anl.gov'
+ __child__ = DepXMLSrc
+
+ def __init__(self, core, datastore):
+ Bcfg2.Server.Plugin.PrioDir.__init__(self, core, datastore)
+ Bcfg2.Server.Plugin.StructureValidator.__init__(self)
+ self.cache = {}
+
+ def HandleEvent(self, event):
+ self.cache = {}
+ Bcfg2.Server.Plugin.PrioDir.HandleEvent(self, event)
+
+ def validate_structures(self, metadata, structures):
+ entries = []
+ prereqs = []
+ for structure in structures:
+ for entry in structure.getchildren():
+ if (entry.tag, entry.get('name')) not in entries \
+ and not isinstance(entry, lxml.etree._Comment):
+ entries.append((entry.tag, entry.get('name')))
+ entries.sort()
+ entries = tuple(entries)
+ gdata = list(metadata.groups)
+ gdata.sort()
+ gdata = tuple(gdata)
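+        # the cache key pairs the sorted entry list with the client's
+        # sorted group list; HandleEvent flushes this cache on any change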
+ if (entries, gdata) in self.cache:
+ prereqs = self.cache[(entries, gdata)]
+ else:
+ [src.Cache(metadata) for src in self.entries.values()]
+
+ toexamine = list(entries[:])
+ while toexamine:
+ entry = toexamine.pop()
+ matching = [src for src in self.entries.values()
+ if src.cache and entry[0] in src.cache[1]
+ and entry[1] in src.cache[1][entry[0]]]
+ if len(matching) > 1:
+ prio = [int(src.priority) for src in matching]
+ if prio.count(max(prio)) > 1:
+ self.logger.error("Found conflicting %s sources with same priority for %s, pkg %s" %
+ (entry[0].lower(), metadata.hostname, entry[1]))
+ raise Bcfg2.Server.Plugin.PluginExecutionError
+ index = prio.index(max(prio))
+ matching = [matching[index]]
+
+ if not matching:
+ continue
+ elif len(matching) == 1:
+ for prq in matching[0].cache[1][entry[0]][entry[1]]:
+ if prq not in prereqs and prq not in entries:
+ toexamine.append(prq)
+ prereqs.append(prq)
+ self.cache[(entries, gdata)] = prereqs
+
+ newstruct = lxml.etree.Element("Independent")
+ for tag, name in prereqs:
+ try:
+ lxml.etree.SubElement(newstruct, tag, name=name)
+ except:
+ self.logger.error("Failed to add dep entry for %s:%s" % (tag, name))
+ structures.append(newstruct)
diff --git a/build/lib/Bcfg2/Server/Plugins/Editor.py b/build/lib/Bcfg2/Server/Plugins/Editor.py
new file mode 100644
index 000000000..bfd4d6e93
--- /dev/null
+++ b/build/lib/Bcfg2/Server/Plugins/Editor.py
@@ -0,0 +1,73 @@
+import Bcfg2.Server.Plugin
+import re
+import lxml.etree
+
+def linesub(pattern, repl, filestring):
+ """Substitutes instances of pattern with repl in filestring."""
+ if filestring == None:
+ filestring = ''
+ output = list()
+ fileread = filestring.split('\n')
+ for line in fileread:
+        output.append(re.sub(pattern, repl, line))
+ return '\n'.join(output)
+
+class EditDirectives(Bcfg2.Server.Plugin.SpecificData):
+ """This object handles the editing directives."""
+ def ProcessDirectives(self, input):
+ """Processes a list of edit directives on input."""
+ temp = input
+ for directive in self.data.split('\n'):
+ directive = directive.split(',')
+ temp = linesub(directive[0], directive[1], temp)
+ return temp
+
+class EditEntrySet(Bcfg2.Server.Plugin.EntrySet):
+ def __init__(self, basename, path, entry_type, encoding):
+ self.ignore = re.compile("^(\.#.*|.*~|\\..*\\.(tmp|sw[px])|%s\.H_.*)$" %path.split('/')[-1])
+ Bcfg2.Server.Plugin.EntrySet.__init__(self, basename, path, entry_type, encoding)
+ self.inputs = dict()
+
+ def bind_entry(self, entry, metadata):
+ client = metadata.hostname
+ filename = entry.get('name')
+ permdata = {'owner':'root', 'group':'root'}
+ permdata['perms'] = '0644'
+ [entry.attrib.__setitem__(key, permdata[key]) for key in permdata]
+ entry.text = self.entries['edits'].ProcessDirectives(self.get_client_data(client))
+ if not entry.text:
+ entry.set('empty', 'true')
+ try:
+ f = open('%s/%s.H_%s' %(self.path, filename.split('/')[-1], client), 'w')
+ f.write(entry.text)
+ f.close()
+ except:
+ pass
+
+ def get_client_data(self, client):
+ return self.inputs[client]
+
+
+class Editor(Bcfg2.Server.Plugin.GroupSpool,
+ Bcfg2.Server.Plugin.Probing):
+ name = 'Editor'
+ __version__ = '$Id$'
+ __author__ = 'bcfg2-dev@mcs.anl.gov'
+ filename_pattern = 'edits'
+ es_child_cls = EditDirectives
+ es_cls = EditEntrySet
+
+ def GetProbes(self, _):
+ '''Return a set of probes for execution on client'''
+ probelist = list()
+ for name in self.entries.keys():
+ probe = lxml.etree.Element('probe')
+ probe.set('name', name)
+ probe.set('source', "Editor")
+ probe.text = "cat %s" % name
+ probelist.append(probe)
+ return probelist
+
+ def ReceiveData(self, client, datalist):
+ for data in datalist:
+ self.entries[data.get('name')].inputs[client.hostname] = data.text
diff --git a/build/lib/Bcfg2/Server/Plugins/Fossil.py b/build/lib/Bcfg2/Server/Plugins/Fossil.py
new file mode 100644
index 000000000..57d427673
--- /dev/null
+++ b/build/lib/Bcfg2/Server/Plugins/Fossil.py
@@ -0,0 +1,52 @@
+import os
+from subprocess import Popen, PIPE
+import Bcfg2.Server.Plugin
+
+# for debugging output only
+import logging
+logger = logging.getLogger('Bcfg2.Plugins.Fossil')
+
+class Fossil(Bcfg2.Server.Plugin.Plugin,
+ Bcfg2.Server.Plugin.Version):
+ """Fossil is a version plugin for dealing with Bcfg2 repos."""
+ name = 'Fossil'
+ __version__ = '$Id$'
+ __author__ = 'bcfg-dev@mcs.anl.gov'
+
+ def __init__(self, core, datastore):
+ Bcfg2.Server.Plugin.Plugin.__init__(self, core, datastore)
+ self.core = core
+ self.datastore = datastore
+
+ # path to fossil file for bcfg2 repo
+ fossil_file = "%s/_FOSSIL_" % datastore
+
+ # Read revision from bcfg2 repo
+ if os.path.isfile(fossil_file):
+ revision = self.get_revision()
+ elif not os.path.isdir(datastore):
+ logger.error("%s is not a directory" % datastore)
+ raise Bcfg2.Server.Plugin.PluginInitError
+ else:
+ logger.error("%s is not a file" % fossil_file)
+ raise Bcfg2.Server.Plugin.PluginInitError
+
+ logger.debug("Initialized Fossil.py plugin with %(ffile)s at revision %(frev)s" \
+ % {'ffile': fossil_file, 'frev': revision})
+
+ def get_revision(self):
+ """Read fossil revision information for the Bcfg2 repository."""
+ try:
+ data = Popen("env LC_ALL=C fossil info",
+ shell=True,
+ cwd=self.datastore,
+ stdout=PIPE).stdout.readlines()
+ revline = [line.split(': ')[1].strip() for line in data if \
+ line.split(': ')[0].strip() == 'checkout'][-1]
+ revision = revline.split(' ')[0]
+ except IndexError:
+ logger.error("Failed to read fossil info; disabling fossil support")
+ logger.error('''Ran command "fossil info" from directory "%s"''' % (self.datastore))
+ logger.error("Got output: %s" % data)
+ raise Bcfg2.Server.Plugin.PluginInitError
+ return revision
diff --git a/build/lib/Bcfg2/Server/Plugins/Git.py b/build/lib/Bcfg2/Server/Plugins/Git.py
new file mode 100644
index 000000000..aaeac12ae
--- /dev/null
+++ b/build/lib/Bcfg2/Server/Plugins/Git.py
@@ -0,0 +1,45 @@
+"""The Git plugin provides a revision interface for Bcfg2 repos using git."""
+
+import os
+from dulwich.repo import Repo
+import Bcfg2.Server.Plugin
+
+# for debugging output only
+import logging
+logger = logging.getLogger('Bcfg2.Plugins.Git')
+
+
+class Git(Bcfg2.Server.Plugin.Plugin,
+ Bcfg2.Server.Plugin.Version):
+ """Git is a version plugin for dealing with Bcfg2 repos."""
+ name = 'Git'
+ __version__ = '$Id$'
+ __author__ = 'bcfg-dev@mcs.anl.gov'
+
+ def __init__(self, core, datastore):
+ Bcfg2.Server.Plugin.Plugin.__init__(self, core, datastore)
+ Bcfg2.Server.Plugin.Version.__init__(self)
+ self.core = core
+ self.datastore = datastore
+
+ # path to git directory for bcfg2 repo
+ git_dir = "%s/.git" % datastore
+
+ # Read revision from bcfg2 repo
+ if os.path.isdir(git_dir):
+ self.get_revision()
+ else:
+ logger.error("%s is not a directory" % git_dir)
+ raise Bcfg2.Server.Plugin.PluginInitError
+
+ logger.debug("Initialized git plugin with git directory %s" % git_dir)
+
+ def get_revision(self):
+ """Read git revision information for the Bcfg2 repository."""
+ try:
+ repo = Repo(self.datastore)
+ revision = repo.head()
+ except:
+ logger.error("Failed to read git repository; disabling git support")
+ raise Bcfg2.Server.Plugin.PluginInitError
+ return revision
diff --git a/build/lib/Bcfg2/Server/Plugins/GroupPatterns.py b/build/lib/Bcfg2/Server/Plugins/GroupPatterns.py
new file mode 100644
index 000000000..3801a6a08
--- /dev/null
+++ b/build/lib/Bcfg2/Server/Plugins/GroupPatterns.py
@@ -0,0 +1,117 @@
+import lxml.etree
+import re
+
+import Bcfg2.Server.Plugin
+
+class PackedDigitRange(object):
+ def __init__(self, digit_range):
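+        # digit_range is a comma-separated spec; e.g. "0-3,7" yields
+        # ranges [(0, 3)] and sparse [7]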
+ self.sparse = list()
+ self.ranges = list()
+ for item in digit_range.split(','):
+ if '-' in item:
+ self.ranges.append(tuple([int(x) for x in item.split('-')]))
+ else:
+ self.sparse.append(int(item))
+
+ def includes(self, other):
+ iother = int(other)
+ if iother in self.sparse:
+ return True
+ for (start, end) in self.ranges:
+ if iother in xrange(start, end+1):
+ return True
+ return False
+
+class PatternMap(object):
+ range_finder = '\\[\\[[\d\-,]+\\]\\]'
+ def __init__(self, pattern, rangestr, groups):
+ self.pattern = pattern
+ self.rangestr = rangestr
+ self.groups = groups
+ if pattern != None:
+ self.re = re.compile(pattern)
+ self.process = self.process_re
+ elif rangestr != None:
+ self.process = self.process_range
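+            # turn each [[...]] token into a digit-capturing group for name
+            # matching, then pull out the range specs themselves so every
+            # captured digit run can be tested against its PackedDigitRange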
+ self.re = re.compile('^' + re.subn(self.range_finder, '(\d+)', rangestr)[0])
+ dmatcher = re.compile(re.subn(self.range_finder, '\\[\\[([\d\-,]+)\\]\\]', rangestr)[0])
+ self.dranges = [PackedDigitRange(x) for x in dmatcher.match(rangestr).groups()]
+ else:
+ raise Exception
+
+ def process_range(self, name):
+ match = self.re.match(name)
+ if not match:
+ return None
+ digits = match.groups()
+ for i in range(len(digits)):
+ if not self.dranges[i].includes(digits[i]):
+ return None
+ return self.groups
+
+ def process_re(self, name):
+ match = self.re.match(name)
+ if not match:
+ return None
+ ret = list()
+ sub = match.groups()
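+        # expand $1..$N in each target group name with the captured substrings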
+ for group in self.groups:
+ newg = group
+ for idx in range(len(sub)):
+ newg = newg.replace('$%s' % (idx+1), sub[idx])
+ ret.append(newg)
+ return ret
+
+class PatternFile(Bcfg2.Server.Plugin.SingleXMLFileBacked):
+ def __init__(self, filename, fam):
+ Bcfg2.Server.Plugin.SingleXMLFileBacked.__init__(self, filename, fam)
+ self.patterns = []
+
+ def Index(self):
+ self.patterns = []
+ try:
+ parsed = lxml.etree.XML(self.data)
+ except:
+ Bcfg2.Server.Plugin.logger.error("Failed to read file %s" % self.name)
+ return
+ for entry in parsed.findall('GroupPattern'):
+ try:
+ pat = None
+ rng = None
+ if entry.find('NamePattern') is not None:
+ pat = entry.find('NamePattern').text
+ if entry.find('NameRange') is not None:
+ rng = entry.find('NameRange').text
+ groups = [g.text for g in entry.findall('Group')]
+ self.patterns.append(PatternMap(pat, rng, groups))
+ except:
+ Bcfg2.Server.Plugin.logger.error(\
+ "GroupPatterns: Failed to initialize pattern %s" % \
+ (entry.get('pattern')))
+
+ def process_patterns(self, hostname):
+ ret = []
+ for pattern in self.patterns:
+ try:
+ gn = pattern.process(hostname)
+ if gn is not None:
+ ret.extend(gn)
+ except:
+ Bcfg2.Server.Plugin.logger.error(\
+ "GroupPatterns: Failed to process pattern %s for %s" % \
+ (pattern.pattern, hostname), exc_info=1)
+ return ret
+
+class GroupPatterns(Bcfg2.Server.Plugin.Plugin,
+ Bcfg2.Server.Plugin.Connector):
+ name = "GroupPatterns"
+ experimental = True
+
+ def __init__(self, core, datastore):
+ Bcfg2.Server.Plugin.Plugin.__init__(self, core, datastore)
+ Bcfg2.Server.Plugin.Connector.__init__(self)
+ self.config = PatternFile(self.data + '/config.xml',
+ core.fam)
+
+ def get_additional_groups(self, metadata):
+ return self.config.process_patterns(metadata.hostname)
diff --git a/build/lib/Bcfg2/Server/Plugins/Guppy.py b/build/lib/Bcfg2/Server/Plugins/Guppy.py
new file mode 100644
index 000000000..b217378d6
--- /dev/null
+++ b/build/lib/Bcfg2/Server/Plugins/Guppy.py
@@ -0,0 +1,63 @@
+"""
+This plugin is used to trace memory leaks within the bcfg2-server
+process using Guppy. By default the remote debugger is started
+when this plugin is enabled. The debugger can be shutoff in a running
+process using "bcfg2-admin xcmd Guppy.Disable" and reenabled using
+"bcfg2-admin xcmd Guppy.Enable".
+
+To attach the console run:
+
+python -c "from guppy import hpy;hpy().monitor()"
+
+For example:
+
+# python -c "from guppy import hpy;hpy().monitor()"
+<Monitor>
+*** Connection 1 opened ***
+<Monitor> lc
+CID PID ARGV
+ 1 25063 ['/usr/sbin/bcfg2-server', '-D', '/var/run/bcfg2-server.pid']
+<Monitor> sc 1
+Remote connection 1. To return to Monitor, type <Ctrl-C> or .<RETURN>
+<Annex> int
+Remote interactive console. To return to Annex, type '-'.
+>>> hp.heap()
+...
+
+
+"""
+import Bcfg2.Server.Plugin
+
+class Guppy(Bcfg2.Server.Plugin.Plugin):
+ """Guppy is a debugging plugin to help trace memory leaks"""
+ name = 'Guppy'
+ __version__ = '$Id$'
+ __author__ = 'bcfg-dev@mcs.anl.gov'
+
+ experimental = True
+ __rmi__ = Bcfg2.Server.Plugin.Plugin.__rmi__ + ['Enable','Disable']
+
+ def __init__(self, core, datastore):
+ Bcfg2.Server.Plugin.Plugin.__init__(self, core, datastore)
+
+ self.Enable()
+
+ def Enable(self):
+ """Enable remote debugging"""
+ try:
+ from guppy.heapy import Remote
+ Remote.on()
+ except:
+ self.logger.error("Failed to create Heapy context")
+ raise Bcfg2.Server.Plugin.PluginInitError
+
+ def Disable(self):
+ """Disable remote debugging"""
+ try:
+ from guppy.heapy import Remote
+ Remote.off()
+ except:
+ self.logger.error("Failed to disable Heapy")
+ raise Bcfg2.Server.Plugin.PluginInitError
+
diff --git a/build/lib/Bcfg2/Server/Plugins/Hg.py b/build/lib/Bcfg2/Server/Plugins/Hg.py
new file mode 100644
index 000000000..3f2864a1c
--- /dev/null
+++ b/build/lib/Bcfg2/Server/Plugins/Hg.py
@@ -0,0 +1,47 @@
+import os
+from mercurial import ui, hg
+import Bcfg2.Server.Plugin
+
+# for debugging output only
+import logging
+logger = logging.getLogger('Bcfg2.Plugins.Mercurial')
+
+class Hg(Bcfg2.Server.Plugin.Plugin,
+ Bcfg2.Server.Plugin.Version):
+ """Mercurial is a version plugin for dealing with Bcfg2 repository."""
+ name = 'Mercurial'
+ __version__ = '$Id$'
+ __author__ = 'bcfg-dev@mcs.anl.gov'
+ experimental = True
+
+ def __init__(self, core, datastore):
+ Bcfg2.Server.Plugin.Plugin.__init__(self, core, datastore)
+ Bcfg2.Server.Plugin.Version.__init__(self)
+ self.core = core
+ self.datastore = datastore
+
+ # path to hg directory for Bcfg2 repo
+ hg_dir = "%s/.hg" % datastore
+
+ # Read changeset from bcfg2 repo
+ if os.path.isdir(hg_dir):
+ self.get_revision()
+ else:
+ logger.error("%s is not present." % hg_dir)
+ raise Bcfg2.Server.Plugin.PluginInitError
+
+ logger.debug("Initialized hg plugin with hg directory = %s" % hg_dir)
+
+ def get_revision(self):
+ """Read hg revision information for the Bcfg2 repository."""
+ try:
+ repo_path = "%s/" % self.datastore
+ repo = hg.repository(ui.ui(), repo_path)
+ tip = repo.changelog.tip()
+ revision = repo.changelog.rev(tip)
+ except:
+ logger.error("Failed to read hg repository; disabling mercurial support")
+ raise Bcfg2.Server.Plugin.PluginInitError
+ return revision
+
diff --git a/build/lib/Bcfg2/Server/Plugins/Hostbase.py b/build/lib/Bcfg2/Server/Plugins/Hostbase.py
new file mode 100644
index 000000000..65992596d
--- /dev/null
+++ b/build/lib/Bcfg2/Server/Plugins/Hostbase.py
@@ -0,0 +1,585 @@
+'''This file provides the Hostbase plugin. It manages DNS/DHCP/NIS host information.'''
+__revision__ = '$Revision$'
+
+import os
+os.environ['DJANGO_SETTINGS_MODULE'] = 'Bcfg2.Server.Hostbase.settings'
+from lxml.etree import Element, SubElement
+import Bcfg2.Server.Plugin
+from Bcfg2.Server.Plugin import PluginExecutionError, PluginInitError
+from time import strftime
+from sets import Set
+from django.template import Context, loader
+from django.db import connection
+import re
+import cStringIO
+
+class Hostbase(Bcfg2.Server.Plugin.Plugin,
+ Bcfg2.Server.Plugin.Structure,
+ Bcfg2.Server.Plugin.Generator):
+ """The Hostbase plugin handles host/network info."""
+ name = 'Hostbase'
+ __version__ = '$Id$'
+ __author__ = 'bcfg-dev@mcs.anl.gov'
+ filepath = '/my/adm/hostbase/files/bind'
+
+ def __init__(self, core, datastore):
+
+ self.ready = False
+ Bcfg2.Server.Plugin.Plugin.__init__(self, core, datastore)
+ Bcfg2.Server.Plugin.Structure.__init__(self)
+ Bcfg2.Server.Plugin.Generator.__init__(self)
+ files = ['zone.tmpl', 'reversesoa.tmpl', 'named.tmpl', 'reverseappend.tmpl',
+ 'dhcpd.tmpl', 'hosts.tmpl', 'hostsappend.tmpl']
+ self.filedata = {}
+ self.dnsservers = []
+ self.dhcpservers = []
+ self.templates = {'zone':loader.get_template('zone.tmpl'),
+ 'reversesoa':loader.get_template('reversesoa.tmpl'),
+ 'named':loader.get_template('named.tmpl'),
+ 'namedviews':loader.get_template('namedviews.tmpl'),
+ 'reverseapp':loader.get_template('reverseappend.tmpl'),
+ 'dhcp':loader.get_template('dhcpd.tmpl'),
+ 'hosts':loader.get_template('hosts.tmpl'),
+ 'hostsapp':loader.get_template('hostsappend.tmpl'),
+ }
+ self.Entries['ConfigFile'] = {}
+ self.__rmi__ = ['rebuildState']
+ try:
+ self.rebuildState(None)
+ except:
+ raise PluginInitError
+
+ def FetchFile(self, entry, metadata):
+ """Return prebuilt file data."""
+ fname = entry.get('name').split('/')[-1]
+ if not fname in self.filedata:
+ raise PluginExecutionError
+ perms = {'owner':'root', 'group':'root', 'perms':'644'}
+ [entry.attrib.__setitem__(key, value) for (key, value) in perms.iteritems()]
+ entry.text = self.filedata[fname]
+
+ def BuildStructures(self, metadata):
+ """Build hostbase bundle."""
+ if metadata.hostname not in self.dnsservers or metadata.hostname not in self.dhcpservers:
+ return []
+ output = Element("Bundle", name='hostbase')
+ if metadata.hostname in self.dnsservers:
+ for configfile in self.Entries['ConfigFile']:
+ if re.search('/etc/bind/', configfile):
+ SubElement(output, "ConfigFile", name=configfile)
+ if metadata.hostname in self.dhcpservers:
+ SubElement(output, "ConfigFile", name="/etc/dhcp3/dhcpd.conf")
+ return [output]
+
+ def rebuildState(self, _):
+ """Pre-cache all state information for hostbase config files
+ callable as an XMLRPC function.
+
+ """
+ self.buildZones()
+ self.buildDHCP()
+ self.buildHosts()
+ self.buildHostsLPD()
+ self.buildPrinters()
+ self.buildNetgroups()
+ return True
+
+ def buildZones(self):
+ """Pre-build and stash zone files."""
+ cursor = connection.cursor()
+
+ cursor.execute("SELECT id, serial FROM hostbase_zone")
+ zones = cursor.fetchall()
+
+ for zone in zones:
+ # update the serial number for all zone files
+ todaydate = (strftime('%Y%m%d'))
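+            # serials follow the YYYYMMDDnn convention: bump the count
+            # within the same day, otherwise start today's count at 00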
+ try:
+ if todaydate == str(zone[1])[:8]:
+ serial = zone[1] + 1
+ else:
+ serial = int(todaydate) * 100
+            except KeyError:
+ serial = int(todaydate) * 100
+ cursor.execute("""UPDATE hostbase_zone SET serial = \'%s\' WHERE id = \'%s\'""" % (str(serial), zone[0]))
+
+ cursor.execute("SELECT * FROM hostbase_zone WHERE zone NOT LIKE \'%%.rev\'")
+ zones = cursor.fetchall()
+
+ iplist = []
+ hosts = {}
+
+ for zone in zones:
+ zonefile = cStringIO.StringIO()
+ externalzonefile = cStringIO.StringIO()
+ cursor.execute("""SELECT n.name FROM hostbase_zone_nameservers z
+ INNER JOIN hostbase_nameserver n ON z.nameserver_id = n.id
+ WHERE z.zone_id = \'%s\'""" % zone[0])
+ nameservers = cursor.fetchall()
+ cursor.execute("""SELECT i.ip_addr FROM hostbase_zone_addresses z
+ INNER JOIN hostbase_zoneaddress i ON z.zoneaddress_id = i.id
+ WHERE z.zone_id = \'%s\'""" % zone[0])
+ addresses = cursor.fetchall()
+ cursor.execute("""SELECT m.priority, m.mx FROM hostbase_zone_mxs z
+ INNER JOIN hostbase_mx m ON z.mx_id = m.id
+ WHERE z.zone_id = \'%s\'""" % zone[0])
+ mxs = cursor.fetchall()
+ context = Context({
+ 'zone': zone,
+ 'nameservers': nameservers,
+ 'addresses': addresses,
+ 'mxs': mxs
+ })
+ zonefile.write(self.templates['zone'].render(context))
+ externalzonefile.write(self.templates['zone'].render(context))
+
+ querystring = """SELECT h.hostname, p.ip_addr,
+ n.name, c.cname, m.priority, m.mx, n.dns_view
+ FROM (((((hostbase_host h INNER JOIN hostbase_interface i ON h.id = i.host_id)
+ INNER JOIN hostbase_ip p ON i.id = p.interface_id)
+ INNER JOIN hostbase_name n ON p.id = n.ip_id)
+ INNER JOIN hostbase_name_mxs x ON n.id = x.name_id)
+ INNER JOIN hostbase_mx m ON m.id = x.mx_id)
+ LEFT JOIN hostbase_cname c ON n.id = c.name_id
+ WHERE n.name LIKE '%%%%%s'
+ AND h.status = 'active'
+ ORDER BY h.hostname, n.name, p.ip_addr
+ """ % zone[1]
+ cursor.execute(querystring)
+ zonehosts = cursor.fetchall()
+ prevhost = (None, None, None, None)
+ cnames = cStringIO.StringIO()
+ cnamesexternal = cStringIO.StringIO()
+ for host in zonehosts:
+ if not host[2].split(".", 1)[1] == zone[1]:
+ zonefile.write(cnames.getvalue())
+ externalzonefile.write(cnamesexternal.getvalue())
+ cnames = cStringIO.StringIO()
+ cnamesexternal = cStringIO.StringIO()
+ continue
+ if not prevhost[1] == host[1] or not prevhost[2] == host[2]:
+ zonefile.write(cnames.getvalue())
+ externalzonefile.write(cnamesexternal.getvalue())
+ cnames = cStringIO.StringIO()
+ cnamesexternal = cStringIO.StringIO()
+ zonefile.write("%-32s%-10s%-32s\n" %
+ (host[2].split(".", 1)[0], 'A', host[1]))
+ zonefile.write("%-32s%-10s%-3s%s.\n" %
+ ('', 'MX', host[4], host[5]))
+ if host[6] == 'global':
+ externalzonefile.write("%-32s%-10s%-32s\n" %
+ (host[2].split(".", 1)[0], 'A', host[1]))
+ externalzonefile.write("%-32s%-10s%-3s%s.\n" %
+ ('', 'MX', host[4], host[5]))
+ elif not prevhost[5] == host[5]:
+ zonefile.write("%-32s%-10s%-3s%s.\n" %
+ ('', 'MX', host[4], host[5]))
+ if host[6] == 'global':
+ externalzonefile.write("%-32s%-10s%-3s%s.\n" %
+ ('', 'MX', host[4], host[5]))
+
+ if host[3]:
+ try:
+ if host[3].split(".", 1)[1] == zone[1]:
+ cnames.write("%-32s%-10s%-32s\n" %
+ (host[3].split(".", 1)[0],
+ 'CNAME',host[2].split(".", 1)[0]))
+ if host[6] == 'global':
+ cnamesexternal.write("%-32s%-10s%-32s\n" %
+ (host[3].split(".", 1)[0],
+ 'CNAME',host[2].split(".", 1)[0]))
+ else:
+ cnames.write("%-32s%-10s%-32s\n" %
+ (host[3]+".",
+ 'CNAME',
+ host[2].split(".", 1)[0]))
+ if host[6] == 'global':
+ cnamesexternal.write("%-32s%-10s%-32s\n" %
+ (host[3]+".",
+ 'CNAME',
+ host[2].split(".", 1)[0]))
+
+ except:
+ pass
+ prevhost = host
+ zonefile.write(cnames.getvalue())
+ externalzonefile.write(cnamesexternal.getvalue())
+ zonefile.write("\n\n%s" % zone[9])
+ externalzonefile.write("\n\n%s" % zone[9])
+ self.filedata[zone[1]] = zonefile.getvalue()
+ self.filedata[zone[1] + ".external"] = externalzonefile.getvalue()
+ zonefile.close()
+ externalzonefile.close()
+ self.Entries['ConfigFile']["%s/%s" % (self.filepath, zone[1])] = self.FetchFile
+ self.Entries['ConfigFile']["%s/%s.external" % (self.filepath, zone[1])] = self.FetchFile
+
+ cursor.execute("SELECT * FROM hostbase_zone WHERE zone LIKE \'%%.rev\' AND zone <> \'.rev\'")
+ reversezones = cursor.fetchall()
+
+ reversenames = []
+ for reversezone in reversezones:
+ cursor.execute("""SELECT n.name FROM hostbase_zone_nameservers z
+ INNER JOIN hostbase_nameserver n ON z.nameserver_id = n.id
+ WHERE z.zone_id = \'%s\'""" % reversezone[0])
+ reverse_nameservers = cursor.fetchall()
+
+ context = Context({
+                'inaddr': reversezone[1][:-len('.rev')],
+ 'zone': reversezone,
+ 'nameservers': reverse_nameservers,
+ })
+
+ self.filedata[reversezone[1]] = self.templates['reversesoa'].render(context)
+ self.filedata[reversezone[1] + '.external'] = self.templates['reversesoa'].render(context)
+ self.filedata[reversezone[1]] += reversezone[9]
+ self.filedata[reversezone[1] + '.external'] += reversezone[9]
+
+ subnet = reversezone[1].split(".")
+ subnet.reverse()
+            reversenames.append((reversezone[1][:-len('.rev')], ".".join(subnet[1:])))
+
+ for filename in reversenames:
+ cursor.execute("""
+ SELECT DISTINCT h.hostname, p.ip_addr, n.dns_view FROM ((hostbase_host h
+ INNER JOIN hostbase_interface i ON h.id = i.host_id)
+ INNER JOIN hostbase_ip p ON i.id = p.interface_id)
+ INNER JOIN hostbase_name n ON n.ip_id = p.id
+ WHERE p.ip_addr LIKE '%s%%%%' AND h.status = 'active' ORDER BY p.ip_addr
+ """ % filename[1])
+ reversehosts = cursor.fetchall()
+ zonefile = cStringIO.StringIO()
+ externalzonefile = cStringIO.StringIO()
+ if len(filename[0].split(".")) == 2:
+ originlist = []
+ [originlist.append((".".join([ip[1].split(".")[2], filename[0]]),
+ ".".join([filename[1], ip[1].split(".")[2]])))
+ for ip in reversehosts
+ if (".".join([ip[1].split(".")[2], filename[0]]),
+ ".".join([filename[1], ip[1].split(".")[2]])) not in originlist]
+ for origin in originlist:
+ hosts = [(host[1].split("."), host[0])
+ for host in reversehosts
+ if host[1].rstrip('0123456789').rstrip('.') == origin[1]]
+ hosts_external = [(host[1].split("."), host[0])
+ for host in reversehosts
+ if (host[1].rstrip('0123456789').rstrip('.') == origin[1]
+ and host[2] == 'global')]
+ context = Context({
+ 'hosts': hosts,
+ 'inaddr': origin[0],
+ 'fileorigin': filename[0],
+ })
+ zonefile.write(self.templates['reverseapp'].render(context))
+ context = Context({
+ 'hosts': hosts_external,
+ 'inaddr': origin[0],
+ 'fileorigin': filename[0],
+ })
+ externalzonefile.write(self.templates['reverseapp'].render(context))
+ else:
+ originlist = [filename[0]]
+ hosts = [(host[1].split("."), host[0])
+ for host in reversehosts
+ if (host[1].split("."), host[0]) not in hosts]
+ hosts_external = [(host[1].split("."), host[0])
+ for host in reversehosts
+ if ((host[1].split("."), host[0]) not in hosts_external
+ and host[2] == 'global')]
+ context = Context({
+ 'hosts': hosts,
+ 'inaddr': filename[0],
+ 'fileorigin': None,
+ })
+ zonefile.write(self.templates['reverseapp'].render(context))
+ context = Context({
+ 'hosts': hosts_external,
+ 'inaddr': filename[0],
+ 'fileorigin': None,
+ })
+ externalzonefile.write(self.templates['reverseapp'].render(context))
+ self.filedata['%s.rev' % filename[0]] += zonefile.getvalue()
+ self.filedata['%s.rev.external' % filename[0]] += externalzonefile.getvalue()
+ zonefile.close()
+ externalzonefile.close()
+ self.Entries['ConfigFile']['%s/%s.rev' % (self.filepath, filename[0])] = self.FetchFile
+ self.Entries['ConfigFile']['%s/%s.rev.external' % (self.filepath, filename[0])] = self.FetchFile
+
+ ## here's where the named.conf file gets written
+ context = Context({
+ 'zones': zones,
+ 'reverses': reversenames,
+ })
+ self.filedata['named.conf'] = self.templates['named'].render(context)
+ self.Entries['ConfigFile']['/my/adm/hostbase/files/named.conf'] = self.FetchFile
+ self.filedata['named.conf.views'] = self.templates['namedviews'].render(context)
+ self.Entries['ConfigFile']['/my/adm/hostbase/files/named.conf.views'] = self.FetchFile
+
+
+ def buildDHCP(self):
+ """Pre-build dhcpd.conf and stash in the filedata table."""
+
+ # fetches all the hosts with DHCP == True
+ cursor = connection.cursor()
+ cursor.execute("""
+ SELECT hostname, mac_addr, ip_addr
+ FROM (hostbase_host h INNER JOIN hostbase_interface i ON h.id = i.host_id)
+ INNER JOIN hostbase_ip ip ON i.id = ip.interface_id
+ WHERE i.dhcp=1 AND h.status='active' AND i.mac_addr <> ''
+ AND i.mac_addr <> 'float' AND i.mac_addr <> 'unknown'
+ ORDER BY h.hostname, i.mac_addr
+ """)
+
+ dhcphosts = cursor.fetchall()
+ count = 0
+ hosts = []
+ hostdata = [dhcphosts[0][0], dhcphosts[0][1], dhcphosts[0][2]]
+ if len(dhcphosts) > 1:
+ for x in range(1, len(dhcphosts)):
+ # if an interface has 2 or more ip addresses
+ # adds the ip to the current interface
+ if hostdata[0].split(".")[0] == dhcphosts[x][0].split(".")[0] and hostdata[1] == dhcphosts[x][1]:
+ hostdata[2] = ", ".join([hostdata[2], dhcphosts[x][2]])
+ # if a host has 2 or more interfaces
+ # writes the current one and grabs the next
+ elif hostdata[0].split(".")[0] == dhcphosts[x][0].split(".")[0]:
+ hosts.append(hostdata)
+ count += 1
+ hostdata = ["-".join([dhcphosts[x][0], str(count)]), dhcphosts[x][1], dhcphosts[x][2]]
+ # new host found, writes current data to the template
+ else:
+ hosts.append(hostdata)
+ count = 0
+ hostdata = [dhcphosts[x][0], dhcphosts[x][1], dhcphosts[x][2]]
+ #makes sure the last of the data gets written out
+ if hostdata not in hosts:
+ hosts.append(hostdata)
+
+ context = Context({
+ 'hosts': hosts,
+ 'numips': len(hosts),
+ })
+
+ self.filedata['dhcpd.conf'] = self.templates['dhcp'].render(context)
+ self.Entries['ConfigFile']['/my/adm/hostbase/files/dhcpd.conf'] = self.FetchFile
+
+
+ def buildHosts(self):
+ """Pre-build and stash /etc/hosts file."""
+
+ append_data = []
+
+ cursor = connection.cursor()
+ cursor.execute("""
+ SELECT hostname FROM hostbase_host ORDER BY hostname
+ """)
+ hostbase = cursor.fetchall()
+ domains = [host[0].split(".", 1)[1] for host in hostbase]
+ domains_set = Set(domains)
+ domain_data = [(domain, domains.count(domain)) for domain in domains_set]
+ domain_data.sort()
+
+ cursor.execute("""
+ SELECT ip_addr FROM hostbase_ip ORDER BY ip_addr
+ """)
+ ips = cursor.fetchall()
+ three_octets = [ip[0].rstrip('0123456789').rstrip('.') \
+ for ip in ips]
+ three_octets_set = Set(three_octets)
+ three_octets_data = [(octet, three_octets.count(octet)) \
+ for octet in three_octets_set]
+ three_octets_data.sort()
+
+ for three_octet in three_octets_data:
+ querystring = """SELECT h.hostname, h.primary_user,
+ p.ip_addr, n.name, c.cname
+ FROM (((hostbase_host h INNER JOIN hostbase_interface i ON h.id = i.host_id)
+ INNER JOIN hostbase_ip p ON i.id = p.interface_id)
+ INNER JOIN hostbase_name n ON p.id = n.ip_id)
+ LEFT JOIN hostbase_cname c ON n.id = c.name_id
+ WHERE p.ip_addr LIKE \'%s.%%%%\' AND h.status = 'active'""" % three_octet[0]
+ cursor.execute(querystring)
+ tosort = list(cursor.fetchall())
+ tosort.sort(lambda x, y: cmp(int(x[2].split(".")[-1]), int(y[2].split(".")[-1])))
+ append_data.append((three_octet, tuple(tosort)))
+
+ two_octets = [ip.rstrip('0123456789').rstrip('.') for ip in three_octets]
+ two_octets_set = Set(two_octets)
+ two_octets_data = [(octet, two_octets.count(octet))
+ for octet in two_octets_set]
+ two_octets_data.sort()
+
+ context = Context({
+ 'domain_data': domain_data,
+ 'three_octets_data': three_octets_data,
+ 'two_octets_data': two_octets_data,
+ 'three_octets': three_octets,
+ 'num_ips': len(three_octets),
+ })
+
+ self.filedata['hosts'] = self.templates['hosts'].render(context)
+
+ for subnet in append_data:
+ ips = []
+ simple = True
+ namelist = [name.split('.', 1)[0] for name in [subnet[1][0][3]]]
+ cnamelist = []
+ if subnet[1][0][4]:
+ cnamelist.append(subnet[1][0][4].split('.', 1)[0])
+ simple = False
+ appenddata = subnet[1][0]
+ for ip in subnet[1][1:]:
+ if appenddata[2] == ip[2]:
+ namelist.append(ip[3].split('.', 1)[0])
+ if ip[4]:
+ cnamelist.append(ip[4].split('.', 1)[0])
+ simple = False
+ appenddata = ip
+ else:
+ if appenddata[0] == ip[0]:
+ simple = False
+ ips.append((appenddata[2], appenddata[0], Set(namelist),
+ cnamelist, simple, appenddata[1]))
+ appenddata = ip
+ simple = True
+ namelist = [ip[3].split('.', 1)[0]]
+ cnamelist = []
+ if ip[4]:
+ cnamelist.append(ip[4].split('.', 1)[0])
+ simple = False
+ ips.append((appenddata[2], appenddata[0], Set(namelist),
+ cnamelist, simple, appenddata[1]))
+ context = Context({
+ 'subnet': subnet[0],
+ 'ips': ips,
+ })
+ self.filedata['hosts'] += self.templates['hostsapp'].render(context)
+ self.Entries['ConfigFile']['/mcs/etc/hosts'] = self.FetchFile
+
+ def buildPrinters(self):
+ """The /mcs/etc/printers.data file"""
+ header = """# This file is automatically generated. DO NOT EDIT IT!
+#
+Name Room User Type Notes
+============== ========== ============================== ======================== ====================
+"""
+
+ cursor = connection.cursor()
+ # fetches all the printers from the database
+ cursor.execute("""
+ SELECT printq, location, primary_user, comments
+ FROM hostbase_host
+ WHERE whatami='printer' AND printq <> '' AND status = 'active'
+ ORDER BY printq
+ """)
+ printers = cursor.fetchall()
+
+ printersfile = header
+ for printer in printers:
+ # splits up the printq line and gets the
+ # correct description out of the comments section
+ temp = printer[3].split('\n')
+ for printq in re.split(',[ ]*', printer[0]):
+ if len(temp) > 1:
+ printersfile += ("%-16s%-12s%-32s%-26s%s\n" %
+ (printq, printer[1], printer[2], temp[1], temp[0]))
+ else:
+ printersfile += ("%-16s%-12s%-32s%-26s%s\n" %
+ (printq, printer[1], printer[2], '', printer[3]))
+ self.filedata['printers.data'] = printersfile
+ self.Entries['ConfigFile']['/mcs/etc/printers.data'] = self.FetchFile
+
+ def buildHostsLPD(self):
+ """Creates the /mcs/etc/hosts.lpd file"""
+
+ # this header needs to be changed to be more generic
+ header = """+@machines
++@all-machines
+achilles.ctd.anl.gov
+raven.ops.anl.gov
+seagull.hr.anl.gov
+parrot.ops.anl.gov
+condor.ops.anl.gov
+delphi.esh.anl.gov
+anlcv1.ctd.anl.gov
+anlvms.ctd.anl.gov
+olivia.ctd.anl.gov\n\n"""
+
+ cursor = connection.cursor()
+ cursor.execute("""
+ SELECT hostname FROM hostbase_host WHERE netgroup=\"red\" AND status = 'active'
+ ORDER BY hostname""")
+ redmachines = list(cursor.fetchall())
+ cursor.execute("""
+ SELECT n.name FROM ((hostbase_host h INNER JOIN hostbase_interface i ON h.id = i.host_id)
+ INNER JOIN hostbase_ip p ON i.id = p.interface_id) INNER JOIN hostbase_name n ON p.id = n.ip_id
+ WHERE netgroup=\"red\" AND n.only=1 AND h.status = 'active'
+ """)
+ redmachines.extend(list(cursor.fetchall()))
+ cursor.execute("""
+ SELECT hostname FROM hostbase_host WHERE netgroup=\"win\" AND status = 'active'
+ ORDER BY hostname""")
+ winmachines = list(cursor.fetchall())
+ cursor.execute("""
+ SELECT n.name FROM ((hostbase_host h INNER JOIN hostbase_interface i ON h.id = i.host_id)
+ INNER JOIN hostbase_ip p ON i.id = p.interface_id) INNER JOIN hostbase_name n ON p.id = n.ip_id
+ WHERE netgroup=\"win\" AND n.only=1 AND h.status = 'active'
+ """)
+        winmachines.extend(list(cursor.fetchall()))
+ hostslpdfile = header
+ for machine in redmachines:
+ hostslpdfile += machine[0] + "\n"
+ hostslpdfile += "\n"
+ for machine in winmachines:
+ hostslpdfile += machine[0] + "\n"
+ self.filedata['hosts.lpd'] = hostslpdfile
+ self.Entries['ConfigFile']['/mcs/etc/hosts.lpd'] = self.FetchFile
+
+
+ def buildNetgroups(self):
+ """Makes the *-machine files"""
+ header = """###################################################################
+# This file lists hosts in the '%s' machine netgroup, it is
+# automatically generated. DO NOT EDIT THIS FILE!
+#
+# Number of hosts in '%s' machine netgroup: %i
+#\n\n"""
+
+ cursor = connection.cursor()
+        # fetches all the hosts with valid netgroup entries
+ cursor.execute("""
+ SELECT h.hostname, n.name, h.netgroup, n.only FROM ((hostbase_host h
+ INNER JOIN hostbase_interface i ON h.id = i.host_id)
+ INNER JOIN hostbase_ip p ON i.id = p.interface_id)
+ INNER JOIN hostbase_name n ON p.id = n.ip_id
+ WHERE h.netgroup <> '' AND h.netgroup <> 'none' AND h.status = 'active'
+ ORDER BY h.netgroup, h.hostname
+ """)
+ nameslist = cursor.fetchall()
+ # gets the first host and initializes the hash
+ hostdata = nameslist[0]
+ netgroups = {hostdata[2]:[hostdata[0]]}
+ for row in nameslist:
+ # if new netgroup, create it
+ if row[2] not in netgroups:
+ netgroups.update({row[2]:[]})
+ # if it belongs in the netgroup and has multiple interfaces, put them in
+ if hostdata[0] == row[0] and row[3]:
+ netgroups[row[2]].append(row[1])
+ hostdata = row
+            # if it's a new host, write the old one to the hash
+ elif hostdata[0] != row[0]:
+ netgroups[row[2]].append(row[0])
+ hostdata = row
+
+ for netgroup in netgroups:
+ fileoutput = cStringIO.StringIO()
+ fileoutput.write(header % (netgroup, netgroup, len(netgroups[netgroup])))
+ for each in netgroups[netgroup]:
+ fileoutput.write(each + "\n")
+ self.filedata['%s-machines' % netgroup] = fileoutput.getvalue()
+ fileoutput.close()
+ self.Entries['ConfigFile']['/my/adm/hostbase/makenets/machines/%s-machines' % netgroup] = self.FetchFile
+
+ cursor.execute("""
+ UPDATE hostbase_host SET dirty=0
+ """)
diff --git a/build/lib/Bcfg2/Server/Plugins/Metadata.py b/build/lib/Bcfg2/Server/Plugins/Metadata.py
new file mode 100644
index 000000000..81fd3e173
--- /dev/null
+++ b/build/lib/Bcfg2/Server/Plugins/Metadata.py
@@ -0,0 +1,809 @@
+"""This file stores persistent metadata for the Bcfg2 Configuration Repository."""
+
+__revision__ = '$Revision$'
+
+import copy
+import fcntl
+import lxml.etree
+import os
+import os.path
+import socket
+import time
+import Bcfg2.Server.Plugin
+
+class MetadataConsistencyError(Exception):
+ """This error gets raised when metadata is internally inconsistent."""
+ pass
+
+class MetadataRuntimeError(Exception):
+ """This error is raised when the metadata engine is called prior to reading enough data."""
+ pass
+
+class ClientMetadata(object):
+ """This object contains client metadata."""
+ def __init__(self, client, profile, groups, bundles,
+ aliases, addresses, categories, uuid, password, query):
+ self.hostname = client
+ self.profile = profile
+ self.bundles = bundles
+ self.aliases = aliases
+ self.addresses = addresses
+ self.groups = groups
+ self.categories = categories
+ self.uuid = uuid
+ self.password = password
+ self.connectors = []
+ self.query = query
+
+ def inGroup(self, group):
+ """Test to see if client is a member of group."""
+ return group in self.groups
+
+ def group_in_category(self, category):
+ for grp in self.query.all_groups_in_category(category):
+ if grp in self.groups:
+ return grp
+ return ''
+
+class MetadataQuery(object):
+ def __init__(self, by_name, get_clients, by_groups, by_profiles, all_groups, all_groups_in_category):
+ # resolver is set later
+ self.by_name = by_name
+ self.names_by_groups = by_groups
+ self.names_by_profiles = by_profiles
+ self.all_clients = get_clients
+ self.all_groups = all_groups
+ self.all_groups_in_category = all_groups_in_category
+
+ def by_groups(self, groups):
+ return [self.by_name(name) for name in self.names_by_groups(groups)]
+
+ def by_profiles(self, profiles):
+ return [self.by_name(name) for name in self.names_by_profiles(profiles)]
+
+ def all(self):
+ return [self.by_name(name) for name in self.all_clients()]
+
+class Metadata(Bcfg2.Server.Plugin.Plugin,
+ Bcfg2.Server.Plugin.Metadata,
+ Bcfg2.Server.Plugin.Statistics):
+ """This class contains data for bcfg2 server metadata."""
+ __version__ = '$Id$'
+ __author__ = 'bcfg-dev@mcs.anl.gov'
+ name = "Metadata"
+
+ def __init__(self, core, datastore, watch_clients=True):
+ Bcfg2.Server.Plugin.Plugin.__init__(self, core, datastore)
+ Bcfg2.Server.Plugin.Metadata.__init__(self)
+ Bcfg2.Server.Plugin.Statistics.__init__(self)
+ if watch_clients:
+ try:
+ core.fam.AddMonitor("%s/%s" % (self.data, "groups.xml"), self)
+ core.fam.AddMonitor("%s/%s" % (self.data, "clients.xml"), self)
+ except:
+ print("Unable to add file monitor for groups.xml or clients.xml")
+ raise Bcfg2.Server.Plugin.PluginInitError
+ self.states = {}
+ if watch_clients:
+ self.states = {"groups.xml":False, "clients.xml":False}
+ self.addresses = {}
+ self.auth = dict()
+ self.clients = {}
+ self.aliases = {}
+ self.groups = {}
+ self.cgroups = {}
+ self.public = []
+ self.private = []
+ self.profiles = []
+ self.categories = {}
+ self.bad_clients = {}
+ self.uuid = {}
+ self.secure = []
+ self.floating = []
+ self.passwords = {}
+ self.session_cache = {}
+ self.clientdata = None
+ self.clientdata_original = None
+ self.default = None
+ self.pdirty = False
+ self.extra = {'groups.xml':[], 'clients.xml':[]}
+ self.password = core.password
+ self.query = MetadataQuery(core.build_metadata,
+ lambda:self.clients.keys(),
+ self.get_client_names_by_groups,
+ self.get_client_names_by_profiles,
+ self.get_all_group_names,
+ self.get_all_groups_in_category)
+
+ @classmethod
+ def init_repo(cls, repo, groups, os_selection, clients):
+ path = '%s/%s' % (repo, cls.name)
+ os.makedirs(path)
+ open("%s/Metadata/groups.xml" %
+ repo, "w").write(groups % os_selection)
+ open("%s/Metadata/clients.xml" %
+ repo, "w").write(clients % socket.getfqdn())
+
+ def get_groups(self):
+        '''Return the groups.xml tree.'''
+ groups_tree = lxml.etree.parse(self.data + "/groups.xml")
+ root = groups_tree.getroot()
+ return root
+
+ def search_group(self, group_name, tree):
+ """Find a group."""
+ for node in tree.findall("//Group"):
+ if node.get("name") == group_name:
+ return node
+ for child in node:
+ if child.tag == "Alias" and child.attrib["name"] == group_name:
+ return node
+ return None
+
+ def add_group(self, group_name, attribs):
+ """Add group to groups.xml."""
+ tree = lxml.etree.parse(self.data + "/groups.xml")
+ root = tree.getroot()
+ element = lxml.etree.Element("Group", name=group_name)
+ for key, val in attribs.iteritems():
+ element.set(key, val)
+ node = self.search_group(group_name, tree)
+ if node != None:
+ self.logger.error("Group \"%s\" already exists" % (group_name))
+ raise MetadataConsistencyError
+ root.append(element)
+ group_tree = open(self.data + "/groups.xml","w")
+ fd = group_tree.fileno()
+ while True:
+ try:
+ fcntl.lockf(fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
+ except IOError:
+ continue
+ else:
+ break
+ tree.write(group_tree)
+ fcntl.lockf(fd, fcntl.LOCK_UN)
+ group_tree.close()
+
+ def update_group(self, group_name, attribs):
+ """Update a groups attributes."""
+ tree = lxml.etree.parse(self.data + "/groups.xml")
+ root = tree.getroot()
+ node = self.search_group(group_name, tree)
+ if node == None:
+ self.logger.error("Group \"%s\" not found" % (group_name))
+ raise MetadataConsistencyError
+ node.attrib.update(attribs)
+ group_tree = open(self.data + "/groups.xml","w")
+ fd = group_tree.fileno()
+ while True:
+ try:
+ fcntl.lockf(fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
+ except IOError:
+ continue
+ else:
+ break
+ tree.write(group_tree)
+ fcntl.lockf(fd, fcntl.LOCK_UN)
+ group_tree.close()
+
+ def remove_group(self, group_name):
+ """Remove a group."""
+ tree = lxml.etree.parse(self.data + "/groups.xml")
+ root = tree.getroot()
+ node = self.search_group(group_name, tree)
+ if node == None:
+ self.logger.error("Client \"%s\" not found" % (group_name))
+ raise MetadataConsistencyError
+ root.remove(node)
+ group_tree = open(self.data + "/groups.xml","w")
+ fd = group_tree.fileno()
+ while True:
+ try:
+ fcntl.lockf(fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
+ except IOError:
+ continue
+ else:
+ break
+ tree.write(group_tree)
+ fcntl.lockf(fd, fcntl.LOCK_UN)
+ group_tree.close()
+
+ def add_bundle(self, bundle_name):
+ """Add bundle to groups.xml."""
+ tree = lxml.etree.parse(self.data + "/groups.xml")
+ root = tree.getroot()
+ element = lxml.etree.Element("Bundle", name=bundle_name)
+ node = self.search_group(bundle_name, tree)
+ if node != None:
+ self.logger.error("Bundle \"%s\" already exists" % (bundle_name))
+ raise MetadataConsistencyError
+ root.append(element)
+ group_tree = open(self.data + "/groups.xml","w")
+ fd = group_tree.fileno()
+ while True:
+ try:
+ fcntl.lockf(fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
+ except IOError:
+ continue
+ else:
+ break
+ tree.write(group_tree)
+ fcntl.lockf(fd, fcntl.LOCK_UN)
+ group_tree.close()
+
+ def remove_bundle(self, bundle_name):
+ """Remove a bundle."""
+ tree = lxml.etree.parse(self.data + "/groups.xml")
+ root = tree.getroot()
+ node = self.search_group(bundle_name, tree)
+ if node == None:
+ self.logger.error("Bundle \"%s\" not found" % (bundle_name))
+ raise MetadataConsistencyError
+ root.remove(node)
+ group_tree = open(self.data + "/groups.xml","w")
+ fd = group_tree.fileno()
+ while True:
+ try:
+ fcntl.lockf(fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
+ except IOError:
+ continue
+ else:
+ break
+ tree.write(group_tree)
+ fcntl.lockf(fd, fcntl.LOCK_UN)
+ group_tree.close()
+
+ def search_client(self, client_name, tree):
+ """Find a client."""
+ for node in tree.findall("//Client"):
+ if node.get("name") == client_name:
+ return node
+ for child in node:
+ if child.tag == "Alias" and child.attrib["name"] == client_name:
+ return node
+ return None
+
+ def add_client(self, client_name, attribs):
+ """Add client to clients.xml."""
+ tree = lxml.etree.parse(self.data + "/clients.xml")
+ root = tree.getroot()
+ element = lxml.etree.Element("Client", name=client_name)
+ for key, val in attribs.iteritems():
+ element.set(key, val)
+ node = self.search_client(client_name, tree)
+ if node != None:
+ self.logger.error("Client \"%s\" already exists" % (client_name))
+ raise MetadataConsistencyError
+ root.append(element)
+ client_tree = open(self.data + "/clients.xml","w")
+ fd = client_tree.fileno()
+ while True:
+ try:
+ fcntl.lockf(fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
+ except IOError:
+ continue
+ else:
+ break
+ tree.write(client_tree)
+ fcntl.lockf(fd, fcntl.LOCK_UN)
+ client_tree.close()
+
+ def update_client(self, client_name, attribs):
+ """Update a clients attributes."""
+ tree = lxml.etree.parse(self.data + "/clients.xml")
+ root = tree.getroot()
+ node = self.search_client(client_name, tree)
+ if node == None:
+ self.logger.error("Client \"%s\" not found" % (client_name))
+ raise MetadataConsistencyError
+ node.attrib.update(attribs)
+ client_tree = open(self.data + "/clients.xml","w")
+ fd = client_tree.fileno()
+ while True:
+ try:
+ fcntl.lockf(fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
+ except IOError:
+ continue
+ else:
+ break
+ tree.write(client_tree)
+ fcntl.lockf(fd, fcntl.LOCK_UN)
+ client_tree.close()
+
+ def HandleEvent(self, event):
+ """Handle update events for data files."""
+ filename = event.filename.split('/')[-1]
+ if filename in ['groups.xml', 'clients.xml']:
+ dest = filename
+ elif filename in reduce(lambda x, y:x+y, self.extra.values()):
+ if event.code2str() == 'exists':
+ return
+ dest = [key for key, value in self.extra.iteritems() if filename in value][0]
+ else:
+ return
+ if event.code2str() == 'endExist':
+ return
+ try:
+ xdata = lxml.etree.parse("%s/%s" % (self.data, dest))
+ except lxml.etree.XMLSyntaxError:
+ self.logger.error('Failed to parse %s' % (dest))
+ return
+ included = [ent.get('href') for ent in \
+ xdata.findall('./{http://www.w3.org/2001/XInclude}include')]
+ xdata_original = copy.deepcopy(xdata)
+ if included:
+ for name in included:
+ if name not in self.extra[dest]:
+ self.core.fam.AddMonitor("%s/%s" % (self.data, name), self)
+ self.extra[dest].append(name)
+ try:
+ xdata.xinclude()
+ except lxml.etree.XIncludeError:
+ self.logger.error("Failed to process XInclude for file %s" % dest)
+
+ if dest == 'clients.xml':
+ self.clients = {}
+ self.aliases = {}
+ self.raliases = {}
+ self.bad_clients = {}
+ self.secure = []
+ self.floating = []
+ self.addresses = {}
+ self.raddresses = {}
+ self.clientdata_original = xdata_original
+ self.clientdata = xdata
+ for client in xdata.findall('.//Client'):
+ clname = client.get('name').lower()
+ if 'address' in client.attrib:
+ caddr = client.get('address')
+ if caddr in self.addresses:
+ self.addresses[caddr].append(clname)
+ else:
+ self.addresses[caddr] = [clname]
+ if clname not in self.raddresses:
+ self.raddresses[clname] = set()
+ self.raddresses[clname].add(caddr)
+ if 'auth' in client.attrib:
+ self.auth[client.get('name')] = client.get('auth',
+ 'cert+password')
+ if 'uuid' in client.attrib:
+ self.uuid[client.get('uuid')] = clname
+ if client.get('secure', 'false') == 'true':
+ self.secure.append(clname)
+ if client.get('location', 'fixed') == 'floating':
+ self.floating.append(clname)
+ if 'password' in client.attrib:
+ self.passwords[clname] = client.get('password')
+ for alias in [alias for alias in client.findall('Alias')\
+ if 'address' in alias.attrib]:
+ if alias.get('address') in self.addresses:
+ self.addresses[alias.get('address')].append(clname)
+ else:
+ self.addresses[alias.get('address')] = [clname]
+ if clname not in self.raddresses:
+ self.raddresses[clname] = set()
+ self.raddresses[clname].add(alias.get('address'))
+ self.clients.update({clname: client.get('profile')})
+ [self.aliases.update({alias.get('name'): clname}) \
+ for alias in client.findall('Alias')]
+ self.raliases[clname] = set()
+ [self.raliases[clname].add(alias.get('name')) for alias \
+ in client.findall('Alias')]
+ elif dest == 'groups.xml':
+ self.public = []
+ self.private = []
+ self.profiles = []
+ self.groups = {}
+ grouptmp = {}
+ self.categories = {}
+ for group in xdata.xpath('//Groups/Group') \
+ + xdata.xpath('Group'):
+ grouptmp[group.get('name')] = tuple([[item.get('name') for item in group.findall(spec)]
+ for spec in ['./Bundle', './Group']])
+ grouptmp[group.get('name')][1].append(group.get('name'))
+ if group.get('default', 'false') == 'true':
+ self.default = group.get('name')
+ if group.get('profile', 'false') == 'true':
+ self.profiles.append(group.get('name'))
+ if group.get('public', 'false') == 'true':
+ self.public.append(group.get('name'))
+ elif group.get('public', 'true') == 'false':
+ self.private.append(group.get('name'))
+ if 'category' in group.attrib:
+ self.categories[group.get('name')] = group.get('category')
+ for group in grouptmp:
+ # self.groups[group] => (bundles, groups, categories)
+ self.groups[group] = (set(), set(), {})
+ tocheck = [group]
+ group_cat = self.groups[group][2]
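+                # worklist expansion of nested memberships; at most one group
+                # per category is kept, so later groups in an already-claimed
+                # category are suppressed (logged below)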
+ while tocheck:
+ now = tocheck.pop()
+ self.groups[group][1].add(now)
+ if now in grouptmp:
+ (bundles, groups) = grouptmp[now]
+ for ggg in [ggg for ggg in groups if ggg not in self.groups[group][1]]:
+ if ggg not in self.categories or \
+ self.categories[ggg] not in self.groups[group][2]:
+ self.groups[group][1].add(ggg)
+ tocheck.append(ggg)
+ if ggg in self.categories:
+ group_cat[self.categories[ggg]] = ggg
+ elif ggg in self.categories:
+ self.logger.info("Group %s: %s cat-suppressed %s" % \
+ (group,
+ group_cat[self.categories[ggg]],
+ ggg))
+ [self.groups[group][0].add(bund) for bund in bundles]
+ self.states[dest] = True
+ if False not in self.states.values():
+ # check that all client groups are real and complete
+ real = self.groups.keys()
+ for client in self.clients.keys():
+ if self.clients[client] not in self.profiles:
+ self.logger.error("Client %s set as nonexistent or incomplete group %s" \
+ % (client, self.clients[client]))
+ self.logger.error("Removing client mapping for %s" % (client))
+ self.bad_clients[client] = self.clients[client]
+ del self.clients[client]
+ for bclient in self.bad_clients.keys():
+ if self.bad_clients[bclient] in self.profiles:
+ self.logger.info("Restored profile mapping for client %s" % bclient)
+ self.clients[bclient] = self.bad_clients[bclient]
+ del self.bad_clients[bclient]
+
+ def set_profile(self, client, profile, addresspair):
+ """Set group parameter for provided client."""
+ self.logger.info("Asserting client %s profile to %s" % (client, profile))
+ if False in self.states.values():
+ raise MetadataRuntimeError
+ if profile not in self.public:
+ self.logger.error("Failed to set client %s to private group %s" % (client, profile))
+ raise MetadataConsistencyError
+ if client in self.clients:
+ self.logger.info("Changing %s group from %s to %s" % (client, self.clients[client], profile))
+ cli = self.clientdata_original.xpath('.//Client[@name="%s"]' % (client))
+ cli[0].set('profile', profile)
+ else:
+ self.logger.info("Creating new client: %s, profile %s" % \
+ (client, profile))
+ if addresspair in self.session_cache:
+ # we are working with a uuid'd client
+ lxml.etree.SubElement(self.clientdata_original.getroot(),
+ 'Client',
+ name=self.session_cache[addresspair][1],
+ uuid=client, profile=profile,
+ address=addresspair[0])
+ else:
+ lxml.etree.SubElement(self.clientdata_original.getroot(),
+ 'Client', name=client,
+ profile=profile)
+ self.clients[client] = profile
+ self.write_back_clients()
+
+ def write_back_clients(self):
+ """Write changes to client.xml back to disk."""
+ try:
+ datafile = open("%s/%s" % (self.data, 'clients.xml.new'), 'w')
+ except IOError:
+ self.logger.error("Failed to write clients.xml.new")
+ raise MetadataRuntimeError
+ # prep data
+ dataroot = self.clientdata_original.getroot()
+ if hasattr(dataroot, 'iter'):
+ items = dataroot.iter()
+ else:
+ items = dataroot.getchildren()
+ for item in items:
+ # no items have text data of any sort
+ item.tail = None
+ item.text = None
+ newcontents = lxml.etree.tostring(dataroot, pretty_print=True)
+
+ fd = datafile.fileno()
+        while self.locked(fd):
+            time.sleep(0.1)
+ try:
+ datafile.write(newcontents)
+ except:
+ fcntl.lockf(fd, fcntl.LOCK_UN)
+ self.logger.error("Metadata: Failed to write new clients data to clients.xml.new", exc_info=1)
+ os.unlink("%s/%s" % (self.data, "clients.xml.new"))
+ raise MetadataRuntimeError
+ datafile.close()
+
+ # check if clients.xml is a symlink
+ clientsxml = "%s/%s" % (self.data, 'clients.xml')
+ if os.path.islink(clientsxml):
+ clientsxml = os.readlink(clientsxml)
+
+ try:
+ os.rename("%s/%s" % (self.data, 'clients.xml.new'), clientsxml)
+ except:
+ self.logger.error("Metadata: Failed to rename clients.xml.new")
+ raise MetadataRuntimeError
+
+ def locked(self, fd):
+ try:
+ fcntl.lockf(fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
+ except IOError:
+ return True
+ return False
+
+ def resolve_client(self, addresspair):
+ """Lookup address locally or in DNS to get a hostname."""
+ if addresspair in self.session_cache:
+ (stamp, uuid) = self.session_cache[addresspair]
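+            # cached uuid resolutions are only trusted for 90 seconds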
+ if time.time() - stamp < 90:
+                return uuid
+ address = addresspair[0]
+ if address in self.addresses:
+ if len(self.addresses[address]) != 1:
+ self.logger.error("Address %s has multiple reverse assignments; a uuid must be used" % (address))
+ raise MetadataConsistencyError
+ return self.addresses[address][0]
+ try:
+ cname = socket.gethostbyaddr(address)[0].lower()
+ if cname in self.aliases:
+ return self.aliases[cname]
+ return cname
+ except socket.herror:
+ warning = "address resolution error for %s" % (address)
+ self.logger.warning(warning)
+ raise MetadataConsistencyError
+
+ def get_initial_metadata(self, client):
+ """Return the metadata for a given client."""
+ if False in self.states.values():
+ raise MetadataRuntimeError
+ client = client.lower()
+ if client in self.aliases:
+ client = self.aliases[client]
+ if client in self.clients:
+ profile = self.clients[client]
+ (bundles, groups, categories) = self.groups[profile]
+ else:
+            if self.default is None:
+ self.logger.error("Cannot set group for client %s; no default group set" % (client))
+ raise MetadataConsistencyError
+ self.set_profile(client, self.default, (None, None))
+ profile = self.default
+ [bundles, groups, categories] = self.groups[self.default]
+ aliases = self.raliases.get(client, set())
+ addresses = self.raddresses.get(client, set())
+ newgroups = set(groups)
+ newbundles = set(bundles)
+ newcategories = {}
+ newcategories.update(categories)
+ if client in self.passwords:
+ password = self.passwords[client]
+ else:
+ password = None
+ uuids = [item for item, value in self.uuid.iteritems() if value == client]
+ if uuids:
+ uuid = uuids[0]
+ else:
+ uuid = None
+ for group in self.cgroups.get(client, []):
+ if group in self.groups:
+ nbundles, ngroups, ncategories = self.groups[group]
+ else:
+ nbundles, ngroups, ncategories = ([], [group], {})
+ [newbundles.add(b) for b in nbundles if b not in newbundles]
+ [newgroups.add(g) for g in ngroups if g not in newgroups]
+ newcategories.update(ncategories)
+ return ClientMetadata(client, profile, newgroups, newbundles, aliases,
+ addresses, newcategories, uuid, password, self.query)
+
+ def get_all_group_names(self):
+ all_groups = set()
+ [all_groups.update(g[1]) for g in self.groups.values()]
+ return all_groups
+
+ def get_all_groups_in_category(self, category):
+ all_groups = set()
+ [all_groups.add(g) for g in self.categories \
+ if self.categories[g] == category]
+ return all_groups
+
+ def get_client_names_by_profiles(self, profiles):
+ return [client for client, profile in self.clients.iteritems() \
+ if profile in profiles]
+
+ def get_client_names_by_groups(self, groups):
+ gprofiles = [profile for profile in self.profiles if \
+ self.groups[profile][1].issuperset(groups)]
+ return self.get_client_names_by_profiles(gprofiles)
+
+ def merge_additional_groups(self, imd, groups):
+ for group in groups:
+ if group in self.categories and \
+ self.categories[group] in imd.categories:
+ continue
+ nb, ng, _ = self.groups.get(group, (list(), [group], dict()))
+ for b in nb:
+ if b not in imd.bundles:
+ imd.bundles.add(b)
+ for g in ng:
+ if g not in imd.groups:
+ if g in self.categories and \
+ self.categories[g] in imd.categories:
+ continue
+ if g in self.private:
+ self.logger.error("Refusing to add dynamic membership in private group %s for client %s" % (g, imd.hostname))
+ continue
+ imd.groups.add(g)
+
+ def merge_additional_data(self, imd, source, data):
+ if not hasattr(imd, source):
+ setattr(imd, source, data)
+ imd.connectors.append(source)
+
+ def validate_client_address(self, client, addresspair):
+ """Check address against client."""
+ address = addresspair[0]
+ if client in self.floating:
+ self.debug_log("Client %s is floating" % client)
+ return True
+ if address in self.addresses:
+ if client in self.addresses[address]:
+ self.debug_log("Client %s matches address %s" % (client, address))
+ return True
+ else:
+ self.logger.error("Got request for non-float client %s from %s" \
+ % (client, address))
+ return False
+ resolved = self.resolve_client(addresspair)
+ if resolved.lower() == client.lower():
+ return True
+ else:
+ self.logger.error("Got request for %s from incorrect address %s" \
+ % (client, address))
+ self.logger.error("Resolved to %s" % resolved)
+ return False
+
+ def AuthenticateConnection(self, cert, user, password, address):
+ """This function checks auth creds."""
+ if cert:
+ id_method = 'cert'
+ certinfo = dict([x[0] for x in cert['subject']])
+ # look at cert.cN
+ client = certinfo['commonName']
+ self.debug_log("Got cN %s; using as client name" % client)
+ auth_type = self.auth.get(client, 'cert+password')
+ elif user == 'root':
+ id_method = 'address'
+ try:
+ client = self.resolve_client(address)
+ except MetadataConsistencyError:
+ self.logger.error("Client %s failed to resolve; metadata problem" % (address[0]))
+ return False
+ else:
+ id_method = 'uuid'
+ # user maps to client
+ if user not in self.uuid:
+ client = user
+ self.uuid[user] = user
+ else:
+ client = self.uuid[user]
+
+ # we have the client name
+ self.debug_log("Authenticating client %s" % client)
+
+ # next we validate the address
+ if id_method == 'uuid':
+ addr_is_valid = True
+ else:
+ addr_is_valid = self.validate_client_address(client, address)
+
+ if not addr_is_valid:
+ return False
+
+ if id_method == 'cert' and auth_type != 'cert+password':
+ # we are done if cert+password not required
+ return True
+
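+        # password policy: clients outside secure mode may present either the
+        # global password or their own; secure-mode clients must present
+        # their per-client password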
+ if client not in self.passwords:
+ if client in self.secure:
+ self.logger.error("Client %s in secure mode but has no password" % (address[0]))
+ return False
+ if password != self.password:
+ self.logger.error("Client %s used incorrect global password" % (address[0]))
+ return False
+ if client not in self.secure:
+ if client in self.passwords:
+ plist = [self.password, self.passwords[client]]
+ else:
+ plist = [self.password]
+ if password not in plist:
+ self.logger.error("Client %s failed to use either allowed password" % \
+ (address[0]))
+ return False
+ else:
+ # client in secure mode and has a client password
+ if password != self.passwords[client]:
+ self.logger.error("Client %s failed to use client password in secure mode" % \
+ (address[0]))
+ return False
+ # populate the session cache
+ if user != 'root':
+ self.session_cache[address] = (time.time(), client)
+ return True
+
+ def process_statistics(self, meta, _):
+ """Hook into statistics interface to toggle clients in bootstrap mode."""
+ client = meta.hostname
+ if client in self.auth and self.auth[client] == 'bootstrap':
+ self.logger.info("Asserting client %s auth mode to cert" % client)
+ cli = self.clientdata_original.xpath('.//Client[@name="%s"]' \
+ % (client))
+ cli[0].set('auth', 'cert')
+ self.write_back_clients()
+
+ def viz(self, hosts, bundles, key, colors):
+ """Admin mode viz support."""
+ groups_tree = lxml.etree.parse(self.data + "/groups.xml")
+ try:
+ groups_tree.xinclude()
+        except lxml.etree.XIncludeError:
+            self.logger.error("Failed to process XInclude for groups.xml")
+ groups = groups_tree.getroot()
+ categories = {'default':'grey83'}
+ instances = {}
+ viz_str = ""
+ egroups = groups.findall("Group") + groups.findall('.//Groups/Group')
+ for group in egroups:
+ if not group.get('category') in categories:
+ categories[group.get('category')] = colors.pop()
+ group.set('color', categories[group.get('category')])
+ if None in categories:
+ del categories[None]
+ if hosts:
+ clients = self.clients
+ for client, profile in clients.iteritems():
+ if profile in instances:
+ instances[profile].append(client)
+ else:
+ instances[profile] = [client]
+ for profile, clist in instances.iteritems():
+ clist.sort()
+ viz_str += '''\t"%s-instances" [ label="%s", shape="record" ];\n''' \
+ % (profile, '|'.join(clist))
+ viz_str += '''\t"%s-instances" -> "group-%s";\n''' \
+ % (profile, profile)
+ if bundles:
+ bundles = []
+ [bundles.append(bund.get('name')) \
+ for bund in groups.findall('.//Bundle') \
+ if bund.get('name') not in bundles]
+ bundles.sort()
+ for bundle in bundles:
+ viz_str += '''\t"bundle-%s" [ label="%s", shape="septagon"];\n''' \
+ % (bundle, bundle)
+ gseen = []
+ for group in egroups:
+ if group.get('profile', 'false') == 'true':
+ style = "filled, bold"
+ else:
+ style = "filled"
+ gseen.append(group.get('name'))
+ viz_str += '\t"group-%s" [label="%s", style="%s", fillcolor=%s];\n' % \
+ (group.get('name'), group.get('name'), style, group.get('color'))
+ if bundles:
+ for bundle in group.findall('Bundle'):
+ viz_str += '\t"group-%s" -> "bundle-%s";\n' % \
+ (group.get('name'), bundle.get('name'))
+ gfmt = '\t"group-%s" [label="%s", style="filled", fillcolor="grey83"];\n'
+ for group in egroups:
+ for parent in group.findall('Group'):
+ if parent.get('name') not in gseen:
+ viz_str += gfmt % (parent.get('name'), parent.get('name'))
+ gseen.append(parent.get("name"))
+ viz_str += '\t"group-%s" -> "group-%s" ;\n' % \
+ (group.get('name'), parent.get('name'))
+ if key:
+ for category in categories:
+ viz_str += '''\t"''' + category + '''" [label="''' + category + \
+ '''", shape="record", style="filled", fillcolor=''' + \
+ categories[category] + '''];\n'''
+ return viz_str
diff --git a/build/lib/Bcfg2/Server/Plugins/NagiosGen.py b/build/lib/Bcfg2/Server/Plugins/NagiosGen.py
new file mode 100644
index 000000000..cd6f843fb
--- /dev/null
+++ b/build/lib/Bcfg2/Server/Plugins/NagiosGen.py
@@ -0,0 +1,114 @@
+'''This module implements a Nagios configuration generator'''
+
+import glob
+import logging
+import os
+import re
+import socket
+
+import Bcfg2.Server.Plugin
+
+LOGGER = logging.getLogger('Bcfg2.Plugins.NagiosGen')
+
+host_config_fmt = \
+'''
+define host{
+ host_name %s
+ alias %s
+ address %s
+'''
+
+class NagiosGen(Bcfg2.Server.Plugin.Plugin,
+ Bcfg2.Server.Plugin.Generator):
+ """NagiosGen is a Bcfg2 plugin that dynamically generates
+ Nagios configuration file based on Bcfg2 data.
+ """
+ name = 'NagiosGen'
+ __version__ = '0.6'
+ __author__ = 'bcfg-dev@mcs.anl.gov'
+
+ def __init__(self, core, datastore):
+ Bcfg2.Server.Plugin.Plugin.__init__(self, core, datastore)
+ Bcfg2.Server.Plugin.Generator.__init__(self)
+ self.Entries = {'Path':
+ {'/etc/nagiosgen.status' : self.createhostconfig,
+ '/etc/nagios/nagiosgen.cfg': self.createserverconfig}}
+
+ self.client_attrib = {'encoding':'ascii',
+ 'owner':'root',
+ 'group':'root',
+ 'type':'file',
+ 'perms':'0400'}
+ self.server_attrib = {'encoding':'ascii',
+ 'owner':'nagios',
+ 'group':'nagios',
+ 'type':'file',
+ 'perms':'0440'}
+
+ def createhostconfig(self, entry, metadata):
+ """Build host specific configuration file."""
+ host_address = socket.gethostbyname(metadata.hostname)
+ host_groups = [grp for grp in metadata.groups if \
+ os.path.isfile('%s/%s-group.cfg' % (self.data, grp))]
+ host_config = host_config_fmt % \
+ (metadata.hostname, metadata.hostname, host_address)
+
+ if host_groups:
+ host_config += ' hostgroups %s\n' % (",".join(host_groups))
+
+ xtra = None
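+        # extra host directives may come from a NagiosGen.xml Properties file,
+        # keyed on hostname with a 'default' fallback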
+ if hasattr(metadata, 'Properties') and \
+ 'NagiosGen.xml' in metadata.Properties:
+ for q in (metadata.hostname, 'default'):
+ xtra = metadata.Properties['NagiosGen.xml'].data.find(q)
+ if xtra is not None:
+ break
+
+ if xtra is not None:
+ directives = list(xtra)
+ for item in directives:
+ host_config += ' %-32s %s\n' % (item.tag, item.text)
+
+ else:
+ host_config += ' use default\n'
+
+ host_config += '}\n'
+ entry.text = host_config
+ [entry.attrib.__setitem__(key, value) for \
+ (key, value) in self.client_attrib.iteritems()]
+ try:
+ fileh = open("%s/%s-host.cfg" % \
+ (self.data, metadata.hostname), 'w')
+ fileh.write(host_config)
+ fileh.close()
+        except (IOError, OSError), ioerr:
+ LOGGER.error("Failed to write %s/%s-host.cfg" % \
+ (self.data, metadata.hostname))
+ LOGGER.error(ioerr)
+
+ def createserverconfig(self, entry, _):
+ """Build monolithic server configuration file."""
+ host_configs = glob.glob('%s/*-host.cfg' % self.data)
+ group_configs = glob.glob('%s/*-group.cfg' % self.data)
+ host_data = ""
+ group_data = ""
+ for host in host_configs:
+ hostfile = open(host, 'r')
+ host_data += hostfile.read()
+ hostfile.close()
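+        # include a group config only when some host config references it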
+ for group in group_configs:
+ group_name = re.sub("(-group.cfg|.*/(?=[^/]+))", "", group)
+ if host_data.find(group_name) != -1:
+ groupfile = open(group, 'r')
+ group_data += groupfile.read()
+ groupfile.close()
+ entry.text = group_data + host_data
+ [entry.attrib.__setitem__(key, value) for \
+ (key, value) in self.server_attrib.iteritems()]
+ try:
+ fileh = open("%s/nagiosgen.cfg" % (self.data), 'w')
+ fileh.write(group_data + host_data)
+ fileh.close()
+        except (IOError, OSError), ioerr:
+ LOGGER.error("Failed to write %s/nagiosgen.cfg" % (self.data))
+ LOGGER.error(ioerr)
diff --git a/build/lib/Bcfg2/Server/Plugins/Ohai.py b/build/lib/Bcfg2/Server/Plugins/Ohai.py
new file mode 100644
index 000000000..0f7c7187f
--- /dev/null
+++ b/build/lib/Bcfg2/Server/Plugins/Ohai.py
@@ -0,0 +1,79 @@
+import lxml.etree
+import os
+
+import logging
+logger = logging.getLogger('Bcfg2.Plugins.Ohai')
+
+import Bcfg2.Server.Plugin
+
+try:
+ import json
+except ImportError:
+ # FIXME: can be removed when server prereq is >= python 2.6
+ # necessary for clients without the in-tree json module
+ try:
+ import simplejson as json
+    except ImportError:
+ logger.error("Unable to load any json modules. Make sure "
+ "python-simplejson is installed.")
+ raise ImportError
+
+
+class OhaiCache(object):
+
+ def __init__(self, dirname):
+ self.dirname = dirname
+ self.cache = dict()
+
+ def __setitem__(self, item, value):
+        if value is None:
+ # simply return if the client returned nothing
+ return
+ self.cache[item] = json.loads(value)
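+        # persist the raw json so cached data survives server restarts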
+ file("%s/%s.json" % (self.dirname, item), 'w').write(value)
+
+ def __getitem__(self, item):
+ if item not in self.cache:
+ try:
+ data = open("%s/%s.json" % (self.dirname, item)).read()
+            except IOError:
+ raise KeyError, item
+ self.cache[item] = json.loads(data)
+ return self.cache[item]
+
+ def __iter__(self):
+ data = self.cache.keys()
+ data.extend([x[:-5] for x in os.listdir(self.dirname)])
+ return data.__iter__()
+
+
+class Ohai(Bcfg2.Server.Plugin.Plugin,
+ Bcfg2.Server.Plugin.Probing,
+ Bcfg2.Server.Plugin.Connector):
+ """The Ohai plugin is used to detect information about the client operating system."""
+ name = 'Ohai'
+ experimental = True
+
+ def __init__(self, core, datastore):
+ Bcfg2.Server.Plugin.Plugin.__init__(self, core, datastore)
+ Bcfg2.Server.Plugin.Probing.__init__(self)
+ Bcfg2.Server.Plugin.Connector.__init__(self)
+ self.probe = lxml.etree.Element('probe', name='Ohai', source='Ohai',
+ interpreter='/bin/sh')
+ self.probe.text = 'ohai'
+ try:
+ os.stat(self.data)
+        except OSError:
+ os.makedirs(self.data)
+ self.cache = OhaiCache(self.data)
+
+ def GetProbes(self, meta, force=False):
+ return [self.probe]
+
+ def ReceiveData(self, meta, datalist):
+ self.cache[meta.hostname] = datalist[0].text
+
+ def get_additional_data(self, meta):
+ if meta.hostname in self.cache:
+ return self.cache[meta.hostname]
+ return dict()
diff --git a/build/lib/Bcfg2/Server/Plugins/Packages.py b/build/lib/Bcfg2/Server/Plugins/Packages.py
new file mode 100644
index 000000000..194330723
--- /dev/null
+++ b/build/lib/Bcfg2/Server/Plugins/Packages.py
@@ -0,0 +1,869 @@
+import cPickle
+import copy
+import gzip
+import tarfile
+import glob
+import logging
+import lxml.etree
+import os
+import re
+import sys
+import urllib2
+
+# FIXME: Remove when server python dep is 2.5 or greater
+if sys.version_info >= (2, 5):
+ from hashlib import md5
+else:
+ from md5 import md5
+
+import Bcfg2.Logger
+import Bcfg2.Server.Plugin
+
+# build sources.list?
+# caching for yum
+
+class NoData(Exception):
+ pass
+
+class SomeData(Exception):
+ pass
+
+logger = logging.getLogger('Packages')
+
+def source_from_xml(xsource):
+ ret = dict([('rawurl', False), ('url', False)])
+ for key, tag in [('groups', 'Group'), ('components', 'Component'),
+ ('arches', 'Arch'), ('blacklist', 'Blacklist'),
+ ('whitelist', 'Whitelist')]:
+ ret[key] = [item.text for item in xsource.findall(tag)]
+ # version and component need to both contain data for sources to work
+ try:
+ ret['version'] = xsource.find('Version').text
+    except AttributeError:
+ ret['version'] = 'placeholder'
+ if ret['components'] == []:
+ ret['components'] = ['placeholder']
+ try:
+ if xsource.find('Recommended').text in ['True', 'true']:
+ ret['recommended'] = True
+ else:
+ ret['recommended'] = False
+    except AttributeError:
+ ret['recommended'] = False
+ if xsource.find('RawURL') is not None:
+ ret['rawurl'] = xsource.find('RawURL').text
+ if not ret['rawurl'].endswith('/'):
+ ret['rawurl'] += '/'
+ else:
+ ret['url'] = xsource.find('URL').text
+ if not ret['url'].endswith('/'):
+ ret['url'] += '/'
+ return ret
+
+def _fetch_url(url):
+ if '@' in url:
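+        # urls of the form scheme://user:password@host are split so the
+        # credentials can be installed in a basic-auth handler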
+ mobj = re.match('(\w+://)([^:]+):([^@]+)@(.*)$', url)
+ if not mobj:
+ raise ValueError
+ user = mobj.group(2)
+ passwd = mobj.group(3)
+ url = mobj.group(1) + mobj.group(4)
+ auth = urllib2.HTTPBasicAuthHandler(urllib2.HTTPPasswordMgrWithDefaultRealm())
+ auth.add_password(None, url, user, passwd)
+ urllib2.install_opener(urllib2.build_opener(auth))
+ return urllib2.urlopen(url).read()
+
+class Source(object):
+ basegroups = []
+
+ def __init__(self, basepath, url, version, arches, components, groups, rawurl,
+ blacklist, whitelist, recommended):
+ self.basepath = basepath
+ self.version = version
+ self.components = components
+ self.url = url
+ self.rawurl = rawurl
+ self.groups = groups
+ self.arches = arches
+ self.deps = dict()
+ self.provides = dict()
+ self.blacklist = set(blacklist)
+ self.whitelist = set(whitelist)
+ self.cachefile = '%s/cache-%s' % (self.basepath, md5(cPickle.dumps( \
+ [self.version, self.components, self.url, \
+ self.rawurl, self.groups, self.arches])).hexdigest())
+ self.recommended = recommended
+ self.url_map = []
+
+ def load_state(self):
+ pass
+
+ def setup_data(self, force_update=False):
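+        # preference order: the pickled cache, then the on-disk repo files,
+        # then a fresh download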
+ should_read = True
+ should_download = False
+ if os.path.exists(self.cachefile):
+ try:
+ self.load_state()
+ should_read = False
+ except:
+ logger.error("Cachefile %s load failed; falling back to file read"\
+ % (self.cachefile))
+ if should_read:
+ try:
+ self.read_files()
+ except:
+ logger.error("Packages: File read failed; falling back to file download")
+ should_download = True
+
+ if should_download or force_update:
+ try:
+ self.update()
+ self.read_files()
+ except:
+ logger.error("Failed to update source", exc_info=1)
+
+ def get_urls(self):
+ return []
+ urls = property(get_urls)
+
+ def get_files(self):
+ return [self.escape_url(url) for url in self.urls]
+ files = property(get_files)
+
+ def get_vpkgs(self, meta):
+ agroups = ['global'] + [a for a in self.arches if a in meta.groups]
+ vdict = dict()
+ for agrp in agroups:
+ for key, value in self.provides[agrp].iteritems():
+ if key not in vdict:
+ vdict[key] = set(value)
+ else:
+ vdict[key].update(value)
+ return vdict
+
+ def escape_url(self, url):
+ return "%s/%s" % (self.basepath, url.replace('/', '@'))
+
+ def file_init(self):
+ pass
+
+ def read_files(self):
+ pass
+
+ def update(self):
+ for url in self.urls:
+ logger.info("Packages: Updating %s" % url)
+ fname = self.escape_url(url)
+ try:
+ data = _fetch_url(url)
+ except ValueError:
+ logger.error("Packages: Bad url string %s" % url)
+ continue
+ except urllib2.HTTPError, h:
+ logger.error("Packages: Failed to fetch url %s. code=%s" \
+ % (url, h.code))
+ continue
+ file(fname, 'w').write(data)
+
+ def applies(self, metadata):
+ return len([g for g in self.basegroups if g in metadata.groups]) != 0 and \
+ len([g for g in metadata.groups if g in self.groups]) \
+ == len(self.groups)
+
+ def get_arches(self, metadata):
+ return ['global'] + [a for a in self.arches if a in metadata.groups]
+
+ def get_deps(self, metadata, pkgname):
+ for arch in self.get_arches(metadata):
+ if pkgname in self.deps[arch]:
+ return self.deps[arch][pkgname]
+ raise NoData
+
+ def get_provides(self, metadata, required):
+ for arch in self.get_arches(metadata):
+ if required in self.provides[arch]:
+ return self.provides[arch][required]
+ raise NoData
+
+ def is_package(self, metadata, _):
+ return False
+
+ def get_url_info(self):
+ return {'groups': copy.copy(self.groups), \
+ 'urls': [copy.deepcopy(url) for url in self.url_map]}
+
+class YUMSource(Source):
+ xp = '{http://linux.duke.edu/metadata/common}'
+ rp = '{http://linux.duke.edu/metadata/rpm}'
+ rpo = '{http://linux.duke.edu/metadata/repo}'
+ fl = '{http://linux.duke.edu/metadata/filelists}'
+ basegroups = ['yum', 'redhat', 'centos', 'fedora']
+ ptype = 'yum'
+
+ def __init__(self, basepath, url, version, arches, components, groups,
+ rawurl, blacklist, whitelist, recommended):
+ Source.__init__(self, basepath, url, version, arches, components,
+ groups, rawurl, blacklist, whitelist, recommended)
+ if not self.rawurl:
+ self.baseurl = self.url + '%(version)s/%(component)s/%(arch)s/'
+ else:
+ self.baseurl = self.rawurl
+ self.packages = dict()
+ self.deps = dict([('global', dict())])
+ self.provides = dict([('global', dict())])
+ self.filemap = dict([(x, dict()) for x in ['global'] + self.arches])
+ self.needed_paths = set()
+ self.file_to_arch = dict()
+
+ def save_state(self):
+ cache = file(self.cachefile, 'wb')
+ cPickle.dump((self.packages, self.deps, self.provides,
+ self.filemap, self.url_map), cache, 2)
+ cache.close()
+
+ def load_state(self):
+ data = file(self.cachefile)
+ (self.packages, self.deps, self.provides, \
+ self.filemap, self.url_map) = cPickle.load(data)
+
+ def get_urls(self):
+ surls = list()
+ self.url_map = []
+ for arch in self.arches:
+ usettings = [{'version': self.version, 'component':comp,
+ 'arch':arch} for comp in self.components]
+ for setting in usettings:
+ setting['groups'] = self.groups
+ setting['url'] = self.baseurl % setting
+ self.url_map.append(copy.deepcopy(setting))
+ surls.append((arch, [setting['url'] for setting in usettings]))
+ urls = []
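+        # fetch each repository's repomd.xml to locate its primary and
+        # filelists metadata files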
+ for (sarch, surl_list) in surls:
+ for surl in surl_list:
+ if not surl.endswith('/'):
+ surl += '/'
+ rmdurl = surl + 'repodata/repomd.xml'
+ try:
+ repomd = _fetch_url(rmdurl)
+ xdata = lxml.etree.XML(repomd)
+ except ValueError:
+ logger.error("Packages: Bad url string %s" % rmdurl)
+ continue
+ except urllib2.HTTPError, h:
+ logger.error("Packages: Failed to fetch url %s. code=%s" \
+ % (rmdurl, h.code))
+ continue
+ except:
+ logger.error("Failed to process url %s" % rmdurl)
+ continue
+ for elt in xdata.findall(self.rpo + 'data'):
+ if elt.get('type') not in ['filelists', 'primary']:
+ continue
+ floc = elt.find(self.rpo + 'location')
+ fullurl = surl + floc.get('href')
+ urls.append(fullurl)
+ self.file_to_arch[self.escape_url(fullurl)] = sarch
+ return urls
+ urls = property(get_urls)
+
+ def read_files(self):
+ for fname in [f for f in self.files if f.endswith('primary.xml.gz')]:
+ farch = self.file_to_arch[fname]
+ fdata = lxml.etree.parse(fname).getroot()
+ self.parse_primary(fdata, farch)
+ for fname in [f for f in self.files if f.endswith('filelists.xml.gz')]:
+ farch = self.file_to_arch[fname]
+ fdata = lxml.etree.parse(fname).getroot()
+ self.parse_filelist(fdata, farch)
+ # merge data
+ sdata = self.packages.values()
+ self.packages['global'] = copy.deepcopy(sdata.pop())
+ while sdata:
+            self.packages['global'] = self.packages['global'].intersection(sdata.pop())
+
+ for key in self.packages:
+ if key == 'global':
+ continue
+            self.packages[key] = self.packages[key].difference(self.packages['global'])
+ self.save_state()
+
+ def parse_filelist(self, data, arch):
+ if arch not in self.filemap:
+ self.filemap[arch] = dict()
+ for pkg in data.findall(self.fl + 'package'):
+ for fentry in [fe for fe in pkg.findall(self.fl + 'file') \
+ if fe.text in self.needed_paths]:
+ if fentry.text in self.filemap[arch]:
+ self.filemap[arch][fentry.text].add(pkg.get('name'))
+ else:
+ self.filemap[arch][fentry.text] = set([pkg.get('name')])
+
+ def parse_primary(self, data, arch):
+ if arch not in self.packages:
+ self.packages[arch] = set()
+ if arch not in self.deps:
+ self.deps[arch] = dict()
+ if arch not in self.provides:
+ self.provides[arch] = dict()
+ for pkg in data.getchildren():
+ if not pkg.tag.endswith('package'):
+ continue
+ pkgname = pkg.find(self.xp + 'name').text
+ self.packages[arch].add(pkgname)
+
+ pdata = pkg.find(self.xp + 'format')
+ pre = pdata.find(self.rp + 'requires')
+ self.deps[arch][pkgname] = set()
+ for entry in pre.getchildren():
+ self.deps[arch][pkgname].add(entry.get('name'))
+ if entry.get('name').startswith('/'):
+ self.needed_paths.add(entry.get('name'))
+ pro = pdata.find(self.rp + 'provides')
+            if pro is not None:
+ for entry in pro.getchildren():
+ prov = entry.get('name')
+ if prov not in self.provides[arch]:
+ self.provides[arch][prov] = list()
+ self.provides[arch][prov].append(pkgname)
+
+ def is_package(self, metadata, item):
+ arch = [a for a in self.arches if a in metadata.groups]
+ if not arch:
+ return False
+ return (item in self.packages['global'] or item in self.packages[arch[0]]) and \
+ item not in self.blacklist and \
+ ((len(self.whitelist) == 0) or item in self.whitelist)
+
+ def get_vpkgs(self, metadata):
+ rv = Source.get_vpkgs(self, metadata)
+ for arch, fmdata in self.filemap.iteritems():
+ if arch not in metadata.groups and arch != 'global':
+ continue
+ for filename, pkgs in fmdata.iteritems():
+ rv[filename] = pkgs
+ return rv
+
+ def filter_unknown(self, unknown):
+ filtered = set([u for u in unknown if u.startswith('rpmlib')])
+ unknown.difference_update(filtered)
+
+class APTSource(Source):
+ basegroups = ['apt', 'debian', 'ubuntu', 'nexenta']
+ ptype = 'deb'
+
+ def __init__(self, basepath, url, version, arches, components, groups,
+ rawurl, blacklist, whitelist, recommended):
+ Source.__init__(self, basepath, url, version, arches, components, groups,
+ rawurl, blacklist, whitelist, recommended)
+ self.pkgnames = set()
+
+ self.url_map = [{'rawurl': self.rawurl, 'url': self.url, 'version': self.version, \
+ 'components': self.components, 'arches': self.arches, 'groups': self.groups}]
+
+ def save_state(self):
+ cache = file(self.cachefile, 'wb')
+ cPickle.dump((self.pkgnames, self.deps, self.provides),
+ cache, 2)
+ cache.close()
+
+ def load_state(self):
+ data = file(self.cachefile)
+ self.pkgnames, self.deps, self.provides = cPickle.load(data)
+
+ def filter_unknown(self, unknown):
+ filtered = set([u for u in unknown if u.startswith('choice')])
+ unknown.difference_update(filtered)
+
+ def get_urls(self):
+ if not self.rawurl:
+ return ["%sdists/%s/%s/binary-%s/Packages.gz" % \
+ (self.url, self.version, part, arch) for part in self.components \
+ for arch in self.arches]
+ else:
+ return ["%sPackages.gz" % (self.rawurl)]
+ urls = property(get_urls)
+
+ def read_files(self):
+ bdeps = dict()
+ bprov = dict()
+ if self.recommended:
+ depfnames = ['Depends', 'Pre-Depends', 'Recommends']
+ else:
+ depfnames = ['Depends', 'Pre-Depends']
+ for fname in self.files:
+ if not self.rawurl:
+ barch = [x for x in fname.split('@') if x.startswith('binary-')][0][7:]
+ else:
+ # RawURL entries assume that they only have one <Arch></Arch>
+ # element and that it is the architecture of the source.
+ barch = self.arches[0]
+ if barch not in bdeps:
+ bdeps[barch] = dict()
+ bprov[barch] = dict()
+ try:
+ reader = gzip.GzipFile(fname)
+ except:
+ print("Failed to read file %s" % fname)
+ raise
+ for line in reader.readlines():
+ words = line.strip().split(':', 1)
+ if words[0] == 'Package':
+ pkgname = words[1].strip().rstrip()
+ self.pkgnames.add(pkgname)
+ bdeps[barch][pkgname] = []
+ elif words[0] in depfnames:
+ vindex = 0
+ for dep in words[1].split(','):
+ if '|' in dep:
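+                            # model alternation ("a | b") as a synthetic
+                            # "choice-*" virtual package provided by each option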
+ cdeps = [re.sub('\s+', '', re.sub('\(.*\)', '', cdep)) for cdep in dep.split('|')]
+ dyn_dname = "choice-%s-%s-%s" % (pkgname, barch, vindex)
+ vindex += 1
+ bdeps[barch][pkgname].append(dyn_dname)
+ bprov[barch][dyn_dname] = set(cdeps)
+ else:
+ raw_dep = re.sub('\(.*\)', '', dep)
+ raw_dep = raw_dep.rstrip().strip()
+ bdeps[barch][pkgname].append(raw_dep)
+ elif words[0] == 'Provides':
+ for pkg in words[1].split(','):
+ dname = pkg.rstrip().strip()
+ if dname not in bprov[barch]:
+ bprov[barch][dname] = set()
+ bprov[barch][dname].add(pkgname)
+
+ self.deps['global'] = dict()
+ self.provides['global'] = dict()
+ for barch in bdeps:
+ self.deps[barch] = dict()
+ self.provides[barch] = dict()
+ for pkgname in self.pkgnames:
+ pset = set()
+ for barch in bdeps:
+ if pkgname not in bdeps[barch]:
+ bdeps[barch][pkgname] = []
+ pset.add(tuple(bdeps[barch][pkgname]))
+ if len(pset) == 1:
+ self.deps['global'][pkgname] = pset.pop()
+ else:
+ for barch in bdeps:
+ self.deps[barch][pkgname] = bdeps[barch][pkgname]
+ provided = set()
+ for bprovided in bprov.values():
+ provided.update(set(bprovided))
+ for prov in provided:
+ prset = set()
+ for barch in bprov:
+ if prov not in bprov[barch]:
+ continue
+ prset.add(tuple(bprov[barch].get(prov, ())))
+ if len(prset) == 1:
+ self.provides['global'][prov] = prset.pop()
+ else:
+ for barch in bprov:
+ self.provides[barch][prov] = bprov[barch].get(prov, ())
+ self.save_state()
+
+ def is_package(self, _, pkg):
+ return pkg in self.pkgnames and \
+ pkg not in self.blacklist and \
+ (len(self.whitelist) == 0 or pkg in self.whitelist)
+
+class PACSource(Source):
+ basegroups = ['arch', 'parabola']
+ ptype = 'pacman'
+
+ def __init__(self, basepath, url, version, arches, components, groups,
+ rawurl, blacklist, whitelist, recommended):
+ Source.__init__(self, basepath, url, version, arches, components, groups,
+ rawurl, blacklist, whitelist, recommended)
+ self.pkgnames = set()
+
+ self.url_map = [{'rawurl': self.rawurl, 'url': self.url, 'version': self.version, \
+ 'components': self.components, 'arches': self.arches, 'groups': self.groups}]
+
+ def save_state(self):
+ cache = file(self.cachefile, 'wb')
+ cPickle.dump((self.pkgnames, self.deps, self.provides),
+ cache, 2)
+ cache.close()
+
+ def load_state(self):
+ data = file(self.cachefile)
+ self.pkgnames, self.deps, self.provides = cPickle.load(data)
+
+ def filter_unknown(self, unknown):
+ filtered = set([u for u in unknown if u.startswith('choice')])
+ unknown.difference_update(filtered)
+
+ def get_urls(self):
+ if not self.rawurl:
+ return ["%s/%s/os/%s/%s.db.tar.gz" % \
+ (self.url, part, arch, part) for part in self.components \
+ for arch in self.arches]
+ else:
+ raise Exception("PACSource : RAWUrl not supported (yet)")
+ urls = property(get_urls)
+
+
+ def read_files(self):
+ bdeps = dict()
+ bprov = dict()
+
+ if self.recommended:
+ depfnames = ['Depends', 'Pre-Depends', 'Recommends']
+ else:
+ depfnames = ['Depends', 'Pre-Depends']
+
+ for fname in self.files:
+ if not self.rawurl:
+ barch = [x for x in fname.split('@') if x in self.arches][0]
+ else:
+ # RawURL entries assume that they only have one <Arch></Arch>
+ # element and that it is the architecture of the source.
+ barch = self.arches[0]
+
+ if barch not in bdeps:
+ bdeps[barch] = dict()
+ bprov[barch] = dict()
+            try:
+                logger.debug("Packages: reading %s" % fname)
+                # tarfile handles the gzip layer itself
+                tar = tarfile.open(fname, "r")
+            except:
+                logger.error("Packages: failed to read file %s" % fname)
+                raise
+
+ for tarinfo in tar:
+ if tarinfo.isdir():
+                    pkgname = tarinfo.name.rsplit("-", 2)[0]
+                    self.pkgnames.add(pkgname)
+                    logger.debug("Packages: added %s" % pkgname)
+ tar.close()
+
+ self.deps['global'] = dict()
+ self.provides['global'] = dict()
+ for barch in bdeps:
+ self.deps[barch] = dict()
+ self.provides[barch] = dict()
+ for pkgname in self.pkgnames:
+ pset = set()
+ for barch in bdeps:
+ if pkgname not in bdeps[barch]:
+ bdeps[barch][pkgname] = []
+ pset.add(tuple(bdeps[barch][pkgname]))
+ if len(pset) == 1:
+ self.deps['global'][pkgname] = pset.pop()
+ else:
+ for barch in bdeps:
+ self.deps[barch][pkgname] = bdeps[barch][pkgname]
+ provided = set()
+ for bprovided in bprov.values():
+ provided.update(set(bprovided))
+ for prov in provided:
+ prset = set()
+ for barch in bprov:
+ if prov not in bprov[barch]:
+ continue
+ prset.add(tuple(bprov[barch].get(prov, ())))
+ if len(prset) == 1:
+ self.provides['global'][prov] = prset.pop()
+ else:
+ for barch in bprov:
+ self.provides[barch][prov] = bprov[barch].get(prov, ())
+ self.save_state()
+
+ def is_package(self, _, pkg):
+ return pkg in self.pkgnames and \
+ pkg not in self.blacklist and \
+ (len(self.whitelist) == 0 or pkg in self.whitelist)
+
+class Packages(Bcfg2.Server.Plugin.Plugin,
+ Bcfg2.Server.Plugin.StructureValidator,
+ Bcfg2.Server.Plugin.Generator,
+ Bcfg2.Server.Plugin.Connector):
+ name = 'Packages'
+ conflicts = ['Pkgmgr']
+ experimental = True
+ __rmi__ = Bcfg2.Server.Plugin.Plugin.__rmi__ + ['Refresh', 'Reload']
+
+ def __init__(self, core, datastore):
+ Bcfg2.Server.Plugin.Plugin.__init__(self, core, datastore)
+ Bcfg2.Server.Plugin.StructureValidator.__init__(self)
+ Bcfg2.Server.Plugin.Generator.__init__(self)
+ Bcfg2.Server.Plugin.Connector.__init__(self)
+ self.cachepath = self.data + '/cache'
+ self.sentinels = set()
+ self.sources = []
+ self.disableResolver = False
+ self.disableMetaData = False
+ self.virt_pkgs = dict()
+
+ if not os.path.exists(self.cachepath):
+ # create cache directory if needed
+ os.makedirs(self.cachepath)
+ self._load_config()
+
+ def get_relevant_groups(self, meta):
+ mgrps = list(set([g for g in meta.groups for s in self.get_matching_sources(meta) \
+ if g in s.basegroups or g in s.groups or g in s.arches]))
+ mgrps.sort()
+ return tuple(mgrps)
+
+ def build_vpkgs_entry(self, meta):
+ # build single entry for all matching sources
+ mgrps = self.get_relevant_groups(meta)
+ vpkgs = dict()
+ for source in self.get_matching_sources(meta):
+ s_vpkgs = source.get_vpkgs(meta)
+ for name, prov_set in s_vpkgs.iteritems():
+ if name not in vpkgs:
+ vpkgs[name] = set(prov_set)
+ else:
+ vpkgs[name].update(prov_set)
+ return vpkgs
+
+ def get_matching_sources(self, meta):
+ return [s for s in self.sources if s.applies(meta)]
+
+ def HandlesEntry(self, entry, metadata):
+ if [x for x in metadata.groups if x in self.sentinels] \
+ and entry.tag == 'Package':
+ return True
+ return False
+
+ def HandleEntry(self, entry, metadata):
+ entry.set('version', 'auto')
+ for source in self.sources:
+ if [x for x in metadata.groups if x in source.basegroups]:
+ entry.set('type', source.ptype)
+
+ def complete(self, meta, input_requirements, debug=False):
+ '''Build the transitive closure of all package dependencies
+
+ Arguments:
+ meta - client metadata instance
+ packages - set of package names
+ debug - print out debug information for the decision making process
+ returns => (set(packages), set(unsatisfied requirements), package type)
+ '''
+ sources = self.get_matching_sources(meta)
+ # reverse list so that priorities correspond to file order
+ sources.reverse()
+ if len(sources) == 0:
+ self.logger.error("Packages: No matching sources for client %s; improper group memberships?" % (meta.hostname))
+ return set(), set(), 'failed'
+ ptype = set([s.ptype for s in sources])
+ if len(ptype) < 1:
+ return set(), set(), 'failed'
+
+ # setup vpkg cache
+ pgrps = self.get_relevant_groups(meta)
+ if pgrps not in self.virt_pkgs:
+ self.virt_pkgs[pgrps] = self.build_vpkgs_entry(meta)
+ vpkg_cache = self.virt_pkgs[pgrps]
+
+ # unclassified is set of unsatisfied requirements (may be pkg for vpkg)
+ unclassified = set(input_requirements)
+ vpkgs = set()
+ both = set()
+ pkgs = set(input_requirements)
+
+ packages = set()
+ examined = set()
+ unknown = set()
+
+ final_pass = False
+ really_done = False
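+        # once every work queue drains, one final pass lets entries in 'both'
+        # be forced to their concrete package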
+ # do while unclassified or vpkgs or both or pkgs
+ while unclassified or pkgs or both or final_pass:
+ #print len(unclassified), len(pkgs), len(both), len(vpkgs), final_pass
+ if really_done:
+ break
+ if len(unclassified) + len(pkgs) + len(both) == 0:
+ # one more pass then exit
+ really_done = True
+
+ while unclassified:
+ current = unclassified.pop()
+ examined.add(current)
+ is_pkg = True in [source.is_package(meta, current) for source in sources]
+ is_vpkg = current in vpkg_cache
+
+ if is_pkg and is_vpkg:
+ both.add(current)
+ elif is_pkg and not is_vpkg:
+ pkgs.add(current)
+ elif is_vpkg and not is_pkg:
+ vpkgs.add(current)
+ elif not is_vpkg and not is_pkg:
+ unknown.add(current)
+
+ while pkgs:
+ # direct packages; current can be added, and all deps should be resolved
+ current = pkgs.pop()
+ if debug:
+ self.logger.debug("Packages: handling package requirement %s" % (current))
+ deps = ()
+ for source in sources:
+ if source.is_package(meta, current):
+ try:
+ deps = source.get_deps(meta, current)
+ break
+ except:
+ continue
+ packages.add(current)
+ newdeps = set(deps).difference(examined)
+ if debug and newdeps:
+ self.logger.debug("Packages: Package %s added requirements %s" % (current, newdeps))
+ unclassified.update(newdeps)
+
+ satisfied_vpkgs = set()
+ for current in vpkgs:
+ # virtual dependencies, satisfied if one of N in the config, or can be forced if only one provider
+ if len(vpkg_cache[current]) == 1:
+ if debug:
+ self.logger.debug("Packages: requirement %s satisfied by %s" % (current, vpkg_cache[current]))
+ unclassified.update(vpkg_cache[current].difference(examined))
+ satisfied_vpkgs.add(current)
+ elif [item for item in vpkg_cache[current] if item in packages]:
+ if debug:
+ self.logger.debug("Packages: requirement %s satisfied by %s" % (current, [item for item in vpkg_cache[current] if item in packages]))
+ satisfied_vpkgs.add(current)
+ vpkgs.difference_update(satisfied_vpkgs)
+
+ satisfied_both = set()
+ for current in both:
+ # packages that are both have virtual providers as well as a package with that name
+ # allow use of virt through explicit specification, then fall back to forcing current on last pass
+ if [item for item in vpkg_cache[current] if item in packages]:
+ if debug:
+ self.logger.debug("Packages: requirement %s satisfied by %s" % (current, [item for item in vpkg_cache[current] if item in packages]))
+ satisfied_both.add(current)
+ elif current in input_requirements or final_pass:
+ pkgs.add(current)
+ satisfied_both.add(current)
+ both.difference_update(satisfied_both)
+
+ if len(unclassified) + len(pkgs) == 0:
+ final_pass = True
+ else:
+ final_pass = False
+
+ for source in sources:
+ source.filter_unknown(unknown)
+
+ return packages, unknown, ptype.pop()
+
+ def validate_structures(self, meta, structures):
+ '''Ensure client configurations include all needed prerequisites
+
+ Arguments:
+ meta - client metadata instance
+ structures - a list of structure-stage entry combinations
+ '''
+ if self.disableResolver: return # Config requests no resolver
+
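+        # compute the dependency closure of everything requested and ship the
+        # additions back in a synthetic Independent structure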
+ initial = set([pkg.get('name') for struct in structures \
+ for pkg in struct.findall('Package') +
+ struct.findall('BoundPackage')])
+ news = lxml.etree.Element('Independent')
+ packages, unknown, ptype = self.complete(meta, initial,
+ debug=self.debug_flag)
+ if unknown:
+ self.logger.info("Got unknown entries")
+ self.logger.info(list(unknown))
+ newpkgs = list(packages.difference(initial))
+ newpkgs.sort()
+ for pkg in newpkgs:
+ lxml.etree.SubElement(news, 'BoundPackage', name=pkg,
+ type=ptype, version='auto', origin='Packages')
+ structures.append(news)
+
+ def make_non_redundant(self, meta, plname=None, plist=None):
+ '''build a non-redundant version of a list of packages
+
+ Arguments:
+ meta - client metadata instance
+ plname - name of file containing a list of packages
+ '''
+ if plname is not None:
+ pkgnames = set([x.strip() for x in open(plname).readlines()])
+ elif plist is not None:
+ pkgnames = set(plist)
+ redundant = set()
+ sources = self.get_matching_sources(meta)
+ for source in sources:
+ for pkgname in pkgnames:
+                if source.is_package(meta, pkgname):
+ try:
+ deps = source.get_deps(meta, pkgname)
+ except:
+ continue
+ for rpkg in deps:
+ if rpkg in pkgnames:
+ redundant.add(rpkg)
+ return pkgnames.difference(redundant), redundant
+
+ def Refresh(self):
+ '''Packages.Refresh() => True|False\nReload configuration specification and download sources\n'''
+ self._load_config(force_update=True)
+ return True
+
+ def Reload(self):
+        '''Packages.Reload() => True|False\nReload configuration specification and sources\n'''
+ self._load_config()
+ return True
+
+ def _load_config(self, force_update=False):
+ '''
+ Load the configuration data and setup sources
+
+ Keyword args:
+ force_update Force downloading repo data
+ '''
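+        # illustrative config.xml layout (element names as parsed below;
+        # values are placeholders):
+        # <Sources>
+        #   <Config resolver="enabled" metadata="enabled"/>
+        #   <APTSource>
+        #     <URL>http://example.com/debian/</URL>
+        #     <Version>stable</Version>
+        #     <Component>main</Component>
+        #     <Arch>i386</Arch>
+        #   </APTSource>
+        # </Sources>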
+ self.virt_pkgs = dict()
+ try:
+ xdata = lxml.etree.parse(self.data + '/config.xml')
+ xdata.xinclude()
+ xdata = xdata.getroot()
+ except (lxml.etree.XIncludeError, \
+ lxml.etree.XMLSyntaxError), xmlerr:
+ self.logger.error("Package: Error processing xml: %s" % xmlerr)
+ raise Bcfg2.Server.Plugin.PluginInitError
+ except IOError:
+ self.logger.error("Failed to read Packages configuration. Have" +
+ " you created your config.xml file?")
+ raise Bcfg2.Server.Plugin.PluginInitError
+
+ # Load Packages config
+ config = xdata.xpath('//Sources/Config')
+ if config:
+ if config[0].get("resolver", "enabled").lower() == "disabled":
+ self.logger.info("Packages: Resolver disabled")
+ self.disableResolver = True
+ if config[0].get("metadata", "enabled").lower() == "disabled":
+ self.logger.info("Packages: Metadata disabled")
+ self.disableResolver = True
+ self.disableMetaData = True
+
+ self.sentinels = set()
+ self.sources = []
+ for s in xdata.findall('.//APTSource'):
+ self.sources.append(APTSource(self.cachepath, **source_from_xml(s)))
+ for s in xdata.findall('.//YUMSource'):
+ self.sources.append(YUMSource(self.cachepath, **source_from_xml(s)))
+ for s in xdata.findall('.//PACSource'):
+ self.sources.append(PACSource(self.cachepath, **source_from_xml(s)))
+
+ cachefiles = []
+ for source in self.sources:
+ cachefiles.append(source.cachefile)
+ if not self.disableMetaData: source.setup_data(force_update)
+ self.sentinels.update(source.basegroups)
+ for cfile in glob.glob("%s/cache-*" % self.cachepath):
+ if cfile not in cachefiles:
+ os.unlink(cfile)
+
+ def get_additional_data(self, meta):
+ sdata = []
+ [sdata.extend(copy.deepcopy(src.url_map)) for src in self.get_matching_sources(meta)]
+ return dict(sources=sdata)
diff --git a/build/lib/Bcfg2/Server/Plugins/Pkgmgr.py b/build/lib/Bcfg2/Server/Plugins/Pkgmgr.py
new file mode 100644
index 000000000..b58a7c91d
--- /dev/null
+++ b/build/lib/Bcfg2/Server/Plugins/Pkgmgr.py
@@ -0,0 +1,155 @@
+'''This module implements a package management scheme for all images'''
+__revision__ = '$Revision$'
+
+import logging
+import re
+import Bcfg2.Server.Plugin
+
+logger = logging.getLogger('Bcfg2.Plugins.Pkgmgr')
+
+class FuzzyDict(dict):
+ fuzzy = re.compile('(?P<name>.*):(?P<alist>\S+(,\S+)*)')
+ def __getitem__(self, key):
+ if isinstance(key, str):
+ mdata = self.fuzzy.match(key)
+ if mdata:
+ return dict.__getitem__(self, mdata.groupdict()['name'])
+ else:
+ print "got non-string key %s" % str(key)
+ return dict.__getitem__(self, key)
+
+ def has_key(self, key):
+ if isinstance(key, str):
+ mdata = self.fuzzy.match(key)
+ if self.fuzzy.match(key):
+ return dict.has_key(self, mdata.groupdict()['name'])
+ return dict.has_key(self, key)
+
+ def get(self, key, default=None):
+ try:
+ return self.__getitem__(key)
+        except KeyError:
+            if default is not None:
+ return default
+ raise
+
+class PNode(Bcfg2.Server.Plugin.INode):
+ """PNode has a list of packages available at a particular group intersection."""
+ splitters = {'rpm':re.compile('^(.*/)?(?P<name>[\w\+\d\.]+(-[\w\+\d\.]+)*)-' + \
+ '(?P<version>[\w\d\.]+-([\w\d\.]+))\.(?P<arch>\S+)\.rpm$'),
+ 'encap':re.compile('^(?P<name>[\w-]+)-(?P<version>[\w\d\.+-]+).encap.*$')}
+ ignore = ['Package']
+
+ def Match(self, metadata, data):
+ """Return a dictionary of package mappings."""
+ if self.predicate(metadata):
+ for key in self.contents:
+ try:
+ data[key].update(self.contents[key])
+ except:
+ data[key] = FuzzyDict()
+ data[key].update(self.contents[key])
+ for child in self.children:
+ child.Match(metadata, data)
+
+ def __init__(self, data, pdict, parent=None):
+ # copy local attributes to all child nodes if no local attribute exists
+ if not pdict.has_key('Package'):
+ pdict['Package'] = set()
+ for child in data.getchildren():
+ for attr in [key for key in data.attrib.keys() \
+ if key != 'name' and not child.attrib.has_key(key)]:
+ try:
+ child.set(attr, data.get(attr))
+ except:
+ # don't fail on things like comments and other immutable elements
+ pass
+ Bcfg2.Server.Plugin.INode.__init__(self, data, pdict, parent)
+ if not self.contents.has_key('Package'):
+ self.contents['Package'] = FuzzyDict()
+ for pkg in data.findall('./Package'):
+ if pkg.attrib.has_key('name') and pkg.get('name') not in pdict['Package']:
+ pdict['Package'].add(pkg.get('name'))
+ if pkg.get('name') != None:
+ self.contents['Package'][pkg.get('name')] = {}
+ if pkg.getchildren():
+ self.contents['Package'][pkg.get('name')]['__children__'] \
+ = pkg.getchildren()
+ if pkg.attrib.has_key('simplefile'):
+ pkg.set('url', "%s/%s" % (pkg.get('uri'), pkg.get('simplefile')))
+ self.contents['Package'][pkg.get('name')].update(pkg.attrib)
+ else:
+ if pkg.attrib.has_key('file'):
+ if pkg.attrib.has_key('multiarch'):
+ archs = pkg.get('multiarch').split()
+ srcs = pkg.get('srcs', pkg.get('multiarch')).split()
+ url = ' '.join(["%s/%s" % (pkg.get('uri'), pkg.get('file') % {'src':srcs[idx], 'arch':archs[idx]})
+ for idx in range(len(archs))])
+ pkg.set('url', url)
+ else:
+ pkg.set('url', '%s/%s' % (pkg.get('uri'), pkg.get('file')))
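+                # derive name/version/arch by pattern-matching the package
+                # filename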
+ if self.splitters.has_key(pkg.get('type')) and pkg.get('file') != None:
+ mdata = self.splitters[pkg.get('type')].match(pkg.get('file'))
+ if not mdata:
+ logger.error("Failed to match pkg %s" % pkg.get('file'))
+ continue
+ pkgname = mdata.group('name')
+ self.contents['Package'][pkgname] = mdata.groupdict()
+ self.contents['Package'][pkgname].update(pkg.attrib)
+ if pkg.attrib.get('file'):
+ self.contents['Package'][pkgname]['url'] = pkg.get('url')
+ self.contents['Package'][pkgname]['type'] = pkg.get('type')
+ if pkg.get('verify'):
+ self.contents['Package'][pkgname]['verify'] = pkg.get('verify')
+ if pkg.get('multiarch'):
+ self.contents['Package'][pkgname]['multiarch'] = pkg.get('multiarch')
+ if pkgname not in pdict['Package']:
+ pdict['Package'].add(pkgname)
+ if pkg.getchildren():
+ self.contents['Package'][pkgname]['__children__'] = pkg.getchildren()
+ else:
+ self.contents['Package'][pkg.get('name')].update(pkg.attrib)
+
+
+class PkgSrc(Bcfg2.Server.Plugin.XMLSrc):
+ """PkgSrc files contain a PNode hierarchy that returns matching package entries."""
+ __node__ = PNode
+ __cacheobj__ = FuzzyDict
+
+class Pkgmgr(Bcfg2.Server.Plugin.PrioDir):
+ """This is a generator that handles package assignments."""
+ name = 'Pkgmgr'
+ __version__ = '$Id$'
+ __author__ = 'bcfg-dev@mcs.anl.gov'
+ __child__ = PkgSrc
+ __element__ = 'Package'
+
+ def HandleEvent(self, event):
+ '''Handle events and update dispatch table'''
+ Bcfg2.Server.Plugin.XMLDirectoryBacked.HandleEvent(self, event)
+ for src in self.entries.values():
+ for itype, children in src.items.iteritems():
+ for child in children:
+ try:
+ self.Entries[itype][child] = self.BindEntry
+ except KeyError:
+ self.Entries[itype] = FuzzyDict([(child,
+ self.BindEntry)])
+
+ def BindEntry(self, entry, metadata):
+ """Bind data for entry, and remove instances that are not requested."""
+ pname = entry.get('name')
+ Bcfg2.Server.Plugin.PrioDir.BindEntry(self, entry, metadata)
+ if entry.findall('Instance'):
+ mdata = FuzzyDict.fuzzy.match(pname)
+ if mdata:
+ arches = mdata.group('alist').split(',')
+ [entry.remove(inst) for inst in \
+ entry.findall('Instance') \
+ if inst.get('arch') not in arches]
+
+ def HandlesEntry(self, entry, metadata):
+ return entry.tag == 'Package' and entry.get('name').split(':')[0] in self.Entries['Package'].keys()
+
+ def HandleEntry(self, entry, metadata):
+ self.BindEntry(entry, metadata)
diff --git a/build/lib/Bcfg2/Server/Plugins/Probes.py b/build/lib/Bcfg2/Server/Plugins/Probes.py
new file mode 100644
index 000000000..c00185732
--- /dev/null
+++ b/build/lib/Bcfg2/Server/Plugins/Probes.py
@@ -0,0 +1,150 @@
+import lxml.etree
+import re
+
+import Bcfg2.Server.Plugin
+
+specific_probe_matcher = re.compile("(.*/)?(?P<basename>\S+)(.(?P<mode>[GH](\d\d)?)_\S+)")
+probe_matcher = re.compile("(.*/)?(?P<basename>\S+)")
+
+class ProbeSet(Bcfg2.Server.Plugin.EntrySet):
+ ignore = re.compile("^(\.#.*|.*~|\\..*\\.(tmp|sw[px])|probed\\.xml)$")
+ def __init__(self, path, fam, encoding, plugin_name):
+ fpattern = '[0-9A-Za-z_\-]+'
+ self.plugin_name = plugin_name
+ Bcfg2.Server.Plugin.EntrySet.__init__(self, fpattern, path,
+ Bcfg2.Server.Plugin.SpecificData,
+ encoding)
+ fam.AddMonitor(path, self)
+ self.bangline = re.compile('^#!(?P<interpreter>.*)$')
+
+ def HandleEvent(self, event):
+ if event.filename != self.path:
+ return self.handle_event(event)
+
+ def get_probe_data(self, metadata):
+ ret = []
+ build = dict()
+ candidates = self.get_matching(metadata)
+ candidates.sort(lambda x, y: cmp(x.specific, y.specific))
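+        # candidates are ordered by specificity; the first entry kept for each
+        # probe basename wins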
+ for entry in candidates:
+ rem = specific_probe_matcher.match(entry.name)
+ if not rem:
+ rem = probe_matcher.match(entry.name)
+ pname = rem.group('basename')
+ if pname not in build:
+ build[pname] = entry
+
+ for (name, entry) in build.iteritems():
+ probe = lxml.etree.Element('probe')
+ probe.set('name', name.split('/')[-1])
+ probe.set('source', self.plugin_name)
+ probe.text = entry.data
+ match = self.bangline.match(entry.data.split('\n')[0])
+ if match:
+ probe.set('interpreter', match.group('interpreter'))
+ else:
+ probe.set('interpreter', '/bin/sh')
+ ret.append(probe)
+ return ret
+
+class Probes(Bcfg2.Server.Plugin.Plugin,
+ Bcfg2.Server.Plugin.Probing,
+ Bcfg2.Server.Plugin.Connector):
+ """A plugin to gather information from a client machine."""
+ name = 'Probes'
+ __version__ = '$Id$'
+ __author__ = 'bcfg-dev@mcs.anl.gov'
+
+ def __init__(self, core, datastore):
+ Bcfg2.Server.Plugin.Plugin.__init__(self, core, datastore)
+ Bcfg2.Server.Plugin.Connector.__init__(self)
+ Bcfg2.Server.Plugin.Probing.__init__(self)
+
+ try:
+ self.probes = ProbeSet(self.data, core.fam, core.encoding,
+ self.name)
+ except:
+ raise Bcfg2.Server.Plugin.PluginInitError
+
+ self.probedata = dict()
+ self.cgroups = dict()
+ self.load_data()
+
+ def write_data(self):
+ """Write probe data out for use with bcfg2-info."""
+ top = lxml.etree.Element("Probed")
+ for client, probed in self.probedata.iteritems():
+ cx = lxml.etree.SubElement(top, 'Client', name=client)
+ for probe in probed:
+ lxml.etree.SubElement(cx, 'Probe', name=probe,
+ value=self.probedata[client][probe])
+ for group in self.cgroups[client]:
+ lxml.etree.SubElement(cx, "Group", name=group)
+ data = lxml.etree.tostring(top, encoding='UTF-8', xml_declaration=True,
+                                   pretty_print=True)
+ try:
+ datafile = open("%s/%s" % (self.data, 'probed.xml'), 'w')
+ except IOError:
+ self.logger.error("Failed to write probed.xml")
+ datafile.write(data)
+
+ def load_data(self):
+ try:
+ data = lxml.etree.parse(self.data + '/probed.xml').getroot()
+ except:
+ self.logger.error("Failed to read file probed.xml")
+ return
+ self.probedata = {}
+ self.cgroups = {}
+ for client in data.getchildren():
+ self.probedata[client.get('name')] = {}
+            self.cgroups[client.get('name')] = []
+ for pdata in client:
+ if (pdata.tag == 'Probe'):
+ self.probedata[client.get('name')][pdata.get('name')] = pdata.get('value')
+ elif (pdata.tag == 'Group'):
+ self.cgroups[client.get('name')].append(pdata.get('name'))
+
+ def GetProbes(self, meta, force=False):
+ """Return a set of probes for execution on client."""
+ return self.probes.get_probe_data(meta)
+
+ def ReceiveData(self, client, datalist):
+ self.cgroups[client.hostname] = []
+ self.probedata[client.hostname] = {}
+ for data in datalist:
+ self.ReceiveDataItem(client, data)
+ self.write_data()
+
+ def ReceiveDataItem(self, client, data):
+ """Receive probe results pertaining to client."""
+ if not self.cgroups.has_key(client.hostname):
+ self.cgroups[client.hostname] = []
+        if data.text is None:
+ self.logger.error("Got null response to probe %s from %s" % \
+ (data.get('name'), client.hostname))
+ try:
+ self.probedata[client.hostname].update({data.get('name'): ''})
+ except KeyError:
+ self.probedata[client.hostname] = {data.get('name'): ''}
+ return
+ dlines = data.text.split('\n')
+ self.logger.debug("%s:probe:%s:%s" % (client.hostname,
+ data.get('name'), [line.strip() for line in dlines]))
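+        # lines of the form "group:<name>" assert dynamic group membership and
+        # are stripped from the stored probe output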
+ for line in dlines[:]:
+ if line.split(':')[0] == 'group':
+ newgroup = line.split(':')[1].strip()
+ if newgroup not in self.cgroups[client.hostname]:
+ self.cgroups[client.hostname].append(newgroup)
+ dlines.remove(line)
+ dtext = "\n".join(dlines)
+ try:
+ self.probedata[client.hostname].update({data.get('name'):dtext})
+ except KeyError:
+ self.probedata[client.hostname] = {data.get('name'):dtext}
+
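+    # Illustrative example (hypothetical probe name and output): a probe
+    # called "os" might return the following text from a client:
+    #
+    #   group:rhel-5
+    #   group:x86_64
+    #   2.6.18-194.el5
+    #
+    # ReceiveDataItem() above strips the "group:" lines into
+    # self.cgroups[hostname] and stores the remaining text as the value of
+    # the "os" probe in self.probedata[hostname].
+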
+ def get_additional_groups(self, meta):
+ return self.cgroups.get(meta.hostname, list())
+
+ def get_additional_data(self, meta):
+ return self.probedata.get(meta.hostname, dict())
diff --git a/build/lib/Bcfg2/Server/Plugins/Properties.py b/build/lib/Bcfg2/Server/Plugins/Properties.py
new file mode 100644
index 000000000..86330f6a0
--- /dev/null
+++ b/build/lib/Bcfg2/Server/Plugins/Properties.py
@@ -0,0 +1,37 @@
+import copy
+import lxml.etree
+
+import Bcfg2.Server.Plugin
+
+
+class PropertyFile(Bcfg2.Server.Plugin.XMLFileBacked):
+ """Class for properties files."""
+
+ def Index(self):
+ """Build data into an xml object."""
+ try:
+ self.data = lxml.etree.XML(self.data)
+ except lxml.etree.XMLSyntaxError:
+ Bcfg2.Server.Plugin.logger.error("Failed to parse %s" % self.name)
+
+
+class PropDirectoryBacked(Bcfg2.Server.Plugin.DirectoryBacked):
+ __child__ = PropertyFile
+
+
+class Properties(Bcfg2.Server.Plugin.Plugin,
+ Bcfg2.Server.Plugin.Connector):
+ """
+ The properties plugin maps property
+ files into client metadata instances.
+ """
+ name = 'Properties'
+ version = '$Revision$'
+
+ def __init__(self, core, datastore):
+ Bcfg2.Server.Plugin.Plugin.__init__(self, core, datastore)
+ Bcfg2.Server.Plugin.Connector.__init__(self)
+ self.store = PropDirectoryBacked(self.data, core.fam)
+
+ def get_additional_data(self, _):
+ return copy.deepcopy(self.store.entries)
diff --git a/build/lib/Bcfg2/Server/Plugins/Rules.py b/build/lib/Bcfg2/Server/Plugins/Rules.py
new file mode 100644
index 000000000..eb0547cdb
--- /dev/null
+++ b/build/lib/Bcfg2/Server/Plugins/Rules.py
@@ -0,0 +1,11 @@
+"""This generator provides rule-based entry mappings."""
+__revision__ = '$Revision$'
+
+import Bcfg2.Server.Plugin
+
+
+class Rules(Bcfg2.Server.Plugin.PrioDir):
+ """This is a generator that handles service assignments."""
+ name = 'Rules'
+ __version__ = '$Id$'
+ __author__ = 'bcfg-dev@mcs.anl.gov'
diff --git a/build/lib/Bcfg2/Server/Plugins/SGenshi.py b/build/lib/Bcfg2/Server/Plugins/SGenshi.py
new file mode 100644
index 000000000..cead06e34
--- /dev/null
+++ b/build/lib/Bcfg2/Server/Plugins/SGenshi.py
@@ -0,0 +1,76 @@
+'''This module implements a templating generator based on Genshi'''
+__revision__ = '$Revision$'
+
+import genshi.input
+import genshi.template
+import lxml.etree
+import logging
+
+import Bcfg2.Server.Plugin
+import Bcfg2.Server.Plugins.TGenshi
+
+logger = logging.getLogger('Bcfg2.Plugins.SGenshi')
+
+
+class SGenshiTemplateFile(Bcfg2.Server.Plugins.TGenshi.TemplateFile):
+
+ def get_xml_value(self, metadata):
+ if not hasattr(self, 'template'):
+ logger.error("No parsed template information for %s" % (self.name))
+ raise Bcfg2.Server.Plugin.PluginExecutionError
+ try:
+ stream = self.template.generate(metadata=metadata,).filter( \
+ Bcfg2.Server.Plugins.TGenshi.removecomment)
+ data = stream.render('xml', strip_whitespace=False)
+ return lxml.etree.XML(data)
+ except LookupError, lerror:
+ logger.error('Genshi lookup error: %s' % lerror)
+ except genshi.template.TemplateError, terror:
+ logger.error('Genshi template error: %s' % terror)
+ except genshi.input.ParseError, perror:
+ logger.error('Genshi parse error: %s' % perror)
+ raise
+
+
+class SGenshiEntrySet(Bcfg2.Server.Plugin.EntrySet):
+
+ def __init__(self, path, fam, encoding):
+ fpattern = '\S+\.xml'
+ Bcfg2.Server.Plugin.EntrySet.__init__(self, fpattern, path,
+ SGenshiTemplateFile, encoding)
+ fam.AddMonitor(path, self)
+
+ def HandleEvent(self, event):
+ '''passthrough event handler for old calling convention'''
+ if event.filename != self.path:
+ return self.handle_event(event)
+
+ def BuildStructures(self, metadata):
+ """Build SGenshi structures."""
+ ret = []
+ for entry in self.get_matching(metadata):
+ try:
+ ret.append(entry.get_xml_value(metadata))
+ except:
+ logger.error("SGenshi: Failed to template file %s" % entry.name)
+ return ret
+
+
+class SGenshi(SGenshiEntrySet,
+ Bcfg2.Server.Plugin.Plugin,
+ Bcfg2.Server.Plugin.Structure):
+ """The SGenshi plugin provides templated structures."""
+ name = 'SGenshi'
+ __version__ = '$Id$'
+ __author__ = 'bcfg-dev@mcs.anl.gov'
+ deprecated = True
+
+ def __init__(self, core, datastore):
+ Bcfg2.Server.Plugin.Plugin.__init__(self, core, datastore)
+ Bcfg2.Server.Plugin.Structure.__init__(self)
+ try:
+ SGenshiEntrySet.__init__(self, self.data, self.core.fam, core.encoding)
+ except:
+ logger.error("Failed to load %s repository; disabling %s" \
+ % (self.name, self.name))
+ raise Bcfg2.Server.Plugin.PluginInitError
diff --git a/build/lib/Bcfg2/Server/Plugins/SSHbase.py b/build/lib/Bcfg2/Server/Plugins/SSHbase.py
new file mode 100644
index 000000000..6d68ecb0a
--- /dev/null
+++ b/build/lib/Bcfg2/Server/Plugins/SSHbase.py
@@ -0,0 +1,279 @@
+'''This module manages ssh key files for bcfg2'''
+__revision__ = '$Revision$'
+
+import binascii
+import os
+import socket
+import shutil
+import tempfile
+from subprocess import Popen, PIPE
+import Bcfg2.Server.Plugin
+
+
+class SSHbase(Bcfg2.Server.Plugin.Plugin,
+ Bcfg2.Server.Plugin.Generator,
+ Bcfg2.Server.Plugin.DirectoryBacked,
+ Bcfg2.Server.Plugin.PullTarget):
+ """
+ The sshbase generator manages ssh host keys (both v1 and v2)
+ for hosts. It also manages the ssh_known_hosts file. It can
+ integrate host keys from other management domains and similarly
+ export its keys. The repository contains files in the following
+ formats:
+
+ ssh_host_key.H_(hostname) -> the v1 host private key for
+ (hostname)
+ ssh_host_key.pub.H_(hostname) -> the v1 host public key
+ for (hostname)
+ ssh_host_(dr)sa_key.H_(hostname) -> the v2 ssh host
+ private key for (hostname)
+ ssh_host_(dr)sa_key.pub.H_(hostname) -> the v2 ssh host
+ public key for (hostname)
+ ssh_known_hosts -> the current known hosts file. this
+ is regenerated each time a new key is generated.
+
+ """
+ name = 'SSHbase'
+ __version__ = '$Id$'
+ __author__ = 'bcfg-dev@mcs.anl.gov'
+
+ pubkeys = ["ssh_host_dsa_key.pub.H_%s",
+ "ssh_host_rsa_key.pub.H_%s", "ssh_host_key.pub.H_%s"]
+ hostkeys = ["ssh_host_dsa_key.H_%s",
+ "ssh_host_rsa_key.H_%s", "ssh_host_key.H_%s"]
+ keypatterns = ['ssh_host_dsa_key', 'ssh_host_rsa_key', 'ssh_host_key',
+ 'ssh_host_dsa_key.pub', 'ssh_host_rsa_key.pub',
+ 'ssh_host_key.pub']
+
+ def __init__(self, core, datastore):
+ Bcfg2.Server.Plugin.Plugin.__init__(self, core, datastore)
+ Bcfg2.Server.Plugin.Generator.__init__(self)
+ Bcfg2.Server.Plugin.PullTarget.__init__(self)
+ try:
+ Bcfg2.Server.Plugin.DirectoryBacked.__init__(self, self.data,
+ self.core.fam)
+ except OSError, ioerr:
+ self.logger.error("Failed to load SSHbase repository from %s" \
+ % (self.data))
+ self.logger.error(ioerr)
+ raise Bcfg2.Server.Plugin.PluginInitError
+ self.Entries = {'Path':
+ {'/etc/ssh/ssh_known_hosts': self.build_skn,
+ '/etc/ssh/ssh_host_dsa_key': self.build_hk,
+ '/etc/ssh/ssh_host_rsa_key': self.build_hk,
+ '/etc/ssh/ssh_host_dsa_key.pub': self.build_hk,
+ '/etc/ssh/ssh_host_rsa_key.pub': self.build_hk,
+ '/etc/ssh/ssh_host_key': self.build_hk,
+ '/etc/ssh/ssh_host_key.pub': self.build_hk}}
+ self.ipcache = {}
+ self.namecache = {}
+ self.__skn = False
+
+ def get_skn(self):
+ """Build memory cache of the ssh known hosts file."""
+ if not self.__skn:
+ self.__skn = "\n".join([value.data for key, value in \
+ self.entries.iteritems() if \
+ key.endswith('.static')])
+ names = dict()
+ # if no metadata is registered yet, defer
+ if len(self.core.metadata.query.all()) == 0:
+ self.__skn = False
+ return self.__skn
+ for cmeta in self.core.metadata.query.all():
+ names[cmeta.hostname] = set([cmeta.hostname])
+ names[cmeta.hostname].update(cmeta.aliases)
+ newnames = set()
+ newips = set()
+ for name in names[cmeta.hostname]:
+ newnames.add(name.split('.')[0])
+ try:
+ newips.add(self.get_ipcache_entry(name)[0])
+ except:
+ continue
+ names[cmeta.hostname].update(newnames)
+ names[cmeta.hostname].update(cmeta.addresses)
+ names[cmeta.hostname].update(newips)
+ # TODO: Only perform reverse lookups on IPs if an option is set.
+ if True:
+ for ip in newips:
+ try:
+ names[cmeta.hostname].update(self.get_namecache_entry(ip))
+ except:
+ continue
+ names[cmeta.hostname] = sorted(names[cmeta.hostname])
+ # now we have our name cache
+ pubkeys = [pubk for pubk in self.entries.keys() \
+ if pubk.find('.pub.H_') != -1]
+ pubkeys.sort()
+ badnames = set()
+ for pubkey in pubkeys:
+ hostname = pubkey.split('H_')[1]
+ if hostname not in names:
+ if hostname not in badnames:
+ badnames.add(hostname)
+ self.logger.error("SSHbase: Unknown host %s; ignoring public keys" % hostname)
+ continue
+ self.__skn += "%s %s" % (','.join(names[hostname]),
+ self.entries[pubkey].data)
+ return self.__skn
+
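+    # Illustrative example (hypothetical host names and addresses): for a
+    # client "node1.example.com" with alias "node1" and address 10.0.0.5,
+    # get_skn() emits a known_hosts line of roughly the form
+    #
+    #   10.0.0.5,node1,node1.example.com ssh-rsa AAAA... root@node1
+    #
+    # i.e. the comma-joined, sorted name cache followed by the stored public
+    # key data for that host.
+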
+ def set_skn(self, value):
+ """Set backing data for skn."""
+ self.__skn = value
+ skn = property(get_skn, set_skn)
+
+ def HandleEvent(self, event=None):
+ """Local event handler that does skn regen on pubkey change."""
+ Bcfg2.Server.Plugin.DirectoryBacked.HandleEvent(self, event)
+ if event and '_key.pub.H_' in event.filename:
+ self.skn = False
+ if event and event.filename.endswith('.static'):
+ self.skn = False
+ if not self.__skn:
+ if (len(self.entries.keys())) >= (len(os.listdir(self.data))-1):
+ _ = self.skn
+
+ def HandlesEntry(self, entry, _):
+ """Handle key entries dynamically."""
+ return entry.tag == 'Path' and \
+ ([fpat for fpat in self.keypatterns
+ if entry.get('name').endswith(fpat)]
+ or entry.get('name').endswith('ssh_known_hosts'))
+
+ def HandleEntry(self, entry, metadata):
+ """Bind data."""
+ if entry.get('name').endswith('ssh_known_hosts'):
+ return self.build_skn(entry, metadata)
+ else:
+ return self.build_hk(entry, metadata)
+
+ def get_ipcache_entry(self, client):
+ """Build a cache of dns results."""
+ if client in self.ipcache:
+ if self.ipcache[client]:
+ return self.ipcache[client]
+ else:
+ raise socket.gaierror
+ else:
+ # need to add entry
+ try:
+ ipaddr = socket.gethostbyname(client)
+ self.ipcache[client] = (ipaddr, client)
+ return (ipaddr, client)
+ except socket.gaierror:
+ cmd = "getent hosts %s" % client
+ ipaddr = Popen(cmd, shell=True, \
+ stdout=PIPE).stdout.read().strip().split()
+ if ipaddr:
+ self.ipcache[client] = (ipaddr, client)
+ return (ipaddr, client)
+ self.ipcache[client] = False
+ self.logger.error("Failed to find IP address for %s" % client)
+ raise socket.gaierror
+
+ def get_namecache_entry(self, cip):
+ """Build a cache of name lookups from client IP addresses."""
+ if cip in self.namecache:
+ # lookup cached name from IP
+ if self.namecache[cip]:
+ return self.namecache[cip]
+ else:
+ raise socket.gaierror
+ else:
+ # add an entry that has not been cached
+ try:
+ rvlookup = socket.gethostbyaddr(cip)
+ if rvlookup[0]:
+ self.namecache[cip] = [rvlookup[0]]
+ else:
+ self.namecache[cip] = []
+ self.namecache[cip].extend(rvlookup[1])
+ return self.namecache[cip]
+ except socket.gaierror:
+ self.namecache[cip] = False
+ self.logger.error("Failed to find any names associated with IP address %s" % cip)
+ raise
+
+ def build_skn(self, entry, metadata):
+ """This function builds builds a host specific known_hosts file."""
+ client = metadata.hostname
+ entry.text = self.skn
+ hostkeys = [keytmpl % client for keytmpl in self.pubkeys \
+ if (keytmpl % client) in self.entries]
+ hostkeys.sort()
+ for hostkey in hostkeys:
+ entry.text += "localhost,localhost.localdomain,127.0.0.1 %s" % (
+ self.entries[hostkey].data)
+ permdata = {'owner':'root',
+ 'group':'root',
+ 'type':'file',
+ 'perms':'0644'}
+ [entry.attrib.__setitem__(key, permdata[key]) for key in permdata]
+
+ def build_hk(self, entry, metadata):
+ """This binds host key data into entries."""
+ client = metadata.hostname
+ filename = "%s.H_%s" % (entry.get('name').split('/')[-1], client)
+ if filename not in self.entries.keys():
+ self.GenerateHostKeys(client)
+ if not filename in self.entries:
+ self.logger.error("%s still not registered" % filename)
+ raise Bcfg2.Server.Plugin.PluginExecutionError
+ keydata = self.entries[filename].data
+ permdata = {'owner':'root',
+ 'group':'root',
+ 'type':'file',
+ 'perms':'0600'}
+ if entry.get('name')[-4:] == '.pub':
+ permdata['perms'] = '0644'
+ [entry.attrib.__setitem__(key, permdata[key]) for key in permdata]
+ if "ssh_host_key.H_" == filename[:15]:
+ entry.attrib['encoding'] = 'base64'
+ entry.text = binascii.b2a_base64(keydata)
+ else:
+ entry.text = keydata
+
+ def GenerateHostKeys(self, client):
+ """Generate new host keys for client."""
+ keylist = [keytmpl % client for keytmpl in self.hostkeys]
+ for hostkey in keylist:
+ if 'ssh_host_rsa_key.H_' == hostkey[:19]:
+ keytype = 'rsa'
+ elif 'ssh_host_dsa_key.H_' == hostkey[:19]:
+ keytype = 'dsa'
+ else:
+ keytype = 'rsa1'
+
+ if hostkey not in self.entries.keys():
+ fileloc = "%s/%s" % (self.data, hostkey)
+ publoc = self.data + '/' + ".".join([hostkey.split('.')[0],
+ 'pub',
+ "H_%s" % client])
+ tempdir = tempfile.mkdtemp()
+ temploc = "%s/%s" % (tempdir, hostkey)
+ cmd = 'ssh-keygen -q -f %s -N "" -t %s -C root@%s < /dev/null'
+ os.system(cmd % (temploc, keytype, client))
+ shutil.copy(temploc, fileloc)
+ shutil.copy("%s.pub" % temploc, publoc)
+ self.AddEntry(hostkey)
+ self.AddEntry(".".join([hostkey.split('.')[0]]+['pub', "H_%s" \
+ % client]))
+ try:
+ os.unlink(temploc)
+ os.unlink("%s.pub" % temploc)
+ os.rmdir(tempdir)
+ except OSError:
+ self.logger.error("Failed to unlink temporary ssh keys")
+
+ def AcceptChoices(self, _, metadata):
+ return [Bcfg2.Server.Plugin.Specificity(hostname=metadata.hostname)]
+
+ def AcceptPullData(self, specific, entry, log):
+ """Per-plugin bcfg2-admin pull support."""
+ # specific will always be host specific
+ filename = "%s/%s.H_%s" % (self.data, entry['name'].split('/')[-1],
+ specific.hostname)
+ open(filename, 'w').write(entry['text'])
+ if log:
+ print "Wrote file %s" % filename
diff --git a/build/lib/Bcfg2/Server/Plugins/SSLCA.py b/build/lib/Bcfg2/Server/Plugins/SSLCA.py
new file mode 100644
index 000000000..0dc448e69
--- /dev/null
+++ b/build/lib/Bcfg2/Server/Plugins/SSLCA.py
@@ -0,0 +1,239 @@
+import Bcfg2.Server.Plugin
+import Bcfg2.Options
+import lxml.etree
+import posixpath
+import tempfile
+import os
+from subprocess import Popen, PIPE, STDOUT
+from ConfigParser import ConfigParser
+
+class SSLCA(Bcfg2.Server.Plugin.GroupSpool):
+ """
+ The SSLCA generator handles the creation and
+ management of ssl certificates and their keys.
+ """
+ name = 'SSLCA'
+ __version__ = '$Id:$'
+ __author__ = 'g.hagger@gmail.com'
+ __child__ = Bcfg2.Server.Plugin.FileBacked
+ key_specs = {}
+ cert_specs = {}
+ CAs = {}
+
+ def HandleEvent(self, event=None):
+ """
+ Updates which files this plugin handles based upon filesystem events.
+ Allows configuration items to be added/removed without server restarts.
+ """
+ action = event.code2str()
+ if event.filename[0] == '/':
+ return
+ epath = "".join([self.data, self.handles[event.requestID],
+ event.filename])
+ if posixpath.isdir(epath):
+ ident = self.handles[event.requestID] + event.filename
+ else:
+ ident = self.handles[event.requestID][:-1]
+
+ fname = "".join([ident, '/', event.filename])
+
+ if event.filename.endswith('.xml'):
+ if action in ['exists', 'created', 'changed']:
+ if event.filename.endswith('key.xml'):
+ key_spec = dict(lxml.etree.parse(epath).find('Key').items())
+ self.key_specs[ident] = {
+ 'bits': key_spec.get('bits', 2048),
+ 'type': key_spec.get('type', 'rsa')
+ }
+ self.Entries['Path'][ident] = self.get_key
+ elif event.filename.endswith('cert.xml'):
+ cert_spec = dict(lxml.etree.parse(epath).find('Cert').items())
+ ca = cert_spec.get('ca', 'default')
+ self.cert_specs[ident] = {
+ 'ca': ca,
+ 'format': cert_spec.get('format', 'pem'),
+ 'key': cert_spec.get('key'),
+ 'days': cert_spec.get('days', 365),
+ 'C': cert_spec.get('c'),
+ 'L': cert_spec.get('l'),
+ 'ST': cert_spec.get('st'),
+ 'OU': cert_spec.get('ou'),
+ 'O': cert_spec.get('o'),
+ 'emailAddress': cert_spec.get('emailaddress')
+ }
+ cp = ConfigParser()
+ cp.read(self.core.cfile)
+ self.CAs[ca] = dict(cp.items('sslca_'+ca))
+ self.Entries['Path'][ident] = self.get_cert
+ if action == 'deleted':
+ if ident in self.Entries['Path']:
+ del self.Entries['Path'][ident]
+ else:
+ if action in ['exists', 'created']:
+ if posixpath.isdir(epath):
+ self.AddDirectoryMonitor(epath[len(self.data):])
+ if ident not in self.entries and posixpath.isfile(epath):
+ self.entries[fname] = self.__child__(epath)
+ self.entries[fname].HandleEvent(event)
+ if action == 'changed':
+ self.entries[fname].HandleEvent(event)
+ elif action == 'deleted':
+ if fname in self.entries:
+ del self.entries[fname]
+ else:
+ self.entries[fname].HandleEvent(event)
+
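+    # Illustrative example (hypothetical paths and values): a spec directory
+    # such as SSLCA/etc/pki/tls/private/server.key/ could hold a key.xml of
+    #
+    #   <Key type="rsa" bits="2048"/>
+    #
+    # and a matching cert spec directory could hold a cert.xml of
+    #
+    #   <Cert ca="default" key="/etc/pki/tls/private/server.key" days="365"
+    #         c="US" o="Example Org" emailaddress="admin@example.com"/>
+    #
+    # with an [sslca_default] section in bcfg2.conf supplying at least
+    # "config" (the openssl CA configuration) and optionally "passphrase"
+    # and "chaincert", as read above.
+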
+ def get_key(self, entry, metadata):
+ """
+        Either grabs a preexisting key hostfile, or triggers the generation
+        of a new key if one doesn't exist.
+ """
+ # set path type and permissions, otherwise bcfg2 won't bind the file
+ permdata = {'owner':'root',
+ 'group':'root',
+ 'type':'file',
+ 'perms':'644'}
+ [entry.attrib.__setitem__(key, permdata[key]) for key in permdata]
+
+ # check if we already have a hostfile, or need to generate a new key
+ # TODO: verify key fits the specs
+ path = entry.get('name')
+ filename = "".join([path, '/', path.rsplit('/', 1)[1], '.H_', metadata.hostname])
+ if filename not in self.entries.keys():
+ key = self.build_key(filename, entry, metadata)
+ open(self.data + filename, 'w').write(key)
+ entry.text = key
+ else:
+ entry.text = self.entries[filename].data
+
+ def build_key(self, filename, entry, metadata):
+ """
+        Generates a new key according to the specification.
+ """
+ type = self.key_specs[entry.get('name')]['type']
+ bits = self.key_specs[entry.get('name')]['bits']
+ if type == 'rsa':
+ cmd = "openssl genrsa %s " % bits
+ elif type == 'dsa':
+ cmd = "openssl dsaparam -noout -genkey %s" % bits
+ key = Popen(cmd, shell=True, stdout=PIPE).stdout.read()
+ return key
+
+ def get_cert(self, entry, metadata):
+ """
+        Either grabs a preexisting cert hostfile, or triggers the generation
+        of a new cert if one doesn't exist.
+ """
+ # set path type and permissions, otherwise bcfg2 won't bind the file
+ permdata = {'owner':'root',
+ 'group':'root',
+ 'type':'file',
+ 'perms':'644'}
+ [entry.attrib.__setitem__(key, permdata[key]) for key in permdata]
+
+ path = entry.get('name')
+ filename = "".join([path, '/', path.rsplit('/', 1)[1], '.H_', metadata.hostname])
+
+ # first - ensure we have a key to work with
+ key = self.cert_specs[entry.get('name')].get('key')
+ key_filename = "".join([key, '/', key.rsplit('/', 1)[1], '.H_', metadata.hostname])
+ if key_filename not in self.entries:
+ e = lxml.etree.Element('Path')
+ e.attrib['name'] = key
+ self.core.Bind(e, metadata)
+
+ # check if we have a valid hostfile
+ if filename in self.entries.keys() and self.verify_cert(filename, entry):
+ entry.text = self.entries[filename].data
+ else:
+ cert = self.build_cert(key_filename, entry, metadata)
+ open(self.data + filename, 'w').write(cert)
+ entry.text = cert
+
+ def verify_cert(self, filename, entry):
+ """
+ check that a certificate validates against the ca cert,
+ and that it has not expired.
+ """
+ chaincert = self.CAs[self.cert_specs[entry.get('name')]['ca']].get('chaincert')
+ cert = self.data + filename
+ cmd = "openssl verify -CAfile %s %s" % (chaincert, cert)
+ res = Popen(cmd, shell=True, stdout=PIPE, stderr=STDOUT).stdout.read()
+ if res == cert + ": OK\n":
+ return True
+ return False
+
+ def build_cert(self, key_filename, entry, metadata):
+ """
+ creates a new certificate according to the specification
+ """
+ req_config = self.build_req_config(entry, metadata)
+ req = self.build_request(key_filename, req_config, entry)
+ ca = self.cert_specs[entry.get('name')]['ca']
+ ca_config = self.CAs[ca]['config']
+ days = self.cert_specs[entry.get('name')]['days']
+ passphrase = self.CAs[ca].get('passphrase')
+ if passphrase:
+ cmd = "openssl ca -config %s -in %s -days %s -batch -passin pass:%s" % (ca_config, req, days, passphrase)
+ else:
+ cmd = "openssl ca -config %s -in %s -days %s -batch" % (ca_config, req, days)
+ cert = Popen(cmd, shell=True, stdout=PIPE).stdout.read()
+ try:
+ os.unlink(req_config)
+ os.unlink(req)
+ except OSError:
+ self.logger.error("Failed to unlink temporary files")
+ return cert
+
+ def build_req_config(self, entry, metadata):
+ """
+ generates a temporary openssl configuration file that is
+ used to generate the required certificate request
+ """
+ # create temp request config file
+ conffile = open(tempfile.mkstemp()[1], 'w')
+ cp = ConfigParser({})
+ cp.optionxform = str
+ defaults = {
+ 'req': {
+ 'default_md': 'sha1',
+ 'distinguished_name': 'req_distinguished_name',
+ 'req_extensions': 'v3_req',
+ 'x509_extensions': 'v3_req',
+ 'prompt': 'no'
+ },
+ 'req_distinguished_name': {},
+ 'v3_req': {
+ 'subjectAltName': '@alt_names'
+ },
+ 'alt_names': {}
+ }
+ for section in defaults.keys():
+ cp.add_section(section)
+ for key in defaults[section]:
+ cp.set(section, key, defaults[section][key])
+ x = 1
+ altnames = list(metadata.aliases)
+ altnames.append(metadata.hostname)
+ for altname in altnames:
+ cp.set('alt_names', 'DNS.'+str(x), altname)
+ x += 1
+ for item in ['C', 'L', 'ST', 'O', 'OU', 'emailAddress']:
+ if self.cert_specs[entry.get('name')][item]:
+ cp.set('req_distinguished_name', item, self.cert_specs[entry.get('name')][item])
+ cp.set('req_distinguished_name', 'CN', metadata.hostname)
+ cp.write(conffile)
+ conffile.close()
+ return conffile.name
+
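+    # Illustrative example (hypothetical host name): for a host
+    # "node1.example.com" with alias "node1", the temporary request
+    # configuration written above contains roughly
+    #
+    #   [req]
+    #   default_md = sha1
+    #   distinguished_name = req_distinguished_name
+    #   req_extensions = v3_req
+    #   x509_extensions = v3_req
+    #   prompt = no
+    #
+    #   [v3_req]
+    #   subjectAltName = @alt_names
+    #
+    #   [alt_names]
+    #   DNS.1 = node1
+    #   DNS.2 = node1.example.com
+    #
+    #   [req_distinguished_name]
+    #   CN = node1.example.com
+    #
+    # plus any C/L/ST/O/OU/emailAddress values present in the cert spec.
+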
+ def build_request(self, key_filename, req_config, entry):
+ """
+ creates the certificate request
+ """
+ req = tempfile.mkstemp()[1]
+ days = self.cert_specs[entry.get('name')]['days']
+ key = self.data + key_filename
+ cmd = "openssl req -new -config %s -days %s -key %s -text -out %s" % (req_config, days, key, req)
+ res = Popen(cmd, shell=True, stdout=PIPE).stdout.read()
+ return req
+
diff --git a/build/lib/Bcfg2/Server/Plugins/Snapshots.py b/build/lib/Bcfg2/Server/Plugins/Snapshots.py
new file mode 100644
index 000000000..a4489ae95
--- /dev/null
+++ b/build/lib/Bcfg2/Server/Plugins/Snapshots.py
@@ -0,0 +1,130 @@
+#import lxml.etree
+import logging
+import binascii
+import difflib
+#import sqlalchemy
+#import sqlalchemy.orm
+import Bcfg2.Server.Plugin
+import Bcfg2.Server.Snapshots
+import Bcfg2.Logger
+from Bcfg2.Server.Snapshots.model import Snapshot
+import Queue
+import time
+import threading
+
+logger = logging.getLogger('Snapshots')
+
+ftypes = ['ConfigFile', 'SymLink', 'Directory']
+datafields = {
+ 'Package': ['version'],
+ 'Path': ['type'],
+ 'Service': ['status'],
+ 'ConfigFile': ['owner', 'group', 'perms'],
+ 'Directory': ['owner', 'group', 'perms'],
+ 'SymLink': ['to'],
+ }
+
+def build_snap_ent(entry):
+ basefields = []
+ if entry.tag in ['Package', 'Service']:
+ basefields += ['type']
+ desired = dict([(key, unicode(entry.get(key))) for key in basefields])
+ state = dict([(key, unicode(entry.get(key))) for key in basefields])
+ desired.update([(key, unicode(entry.get(key))) for key in \
+ datafields[entry.tag]])
+ if entry.tag == 'ConfigFile' or \
+ ((entry.tag == 'Path') and (entry.get('type') == 'file')):
+ if entry.text == None:
+ desired['contents'] = None
+ else:
+ if entry.get('encoding', 'ascii') == 'ascii':
+ desired['contents'] = unicode(entry.text)
+ else:
+ desired['contents'] = unicode(binascii.a2b_base64(entry.text))
+
+ if 'current_bfile' in entry.attrib:
+ state['contents'] = unicode(binascii.a2b_base64( \
+ entry.get('current_bfile')))
+ elif 'current_bdiff' in entry.attrib:
+ diff = binascii.a2b_base64(entry.get('current_bdiff'))
+ state['contents'] = unicode( \
+ '\n'.join(difflib.restore(diff.split('\n'), 1)))
+
+ state.update([(key, unicode(entry.get('current_' + key, entry.get(key)))) \
+ for key in datafields[entry.tag]])
+ if entry.tag in ['ConfigFile', 'Path'] and entry.get('exists', 'true') == 'false':
+ state = None
+ return [desired, state]
+
+
+class Snapshots(Bcfg2.Server.Plugin.Statistics,
+ Bcfg2.Server.Plugin.Plugin):
+ name = 'Snapshots'
+ experimental = True
+
+ def __init__(self, core, datastore):
+ Bcfg2.Server.Plugin.Plugin.__init__(self, core, datastore)
+ Bcfg2.Server.Plugin.Statistics.__init__(self)
+ self.session = Bcfg2.Server.Snapshots.setup_session(core.cfile)
+ self.work_queue = Queue.Queue()
+ self.loader = threading.Thread(target=self.load_snapshot)
+ self.loader.start()
+
+ def load_snapshot(self):
+ while self.running:
+ try:
+ (metadata, data) = self.work_queue.get(block=True, timeout=5)
+ except:
+ continue
+ self.statistics_from_old_stats(metadata, data)
+
+ def process_statistics(self, metadata, data):
+ return self.work_queue.put((metadata, data))
+
+ def statistics_from_old_stats(self, metadata, xdata):
+ # entries are name -> (modified, correct, start, desired, end)
+ # not sure we can get all of this from old format stats
+ t1 = time.time()
+ entries = dict([('Package', dict()),
+ ('Service', dict()), ('Path', dict())])
+ extra = dict([('Package', dict()), ('Service', dict()),
+ ('Path', dict())])
+ bad = []
+ state = xdata.find('.//Statistics')
+ correct = state.get('state') == 'clean'
+ revision = unicode(state.get('revision', '-1'))
+ for entry in state.find('.//Bad'):
+ data = [False, False, unicode(entry.get('name'))] \
+ + build_snap_ent(entry)
+ if entry.tag in ftypes:
+ etag = 'Path'
+ else:
+ etag = entry.tag
+ entries[etag][entry.get('name')] = data
+ for entry in state.find('.//Modified'):
+ if entry.tag in ftypes:
+ etag = 'Path'
+ else:
+ etag = entry.tag
+ if entry.get('name') in entries[etag]:
+ data = [True, False, unicode(entry.get('name'))] + \
+ build_snap_ent(entry)
+ else:
+ data = [True, False, unicode(entry.get('name'))] + \
+ build_snap_ent(entry)
+ for entry in state.find('.//Extra'):
+ if entry.tag in datafields:
+ data = build_snap_ent(entry)[1]
+ ename = unicode(entry.get('name'))
+ data['name'] = ename
+ extra[entry.tag][ename] = data
+ else:
+ print "extra", entry.tag, entry.get('name')
+ t2 = time.time()
+ snap = Snapshot.from_data(self.session, correct, revision,
+ metadata, entries, extra)
+ self.session.add(snap)
+ self.session.commit()
+ t3 = time.time()
+ logger.info("Snapshot storage took %fs" % (t3-t2))
+ return True
diff --git a/build/lib/Bcfg2/Server/Plugins/Statistics.py b/build/lib/Bcfg2/Server/Plugins/Statistics.py
new file mode 100644
index 000000000..c7fa0e534
--- /dev/null
+++ b/build/lib/Bcfg2/Server/Plugins/Statistics.py
@@ -0,0 +1,161 @@
+'''This file manages the statistics collected by the BCFG2 Server'''
+__revision__ = '$Revision$'
+
+import binascii
+import copy
+import difflib
+import logging
+from lxml.etree import XML, SubElement, Element, XMLSyntaxError
+import lxml.etree
+import os
+import Queue
+from time import asctime, localtime, time, strptime, mktime
+import threading
+
+import Bcfg2.Server.Plugin
+
+
+class StatisticsStore(object):
+ """Manages the memory and file copy of statistics collected about client runs."""
+ __min_write_delay__ = 0
+
+ def __init__(self, filename):
+ self.filename = filename
+ self.element = Element('Dummy')
+ self.dirty = 0
+ self.lastwrite = 0
+ self.logger = logging.getLogger('Bcfg2.Server.Statistics')
+ self.ReadFromFile()
+
+ def WriteBack(self, force=0):
+ """Write statistics changes back to persistent store."""
+ if (self.dirty and (self.lastwrite + self.__min_write_delay__ <= time())) \
+ or force:
+ try:
+ fout = open(self.filename + '.new', 'w')
+ except IOError, ioerr:
+ self.logger.error("Failed to open %s for writing: %s" % (self.filename + '.new', ioerr))
+ else:
+ fout.write(lxml.etree.tostring(self.element, encoding='UTF-8', xml_declaration=True))
+ fout.close()
+ os.rename(self.filename + '.new', self.filename)
+ self.dirty = 0
+ self.lastwrite = time()
+
+ def ReadFromFile(self):
+ """Reads current state regarding statistics."""
+ try:
+ fin = open(self.filename, 'r')
+ data = fin.read()
+ fin.close()
+ self.element = XML(data)
+ self.dirty = 0
+ except (IOError, XMLSyntaxError):
+ self.logger.error("Creating new statistics file %s"%(self.filename))
+ self.element = Element('ConfigStatistics')
+ self.WriteBack()
+ self.dirty = 0
+
+ def updateStats(self, xml, client):
+ """Updates the statistics of a current node with new data."""
+
+ # Current policy:
+ # - Keep anything less than 24 hours old
+ # - Keep latest clean run for clean nodes
+ # - Keep latest clean and dirty run for dirty nodes
+ newstat = xml.find('Statistics')
+
+ if newstat.get('state') == 'clean':
+ node_dirty = 0
+ else:
+ node_dirty = 1
+
+ # Find correct node entry in stats data
+        # The following list comprehension should be guaranteed to return at
+ # most one result
+ nodes = [elem for elem in self.element.findall('Node') \
+ if elem.get('name') == client]
+ nummatch = len(nodes)
+ if nummatch == 0:
+ # Create an entry for this node
+ node = SubElement(self.element, 'Node', name=client)
+ elif nummatch == 1 and not node_dirty:
+ # Delete old instance
+ node = nodes[0]
+ [node.remove(elem) for elem in node.findall('Statistics') \
+ if self.isOlderThan24h(elem.get('time'))]
+ elif nummatch == 1 and node_dirty:
+ # Delete old dirty statistics entry
+ node = nodes[0]
+ [node.remove(elem) for elem in node.findall('Statistics') \
+ if (elem.get('state') == 'dirty' \
+ and self.isOlderThan24h(elem.get('time')))]
+ else:
+ # Shouldn't be reached
+ self.logger.error("Duplicate node entry for %s"%(client))
+
+ # Set current time for stats
+ newstat.set('time', asctime(localtime()))
+
+ # Add statistic
+ node.append(copy.deepcopy(newstat))
+
+ # Set dirty
+ self.dirty = 1
+ self.WriteBack(force=1)
+
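+    # Illustrative example (hypothetical client name): the statistics file
+    # maintained above is shaped roughly like
+    #
+    #   <ConfigStatistics>
+    #     <Node name="node1.example.com">
+    #       <Statistics time="Mon Dec  6 10:00:00 2010" state="clean" ...>
+    #         ...
+    #       </Statistics>
+    #     </Node>
+    #   </ConfigStatistics>
+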
+ def isOlderThan24h(self, testTime):
+ """Helper function to determine if <time> string is older than 24 hours."""
+ now = time()
+ utime = mktime(strptime(testTime))
+ secondsPerDay = 60*60*24
+
+ return (now-utime) > secondsPerDay
+
+
+class Statistics(Bcfg2.Server.Plugin.Plugin,
+ Bcfg2.Server.Plugin.ThreadedStatistics,
+ Bcfg2.Server.Plugin.PullSource):
+ name = 'Statistics'
+ __version__ = '$Id$'
+
+ def __init__(self, core, datastore):
+ Bcfg2.Server.Plugin.Plugin.__init__(self, core, datastore)
+ Bcfg2.Server.Plugin.ThreadedStatistics.__init__(self, core, datastore)
+ Bcfg2.Server.Plugin.PullSource.__init__(self)
+ fpath = "%s/etc/statistics.xml" % datastore
+ self.data_file = StatisticsStore(fpath)
+
+ def handle_statistic(self, metadata, data):
+ self.data_file.updateStats(data, metadata.hostname)
+
+ def FindCurrent(self, client):
+ rt = self.data_file.element.xpath('//Node[@name="%s"]' % client)[0]
+ maxtime = max([strptime(stat.get('time')) for stat \
+ in rt.findall('Statistics')])
+ return [stat for stat in rt.findall('Statistics') \
+ if strptime(stat.get('time')) == maxtime][0]
+
+ def GetExtra(self, client):
+ return [(entry.tag, entry.get('name')) for entry \
+ in self.FindCurrent(client).xpath('.//Extra/*')]
+
+ def GetCurrentEntry(self, client, e_type, e_name):
+ curr = self.FindCurrent(client)
+ entry = curr.xpath('.//Bad/%s[@name="%s"]' % (e_type, e_name))
+ if not entry:
+ raise Bcfg2.Server.Plugin.PluginExecutionError
+ cfentry = entry[-1]
+
+ owner = cfentry.get('current_owner', cfentry.get('owner'))
+ group = cfentry.get('current_group', cfentry.get('group'))
+ perms = cfentry.get('current_perms', cfentry.get('perms'))
+ if 'current_bfile' in cfentry.attrib:
+ contents = binascii.a2b_base64(cfentry.get('current_bfile'))
+ elif 'current_bdiff' in cfentry.attrib:
+ diff = binascii.a2b_base64(cfentry.get('current_bdiff'))
+ contents = '\n'.join(difflib.restore(diff.split('\n'), 1))
+ else:
+ contents = None
+
+ return (owner, group, perms, contents)
diff --git a/build/lib/Bcfg2/Server/Plugins/Svcmgr.py b/build/lib/Bcfg2/Server/Plugins/Svcmgr.py
new file mode 100644
index 000000000..6d25c1a6d
--- /dev/null
+++ b/build/lib/Bcfg2/Server/Plugins/Svcmgr.py
@@ -0,0 +1,12 @@
+"""This generator provides service mappings."""
+__revision__ = '$Revision$'
+
+import Bcfg2.Server.Plugin
+
+
+class Svcmgr(Bcfg2.Server.Plugin.PrioDir):
+ """This is a generator that handles service assignments."""
+ name = 'Svcmgr'
+ __version__ = '$Id$'
+ __author__ = 'bcfg-dev@mcs.anl.gov'
+ deprecated = True
diff --git a/build/lib/Bcfg2/Server/Plugins/Svn.py b/build/lib/Bcfg2/Server/Plugins/Svn.py
new file mode 100644
index 000000000..cb4ab649b
--- /dev/null
+++ b/build/lib/Bcfg2/Server/Plugins/Svn.py
@@ -0,0 +1,46 @@
+import os
+from subprocess import Popen, PIPE
+import Bcfg2.Server.Plugin
+
+# for debugging output only
+import logging
+logger = logging.getLogger('Bcfg2.Plugins.Svn')
+
+
+class Svn(Bcfg2.Server.Plugin.Plugin,
+ Bcfg2.Server.Plugin.Version):
+ """Svn is a version plugin for dealing with Bcfg2 repos."""
+ name = 'Svn'
+ __version__ = '$Id$'
+ __author__ = 'bcfg-dev@mcs.anl.gov'
+
+ def __init__(self, core, datastore):
+ Bcfg2.Server.Plugin.Plugin.__init__(self, core, datastore)
+ self.core = core
+ self.datastore = datastore
+
+ # path to svn directory for bcfg2 repo
+ svn_dir = "%s/.svn" % datastore
+
+ # Read revision from bcfg2 repo
+ if os.path.isdir(svn_dir):
+ self.get_revision()
+ else:
+ logger.error("%s is not a directory" % svn_dir)
+ raise Bcfg2.Server.Plugin.PluginInitError
+
+ logger.debug("Initialized svn plugin with svn directory = %s" % svn_dir)
+
+ def get_revision(self):
+ """Read svn revision information for the Bcfg2 repository."""
+ try:
+ data = Popen(("env LC_ALL=C svn info %s" %
+ (self.datastore)), shell=True,
+ stdout=PIPE).communicate()[0].split('\n')
+ return [line.split(': ')[1] for line in data \
+ if line[:9] == 'Revision:'][-1]
+ except IndexError:
+ logger.error("Failed to read svn info; disabling svn support")
+ logger.error('''Ran command "svn info %s"''' % (self.datastore))
+ logger.error("Got output: %s" % data)
+ raise Bcfg2.Server.Plugin.PluginInitError
diff --git a/build/lib/Bcfg2/Server/Plugins/TCheetah.py b/build/lib/Bcfg2/Server/Plugins/TCheetah.py
new file mode 100644
index 000000000..d40f4baf3
--- /dev/null
+++ b/build/lib/Bcfg2/Server/Plugins/TCheetah.py
@@ -0,0 +1,78 @@
+'''This module implements a templating generator based on Cheetah'''
+__revision__ = '$Revision$'
+
+import binascii
+import logging
+import sys
+import traceback
+import Bcfg2.Server.Plugin
+
+logger = logging.getLogger('Bcfg2.Plugins.TCheetah')
+
+try:
+ import Cheetah.Template
+ import Cheetah.Parser
+except:
+ logger.error("TCheetah: Failed to import Cheetah. Is it installed?")
+ raise
+
+
+class TemplateFile:
+ """Template file creates Cheetah template structures for the loaded file."""
+
+ def __init__(self, name, specific, encoding):
+ self.name = name
+ self.specific = specific
+ self.encoding = encoding
+ self.template = None
+ self.searchlist = dict()
+
+ def handle_event(self, event):
+ """Handle all fs events for this template."""
+ if event.code2str() == 'deleted':
+ return
+ try:
+ s = {'useStackFrames': False}
+ self.template = Cheetah.Template.Template(open(self.name).read(),
+ compilerSettings=s,
+ searchList=self.searchlist)
+ except Cheetah.Parser.ParseError, perror:
+ logger.error("Cheetah parse error for file %s" % (self.name))
+ logger.error(perror.report())
+
+ def bind_entry(self, entry, metadata):
+ """Build literal file information."""
+ self.template.metadata = metadata
+ self.searchlist['metadata'] = metadata
+ self.template.path = entry.get('realname', entry.get('name'))
+ self.searchlist['path'] = entry.get('realname', entry.get('name'))
+ self.template.source_path = self.name
+ self.searchlist['source_path'] = self.name
+
+ if entry.tag == 'Path':
+ entry.set('type', 'file')
+ try:
+ if type(self.template) == unicode:
+ entry.text = self.template
+ else:
+ if entry.get('encoding') == 'base64':
+ # take care of case where file needs base64 encoding
+ entry.text = binascii.b2a_base64(self.template)
+ else:
+ entry.text = unicode(str(self.template), self.encoding)
+ except:
+ (a, b, c) = sys.exc_info()
+ msg = traceback.format_exception(a, b, c, limit=2)[-1][:-1]
+ logger.error(msg)
+ logger.error("TCheetah template error for %s" % self.searchlist['path'])
+ del a, b, c
+ raise Bcfg2.Server.Plugin.PluginExecutionError
+
+
+class TCheetah(Bcfg2.Server.Plugin.GroupSpool):
+ """The TCheetah generator implements a templating mechanism for configuration files."""
+ name = 'TCheetah'
+ __version__ = '$Id$'
+ __author__ = 'bcfg-dev@mcs.anl.gov'
+ filename_pattern = 'template'
+ es_child_cls = TemplateFile
diff --git a/build/lib/Bcfg2/Server/Plugins/TGenshi.py b/build/lib/Bcfg2/Server/Plugins/TGenshi.py
new file mode 100644
index 000000000..29e6d7307
--- /dev/null
+++ b/build/lib/Bcfg2/Server/Plugins/TGenshi.py
@@ -0,0 +1,126 @@
+"""This module implements a templating generator based on Genshi."""
+__revision__ = '$Revision$'
+
+import binascii
+import logging
+import Bcfg2.Server.Plugin
+
+logger = logging.getLogger('Bcfg2.Plugins.TGenshi')
+
+# try to import genshi stuff
+try:
+ import genshi.core
+ import genshi.input
+ from genshi.template import TemplateLoader, \
+ TextTemplate, MarkupTemplate, TemplateError
+except ImportError:
+ logger.error("TGenshi: Failed to import Genshi. Is it installed?")
+ raise Bcfg2.Server.Plugin.PluginInitError
+try:
+ from genshi.template import NewTextTemplate
+ have_ntt = True
+except:
+ have_ntt = False
+
+def removecomment(stream):
+ """A genshi filter that removes comments from the stream."""
+ for kind, data, pos in stream:
+ if kind is genshi.core.COMMENT:
+ continue
+ yield kind, data, pos
+
+
+class TemplateFile:
+ """Template file creates Genshi template structures for the loaded file."""
+
+ def __init__(self, name, specific, encoding):
+ self.name = name
+ self.specific = specific
+ self.encoding = encoding
+ if self.specific.all:
+ matchname = self.name
+ elif self.specific.group:
+ matchname = self.name[:self.name.find('.G')]
+ else:
+ matchname = self.name[:self.name.find('.H')]
+ if matchname.endswith('.txt'):
+ self.template_cls = TextTemplate
+ elif matchname.endswith('.newtxt'):
+ if not have_ntt:
+ logger.error("Genshi NewTextTemplates not supported by this version of Genshi")
+ else:
+ self.template_cls = NewTextTemplate
+ else:
+ self.template_cls = MarkupTemplate
+ self.HandleEvent = self.handle_event
+
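+    # Illustrative example (hypothetical file names): with the suffix
+    # handling above, "template.newtxt.H_node1.example.com" would be rendered
+    # with NewTextTemplate for that single host, "template.txt.G50_web" with
+    # TextTemplate for the "web" group, and a plain "template.xml" with
+    # MarkupTemplate for all clients.
+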
+ def handle_event(self, event=None):
+ """Handle all fs events for this template."""
+ if event and event.code2str() == 'deleted':
+ return
+ try:
+ loader = TemplateLoader()
+ try:
+ self.template = loader.load(self.name, cls=self.template_cls,
+ encoding=self.encoding)
+ except LookupError, lerror:
+ logger.error('Genshi lookup error: %s' % lerror)
+ except TemplateError, terror:
+ logger.error('Genshi template error: %s' % terror)
+ except genshi.input.ParseError, perror:
+ logger.error('Genshi parse error: %s' % perror)
+
+ def bind_entry(self, entry, metadata):
+ """Build literal file information."""
+ fname = entry.get('realname', entry.get('name'))
+ if entry.tag == 'Path':
+ entry.set('type', 'file')
+ try:
+ stream = self.template.generate( \
+ name=fname, metadata=metadata,
+ path=self.name).filter(removecomment)
+ if have_ntt:
+ ttypes = [TextTemplate, NewTextTemplate]
+ else:
+ ttypes = [TextTemplate]
+ if True in [isinstance(self.template, t) for t in ttypes]:
+ try:
+ textdata = stream.render('text', strip_whitespace=False)
+ except TypeError:
+ textdata = stream.render('text')
+ if type(textdata) == unicode:
+ entry.text = textdata
+ else:
+ if entry.get('encoding') == 'base64':
+ # take care of case where file needs base64 encoding
+ entry.text = binascii.b2a_base64(textdata)
+ else:
+ entry.text = unicode(textdata, self.encoding)
+ else:
+ try:
+ xmldata = stream.render('xml', strip_whitespace=False)
+ except TypeError:
+ xmldata = stream.render('xml')
+ if type(xmldata) == unicode:
+ entry.text = xmldata
+ else:
+ entry.text = unicode(xmldata, self.encoding)
+ except TemplateError, terror:
+ logger.error('Genshi template error: %s' % terror)
+ raise Bcfg2.Server.Plugin.PluginExecutionError
+ except AttributeError, err:
+ logger.error('Genshi template loading error: %s' % err)
+ raise Bcfg2.Server.Plugin.PluginExecutionError
+
+
+class TGenshi(Bcfg2.Server.Plugin.GroupSpool):
+ """
+ The TGenshi generator implements a templating
+ mechanism for configuration files.
+
+ """
+ name = 'TGenshi'
+ __version__ = '$Id$'
+ __author__ = 'jeff@ocjtech.us'
+ filename_pattern = 'template\.(txt|newtxt|xml)'
+ es_child_cls = TemplateFile
diff --git a/build/lib/Bcfg2/Server/Plugins/Trigger.py b/build/lib/Bcfg2/Server/Plugins/Trigger.py
new file mode 100644
index 000000000..f6dd47e12
--- /dev/null
+++ b/build/lib/Bcfg2/Server/Plugins/Trigger.py
@@ -0,0 +1,37 @@
+import os
+import Bcfg2.Server.Plugin
+
+
+def async_run(prog, args):
+ pid = os.fork()
+ if pid:
+ os.waitpid(pid, 0)
+ else:
+ dpid = os.fork()
+ if not dpid:
+ os.system(" ".join([prog] + args))
+ os._exit(0)
+
+
+class Trigger(Bcfg2.Server.Plugin.Plugin,
+ Bcfg2.Server.Plugin.Statistics):
+ """Trigger is a plugin that calls external scripts (on the server)."""
+ name = 'Trigger'
+ __version__ = '$Id'
+ __author__ = 'bcfg-dev@mcs.anl.gov'
+
+ def __init__(self, core, datastore):
+ Bcfg2.Server.Plugin.Plugin.__init__(self, core, datastore)
+ Bcfg2.Server.Plugin.Statistics.__init__(self)
+ try:
+ os.stat(self.data)
+ except:
+ self.logger.error("Trigger: spool directory %s does not exist; unloading" % self.data)
+ raise Bcfg2.Server.Plugin.PluginInitError
+
+ def process_statistics(self, metadata, _):
+ args = [metadata.hostname, '-p', metadata.profile, '-g',
+ ':'.join([g for g in metadata.groups])]
+ for notifier in os.listdir(self.data):
+ n = self.data + '/' + notifier
+ async_run(n, args)
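+
+    # Illustrative example (hypothetical repository path and metadata): for a
+    # client "node1" in profile "webserver" with groups "linux" and "rhel",
+    # every executable in the Trigger/ directory is run asynchronously as
+    #
+    #   /var/lib/bcfg2/Trigger/<script> node1 -p webserver -g linux:rhel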
diff --git a/build/lib/Bcfg2/Server/Plugins/Web.py b/build/lib/Bcfg2/Server/Plugins/Web.py
new file mode 100644
index 000000000..e1646e429
--- /dev/null
+++ b/build/lib/Bcfg2/Server/Plugins/Web.py
@@ -0,0 +1,47 @@
+import os
+import BaseHTTPServer
+import SimpleHTTPServer
+import Bcfg2.Server.Plugin
+
+# for debugging output only
+import logging
+logger = logging.getLogger('Bcfg2.Plugins.Web')
+
+class Web(Bcfg2.Server.Plugin.Plugin,
+ Bcfg2.Server.Plugin.Version):
+ """Web is a simple webserver to display the content of the Bcfg2 repos."""
+ name = 'Web'
+ __version__ = '$Id$'
+ __author__ = 'bcfg-dev@mcs.anl.gov'
+ experimental = True
+
+ def __init__(self, core, datastore):
+ Bcfg2.Server.Plugin.Plugin.__init__(self, core, datastore)
+ Bcfg2.Server.Plugin.Version.__init__(self)
+ self.core = core
+ self.datastore = datastore
+
+ # Change directory to the Bcfg2 repo
+ ##path = '/home/fab/backup'
+ if not os.path.exists(datastore):
+ ##print "Path '%s' doesn't exisit" % datastore
+ logger.error("%s doesn't exist" % datastore)
+ raise Bcfg2.Server.Plugin.PluginInitError
+ else:
+ os.chdir(datastore)
+ self.start_web()
+
+ logger.debug("Serving at port %s" % port)
+
+
+ def start_web(self, port=6788):
+ """Starts the webserver for directory listing of the Bcfg2 repo."""
+ try:
+ server_class = BaseHTTPServer.HTTPServer
+ handler_class = SimpleHTTPServer.SimpleHTTPRequestHandler
+            server_address = ('', port)
+            server = server_class(server_address, handler_class)
+            logger.debug("Serving at port %s" % port)
+            server.serve_forever()
+ except:
+ logger.error("Failed to start webserver")
+ raise Bcfg2.Server.Plugin.PluginInitError
diff --git a/build/lib/Bcfg2/Server/Plugins/__Web.py b/build/lib/Bcfg2/Server/Plugins/__Web.py
new file mode 100644
index 000000000..e1646e429
--- /dev/null
+++ b/build/lib/Bcfg2/Server/Plugins/__Web.py
@@ -0,0 +1,47 @@
+import os
+import BaseHTTPServer
+import SimpleHTTPServer
+import Bcfg2.Server.Plugin
+
+# for debugging output only
+import logging
+logger = logging.getLogger('Bcfg2.Plugins.Web')
+
+class Web(Bcfg2.Server.Plugin.Plugin,
+ Bcfg2.Server.Plugin.Version):
+ """Web is a simple webserver to display the content of the Bcfg2 repos."""
+ name = 'Web'
+ __version__ = '$Id$'
+ __author__ = 'bcfg-dev@mcs.anl.gov'
+ experimental = True
+
+ def __init__(self, core, datastore):
+ Bcfg2.Server.Plugin.Plugin.__init__(self, core, datastore)
+ Bcfg2.Server.Plugin.Version.__init__(self)
+ self.core = core
+ self.datastore = datastore
+
+ # Change directory to the Bcfg2 repo
+ ##path = '/home/fab/backup'
+ if not os.path.exists(datastore):
+ ##print "Path '%s' doesn't exisit" % datastore
+ logger.error("%s doesn't exist" % datastore)
+ raise Bcfg2.Server.Plugin.PluginInitError
+ else:
+ os.chdir(datastore)
+ self.start_web()
+
+ logger.debug("Serving at port %s" % port)
+
+
+ def start_web(self, port=6788):
+ """Starts the webserver for directory listing of the Bcfg2 repo."""
+ try:
+ server_class = BaseHTTPServer.HTTPServer
+ handler_class = SimpleHTTPServer.SimpleHTTPRequestHandler
+            server_address = ('', port)
+            server = server_class(server_address, handler_class)
+            logger.debug("Serving at port %s" % port)
+            server.serve_forever()
+ except:
+ logger.error("Failed to start webserver")
+ raise Bcfg2.Server.Plugin.PluginInitError
diff --git a/build/lib/Bcfg2/Server/Plugins/__init__.py b/build/lib/Bcfg2/Server/Plugins/__init__.py
new file mode 100644
index 000000000..c69c37452
--- /dev/null
+++ b/build/lib/Bcfg2/Server/Plugins/__init__.py
@@ -0,0 +1,35 @@
+"""Imports for Bcfg2.Server.Plugins."""
+__revision__ = '$Revision$'
+
+__all__ = [
+ 'Account',
+ 'Base',
+ 'Bundler',
+ 'Bzr',
+ 'Cfg',
+ 'Cvs',
+ 'Darcs',
+ 'Decisions',
+ 'Fossil',
+ 'Git',
+ 'GroupPatterns',
+ 'Hg',
+ 'Hostbase',
+ 'Metadata',
+ 'NagiosGen',
+ 'Ohai',
+ 'Packages',
+ 'Properties',
+ 'Probes',
+ 'Pkgmgr',
+ 'Rules',
+ 'SSHbase',
+ 'Snapshots',
+ 'Statistics',
+ 'Svcmgr',
+ 'Svn',
+ 'TCheetah',
+ 'Trigger',
+ 'SGenshi',
+ 'TGenshi',
+ ]
diff --git a/build/lib/Bcfg2/Server/Reports/__init__.py b/build/lib/Bcfg2/Server/Reports/__init__.py
new file mode 100644
index 000000000..bdf908f4a
--- /dev/null
+++ b/build/lib/Bcfg2/Server/Reports/__init__.py
@@ -0,0 +1 @@
+__all__ = ['manage', 'nisauth', 'reports', 'settings', 'backends', 'urls', 'importscript']
diff --git a/build/lib/Bcfg2/Server/Reports/backends.py b/build/lib/Bcfg2/Server/Reports/backends.py
new file mode 100644
index 000000000..9207038ed
--- /dev/null
+++ b/build/lib/Bcfg2/Server/Reports/backends.py
@@ -0,0 +1,35 @@
+from django.contrib.auth.models import User
+from nisauth import *
+
+class NISBackend(object):
+
+ def authenticate(self, username=None, password=None):
+ try:
+ print "start nis authenticate"
+ n = nisauth(username, password)
+ temp_pass = User.objects.make_random_password(100)
+ nis_user = dict(username=username,
+ )
+
+ user_session_obj = dict(
+ email = username,
+ first_name = None,
+ last_name = None,
+ uid = n.uid
+ )
+ user, created = User.objects.get_or_create(username=username)
+
+ return user
+
+ except NISAUTHError, e:
+ print str(e)
+ return None
+
+
+ def get_user(self, user_id):
+ try:
+ return User.objects.get(pk=user_id)
+ except User.DoesNotExist, e:
+ print str(e)
+ return None
+
diff --git a/build/lib/Bcfg2/Server/Reports/importscript.py b/build/lib/Bcfg2/Server/Reports/importscript.py
new file mode 100644
index 000000000..cdfd8079c
--- /dev/null
+++ b/build/lib/Bcfg2/Server/Reports/importscript.py
@@ -0,0 +1,270 @@
+#! /usr/bin/env python
+'''Imports statistics.xml and clients.xml files into the database backend for the new statistics engine'''
+__revision__ = '$Revision$'
+
+import os, sys, binascii
+try:
+ import Bcfg2.Server.Reports.settings
+except Exception, e:
+ sys.stderr.write("Failed to load configuration settings. %s\n" % e)
+ sys.exit(1)
+
+project_directory = os.path.dirname(Bcfg2.Server.Reports.settings.__file__)
+project_name = os.path.basename(project_directory)
+sys.path.append(os.path.join(project_directory, '..'))
+project_module = __import__(project_name, '', '', [''])
+sys.path.pop()
+# Set DJANGO_SETTINGS_MODULE appropriately.
+os.environ['DJANGO_SETTINGS_MODULE'] = '%s.settings' % project_name
+
+from Bcfg2.Server.Reports.reports.models import *
+from lxml.etree import XML, XMLSyntaxError
+from getopt import getopt, GetoptError
+from datetime import datetime
+from time import strptime
+from django.db import connection
+from Bcfg2.Server.Reports.updatefix import update_database
+import ConfigParser
+import logging
+import Bcfg2.Logger
+import platform
+
+def build_reason_kwargs(r_ent):
+ binary_file=False
+ if r_ent.get('current_bfile', False):
+ binary_file=True
+ rc_diff = r_ent.get('current_bfile')
+ if len(rc_diff) > 1024*1024:
+ rc_diff = ''
+ elif len(rc_diff) == 0:
+ # No point in flagging binary if we have no data
+ binary_file=False
+ elif r_ent.get('current_bdiff', False):
+ rc_diff = binascii.a2b_base64(r_ent.get('current_bdiff'))
+ elif r_ent.get('current_diff', False):
+ rc_diff = r_ent.get('current_diff')
+ else:
+ rc_diff = ''
+ return dict(owner=r_ent.get('owner', default=""),
+ current_owner=r_ent.get('current_owner', default=""),
+ group=r_ent.get('group', default=""),
+ current_group=r_ent.get('current_group', default=""),
+ perms=r_ent.get('perms', default=""),
+ current_perms=r_ent.get('current_perms', default=""),
+ status=r_ent.get('status', default=""),
+ current_status=r_ent.get('current_status', default=""),
+ to=r_ent.get('to', default=""),
+ current_to=r_ent.get('current_to', default=""),
+ version=r_ent.get('version', default=""),
+ current_version=r_ent.get('current_version', default=""),
+ current_exists=r_ent.get('current_exists', default="True").capitalize()=="True",
+ current_diff=rc_diff,
+ is_binary=binary_file)
+
+
+def load_stats(cdata, sdata, vlevel, logger, quick=False, location=''):
+ clients = {}
+ [clients.__setitem__(c.name, c) \
+ for c in Client.objects.all()]
+
+ pingability = {}
+ [pingability.__setitem__(n.get('name'), n.get('pingable', default='N')) \
+ for n in cdata.findall('Client')]
+
+ for node in sdata.findall('Node'):
+ name = node.get('name')
+ c_inst, created = Client.objects.get_or_create(name=name)
+ if vlevel > 0:
+ logger.info("Client %s added to db" % name)
+ clients[name] = c_inst
+ try:
+ pingability[name]
+ except KeyError:
+ pingability[name] = 'N'
+ for statistics in node.findall('Statistics'):
+ timestamp = datetime(*strptime(statistics.get('time'))[0:6])
+ ilist = Interaction.objects.filter(client=c_inst,
+ timestamp=timestamp)
+ if ilist:
+ current_interaction = ilist[0]
+ if vlevel > 0:
+ logger.info("Interaction for %s at %s with id %s already exists" % \
+ (c_inst.id, timestamp, current_interaction.id))
+ continue
+ else:
+ newint = Interaction(client=c_inst,
+ timestamp = timestamp,
+ state = statistics.get('state', default="unknown"),
+ repo_rev_code = statistics.get('revision',default="unknown"),
+ client_version = statistics.get('client_version',default="unknown"),
+ goodcount = statistics.get('good',default="0"),
+ totalcount = statistics.get('total',default="0"),
+ server = location)
+ newint.save()
+ current_interaction = newint
+ if vlevel > 0:
+ logger.info("Interaction for %s at %s with id %s INSERTED in to db"%(c_inst.id,
+ timestamp, current_interaction.id))
+
+
+ counter_fields = { TYPE_CHOICES[0]: 0, TYPE_CHOICES[1]: 0, TYPE_CHOICES[2]: 0 }
+ pattern = [('Bad/*', TYPE_CHOICES[0]),
+ ('Extra/*', TYPE_CHOICES[2]),
+ ('Modified/*', TYPE_CHOICES[1]),]
+ for (xpath, type) in pattern:
+ for x in statistics.findall(xpath):
+ counter_fields[type] = counter_fields[type] + 1
+ kargs = build_reason_kwargs(x)
+
+ try:
+ rr = None
+ if not quick:
+ try:
+ rr = Reason.objects.filter(**kargs)[0]
+ except IndexError:
+ pass
+ if not rr:
+ rr = Reason(**kargs)
+ rr.save()
+ if vlevel > 0:
+ logger.info("Created reason: %s" % rr.id)
+ except Exception, ex:
+ logger.error("Failed to create reason for %s: %s" % (x.get('name'), ex))
+ rr = Reason(current_exists=x.get('current_exists',
+ default="True").capitalize()=="True")
+ rr.save()
+
+ entry, created = Entries.objects.get_or_create(\
+ name=x.get('name'), kind=x.tag)
+
+ Entries_interactions(entry=entry, reason=rr,
+ interaction=current_interaction,
+ type=type[0]).save()
+ if vlevel > 0:
+ logger.info("%s interaction created with reason id %s and entry %s" % (xpath, rr.id, entry.id))
+
+ # Update interaction counters
+ current_interaction.bad_entries = counter_fields[TYPE_CHOICES[0]]
+ current_interaction.modified_entries = counter_fields[TYPE_CHOICES[1]]
+ current_interaction.extra_entries = counter_fields[TYPE_CHOICES[2]]
+ current_interaction.save()
+
+ mperfs = []
+ for times in statistics.findall('OpStamps'):
+ for metric, value in times.items():
+ mmatch = []
+ if not quick:
+ mmatch = Performance.objects.filter(metric=metric, value=value)
+
+ if mmatch:
+ mperf = mmatch[0]
+ else:
+ mperf = Performance(metric=metric, value=value)
+ mperf.save()
+ mperfs.append(mperf)
+ current_interaction.performance_items.add(*mperfs)
+
+ for key in pingability.keys():
+ if key not in clients:
+ continue
+ try:
+ pmatch = Ping.objects.filter(client=clients[key]).order_by('-endtime')[0]
+ if pmatch.status == pingability[key]:
+ pmatch.endtime = datetime.now()
+ pmatch.save()
+ continue
+ except IndexError:
+ pass
+ Ping(client=clients[key], status=pingability[key],
+ starttime=datetime.now(),
+ endtime=datetime.now()).save()
+
+ if vlevel > 1:
+ logger.info("---------------PINGDATA SYNCED---------------------")
+
+ #Clients are consistent
+
+if __name__ == '__main__':
+ from sys import argv
+ verb = 0
+ cpath = "/etc/bcfg2.conf"
+    clientspath = False
+ statpath = False
+ syslog = False
+
+ try:
+ opts, args = getopt(argv[1:], "hvudc:s:CS", ["help", "verbose", "updates" ,
+ "debug", "clients=", "stats=",
+ "config=", "syslog"])
+ except GetoptError, mesg:
+ # print help information and exit:
+ print "%s\nUsage:\nimportscript.py [-h] [-v] [-u] [-d] [-S] [-C bcfg2 config file] [-c clients-file] [-s statistics-file]" % (mesg)
+ raise SystemExit, 2
+
+ for o, a in opts:
+ if o in ("-h", "--help"):
+ print "Usage:\nimportscript.py [-h] [-v] -c <clients-file> -s <statistics-file> \n"
+ print "h : help; this message"
+ print "v : verbose; print messages on record insertion/skip"
+ print "u : updates; print status messages as items inserted semi-verbose"
+ print "d : debug; print most SQL used to manipulate database"
+ print "C : path to bcfg2.conf config file."
+ print "c : clients.xml file"
+ print "s : statistics.xml file"
+ print "S : syslog; output to syslog"
+ raise SystemExit
+ if o in ["-C", "--config"]:
+ cpath = a
+
+ if o in ("-v", "--verbose"):
+ verb = 1
+ if o in ("-u", "--updates"):
+ verb = 2
+ if o in ("-d", "--debug"):
+ verb = 3
+ if o in ("-c", "--clients"):
+ clientspath = a
+
+ if o in ("-s", "--stats"):
+ statpath = a
+ if o in ("-S", "--syslog"):
+ syslog = True
+
+ logger = logging.getLogger('importscript.py')
+ logging.getLogger().setLevel(logging.INFO)
+ Bcfg2.Logger.setup_logging('importscript.py',
+ True,
+ syslog)
+
+ cf = ConfigParser.ConfigParser()
+ cf.read([cpath])
+
+ if not statpath:
+ try:
+ statpath = "%s/etc/statistics.xml" % cf.get('server', 'repository')
+ except (ConfigParser.NoSectionError, ConfigParser.NoOptionError):
+ print "Could not read bcfg2.conf; exiting"
+ raise SystemExit, 1
+ try:
+ statsdata = XML(open(statpath).read())
+ except (IOError, XMLSyntaxError):
+ print("StatReports: Failed to parse %s"%(statpath))
+ raise SystemExit, 1
+
+    if not clientspath:
+ try:
+ clientspath = "%s/Metadata/clients.xml" % \
+ cf.get('server', 'repository')
+ except (ConfigParser.NoSectionError, ConfigParser.NoOptionError):
+ print "Could not read bcfg2.conf; exiting"
+ raise SystemExit, 1
+ try:
+ clientsdata = XML(open(clientspath).read())
+ except (IOError, XMLSyntaxError):
+ print("StatReports: Failed to parse %s"%(clientspath))
+ raise SystemExit, 1
+
+ q = '-O3' in sys.argv
+ # Be sure the database is ready for new schema
+ update_database()
+ load_stats(clientsdata, statsdata, verb, logger, quick=q, location=platform.node())
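+
+    # Illustrative example (hypothetical paths): a typical standalone run,
+    # matching the options parsed above, would be
+    #
+    #   python importscript.py -C /etc/bcfg2.conf \
+    #       -c /var/lib/bcfg2/Metadata/clients.xml \
+    #       -s /var/lib/bcfg2/etc/statistics.xml -v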
diff --git a/build/lib/Bcfg2/Server/Reports/manage.py b/build/lib/Bcfg2/Server/Reports/manage.py
new file mode 100644
index 000000000..5e78ea979
--- /dev/null
+++ b/build/lib/Bcfg2/Server/Reports/manage.py
@@ -0,0 +1,11 @@
+#!/usr/bin/env python
+from django.core.management import execute_manager
+try:
+ import settings # Assumed to be in the same directory.
+except ImportError:
+ import sys
+ sys.stderr.write("Error: Can't find the file 'settings.py' in the directory containing %r. It appears you've customized things.\nYou'll have to run django-admin.py, passing it your settings module.\n(If the file settings.py does indeed exist, it's causing an ImportError somehow.)\n" % __file__)
+ sys.exit(1)
+
+if __name__ == "__main__":
+ execute_manager(settings)
diff --git a/build/lib/Bcfg2/Server/Reports/nisauth.py b/build/lib/Bcfg2/Server/Reports/nisauth.py
new file mode 100644
index 000000000..b4be0e391
--- /dev/null
+++ b/build/lib/Bcfg2/Server/Reports/nisauth.py
@@ -0,0 +1,43 @@
+import os
+import crypt, nis
+from Bcfg2.Server.Reports.settings import AUTHORIZED_GROUP
+
+"""Checks with NIS to see if the current user is in the support group"""
+
+__revision__ = "$Revision: $"
+
+class NISAUTHError(Exception):
+ """NISAUTHError is raised when somehting goes boom."""
+ pass
+
+class nisauth(object):
+ group_test = False
+ samAcctName = None
+ distinguishedName = None
+ sAMAccountName = None
+ telephoneNumber = None
+ title = None
+ memberOf = None
+ department = None #this will be a list
+ mail = None
+ extensionAttribute1 = None #badgenumber
+ badge_no = None
+ uid = None
+
+ def __init__(self,login,passwd=None):
+ """get user profile from NIS"""
+ try:
+ p = nis.match(login, 'passwd.byname').split(":")
+ print p
+ except:
+ raise NISAUTHError('username')
+ # check user password using crypt and 2 character salt from passwd file
+ if p[1] == crypt.crypt(passwd, p[1][:2]):
+ # check to see if user is in valid support groups
+ # will have to include these groups in a settings file eventually
+ if not login in nis.match(AUTHORIZED_GROUP, 'group.byname').split(':')[-1].split(','):
+ raise NISAUTHError('group')
+ self.uid = p[2]
+ print self.uid
+ else:
+ raise NISAUTHError('password')
diff --git a/build/lib/Bcfg2/Server/Reports/reports/__init__.py b/build/lib/Bcfg2/Server/Reports/reports/__init__.py
new file mode 100644
index 000000000..ccdce8943
--- /dev/null
+++ b/build/lib/Bcfg2/Server/Reports/reports/__init__.py
@@ -0,0 +1 @@
+__all__ = ['templatetags']
diff --git a/build/lib/Bcfg2/Server/Reports/reports/fixtures/initial_version.xml b/build/lib/Bcfg2/Server/Reports/reports/fixtures/initial_version.xml
new file mode 100644
index 000000000..1b1359eed
--- /dev/null
+++ b/build/lib/Bcfg2/Server/Reports/reports/fixtures/initial_version.xml
@@ -0,0 +1,35 @@
+<?xml version='1.0' encoding='utf-8' ?>
+<django-objects version="1.0">
+ <object pk="1" model="reports.internaldatabaseversion">
+ <field type="IntegerField" name="version">0</field>
+ <field type="DateTimeField" name="updated">2008-08-05 11:03:50</field>
+ </object>
+ <object pk="2" model="reports.internaldatabaseversion">
+ <field type="IntegerField" name="version">1</field>
+ <field type="DateTimeField" name="updated">2008-08-05 11:04:10</field>
+ </object>
+ <object pk="3" model="reports.internaldatabaseversion">
+ <field type="IntegerField" name="version">2</field>
+ <field type="DateTimeField" name="updated">2008-08-05 13:37:19</field>
+ </object>
+ <object pk="4" model="reports.internaldatabaseversion">
+ <field type='IntegerField' name='version'>3</field>
+ <field type='DateTimeField' name='updated'>2008-08-11 08:44:36</field>
+ </object>
+ <object pk="5" model="reports.internaldatabaseversion">
+ <field type='IntegerField' name='version'>10</field>
+ <field type='DateTimeField' name='updated'>2008-08-22 11:28:50</field>
+ </object>
+ <object pk="5" model="reports.internaldatabaseversion">
+ <field type='IntegerField' name='version'>11</field>
+ <field type='DateTimeField' name='updated'>2009-01-13 12:26:10</field>
+ </object>
+ <object pk="6" model="reports.internaldatabaseversion">
+ <field type='IntegerField' name='version'>16</field>
+ <field type='DateTimeField' name='updated'>2010-06-01 12:26:10</field>
+ </object>
+ <object pk="7" model="reports.internaldatabaseversion">
+ <field type='IntegerField' name='version'>17</field>
+ <field type='DateTimeField' name='updated'>2010-07-02 00:00:00</field>
+ </object>
+</django-objects>
diff --git a/build/lib/Bcfg2/Server/Reports/reports/models.py b/build/lib/Bcfg2/Server/Reports/reports/models.py
new file mode 100644
index 000000000..1963a9090
--- /dev/null
+++ b/build/lib/Bcfg2/Server/Reports/reports/models.py
@@ -0,0 +1,328 @@
+"""Django models for Bcfg2 reports."""
+from django.db import models
+from django.db import connection, transaction
+from django.db.models import Q, Max
+from datetime import datetime, timedelta
+from time import strptime
+
+KIND_CHOICES = (
+ #These are the kinds of config elements
+ ('Package', 'Package'),
+ ('Path', 'directory'),
+ ('Path', 'file'),
+ ('Path', 'permissions'),
+ ('Path', 'symlink'),
+ ('Service', 'Service'),
+)
+PING_CHOICES = (
+ #These are possible ping states
+ ('Up (Y)', 'Y'),
+ ('Down (N)', 'N')
+)
+TYPE_BAD = 1
+TYPE_MODIFIED = 2
+TYPE_EXTRA = 3
+
+TYPE_CHOICES = (
+ (TYPE_BAD, 'Bad'),
+ (TYPE_MODIFIED, 'Modified'),
+ (TYPE_EXTRA, 'Extra'),
+)
+
+def convert_entry_type_to_id(type_name):
+ """Convert a entry type to its entry id"""
+ for e_id, e_name in TYPE_CHOICES:
+ if e_name.lower() == type_name.lower():
+ return e_id
+ return -1
+
+class ClientManager(models.Manager):
+ """Extended client manager functions."""
+ def active(self, timestamp=None):
+ """returns a set of clients that have been created and have not yet been
+ expired as of optional timestmamp argument. Timestamp should be a
+ datetime object."""
+
+ if timestamp is None:
+ timestamp = datetime.now()
+ elif not isinstance(timestamp, datetime):
+ # Accept a "%Y-%m-%d %H:%M:%S" string as well; anything else
+ # that fails to parse yields an empty queryset
+ try:
+ timestamp = datetime(*strptime(timestamp, "%Y-%m-%d %H:%M:%S")[0:6])
+ except (ValueError, TypeError):
+ return self.none()
+
+ return self.filter(Q(expiration__gt=timestamp) | Q(expiration__isnull=True),
+ creation__lt=timestamp)
+
+
+class Client(models.Model):
+ """Object representing every client we have seen stats for."""
+ creation = models.DateTimeField(auto_now_add=True)
+ name = models.CharField(max_length=128,)
+ current_interaction = models.ForeignKey('Interaction',
+ null=True, blank=True,
+ related_name="parent_client")
+ expiration = models.DateTimeField(blank=True, null=True)
+
+ def __str__(self):
+ return self.name
+
+ objects = ClientManager()
+
+ class Admin:
+ pass
+
+class Ping(models.Model):
+ """Represents a ping of a client (sparsely)."""
+ client = models.ForeignKey(Client, related_name="pings")
+ starttime = models.DateTimeField()
+ endtime = models.DateTimeField()
+ status = models.CharField(max_length=4, choices=PING_CHOICES)#up/down
+
+ class Meta:
+ get_latest_by = 'endtime'
+
+class InteractiveManager(models.Manager):
+ """Manages interactions objects."""
+
+ def recent_interactions_dict(self, maxdate=None, active_only=True):
+ """
+ Return the most recent interactions for clients as of a date.
+
+ This method uses an aggregated query to return a ValuesQuerySet.
+ Faster than raw SQL since it executes as a single query.
+ """
+
+ return self.values('client').annotate(max_timestamp=Max('timestamp')).values()
+
+ def interaction_per_client(self, maxdate = None, active_only=True):
+ """
+ Returns the most recent interactions for clients as of a date
+
+ Arguments:
+ maxdate -- datetime object. Most recent date to pull. (default None)
+ active_only -- Include only active clients (default True)
+
+ """
+
+ if maxdate and not isinstance(maxdate,datetime):
+ raise ValueError, 'Expected a datetime object'
+ return self.filter(id__in = self.get_interaction_per_client_ids(maxdate, active_only))
+
+ def get_interaction_per_client_ids(self, maxdate = None, active_only=True):
+ """
+ Returns the ids of most recent interactions for clients as of a date.
+
+ Arguments:
+ maxdate -- datetime object. Most recent date to pull. (default None)
+ active_only -- Include only active clients (default True)
+
+ """
+ from django.db import connection
+ cursor = connection.cursor()
+ cfilter = "expiration is null"
+
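+ # Raw SQL: find each client's newest interaction timestamp, then join back to fetch that interaction's id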
+ sql = 'select reports_interaction.id, x.client_id from (select client_id, MAX(timestamp) ' + \
+ 'as timer from reports_interaction'
+ if maxdate:
+ if not isinstance(maxdate,datetime):
+ raise ValueError, 'Expected a datetime object'
+ sql = sql + " where timestamp <= '%s' " % maxdate
+ cfilter = "(expiration is null or expiration > '%s') and creation <= '%s'" % (maxdate,maxdate)
+ sql = sql + ' GROUP BY client_id) x, reports_interaction where ' + \
+ 'reports_interaction.client_id = x.client_id AND reports_interaction.timestamp = x.timer'
+ if active_only:
+ sql = sql + " and x.client_id in (select id from reports_client where %s)" % \
+ cfilter
+ try:
+ cursor.execute(sql)
+ return [item[0] for item in cursor.fetchall()]
+ except:
+ '''FIXME - really need some error handling'''
+ pass
+ return []
+
+class Interaction(models.Model):
+ """Models each reconfiguration operation interaction between client and server."""
+ client = models.ForeignKey(Client, related_name="interactions",)
+ timestamp = models.DateTimeField()#Timestamp for this record
+ state = models.CharField(max_length=32)#good/bad/modified/etc
+ repo_rev_code = models.CharField(max_length=64)#repo revision at time of interaction
+ client_version = models.CharField(max_length=32)#Client Version
+ goodcount = models.IntegerField() # Number of good config-items
+ totalcount = models.IntegerField() # Total number of config-items
+ server = models.CharField(max_length=256) # Name of the server used for the interaction
+ bad_entries = models.IntegerField(default=-1)
+ modified_entries = models.IntegerField(default=-1)
+ extra_entries = models.IntegerField(default=-1)
+
+ def __str__(self):
+ return "With " + self.client.name + " @ " + self.timestamp.isoformat()
+
+ def percentgood(self):
+ if not self.totalcount == 0:
+ return (self.goodcount/float(self.totalcount))*100
+ else:
+ return 0
+
+ def percentbad(self):
+ if not self.totalcount == 0:
+ return ((self.totalcount-self.goodcount)/(float(self.totalcount)))*100
+ else:
+ return 0
+
+ def isclean(self):
+ if (self.bad_entry_count() == 0 and self.goodcount == self.totalcount):
+ return True
+ else:
+ return False
+
+ def isstale(self):
+ if (self == self.client.current_interaction): # Is most recent
+ if(datetime.now()-self.timestamp > timedelta(hours=25) ):
+ return True
+ else:
+ return False
+ else:
+ #Search for subsequent Interaction for this client
+ #Check if it happened more than 25 hrs ago.
+ if (self.client.interactions.filter(timestamp__gt=self.timestamp)
+ .order_by('timestamp')[0].timestamp -
+ self.timestamp > timedelta(hours=25)):
+ return True
+ else:
+ return False
+ def save(self):
+ super(Interaction, self).save() #call the real save...
+ self.client.current_interaction = self.client.interactions.latest()
+ self.client.save()#save again post update
+
+ def delete(self):
+ '''Override the default delete. Allows us to remove Performance items'''
+ pitems = list(self.performance_items.all())
+ super(Interaction, self).delete()
+ for perf in pitems:
+ if perf.interaction.count() == 0:
+ perf.delete()
+
+ def badcount(self):
+ return self.totalcount - self.goodcount
+
+ def bad(self):
+ return Entries_interactions.objects.select_related().filter(interaction=self, type=TYPE_BAD)
+
+ def bad_entry_count(self):
+ """Number of bad entries. Store the count in the interation field to save db queries."""
+ if self.bad_entries < 0:
+ self.bad_entries = Entries_interactions.objects.filter(interaction=self, type=TYPE_BAD).count()
+ self.save()
+ return self.bad_entries
+
+ def modified(self):
+ return Entries_interactions.objects.select_related().filter(interaction=self, type=TYPE_MODIFIED)
+
+ def modified_entry_count(self):
+ """Number of modified entries. Store the count in the interation field to save db queries."""
+ if self.modified_entries < 0:
+ self.modified_entries = Entries_interactions.objects.filter(interaction=self, type=TYPE_MODIFIED).count()
+ self.save()
+ return self.modified_entries
+
+ def extra(self):
+ return Entries_interactions.objects.select_related().filter(interaction=self, type=TYPE_EXTRA)
+
+ def extra_entry_count(self):
+ """Number of extra entries. Store the count in the interation field to save db queries."""
+ if self.extra_entries < 0:
+ self.extra_entries = Entries_interactions.objects.filter(interaction=self, type=TYPE_EXTRA).count()
+ self.save()
+ return self.extra_entries
+
+ objects = InteractiveManager()
+
+ class Admin:
+ list_display = ('client', 'timestamp', 'state')
+ list_filter = ['client', 'timestamp']
+ pass
+ class Meta:
+ get_latest_by = 'timestamp'
+ ordering = ['-timestamp']
+ unique_together = ("client", "timestamp")
+
+class Reason(models.Model):
+ """reason why modified or bad entry did not verify, or changed."""
+ owner = models.TextField(max_length=128, blank=True)
+ current_owner = models.TextField(max_length=128, blank=True)
+ group = models.TextField(max_length=128, blank=True)
+ current_group = models.TextField(max_length=128, blank=True)
+ perms = models.TextField(max_length=4, blank=True)#txt fixes typing issue
+ current_perms = models.TextField(max_length=4, blank=True)
+ status = models.TextField(max_length=3, blank=True)#on/off/(None)
+ current_status = models.TextField(max_length=1, blank=True)#on/off/(None)
+ to = models.TextField(max_length=256, blank=True)
+ current_to = models.TextField(max_length=256, blank=True)
+ version = models.TextField(max_length=128, blank=True)
+ current_version = models.TextField(max_length=128, blank=True)
+ current_exists = models.BooleanField() # False means it's missing. Default True
+ current_diff = models.TextField(max_length=1280, blank=True)
+ is_binary = models.BooleanField(default=False)
+ def __str__(self):
+ return "Reason"
+
+ @staticmethod
+ @transaction.commit_on_success
+ def prune_orphans():
+ '''Prune orphaned rows... no good way to use the ORM'''
+ cursor = connection.cursor()
+ cursor.execute('delete from reports_reason where not exists (select rei.id from reports_entries_interactions rei where rei.reason_id = reports_reason.id)')
+ transaction.set_dirty()
+
+
+class Entries(models.Model):
+ """Contains all the entries feed by the client."""
+ name = models.CharField(max_length=128, db_index=True)
+ kind = models.CharField(max_length=16, choices=KIND_CHOICES, db_index=True)
+
+ def __str__(self):
+ return self.name
+
+ @staticmethod
+ @transaction.commit_on_success
+ def prune_orphans():
+ '''Prune orphaned rows... no good way to use the ORM'''
+ cursor = connection.cursor()
+ cursor.execute('delete from reports_entries where not exists (select rei.id from reports_entries_interactions rei where rei.entry_id = reports_entries.id)')
+ transaction.set_dirty()
+
+class Entries_interactions(models.Model):
+ """Define the relation between the reason, the interaction and the entry."""
+ entry = models.ForeignKey(Entries)
+ reason = models.ForeignKey(Reason)
+ interaction = models.ForeignKey(Interaction)
+ type = models.IntegerField(choices=TYPE_CHOICES)
+
+class Performance(models.Model):
+ """Object representing performance data for any interaction."""
+ interaction = models.ManyToManyField(Interaction, related_name="performance_items")
+ metric = models.CharField(max_length=128)
+ value = models.DecimalField(max_digits=32, decimal_places=16)
+ def __str__(self):
+ return self.metric
+
+ @staticmethod
+ @transaction.commit_on_success
+ def prune_orphans():
+ '''Prune orphaned rows... no good way to use the ORM'''
+ cursor = connection.cursor()
+ cursor.execute('delete from reports_performance where not exists (select ri.id from reports_performance_interaction ri where ri.performance_id = reports_performance.id)')
+ transaction.set_dirty()
+
+class InternalDatabaseVersion(models.Model):
+ """Object that tell us to witch version is the database."""
+ version = models.IntegerField()
+ updated = models.DateTimeField(auto_now_add=True)
+
+ def __str__(self):
+ return "version %d updated the %s" % (self.version, self.updated.isoformat())
diff --git a/build/lib/Bcfg2/Server/Reports/reports/templates/404.html b/build/lib/Bcfg2/Server/Reports/reports/templates/404.html
new file mode 100644
index 000000000..168bd9fec
--- /dev/null
+++ b/build/lib/Bcfg2/Server/Reports/reports/templates/404.html
@@ -0,0 +1,8 @@
+{% extends 'base.html' %}
+{% block title %}Bcfg2 - Page not found{% endblock %}
+{% block fullcontent %}
+<h2>Page not found</h2>
+<p>
+The page or object requested could not be found.
+</p>
+{% endblock %}
diff --git a/build/lib/Bcfg2/Server/Reports/reports/templates/base-timeview.html b/build/lib/Bcfg2/Server/Reports/reports/templates/base-timeview.html
new file mode 100644
index 000000000..842de36f0
--- /dev/null
+++ b/build/lib/Bcfg2/Server/Reports/reports/templates/base-timeview.html
@@ -0,0 +1,25 @@
+{% extends "base.html" %}
+
+{% block timepiece %}
+<script type="text/javascript">
+function showCalendar() {
+ var cal = new CalendarPopup("calendar_div");
+ cal.showYearNavigation();
+ cal.select(document.forms['cal_form'].cal_date,'cal_link',
+ 'yyyy/MM/dd' {% if timestamp %}, '{{ timestamp|date:"Y/m/d" }}'{% endif %} );
+ return false;
+}
+function bcfg2_check_date() {
+ var new_date = document.getElementById('cal_date').value;
+ if(new_date) {
+ document.cal_form.submit();
+ }
+}
+document.write(getCalendarStyles());
+</script>
+{% if not timestamp %}Rendered at {% now "Y-m-d H:i" %} | {% else %}View as of {{ timestamp|date:"Y-m-d H:i" }} | {% endif %}{% spaceless %}
+ <a id='cal_link' name='cal_link' href='#' onclick='showCalendar(); return false;'
+ >[change]</a>
+ <form method='post' action='{{ path }}' id='cal_form' name='cal_form'><input id='cal_date' name='cal_date' type='hidden' value=''/></form>
+{% endspaceless %}
+{% endblock %}
diff --git a/build/lib/Bcfg2/Server/Reports/reports/templates/base.html b/build/lib/Bcfg2/Server/Reports/reports/templates/base.html
new file mode 100644
index 000000000..9bd9da218
--- /dev/null
+++ b/build/lib/Bcfg2/Server/Reports/reports/templates/base.html
@@ -0,0 +1,95 @@
+{% load bcfg2_tags %}
+
+<?xml version="1.0"?>
+<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">
+<html xmlns="http://www.w3.org/1999/xhtml" xml:lang="en" lang="en">
+<head>
+<title>{% block title %}Bcfg2 Reporting System{% endblock %}</title>
+
+<meta http-equiv="Content-type" content="text/html; charset=utf-8" />
+<meta http-equiv="Content-language" content="en" />
+<meta http-equiv="X-UA-Compatible" content="IE=EmulateIE7" />
+<meta name="robots" content="noindex, nofollow" />
+<meta http-equiv="cache-control" content="no-cache" />
+
+<link rel="stylesheet" type="text/css" href="{% to_media_url bcfg2_base.css %}" media="all" />
+<script type="text/javascript" src="{% to_media_url bcfg2.js %}"></script>
+<script type="text/javascript" src="{% to_media_url date.js %}"></script>
+<script type="text/javascript" src="{% to_media_url AnchorPosition.js %}"></script>
+<script type="text/javascript" src="{% to_media_url CalendarPopup.js %}"></script>
+<script type="text/javascript" src="{% to_media_url PopupWindow.js %}"></script>
+{% block extra_header_info %}{% endblock %}
+
+</head>
+<body onload="{% block body_onload %}{% endblock %}">
+
+ <div id="header">
+ <a href="http://bcfg2.org"><img src='{% to_media_url bcfg2_logo.png %}'
+ height='115' width='300' alt='Bcfg2' style='float:left; height: 115px' /></a>
+ </div>
+
+<div id="document">
+ <div id="content"><div id="contentwrapper">
+ {% block fullcontent %}
+ <div class='page_name'>
+ <h1>{% block pagebanner %}Page Banner{% endblock %}</h1>
+ <div id="timepiece">{% block timepiece %}Rendered at {% now "Y-m-d H:i" %}{% endblock %}</div>
+ </div>
+ <div class='detail_wrapper'>
+ {% block content %}{% endblock %}
+ </div>
+ {% endblock %}
+ </div></div><!-- content -->
+ <div id="sidemenucontainer"><div id="sidemenu">
+ {% block sidemenu %}
+ <ul class='menu-level1'>
+ <li>Overview</li>
+ </ul>
+ <ul class='menu-level2'>
+ <li><a href="{% url reports_summary %}">Summary</a></li>
+ <li><a href="{% url reports_history %}">Recent Interactions</a></li>
+ <li><a href="{% url reports_timing %}">Timing</a></li>
+ </ul>
+ <ul class='menu-level1'>
+ <li>Clients</li>
+ </ul>
+ <ul class='menu-level2'>
+ <li><a href="{% url reports_grid_view %}">Grid View</a></li>
+ <li><a href="{% url reports_detailed_list %}">Detailed List</a></li>
+ <li><a href="{% url reports_client_manage %}">Manage</a></li>
+ </ul>
+ <ul class='menu-level1'>
+ <li>Entries Configured</li>
+ </ul>
+ <ul class='menu-level2'>
+ <li><a href="{% url reports_item_list "bad" %}">Bad</a></li>
+ <li><a href="{% url reports_item_list "modified" %}">Modified</a></li>
+ <li><a href="{% url reports_item_list "extra" %}">Extra</a></li>
+ </ul>
+{% comment %}
+ TODO
+ <ul class='menu-level1'>
+ <li>Entry Types</li>
+ </ul>
+ <ul class='menu-level2'>
+ <li><a href="#">Action</a></li>
+ <li><a href="#">Package</a></li>
+ <li><a href="#">Path</a></li>
+ <li><a href="#">Service</a></li>
+ </ul>
+{% endcomment %}
+ <ul class='menu-level1'>
+ <li><a href="http://bcfg2.org">Homepage</a></li>
+ <li><a href="http://docs.bcfg2.org">Documentation</a></li>
+ </ul>
+ {% endblock %}
+ </div></div><!-- sidemenu -->
+ <div style='clear:both'></div>
+</div><!-- document -->
+ <div id="footer">
+ <span>Bcfg2 Version 1.1.0</span>
+ </div>
+
+<div id="calendar_div" style='position:absolute; visibility:hidden; background-color:white; layer-background-color:white;'></div>
+</body>
+</html>
diff --git a/build/lib/Bcfg2/Server/Reports/reports/templates/clients/detail.html b/build/lib/Bcfg2/Server/Reports/reports/templates/clients/detail.html
new file mode 100644
index 000000000..efd5f9e00
--- /dev/null
+++ b/build/lib/Bcfg2/Server/Reports/reports/templates/clients/detail.html
@@ -0,0 +1,127 @@
+{% extends "base.html" %}
+{% load bcfg2_tags %}
+
+{% block title %}Bcfg2 - Client {{client.name}}{% endblock %}
+
+{% block extra_header_info %}
+<style type="text/css">
+.node_data {
+ border: 1px solid #98DBCC;
+ margin: 10px;
+ padding-left: 18px;
+}
+.node_data td {
+ padding: 1px 20px 1px 2px;
+}
+span.history_links {
+ font-size: 90%;
+ margin-left: 50px;
+}
+span.history_links a {
+ font-size: 90%;
+}
+</style>
+{% endblock %}
+
+{% block body_onload %}javascript:clientdetailload(){% endblock %}
+
+{% block pagebanner %}Client Details{% endblock %}
+
+{% block content %}
+ <div class='detail_header'>
+ <h2>{{client.name}}</h2>
+ <a href='{% url reports_client_manage %}#{{ client.name }}'>[manage]</a>
+ <span class='history_links'><a href="{% url reports_client_history client.name %}">View History</a> | Jump to&nbsp;
+ <select id="quick" name="quick" onchange="javascript:pageJump('quick');">
+ <option value="" selected="selected">--- Time ---</option>
+ {% for i in client.interactions.all|slice:":25" %}
+ <option value="{% url reports_client_detail_pk hostname=client.name, pk=i.id %}">{{i.timestamp}}</option>
+ {% endfor %}
+ </select></span>
+ </div>
+
+ {% if interaction.isstale %}
+ <div class="warningbox">
+ This node did not run within the last 24 hours &#8212; it may be out of date.
+ </div>
+ {% endif %}
+ <table class='node_data'>
+ <tr><td>Timestamp</td><td>{{interaction.timestamp}}</td></tr>
+ {% if interaction.server %}
+ <tr><td>Served by</td><td>{{interaction.server}}</td></tr>
+ {% endif %}
+ {% if interaction.repo_rev_code %}
+ <tr><td>Revision</td><td>{{interaction.repo_rev_code}}</td></tr>
+ {% endif %}
+ <tr><td>State</td><td class='{{interaction.state}}-lineitem'>{{interaction.state|capfirst}}</td></tr>
+ <tr><td>Managed entries</td><td>{{interaction.totalcount}}</td></tr>
+ {% if not interaction.isclean %}
+ <tr><td>Deviation</td><td>{{interaction.percentbad|floatformat:"3"}}%</td></tr>
+ {% endif %}
+ </table>
+
+ {% if interaction.bad_entry_count %}
+ <div class='entry_list'>
+ <div class='entry_list_head dirty-lineitem'>
+ <div class='entry_expand_tab' onclick='javascript:toggleMe("bad_table");'>[+]</div>
+ <h3>Bad Entries &#8212; {{ interaction.bad_entry_count }}</h3>
+ </div>
+ <table id='bad_table' class='entry_list'>
+ {% for e in interaction.bad|sortwell %}
+ <tr class='{% cycle listview,listview_alt %}'>
+ <td class='entry_list_type'>{{e.entry.kind}}:</td>
+ <td><a href="{% url reports_item "bad",e.id %}">
+ {{e.entry.name}}</a></td>
+ </tr>
+ {% endfor %}
+ </table>
+ </div>
+ {% endif %}
+
+ {% if interaction.modified_entry_count %}
+ <div class='entry_list'>
+ <div class='entry_list_head modified-lineitem'>
+ <div class='entry_expand_tab' onclick='javascript:toggleMe("modified_table");'>[+]</div>
+ <h3>Modified Entries &#8212; {{ interaction.modified_entry_count }}</h3>
+ </div>
+ <table id='modified_table' class='entry_list'>
+ {% for e in interaction.modified|sortwell %}
+ <tr class='{% cycle listview,listview_alt %}'>
+ <td class='entry_list_type'>{{e.entry.kind}}:</td>
+ <td><a href="{% url reports_item "modified",e.id %}">
+ {{e.entry.name}}</a></td>
+ </tr>
+ {% endfor %}
+ </table>
+ </div>
+ {% endif %}
+
+ {% if interaction.extra_entry_count %}
+ <div class='entry_list'>
+ <div class='entry_list_head extra-lineitem'>
+ <div class='entry_expand_tab' onclick='javascript:toggleMe("extra_table");'>[+]</div>
+ <h3>Extra Entries &#8212; {{ interaction.extra_entry_count }}</h3>
+ </div>
+ <table id='extra_table' class='entry_list'>
+ {% for e in interaction.extra|sortwell %}
+ <tr class='{% cycle listview,listview_alt %}'>
+ <td class='entry_list_type'>{{e.entry.kind}}:</td>
+ <td><a href="{% url reports_item "extra",e.id %}">{{e.entry.name}}</a></td>
+ </tr>
+ {% endfor %}
+ </table>
+ </div>
+ {% endif %}
+
+ {% if entry_list %}
+ <div class="entry_list recent_history_wrapper">
+ <div class="entry_list_head" style="border-bottom: 2px solid #98DBCC;">
+ <h4 style="display: inline"><a href="{% url reports_client_history client.name %}">Recent Interactions</a></h4>
+ </div>
+ <div class='recent_history_box'>
+ {% include "widgets/interaction_list.inc" %}
+ <div style='padding-left: 5px'><a href="{% url reports_client_history client.name %}">more...</a></div>
+ </div>
+ </div>
+ {% endif %}
+{% endblock %}
diff --git a/build/lib/Bcfg2/Server/Reports/reports/templates/clients/detailed-list.html b/build/lib/Bcfg2/Server/Reports/reports/templates/clients/detailed-list.html
new file mode 100644
index 000000000..0c1fae8d5
--- /dev/null
+++ b/build/lib/Bcfg2/Server/Reports/reports/templates/clients/detailed-list.html
@@ -0,0 +1,46 @@
+{% extends "base-timeview.html" %}
+{% load bcfg2_tags %}
+
+{% block title %}Bcfg2 - Detailed Client Listing{% endblock %}
+{% block pagebanner %}Clients - Detailed View{% endblock %}
+
+{% block content %}
+<div class='client_list_box'>
+{% if entry_list %}
+ {% filter_navigator %}
+ <table cellpadding="3">
+ <tr id='table_list_header' class='listview'>
+ <td class='left_column'>Node</td>
+ <td class='right_column' style='width:75px'>State</td>
+ <td class='right_column_narrow'>Good</td>
+ <td class='right_column_narrow'>Bad</td>
+ <td class='right_column_narrow'>Modified</td>
+ <td class='right_column_narrow'>Extra</td>
+ <td class='right_column'>Last Run</td>
+ <td class='right_column_wide'>Server</td>
+ </tr>
+ {% for entry in entry_list %}
+ <tr class='{% cycle listview,listview_alt %}'>
+ <td class='left_column'><a href='{% url Bcfg2.Server.Reports.reports.views.client_detail hostname=entry.client.name, pk=entry.id %}'>{{ entry.client.name }}</a></td>
+ <td class='right_column' style='width:75px'><a href='{% add_url_filter state=entry.state %}'
+ {% ifequal entry.state 'dirty' %}class='dirty-lineitem'{% endifequal %}>{{ entry.state }}</a></td>
+ <td class='right_column_narrow'>{{ entry.goodcount }}</td>
+ <td class='right_column_narrow'>{{ entry.bad_entry_count }}</td>
+ <td class='right_column_narrow'>{{ entry.modified_entry_count }}</td>
+ <td class='right_column_narrow'>{{ entry.extra_entry_count }}</td>
+ <td class='right_column'><span {% if entry.timestamp|isstale:entry_max %}class='dirty-lineitem'{% endif %}>{{ entry.timestamp|date:"Y-m-d\&\n\b\s\p\;H:i"|safe }}</span></td>
+ <td class='right_column_wide'>
+ {% if entry.server %}
+ <a href='{% add_url_filter server=entry.server %}'>{{ entry.server }}</a>
+ {% else %}
+ &nbsp;
+ {% endif %}
+ </td>
+ </tr>
+ {% endfor %}
+ </table>
+{% else %}
+ <p>No client records are available.</p>
+{% endif %}
+</div>
+{% endblock %}
diff --git a/build/lib/Bcfg2/Server/Reports/reports/templates/clients/history.html b/build/lib/Bcfg2/Server/Reports/reports/templates/clients/history.html
new file mode 100644
index 000000000..01d4ec2f4
--- /dev/null
+++ b/build/lib/Bcfg2/Server/Reports/reports/templates/clients/history.html
@@ -0,0 +1,20 @@
+{% extends "base.html" %}
+{% load bcfg2_tags %}
+
+{% block title %}Bcfg2 - Interaction History{% endblock %}
+{% block pagebanner %}Interaction history{% if client %} for {{ client.name }}{% endif %}{% endblock %}
+
+{% block extra_header_info %}
+{% endblock %}
+
+{% block content %}
+<div class='client_list_box'>
+{% if entry_list %}
+ {% filter_navigator %}
+ {% include "widgets/interaction_list.inc" %}
+{% else %}
+ <p>No client records are available.</p>
+{% endif %}
+</div>
+{% page_navigator %}
+{% endblock %}
diff --git a/build/lib/Bcfg2/Server/Reports/reports/templates/clients/index.html b/build/lib/Bcfg2/Server/Reports/reports/templates/clients/index.html
new file mode 100644
index 000000000..e0c0d2d7a
--- /dev/null
+++ b/build/lib/Bcfg2/Server/Reports/reports/templates/clients/index.html
@@ -0,0 +1,34 @@
+{% extends "base-timeview.html" %}
+
+{% block extra_header_info %}
+{% endblock%}
+
+{% block title %}Bcfg2 - Client Grid View{% endblock %}
+
+{% block pagebanner %}Clients - Grid View{% endblock %}
+
+{% block content %}
+
+{% if inter_list %}
+ <table class='grid-view' align='center'>
+ {% for inter in inter_list %}
+ {% if forloop.first %}<tr>{% endif %}
+ <td class="{{inter.state}}-lineitem">
+ <a href="{% spaceless %}{% if not timestamp %}
+ {% url reports_client_detail inter.client.name %}
+ {% else %}
+ {% url reports_client_detail_pk inter.client.name,inter.id %}
+ {% endif %}
+ {% endspaceless %}">{{ inter.client.name }}</a>
+ </td>
+ {% if forloop.last %}
+ </tr>
+ {% else %}
+ {% if forloop.counter|divisibleby:"4" %}</tr><tr>{% endif %}
+ {% endif %}
+ {% endfor %}
+ </table>
+{% else %}
+ <p>No client records are available.</p>
+{% endif %}
+{% endblock %}
diff --git a/build/lib/Bcfg2/Server/Reports/reports/templates/clients/manage.html b/build/lib/Bcfg2/Server/Reports/reports/templates/clients/manage.html
new file mode 100644
index 000000000..5725ae577
--- /dev/null
+++ b/build/lib/Bcfg2/Server/Reports/reports/templates/clients/manage.html
@@ -0,0 +1,45 @@
+{% extends "base.html" %}
+
+{% block extra_header_info %}
+{% endblock%}
+
+{% block title %}Bcfg2 - Manage Clients{% endblock %}
+
+{% block pagebanner %}Clients - Manage{% endblock %}
+
+{% block content %}
+<div class='client_list_box'>
+ {% if message %}
+ <div class="warningbox">{{ message }}</div>
+ {% endif %}
+{% if clients %}
+ <table cellpadding="3">
+ <tr id='table_list_header' class='listview'>
+ <td class='left_column'>Node</td>
+ <td class='right_column'>Expiration</td>
+ <td class='right_column_narrow'>Manage</td>
+ </tr>
+ {% for client in clients %}
+ <tr class='{% cycle listview,listview_alt %}'>
+ <td><span id="{{ client.name }}"> </span>
+ <span id="ttag-{{ client.name }}"> </span>
+ <span id="s-ttag-{{ client.name }}"> </span>
+ <a href="{% url reports_client_detail client.name %}">{{ client.name }}</a></td>
+ <td>{% firstof client.expiration 'Active' %}</td>
+ <td>
+ <form method="post" action="{% url reports_client_manage %}">
+ <div> {# here for no reason other than to validate #}
+ <input type="hidden" name="client_name" value="{{ client.name }}" />
+ <input type="hidden" name="client_action" value="{% if client.expiration %}unexpire{% else %}expire{% endif %}" />
+ <input type="submit" value="{% if client.expiration %}Activate{% else %}Expire Now{% endif %}" />
+ </div>
+ </form>
+ </td>
+ </tr>
+ {% endfor %}
+ </table>
+ </div>
+{% else %}
+ <p>No client records are available.</p>
+{% endif %}
+{% endblock %}
diff --git a/build/lib/Bcfg2/Server/Reports/reports/templates/config_items/item.html b/build/lib/Bcfg2/Server/Reports/reports/templates/config_items/item.html
new file mode 100644
index 000000000..58aed1684
--- /dev/null
+++ b/build/lib/Bcfg2/Server/Reports/reports/templates/config_items/item.html
@@ -0,0 +1,109 @@
+{% extends "base.html" %}
+{% load syntax_coloring %}
+
+
+{% block title %}Bcfg2 - Element Details{% endblock %}
+
+
+{% block extra_header_info %}
+<style type="text/css">
+#table_list_header {
+ font-size: 100%;
+}
+table.entry_list {
+ width: auto;
+}
+div.information_wrapper {
+ margin: 15px;
+}
+div.diff_wrapper {
+ overflow: auto;
+}
+div.entry_list h3 {
+ font-size: 90%;
+ padding: 5px;
+}
+</style>
+{% endblock%}
+
+{% block pagebanner %}Element Details{% endblock %}
+
+{% block content %}
+ <div class='detail_header'>
+ <h3>{{mod_or_bad|capfirst}} {{item.entry.kind}}: {{item.entry.name}}</h3>
+ </div>
+
+ <div class="information_wrapper">
+
+ {% if isextra %}
+ <p>This item exists on the host but is not defined in the configuration.</p>
+ {% endif %}
+
+ {% if not item.reason.current_exists %}
+ <div class="warning">This item does not currently exist on the host but is specified to exist in the configuration.</div>
+ {% endif %}
+
+ {% if item.reason.current_owner or item.reason.current_group or item.reason.current_perms or item.reason.current_status or item.reason.current_to or item.reason.current_version %}
+ <table class='entry_list'>
+ <tr id='table_list_header'>
+ <td style='text-align: right;'>Problem Type</td><td>Expected</td><td style='border-bottom: 1px solid #98DBCC;'>Found</td></tr>
+ {% if item.reason.current_owner %}
+ <tr><td style='text-align: right'><b>Owner</b></td><td>{{item.reason.owner}}</td>
+ <td>{{item.reason.current_owner}}</td></tr>
+ {% endif %}
+ {% if item.reason.current_group %}
+ <tr><td style='text-align: right'><b>Group</b></td><td>{{item.reason.group}}</td>
+ <td>{{item.reason.current_group}}</td></tr>
+ {% endif %}
+ {% if item.reason.current_perms %}
+ <tr><td style='text-align: right'><b>Permissions</b></td><td>{{item.reason.perms}}</td>
+ <td>{{item.reason.current_perms}}</td></tr>
+ {% endif %}
+ {% if item.reason.current_status %}
+ <tr><td style='text-align: right'><b>Status</b></td><td>{{item.reason.status}}</td>
+ <td>{{item.reason.current_status}}</td></tr>
+ {% endif %}
+ {% if item.reason.current_to %}
+ <tr><td style='text-align: right'><b>Symlink Target</b></td><td>{{item.reason.to}}</td>
+ <td>{{item.reason.current_to}}</td></tr>
+ {% endif %}
+ {% if item.reason.current_version %}
+ <tr><td style='text-align: right'><b>Package Version</b></td><td>{{item.reason.version|cut:"("|cut:")"}}</td>
+ <td>{{item.reason.current_version|cut:"("|cut:")"}}</td></tr>
+ {% endif %}
+ </table>
+ {% endif %}
+
+ {% if item.reason.current_diff %}
+ <div class='entry_list'>
+ <div class='entry_list_head'>
+ <h3>Incorrect file contents</h3>
+ </div>
+ <div class='diff_wrapper'>
+ {{ item.reason.current_diff|syntaxhilight }}
+ </div>
+ </div>
+ {% endif %}
+
+
+ <div class='entry_list'>
+ <div class='entry_list_head'>
+ <h3>Occurrences on {{ timestamp|date:"Y-m-d" }}</h3>
+ </div>
+ {% if associated_list %}
+ <table class="entry_list" cellpadding="3">
+ {% for inter in associated_list %}
+ <tr><td><a href="{% url reports_client_detail inter.client.name %}"
+ >{{inter.client.name}}</a></td>
+ <td><a href="{% url reports_client_detail_pk hostname=inter.client.name,pk=inter.id %}"
+ >{{inter.timestamp}}</a></td>
+ </tr>
+ {% endfor %}
+ </table>
+ {% else %}
+ <p>Missing client list</p>
+ {% endif %}
+ </div>
+
+ </div><!-- information_wrapper -->
+{% endblock %}
diff --git a/build/lib/Bcfg2/Server/Reports/reports/templates/config_items/listing.html b/build/lib/Bcfg2/Server/Reports/reports/templates/config_items/listing.html
new file mode 100644
index 000000000..572249470
--- /dev/null
+++ b/build/lib/Bcfg2/Server/Reports/reports/templates/config_items/listing.html
@@ -0,0 +1,33 @@
+{% extends "base-timeview.html" %}
+{% load bcfg2_tags %}
+
+{% block title %}Bcfg2 - Element Listing{% endblock %}
+
+{% block extra_header_info %}
+{% endblock%}
+
+{% block pagebanner %}{{mod_or_bad|capfirst}} Element Listing{% endblock %}
+
+{% block content %}
+{% if item_list_dict %}
+ {% for kind, entries in item_list_dict.items %}
+
+ <div class='entry_list'>
+ <div class='entry_list_head element_list_head'>
+ <div class='entry_expand_tab' onclick='javascript:toggleMe("table_{{ kind }}");'>[+]</div>
+ <h3>{{ kind }} &#8212; {{ entries|length }}</h3>
+ </div>
+
+ <table id='table_{{ kind }}' class='entry_list'>
+ {% for e in entries %}
+ <tr class='{% cycle listview,listview_alt %}'>
+ <td><a href="{% url reports_item type=mod_or_bad,pk=e.id %}">{{e.entry.name}}</a></td>
+ </tr>
+ {% endfor %}
+ </table>
+ </div>
+ {% endfor %}
+{% else %}
+ <p>There are currently no inconsistent configuration entries.</p>
+{% endif %}
+{% endblock %}
diff --git a/build/lib/Bcfg2/Server/Reports/reports/templates/displays/summary.html b/build/lib/Bcfg2/Server/Reports/reports/templates/displays/summary.html
new file mode 100644
index 000000000..0124f635d
--- /dev/null
+++ b/build/lib/Bcfg2/Server/Reports/reports/templates/displays/summary.html
@@ -0,0 +1,42 @@
+{% extends "base-timeview.html" %}
+{% load bcfg2_tags %}
+
+{% block title %}Bcfg2 - Client Summary{% endblock %}
+{% block pagebanner %}Clients - Summary{% endblock %}
+
+{% block body_onload %}javascript:hide_table_array(hide_tables){% endblock %}
+
+{% block extra_header_info %}
+<script type="text/javascript">
+var hide_tables = new Array({{ summary_data|length }});
+{% for summary in summary_data %}
+hide_tables[{{ forloop.counter0 }}] = "table_{{ summary.name }}";
+{% endfor %}
+</script>
+{% endblock%}
+
+{% block content %}
+ <div class='detail_header'>
+ <h2>{{ node_count }} nodes reporting in</h2>
+ </div>
+{% if summary_data %}
+ {% for summary in summary_data %}
+ <div class='entry_list'>
+ <div class='entry_list_head element_list_head'>
+ <div class='entry_expand_tab' onclick='javascript:toggleMe("table_{{ summary.name }}");'>[+]</div>
+ <h3>{{ summary.nodes|length }} {{ summary.label }}</h3>
+ </div>
+
+ <table id='table_{{ summary.name }}' class='entry_list'>
+ {% for node in summary.nodes|sort_interactions_by_name %}
+ <tr class='{% cycle listview,listview_alt %}'>
+ <td><a href="{% url reports_client_detail_pk hostname=node.client.name,pk=node.id %}">{{ node.client.name }}</a></td>
+ </tr>
+ {% endfor %}
+ </table>
+ </div>
+ {% endfor %}
+{% else %}
+ <p>No data to report on</p>
+{% endif %}
+{% endblock %}
diff --git a/build/lib/Bcfg2/Server/Reports/reports/templates/displays/timing.html b/build/lib/Bcfg2/Server/Reports/reports/templates/displays/timing.html
new file mode 100644
index 000000000..47accb2cb
--- /dev/null
+++ b/build/lib/Bcfg2/Server/Reports/reports/templates/displays/timing.html
@@ -0,0 +1,38 @@
+{% extends "base-timeview.html" %}
+{% load bcfg2_tags %}
+
+{% block title %}Bcfg2 - Performance Metrics{% endblock %}
+{% block pagebanner %}Performance Metrics{% endblock %}
+
+
+{% block extra_header_info %}
+{% endblock%}
+
+{% block content %}
+<div class='client_list_box'>
+ {% if metrics %}
+ <table cellpadding="3">
+ <tr id='table_list_header' class='listview'>
+ <td>Name</td>
+ <td>Parse</td>
+ <td>Probe</td>
+ <td>Inventory</td>
+ <td>Install</td>
+ <td>Config</td>
+ <td>Total</td>
+ </tr>
+ {% for metric in metrics|dictsort:"name" %}
+ <tr class='{% cycle listview,listview_alt %}'>
+ <td><a style='font-size: 100%'
+ href="{% url reports_client_detail hostname=metric.name %}">{{ metric.name }}</a></td>
+ {% for mitem in metric|build_metric_list %}
+ <td>{{ mitem }}</td>
+ {% endfor %}
+ </tr>
+ {% endfor %}
+ </table>
+ {% else %}
+ <p>No metric data available</p>
+ {% endif %}
+</div>
+{% endblock %}
diff --git a/build/lib/Bcfg2/Server/Reports/reports/templates/widgets/filter_bar.html b/build/lib/Bcfg2/Server/Reports/reports/templates/widgets/filter_bar.html
new file mode 100644
index 000000000..6b57baf6a
--- /dev/null
+++ b/build/lib/Bcfg2/Server/Reports/reports/templates/widgets/filter_bar.html
@@ -0,0 +1,13 @@
+{% spaceless %}
+{% if filters %}
+{% for filter, filter_url in filters %}
+ {% if forloop.first %}
+ <div class="filter_bar">Active filters (click to remove):
+ {% endif %}
+ <a href='{{ filter_url }}'>{{ filter|capfirst }}</a>{% if not forloop.last %}, {% endif %}
+ {% if forloop.last %}
+ </div>
+ {% endif %}
+{% endfor %}
+{% endif %}
+{% endspaceless %}
diff --git a/build/lib/Bcfg2/Server/Reports/reports/templates/widgets/interaction_list.inc b/build/lib/Bcfg2/Server/Reports/reports/templates/widgets/interaction_list.inc
new file mode 100644
index 000000000..8f2dec1dc
--- /dev/null
+++ b/build/lib/Bcfg2/Server/Reports/reports/templates/widgets/interaction_list.inc
@@ -0,0 +1,38 @@
+{% load bcfg2_tags %}
+<div class='interaction_history_widget'>
+ <table cellpadding="3">
+ <tr id='table_list_header' class='listview'>
+ <td class='left_column'>Timestamp</td>
+ {% if not client %}
+ <td class='right_column_wide'>Client</td>
+ {% endif %}
+ <td class='right_column' style='width:75px'>State</td>
+ <td class='right_column_narrow'>Good</td>
+ <td class='right_column_narrow'>Bad</td>
+ <td class='right_column_narrow'>Modified</td>
+ <td class='right_column_narrow'>Extra</td>
+ <td class='right_column_wide'>Server</td>
+ </tr>
+ {% for entry in entry_list %}
+ <tr class='{% cycle listview,listview_alt %}'>
+ <td class='left_column'><a href='{% url reports_client_detail_pk hostname=entry.client.name, pk=entry.id %}'>{{ entry.timestamp|date:"Y-m-d\&\n\b\s\p\;H:i"|safe }}</a></td>
+ {% if not client %}
+ <td class='right_column_wide'><a href='{% add_url_filter hostname=entry.client.name %}'>{{ entry.client.name }}</a></td>
+ {% endif %}
+ <td class='right_column' style='width:75px'><a href='{% add_url_filter state=entry.state %}'
+ {% ifequal entry.state 'dirty' %}class='dirty-lineitem'{% endifequal %}>{{ entry.state }}</a></td>
+ <td class='right_column_narrow'>{{ entry.goodcount }}</td>
+ <td class='right_column_narrow'>{{ entry.bad_entry_count }}</td>
+ <td class='right_column_narrow'>{{ entry.modified_entry_count }}</td>
+ <td class='right_column_narrow'>{{ entry.extra_entry_count }}</td>
+ <td class='right_column_wide'>
+ {% if entry.server %}
+ <a href='{% add_url_filter server=entry.server %}'>{{ entry.server }}</a>
+ {% else %}
+ &nbsp;
+ {% endif %}
+ </td>
+ </tr>
+ {% endfor %}
+ </table>
+</div>
diff --git a/build/lib/Bcfg2/Server/Reports/reports/templates/widgets/page_bar.html b/build/lib/Bcfg2/Server/Reports/reports/templates/widgets/page_bar.html
new file mode 100644
index 000000000..aa0def83e
--- /dev/null
+++ b/build/lib/Bcfg2/Server/Reports/reports/templates/widgets/page_bar.html
@@ -0,0 +1,23 @@
+{% spaceless %}
+{% for page, page_url in pager %}
+ {% if forloop.first %}
+ <div class="page_bar">
+ {% if prev_page %}<a href="{{ prev_page }}">&lt; Prev</a><span>&nbsp;</span>{% endif %}
+ {% if first_page %}<a href="{{ first_page }}">1</a><span>&nbsp;...&nbsp;</span>{% endif %}
+ {% endif %}
+ {% ifequal page current_page %}
+ <span class='nav_bar_current'>{{ page }}</span>
+ {% else %}
+ <a href="{{ page_url }}">{{ page }}</a>
+ {% endifequal %}
+ {% if forloop.last %}
+ {% if last_page %}<span>&nbsp;...&nbsp;</span><a href="{{ last_page }}">{{ total_pages }}</a><span>&nbsp;</span>{% endif %}
+ {% if next_page %}<a href="{{ next_page }}">Next &gt;</a><span>&nbsp;</span>{% endif %}
+ |{% for limit, limit_url in page_limits %}&nbsp;<a href="{{ limit_url }}">{{ limit }}</a>{% endfor %}
+ </div>
+ {% else %}
+ <span>&nbsp;</span>
+ {% endif %}
+{% endfor %}
+{% endspaceless %}
+<!-- {{ path }} -->
diff --git a/build/lib/Bcfg2/Server/Reports/reports/templatetags/__init__.py b/build/lib/Bcfg2/Server/Reports/reports/templatetags/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/build/lib/Bcfg2/Server/Reports/reports/templatetags/__init__.py
diff --git a/build/lib/Bcfg2/Server/Reports/reports/templatetags/bcfg2_tags.py b/build/lib/Bcfg2/Server/Reports/reports/templatetags/bcfg2_tags.py
new file mode 100644
index 000000000..7fffe289d
--- /dev/null
+++ b/build/lib/Bcfg2/Server/Reports/reports/templatetags/bcfg2_tags.py
@@ -0,0 +1,274 @@
+from django import template
+from django.core.urlresolvers import resolve, reverse, Resolver404, NoReverseMatch
+from django.utils.encoding import smart_unicode, smart_str
+from datetime import datetime, timedelta
+from Bcfg2.Server.Reports.utils import filter_list
+
+register = template.Library()
+
+__PAGE_NAV_LIMITS__ = (10, 25, 50, 100)
+
+@register.inclusion_tag('widgets/page_bar.html', takes_context=True)
+def page_navigator(context):
+ """
+ Creates paginated links.
+
+ Expects the context to be a RequestContext and views.prepare_paginated_list()
+ to have populated page information.
+ """
+ fragment = dict()
+ try:
+ path = context['request'].META['PATH_INFO']
+ total_pages = int(context['total_pages'])
+ records_per_page = int(context['records_per_page'])
+ except KeyError, e:
+ return fragment
+ except ValueError, e:
+ return fragment
+
+ if total_pages < 2:
+ return {}
+
+ try:
+ view, args, kwargs = resolve(path)
+ current_page = int(kwargs.get('page_number',1))
+ fragment['current_page'] = current_page
+ fragment['page_number'] = current_page
+ fragment['total_pages'] = total_pages
+ fragment['records_per_page'] = records_per_page
+ if current_page > 1:
+ kwargs['page_number'] = current_page - 1
+ fragment['prev_page'] = reverse(view, args=args, kwargs=kwargs)
+ if current_page < total_pages:
+ kwargs['page_number'] = current_page + 1
+ fragment['next_page'] = reverse(view, args=args, kwargs=kwargs)
+
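+ # Build a sliding window of at most five page links centred on the current page, clamped to the valid range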
+ view_range = 5
+ if total_pages > view_range:
+ pager_start = current_page - 2
+ pager_end = current_page + 2
+ if pager_start < 1:
+ pager_end += (1 - pager_start)
+ pager_start = 1
+ if pager_end > total_pages:
+ pager_start -= (pager_end - total_pages)
+ pager_end = total_pages
+ else:
+ pager_start = 1
+ pager_end = total_pages
+
+ if pager_start > 1:
+ kwargs['page_number'] = 1
+ fragment['first_page'] = reverse(view, args=args, kwargs=kwargs)
+ if pager_end < total_pages:
+ kwargs['page_number'] = total_pages
+ fragment['last_page'] = reverse(view, args=args, kwargs=kwargs)
+
+ pager = []
+ for page in range(pager_start, int(pager_end) + 1):
+ kwargs['page_number'] = page
+ pager.append( (page, reverse(view, args=args, kwargs=kwargs)) )
+
+ kwargs['page_number'] = 1
+ page_limits = []
+ for limit in __PAGE_NAV_LIMITS__:
+ kwargs['page_limit'] = limit
+ page_limits.append( (limit, reverse(view, args=args, kwargs=kwargs)) )
+ # resolver doesn't like this
+ del kwargs['page_number']
+ del kwargs['page_limit']
+ page_limits.append( ('all', reverse(view, args=args, kwargs=kwargs) + "|all") )
+
+ fragment['pager'] = pager
+ fragment['page_limits'] = page_limits
+
+ except Resolver404:
+ path = "404"
+ except NoReverseMatch, nr:
+ path = "NoReverseMatch: %s" % nr
+ except ValueError:
+ path = "ValueError"
+ #FIXME - Handle these
+
+ fragment['path'] = path
+ return fragment
+
+@register.inclusion_tag('widgets/filter_bar.html', takes_context=True)
+def filter_navigator(context):
+ try:
+ path = context['request'].META['PATH_INFO']
+ view, args, kwargs = resolve(path)
+
+ # Strip any page limits and numbers
+ if 'page_number' in kwargs:
+ del kwargs['page_number']
+ if 'page_limit' in kwargs:
+ del kwargs['page_limit']
+
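+ # For each filter currently applied, build a link to the same view with that filter removed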
+ filters = []
+ for filter in filter_list:
+ if filter in kwargs:
+ myargs = kwargs.copy()
+ del myargs[filter]
+ filters.append( (filter, reverse(view, args=args, kwargs=myargs) ) )
+ filters.sort(lambda x,y: cmp(x[0], y[0]))
+ return { 'filters': filters }
+ except (Resolver404, NoReverseMatch, ValueError, KeyError):
+ pass
+ return dict()
+
+def _subtract_or_na(mdict, x, y):
+ """
+ Shortcut for build_metric_list
+ """
+ try:
+ return round(mdict[x] - mdict[y], 4)
+ except:
+ return "n/a"
+
+@register.filter
+def build_metric_list(mdict):
+ """
+ Create a list of metric table entries
+
+ Moved here to simplify the view. Should really handle the case where these
+ are missing...
+ """
+ td_list = []
+ # parse
+ td_list.append( _subtract_or_na(mdict, 'config_parse', 'config_download'))
+ #probe
+ td_list.append( _subtract_or_na(mdict, 'probe_upload', 'start'))
+ #inventory
+ td_list.append( _subtract_or_na(mdict, 'inventory', 'initialization'))
+ #install
+ td_list.append( _subtract_or_na(mdict, 'install', 'inventory'))
+ #cfg download & parse
+ td_list.append( _subtract_or_na(mdict, 'config_parse', 'probe_upload'))
+ #total
+ td_list.append( _subtract_or_na(mdict, 'finished', 'start'))
+ return td_list
+
+@register.filter
+def isstale(timestamp, entry_max=None):
+ """
+ Check for a stale timestamp
+
+ Compares two timestamps and returns True if the
+ difference is greater than 24 hours.
+ """
+ if not entry_max:
+ entry_max = datetime.now()
+ return entry_max - timestamp > timedelta(hours=24)
+
+@register.filter
+def sort_interactions_by_name(value):
+ """
+ Sort an interaction list by client name
+ """
+ inters = list(value)
+ inters.sort(lambda a,b: cmp(a.client.name, b.client.name))
+ return inters
+
+class AddUrlFilter(template.Node):
+ def __init__(self, filter_name, filter_value):
+ self.filter_name = filter_name
+ self.filter_value = filter_value
+ self.fallback_view = 'Bcfg2.Server.Reports.reports.views.render_history_view'
+
+ def render(self, context):
+ link = '#'
+ try:
+ path = context['request'].META['PATH_INFO']
+ view, args, kwargs = resolve(path)
+ filter_value = self.filter_value.resolve(context, True)
+ if filter_value:
+ filter_name = smart_str(self.filter_name)
+ filter_value = smart_unicode(filter_value)
+ kwargs[filter_name] = filter_value
+ # These two filters don't make sense together
+ if filter_name == 'server' and 'hostname' in kwargs:
+ del kwargs['hostname']
+ elif filter_name == 'hostname' and 'server' in kwargs:
+ del kwargs['server']
+ try:
+ link = reverse(view, args=args, kwargs=kwargs)
+ except NoReverseMatch, rm:
+ link = reverse(self.fallback_view, args=None,
+ kwargs={ filter_name: filter_value })
+ except NoReverseMatch, rm:
+ raise rm
+ except (Resolver404, ValueError), e:
+ pass
+ return link
+
+@register.tag
+def add_url_filter(parser, token):
+ """
+ Return a url with the filter added to the current view.
+
+ Takes a new filter and resolves the current view with the new filter
+ applied. Falls back to Bcfg2.Server.Reports.reports.views.render_history_view
+ by default.
+
+ {% add_url_filter server=interaction.server %}
+ """
+ try:
+ tag_name, filter_pair = token.split_contents()
+ filter_name, filter_value = filter_pair.split('=', 1)
+ filter_name = filter_name.strip()
+ filter_value = parser.compile_filter(filter_value)
+ except ValueError:
+ raise template.TemplateSyntaxError, "%r tag requires exactly one argument" % token.contents.split()[0]
+ if not filter_name or not filter_value:
+ raise template.TemplateSyntaxError, "argument should be a filter=value pair"
+
+ return AddUrlFilter(filter_name, filter_value)
+
+@register.filter
+def sortwell(value):
+ """
+ Sorts a list (or evaluates a queryset to a list) of bad, extra, or modified items
+ in the best way for presentation.
+ """
+
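+ # Two stable sorts: by entry name first, then by kind, so items end up grouped by kind and name-sorted within each kind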
+ configItems = list(value)
+ configItems.sort(lambda x,y: cmp(x.entry.name, y.entry.name))
+ configItems.sort(lambda x,y: cmp(x.entry.kind, y.entry.kind))
+ return configItems
+
+class MediaTag(template.Node):
+ def __init__(self, filter_value):
+ self.filter_value = filter_value
+
+ def render(self, context):
+ base = context['MEDIA_URL']
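+ # Prefer an explicit bcfg2.media_url from the request environ; otherwise fall back to prefixing MEDIA_URL with the script mount point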
+ try:
+ request = context['request']
+ try:
+ base = request.environ['bcfg2.media_url']
+ except:
+ if request.path != request.META['PATH_INFO']:
+ offset = request.path.find(request.META['PATH_INFO'])
+ if offset > 0:
+ base = "%s/%s" % (request.path[:offset], \
+ context['MEDIA_URL'].strip('/'))
+ except:
+ pass
+ return "%s/%s" % (base, self.filter_value)
+
+@register.tag
+def to_media_url(parser, token):
+ """
+ Return a url relative to the media_url.
+
+ {% to_media_url /bcfg2.css %}
+ """
+ try:
+ tag_name, filter_value = token.split_contents()
+ filter_value = parser.compile_filter(filter_value)
+ except ValueError:
+ raise template.TemplateSyntaxError, "%r tag requires exactly one argument" % token.contents.split()[0]
+
+ return MediaTag(filter_value)
+
diff --git a/build/lib/Bcfg2/Server/Reports/reports/templatetags/syntax_coloring.py b/build/lib/Bcfg2/Server/Reports/reports/templatetags/syntax_coloring.py
new file mode 100644
index 000000000..43dafb262
--- /dev/null
+++ b/build/lib/Bcfg2/Server/Reports/reports/templatetags/syntax_coloring.py
@@ -0,0 +1,41 @@
+from django import template
+from django.utils.encoding import smart_unicode, smart_str
+from django.utils.html import conditional_escape
+from django.utils.safestring import mark_safe
+
+register = template.Library()
+
+try:
+ from pygments import highlight
+ from pygments.lexers import get_lexer_by_name
+ from pygments.formatters import HtmlFormatter
+ colorize = True
+
+except:
+ colorize = False
+
+@register.filter
+def syntaxhilight(value, arg="diff", autoescape=None):
+ """
+ Return a syntax-highlighted version of the given code; the filter argument selects the language (default: diff).
+ """
+
+ if autoescape:
+ value = conditional_escape(value)
+ arg = conditional_escape(arg)
+
+ if colorize:
+ try:
+ output = u'<style type="text/css">' \
+ + smart_unicode(HtmlFormatter().get_style_defs('.highlight')) \
+ + u'</style>'
+
+ lexer = get_lexer_by_name(arg)
+ output += highlight(value, lexer, HtmlFormatter())
+ return mark_safe(output)
+ except:
+ return value
+ else:
+ return mark_safe(u'<div class="note-box">Tip: Install pygments for highlighting</div><pre>%s</pre>' % value)
+syntaxhilight.needs_autoescape = True
+
diff --git a/build/lib/Bcfg2/Server/Reports/reports/urls.py b/build/lib/Bcfg2/Server/Reports/reports/urls.py
new file mode 100644
index 000000000..9970d26a1
--- /dev/null
+++ b/build/lib/Bcfg2/Server/Reports/reports/urls.py
@@ -0,0 +1,55 @@
+from django.conf.urls.defaults import *
+from django.core.urlresolvers import reverse, NoReverseMatch
+from django.http import HttpResponsePermanentRedirect
+from Bcfg2.Server.Reports.utils import filteredUrls, paginatedUrls, timeviewUrls
+
+def newRoot(request):
+ try:
+ grid_view = reverse('reports_grid_view')
+ except NoReverseMatch:
+ grid_view = '/grid'
+ return HttpResponsePermanentRedirect(grid_view)
+
+urlpatterns = patterns('Bcfg2.Server.Reports.reports',
+ (r'^$', newRoot),
+
+ url(r'^manage/?$', 'views.client_manage', name='reports_client_manage'),
+ url(r'^client/(?P<hostname>\S+)/(?P<pk>\d+)/?$', 'views.client_detail', name='reports_client_detail_pk'),
+ url(r'^client/(?P<hostname>\S+)/?$', 'views.client_detail', name='reports_client_detail'),
+ url(r'^elements/(?P<type>\w+)/(?P<pk>\d+)/?$', 'views.config_item', name='reports_item'),
+)
+
+urlpatterns += patterns('Bcfg2.Server.Reports.reports',
+ *timeviewUrls(
+ (r'^grid/?$', 'views.client_index', None, 'reports_grid_view'),
+ (r'^summary/?$', 'views.display_summary', None, 'reports_summary'),
+ (r'^timing/?$', 'views.display_timing', None, 'reports_timing'),
+ (r'^elements/(?P<type>\w+)/?$', 'views.config_item_list', None, 'reports_item_list'),
+))
+
+urlpatterns += patterns('Bcfg2.Server.Reports.reports',
+ *filteredUrls(*timeviewUrls(
+ (r'^detailed/?$',
+ 'views.client_detailed_list', None, 'reports_detailed_list')
+)))
+
+urlpatterns += patterns('Bcfg2.Server.Reports.reports',
+ *paginatedUrls( *filteredUrls(
+ (r'^history/?$',
+ 'views.render_history_view', None, 'reports_history'),
+ (r'^history/(?P<hostname>[\w\-\.]+)/?$',
+ 'views.render_history_view', None, 'reports_client_history'),
+)))
+
+ # Uncomment this for admin:
+ #(r'^admin/', include('django.contrib.admin.urls')),
+
+
+## Uncomment this section if using authentication
+#urlpatterns += patterns('',
+# (r'^login/$', 'django.contrib.auth.views.login',
+# {'template_name': 'auth/login.html'}),
+# (r'^logout/$', 'django.contrib.auth.views.logout',
+# {'template_name': 'auth/logout.html'})
+# )
+
diff --git a/build/lib/Bcfg2/Server/Reports/reports/views.py b/build/lib/Bcfg2/Server/Reports/reports/views.py
new file mode 100644
index 000000000..00d35c092
--- /dev/null
+++ b/build/lib/Bcfg2/Server/Reports/reports/views.py
@@ -0,0 +1,379 @@
+"""
+Report views
+
+Functions to handle all of the reporting views.
+"""
+from django.template import Context, RequestContext, loader
+from django.http import HttpResponse, HttpResponseRedirect, HttpResponseServerError, Http404
+from django.shortcuts import render_to_response, get_object_or_404
+from django.core.urlresolvers import resolve, reverse, Resolver404, NoReverseMatch
+from django.db import connection
+from django.db.backends import util
+
+from Bcfg2.Server.Reports.reports.models import *
+from datetime import datetime, timedelta
+from time import strptime
+import sys
+
+class PaginationError(Exception):
+ """This error is raised when pagination cannot be completed."""
+ pass
+
+def server_error(request):
+ """
+ 500 error handler.
+
+ For now always return the debug response. Mailing isn't appropriate here.
+
+ """
+ from django.views import debug
+ return debug.technical_500_response(request, *sys.exc_info())
+
+def timeview(fn):
+ """
+    Set up a timeview view
+
+ Handles backend posts from the calendar and converts date pieces
+ into a 'timestamp' parameter
+
+ """
+ def _handle_timeview(request, **kwargs):
+ """Send any posts back."""
+ if request.method == 'POST':
+ cal_date = request.POST['cal_date']
+ try:
+ fmt = "%Y/%m/%d"
+ if cal_date.find(' ') > -1:
+ fmt += " %H:%M"
+ timestamp = datetime(*strptime(cal_date, fmt)[0:6])
+ view, args, kw = resolve(request.META['PATH_INFO'])
+                kw['year'] = "%04d" % timestamp.year
+                kw['month'] = "%02d" % timestamp.month
+                kw['day'] = "%02d" % timestamp.day
+ if cal_date.find(' ') > -1:
+ kw['hour'] = timestamp.hour
+ kw['minute'] = timestamp.minute
+ return HttpResponseRedirect(reverse(view, args=args, kwargs=kw))
+ except KeyError:
+ pass
+ except:
+ pass
+ # FIXME - Handle this
+
+ """Extract timestamp from args."""
+ timestamp = None
+ try:
+ timestamp = datetime(int(kwargs.pop('year')), int(kwargs.pop('month')),
+ int(kwargs.pop('day')), int(kwargs.pop('hour', 0)),
+ int(kwargs.pop('minute', 0)), 0)
+ kwargs['timestamp'] = timestamp
+ except KeyError:
+ pass
+ except:
+ raise
+ return fn(request, **kwargs)
+
+ return _handle_timeview
+
+def config_item(request, pk, type="bad"):
+ """
+ Display a single entry.
+
+    Displays information about a single entry.
+
+ """
+ item = get_object_or_404(Entries_interactions, id=pk)
+    timestamp = item.interaction.timestamp
+    time_start = item.interaction.timestamp.replace(hour=0, minute=0,
+                                                    second=0, microsecond=0)
+    time_end = time_start + timedelta(days=1)
+
+ todays_data = Interaction.objects.filter(\
+ timestamp__gte=time_start,\
+ timestamp__lt=time_end)
+ shared_entries = Entries_interactions.objects.filter(entry=item.entry,\
+ reason=item.reason, type=item.type,
+ interaction__in=[x['id']\
+ for x in todays_data.values('id')])
+
+ associated_list = Interaction.objects.filter(id__in=[x['interaction']\
+ for x in shared_entries.values('interaction')])\
+ .order_by('client__name','timestamp').select_related().all()
+
+ return render_to_response('config_items/item.html',
+ {'item':item,
+ 'isextra': item.type == TYPE_EXTRA,
+ 'mod_or_bad': type,
+ 'associated_list':associated_list,
+ 'timestamp' : timestamp},
+ context_instance=RequestContext(request))
+
+@timeview
+def config_item_list(request, type, timestamp=None):
+ """Render a listing of affected elements"""
+ mod_or_bad = type.lower()
+ type = convert_entry_type_to_id(type)
+ if type < 0:
+ raise Http404
+
+ current_clients = Interaction.objects.get_interaction_per_client_ids(timestamp)
+ item_list_dict = {}
+ seen = dict()
+ for x in Entries_interactions.objects.filter(interaction__in=current_clients, type=type).select_related():
+ if (x.entry, x.reason) in seen:
+ continue
+ seen[(x.entry, x.reason)] = 1
+ if item_list_dict.get(x.entry.kind, None):
+ item_list_dict[x.entry.kind].append(x)
+ else:
+ item_list_dict[x.entry.kind] = [x]
+
+ for kind in item_list_dict:
+ item_list_dict[kind].sort(lambda a,b: cmp(a.entry.name, b.entry.name))
+
+ return render_to_response('config_items/listing.html', {'item_list_dict':item_list_dict,
+ 'mod_or_bad':mod_or_bad,
+ 'timestamp' : timestamp},
+ context_instance=RequestContext(request))
+
+@timeview
+def client_index(request, timestamp=None):
+ """
+ Render a grid view of active clients.
+
+ Keyword parameters:
+    timestamp -- datetime object to render from
+
+ """
+    inter_list = Interaction.objects.interaction_per_client(timestamp).select_related()\
+ .order_by("client__name").all()
+
+ return render_to_response('clients/index.html',
+                              {'inter_list': inter_list, 'timestamp': timestamp},
+ context_instance=RequestContext(request))
+
+@timeview
+def client_detailed_list(request, timestamp=None, **kwargs):
+ """
+ Provides a more detailed list view of the clients. Allows for extra
+ filters to be passed in.
+
+ """
+
+ kwargs['interaction_base'] = Interaction.objects.interaction_per_client(timestamp).select_related()
+ kwargs['orderby'] = "client__name"
+ kwargs['page_limit'] = 0
+ return render_history_view(request, 'clients/detailed-list.html', **kwargs)
+
+def client_detail(request, hostname=None, pk=None):
+    context = dict()
+    client = get_object_or_404(Client, name=hostname)
+    if pk is None:
+ context['interaction'] = client.current_interaction
+ return render_history_view(request, 'clients/detail.html', page_limit=5,
+ client=client, context=context)
+ else:
+ context['interaction'] = client.interactions.get(pk=pk)
+ return render_history_view(request, 'clients/detail.html', page_limit=5,
+ client=client, maxdate=context['interaction'].timestamp, context=context)
+
+def client_manage(request):
+ """Manage client expiration"""
+ message = ''
+ if request.method == 'POST':
+ try:
+ client_name = request.POST.get('client_name', None)
+ client_action = request.POST.get('client_action', None)
+ client = Client.objects.get(name=client_name)
+ if client_action == 'expire':
+                client.expiration = datetime.now()
+ client.save()
+ message = "Expiration for %s set to %s." % \
+ (client_name, client.expiration.strftime("%Y-%m-%d %H:%M:%S"))
+ elif client_action == 'unexpire':
+                client.expiration = None
+ client.save()
+ message = "%s is now active." % client_name
+ else:
+ message = "Missing action"
+ except Client.DoesNotExist:
+ if not client_name:
+ client_name = "<none>"
+ message = "Couldn't find client \"%s\"" % client_name
+
+ return render_to_response('clients/manage.html',
+ {'clients': Client.objects.order_by('name').all(), 'message': message},
+ context_instance=RequestContext(request))
+
+@timeview
+def display_summary(request, timestamp=None):
+ """
+ Display a summary of the bcfg2 world
+ """
+ query = Interaction.objects.interaction_per_client(timestamp).select_related()
+ node_count = query.count()
+ recent_data = query.all()
+ if not timestamp:
+ timestamp = datetime.now()
+
+ collected_data = dict(clean=[],bad=[],modified=[],extra=[],stale=[],pings=[])
+ for node in recent_data:
+ if timestamp - node.timestamp > timedelta(hours=24):
+ collected_data['stale'].append(node)
+            # If stale, check for uptime
+ try:
+ if node.client.pings.latest().status == 'N':
+ collected_data['pings'].append(node)
+ except Ping.DoesNotExist:
+ collected_data['pings'].append(node)
+ continue
+ if node.bad_entry_count() > 0:
+ collected_data['bad'].append(node)
+ else:
+ collected_data['clean'].append(node)
+ if node.modified_entry_count() > 0:
+ collected_data['modified'].append(node)
+ if node.extra_entry_count() > 0:
+ collected_data['extra'].append(node)
+
+ # label, header_text, node_list
+ summary_data = []
+ get_dict = lambda name, label: { 'name': name,
+ 'nodes': collected_data[name],
+ 'label': label }
+ if len(collected_data['clean']) > 0:
+ summary_data.append( get_dict('clean', 'nodes are clean.') )
+ if len(collected_data['bad']) > 0:
+ summary_data.append( get_dict('bad', 'nodes are bad.') )
+ if len(collected_data['modified']) > 0:
+ summary_data.append( get_dict('modified', 'nodes were modified.') )
+ if len(collected_data['extra']) > 0:
+ summary_data.append( get_dict('extra',
+ 'nodes have extra configurations.') )
+ if len(collected_data['stale']) > 0:
+ summary_data.append( get_dict('stale',
+ 'nodes did not run within the last 24 hours.') )
+ if len(collected_data['pings']) > 0:
+ summary_data.append( get_dict('pings',
+ 'are down.') )
+
+ return render_to_response('displays/summary.html',
+ {'summary_data': summary_data, 'node_count': node_count,
+ 'timestamp': timestamp},
+ context_instance=RequestContext(request))
+
+@timeview
+def display_timing(request, timestamp=None):
+ mdict = dict()
+ inters = Interaction.objects.interaction_per_client(timestamp).select_related().all()
+    for inter in inters:
+        mdict[inter] = {'name': inter.client.name}
+ for metric in Performance.objects.filter(interaction__in=mdict.keys()).all():
+ for i in metric.interaction.all():
+ mdict[i][metric.metric] = metric.value
+ return render_to_response('displays/timing.html',
+ {'metrics': mdict.values(), 'timestamp': timestamp},
+ context_instance=RequestContext(request))
+
+
+def render_history_view(request, template='clients/history.html', **kwargs):
+ """
+    Provides a detailed history of a client's interactions.
+
+    Renders a detailed history of a client's interactions. Allows for various
+ filters and settings. Automatically sets pagination data into the context.
+
+ Keyword arguments:
+ interaction_base -- Interaction QuerySet to build on
+ (default Interaction.objects)
+ context -- Additional context data to render with
+ page_number -- Page to display (default 1)
+ page_limit -- Number of results per page, if 0 show all (default 25)
+ client -- Client object to render
+ hostname -- Client hostname to lookup and render. Returns a 404 if
+ not found
+ server -- Filter interactions by server
+ state -- Filter interactions by state
+ entry_max -- Most recent interaction to display
+ orderby -- Sort results using this field
+
+ """
+
+ context = kwargs.get('context', dict())
+ max_results = int(kwargs.get('page_limit', 25))
+ page = int(kwargs.get('page_number', 1))
+
+    client = kwargs.get('client', None)
+ if not client and 'hostname' in kwargs:
+ client = get_object_or_404(Client, name=kwargs['hostname'])
+ if client:
+ context['client'] = client
+
+ entry_max = kwargs.get('maxdate', None)
+ context['entry_max'] = entry_max
+
+ # Either filter by client or limit by clients
+ iquery = kwargs.get('interaction_base', Interaction.objects)
+ if client:
+ iquery = iquery.filter(client__exact=client).select_related()
+
+ if 'orderby' in kwargs and kwargs['orderby']:
+ iquery = iquery.order_by(kwargs['orderby'])
+
+ if 'state' in kwargs and kwargs['state']:
+ iquery = iquery.filter(state__exact=kwargs['state'])
+ if 'server' in kwargs and kwargs['server']:
+ iquery = iquery.filter(server__exact=kwargs['server'])
+
+ if entry_max:
+ iquery = iquery.filter(timestamp__lte=entry_max)
+
+ if max_results < 0:
+ max_results = 1
+ entry_list = []
+ if max_results > 0:
+ try:
+ rec_start, rec_end = prepare_paginated_list(request, context, iquery, page, max_results)
+ except PaginationError, page_error:
+ if isinstance(page_error[0], HttpResponse):
+ return page_error[0]
+ return HttpResponseServerError(page_error)
+ context['entry_list'] = iquery.all()[rec_start:rec_end]
+ else:
+ context['entry_list'] = iquery.all()
+
+ return render_to_response(template, context,
+ context_instance=RequestContext(request))
+
+def prepare_paginated_list(request, context, paged_list, page=1, max_results=25):
+ """
+ Prepare context and slice an object for pagination.
+ """
+ if max_results < 1:
+        raise PaginationError, "Max results less than 1"
+    if paged_list is None:
+ raise PaginationError, "Invalid object"
+
+ try:
+ nitems = paged_list.count()
+ except TypeError:
+ nitems = len(paged_list)
+
+ rec_start = (page - 1) * int(max_results)
+ try:
+ total_pages = (nitems / int(max_results)) + 1
+ except:
+ total_pages = 1
+ if page > total_pages:
+        # If we've gone past the last page, redirect back
+ try:
+ view, args, kwargs = resolve(request.META['PATH_INFO'])
+ kwargs['page_number'] = total_pages
+ raise PaginationError, HttpResponseRedirect( reverse(view, kwargs=kwargs) )
+ except (Resolver404, NoReverseMatch, ValueError):
+ raise "Accessing beyond last page. Unable to resolve redirect."
+
+ context['total_pages'] = total_pages
+ context['records_per_page'] = max_results
+ return (rec_start, rec_start + int(max_results))
+
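Editor's note: the slice arithmetic in prepare_paginated_list() is easy to trace with concrete numbers. A minimal sketch with hypothetical values:

    # page 3 at the default 25 results per page:
    page, max_results = 3, 25
    rec_start = (page - 1) * max_results       # 50
    rec_end = rec_start + max_results          # 75
    # render_history_view() then evaluates iquery.all()[50:75]

Note that total_pages = (nitems / max_results) + 1 uses Python 2 integer division, so an exact multiple (say 100 records at 25 per page) reports five pages, the last of which is empty.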
diff --git a/build/lib/Bcfg2/Server/Reports/settings.py b/build/lib/Bcfg2/Server/Reports/settings.py
new file mode 100644
index 000000000..9efe38552
--- /dev/null
+++ b/build/lib/Bcfg2/Server/Reports/settings.py
@@ -0,0 +1,144 @@
+import django
+
+# Django settings for bcfg2 reports project.
+from ConfigParser import ConfigParser, NoSectionError, NoOptionError
+c = ConfigParser()
+c.read(['/etc/bcfg2.conf', '/etc/bcfg2-web.conf'])
+
+try:
+ dset = c.get('statistics', 'web_debug')
+except:
+ dset = 'false'
+
+if dset == "True":
+ DEBUG = True
+else:
+ DEBUG = False
+
+TEMPLATE_DEBUG = DEBUG
+
+ADMINS = (
+ ('Bcfg2', 'admin@email.address'),
+)
+
+MANAGERS = ADMINS
+
+DATABASE_ENGINE = c.get('statistics', 'database_engine')
+# 'postgresql', 'mysql', 'sqlite3' or 'ado_mssql'.
+if c.has_option('statistics', 'database_name'):
+ DATABASE_NAME = c.get('statistics', 'database_name')
+else:
+ DATABASE_NAME = ''
+# Or path to database file if using sqlite3.
+#<repository>/etc/brpt.sqlite is default path
+
+if DATABASE_ENGINE != 'sqlite3':
+ DATABASE_USER = c.get('statistics', 'database_user')
+ # Not used with sqlite3.
+ DATABASE_PASSWORD = c.get('statistics', 'database_password')
+ # Not used with sqlite3.
+ DATABASE_HOST = c.get('statistics', 'database_host')
+ # Set to empty string for localhost. Not used with sqlite3.
+ DATABASE_PORT = c.get('statistics', 'database_port')
+ # Set to empty string for default. Not used with sqlite3.
+if DATABASE_ENGINE == 'sqlite3' and DATABASE_NAME == '':
+ DATABASE_NAME = "%s/etc/brpt.sqlite" % c.get('server', 'repository')
+
+# Local time zone for this installation. All choices can be found here:
+# http://docs.djangoproject.com/en/dev/ref/settings/#time-zone
+try:
+ TIME_ZONE = c.get('statistics', 'time_zone')
+except:
+ if django.VERSION[0] == 1 and django.VERSION[1] > 2:
+ TIME_ZONE = None
+
+# Language code for this installation. All choices can be found here:
+# http://www.w3.org/TR/REC-html40/struct/dirlang.html#langcodes
+# http://blogs.law.harvard.edu/tech/stories/storyReader$15
+LANGUAGE_CODE = 'en-us'
+
+SITE_ID = 1
+
+# Absolute path to the directory that holds media.
+# Example: "/home/media/media.lawrence.com/"
+MEDIA_ROOT = ''
+
+# URL that handles the media served from MEDIA_ROOT.
+# Example: "http://media.lawrence.com"
+MEDIA_URL = '/site_media'
+if c.has_option('statistics', 'web_prefix'):
+ MEDIA_URL = c.get('statistics', 'web_prefix').rstrip('/') + MEDIA_URL
+
+# URL prefix for admin media -- CSS, JavaScript and images. Make sure to use a
+# trailing slash.
+# Examples: "http://foo.com/media/", "/media/".
+ADMIN_MEDIA_PREFIX = '/media/'
+
+# Make this unique, and don't share it with anybody.
+SECRET_KEY = 'eb5+y%oy-qx*2+62vv=gtnnxg1yig_odu0se5$h0hh#pc*lmo7'
+
+# List of callables that know how to import templates from various sources.
+TEMPLATE_LOADERS = (
+ 'django.template.loaders.filesystem.load_template_source',
+ 'django.template.loaders.app_directories.load_template_source',
+ 'django.template.loaders.eggs.load_template_source',
+)
+
+MIDDLEWARE_CLASSES = (
+ 'django.middleware.common.CommonMiddleware',
+ 'django.contrib.sessions.middleware.SessionMiddleware',
+ 'django.contrib.auth.middleware.AuthenticationMiddleware',
+ 'django.middleware.doc.XViewMiddleware',
+)
+
+ROOT_URLCONF = 'Bcfg2.Server.Reports.urls'
+
+# Authentication Settings
+# Use NIS authentication backend defined in backends.py
+AUTHENTICATION_BACKENDS = ('django.contrib.auth.backends.ModelBackend',
+ 'Bcfg2.Server.Reports.backends.NISBackend')
+# The NIS group authorized to log in to BCFG2's reporting system
+AUTHORIZED_GROUP = ''
+# create login url area:
+try:
+ import django.contrib.auth
+except ImportError:
+ print('Import of Django module failed. Is Django installed?')
+django.contrib.auth.LOGIN_URL = '/login'
+
+SESSION_EXPIRE_AT_BROWSER_CLOSE = True
+
+
+
+TEMPLATE_DIRS = (
+ # Put strings here, like "/home/html/django_templates".
+ # Always use forward slashes, even on Windows.
+ '/usr/share/python-support/python-django/django/contrib/admin/templates/',
+ 'Bcfg2.Server.Reports.reports'
+)
+
+if django.VERSION[0] == 1 and django.VERSION[1] < 2:
+ TEMPLATE_CONTEXT_PROCESSORS = (
+ 'django.core.context_processors.auth',
+ 'django.core.context_processors.debug',
+ 'django.core.context_processors.i18n',
+ 'django.core.context_processors.media',
+ 'django.core.context_processors.request'
+ )
+else:
+ TEMPLATE_CONTEXT_PROCESSORS = (
+ 'django.contrib.auth.context_processors.auth',
+ 'django.core.context_processors.debug',
+ 'django.core.context_processors.i18n',
+ 'django.core.context_processors.media',
+ 'django.core.context_processors.request'
+ )
+
+INSTALLED_APPS = (
+ 'django.contrib.auth',
+ 'django.contrib.contenttypes',
+ 'django.contrib.sessions',
+ 'django.contrib.sites',
+ 'django.contrib.admin',
+ 'Bcfg2.Server.Reports.reports'
+)
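Editor's note: all of the values above come from the [statistics] section of /etc/bcfg2.conf or /etc/bcfg2-web.conf (plus repository from the [server] section for the sqlite default path). A hedged sample showing only the options this module actually reads; the values are placeholders:

    [statistics]
    database_engine = sqlite3
    # database_name defaults to <repository>/etc/brpt.sqlite under sqlite3
    #database_name =
    #database_user =
    #database_password =
    #database_host =
    #database_port =
    #time_zone =
    #web_debug = True
    #web_prefix =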
diff --git a/build/lib/Bcfg2/Server/Reports/updatefix.py b/build/lib/Bcfg2/Server/Reports/updatefix.py
new file mode 100644
index 000000000..f8fca1f90
--- /dev/null
+++ b/build/lib/Bcfg2/Server/Reports/updatefix.py
@@ -0,0 +1,184 @@
+import Bcfg2.Server.Reports.settings
+
+from django.db import connection
+import django.core.management
+from Bcfg2.Server.Reports.reports.models import InternalDatabaseVersion, \
+ TYPE_BAD, TYPE_MODIFIED, TYPE_EXTRA
+
+import logging, traceback
+logger = logging.getLogger('Bcfg2.Server.Reports.UpdateFix')
+
+# all update functions should go here
+def _merge_database_table_entries():
+ cursor = connection.cursor()
+ insert_cursor = connection.cursor()
+ find_cursor = connection.cursor()
+ cursor.execute("""
+ Select name, kind from reports_bad
+ union
+ select name, kind from reports_modified
+ union
+ select name, kind from reports_extra
+ """)
+    # this fetch could be done better
+    entries_map = {}
+ for row in cursor.fetchall():
+ insert_cursor.execute("insert into reports_entries (name, kind) \
+ values (%s, %s)", (row[0], row[1]))
+ entries_map[(row[0], row[1])] = insert_cursor.lastrowid
+
+ cursor.execute("""
+ Select name, kind, reason_id, interaction_id, 1 from reports_bad
+ inner join reports_bad_interactions on reports_bad.id=reports_bad_interactions.bad_id
+ union
+ Select name, kind, reason_id, interaction_id, 2 from reports_modified
+ inner join reports_modified_interactions on reports_modified.id=reports_modified_interactions.modified_id
+ union
+ Select name, kind, reason_id, interaction_id, 3 from reports_extra
+ inner join reports_extra_interactions on reports_extra.id=reports_extra_interactions.extra_id
+ """)
+ for row in cursor.fetchall():
+ key = (row[0], row[1])
+ if entries_map.get(key, None):
+ entry_id = entries_map[key]
+ else:
+ find_cursor.execute("Select id from reports_entries where name=%s and kind=%s", key)
+ rowe = find_cursor.fetchone()
+ entry_id = rowe[0]
+ insert_cursor.execute("insert into reports_entries_interactions \
+ (entry_id, interaction_id, reason_id, type) values (%s, %s, %s, %s)", (entry_id, row[3], row[2], row[4]))
+
+def _interactions_constraint_or_idx():
+    '''sqlite doesn't support adding constraints via ALTER TABLE'''
+ cursor = connection.cursor()
+ try:
+ cursor.execute('alter table reports_interaction add constraint reports_interaction_20100601 unique (client_id,timestamp)')
+ except:
+ cursor.execute('create unique index reports_interaction_20100601 on reports_interaction (client_id,timestamp)')
+
+
+def _populate_interaction_entry_counts():
+    '''Populate the type totals for the interaction table'''
+ cursor = connection.cursor()
+ count_field = { TYPE_BAD: 'bad_entries',
+ TYPE_MODIFIED: 'modified_entries',
+ TYPE_EXTRA: 'extra_entries' }
+
+ for type in count_field.keys():
+ cursor.execute("select count(type), interaction_id "+
+ "from reports_entries_interactions where type = %s group by interaction_id" % type)
+ updates = []
+ for row in cursor.fetchall():
+ updates.append(row)
+ try:
+ cursor.executemany("update reports_interaction set " + count_field[type] + "=%s where id = %s", updates)
+ except Exception, e:
+ print e
+ cursor.close()
+
+
+# be sure to test your upgrade query before reflecting the change in the models
+# the list of functions and SQL commands to run should go here
+_fixes = [_merge_database_table_entries,
+ # this will remove unused tables
+ "drop table reports_bad;",
+ "drop table reports_bad_interactions;",
+ "drop table reports_extra;",
+ "drop table reports_extra_interactions;",
+ "drop table reports_modified;",
+ "drop table reports_modified_interactions;",
+ "drop table reports_repository;",
+ "drop table reports_metadata;",
+ "alter table reports_interaction add server varchar(256) not null default 'N/A';",
+ # fix revision data type to support $VCS hashes
+ "alter table reports_interaction add repo_rev_code varchar(64) default '';",
+ # Performance enhancements for large sites
+ 'alter table reports_interaction add column bad_entries integer not null default -1;',
+ 'alter table reports_interaction add column modified_entries integer not null default -1;',
+ 'alter table reports_interaction add column extra_entries integer not null default -1;',
+ _populate_interaction_entry_counts,
+ _interactions_constraint_or_idx,
+ 'alter table reports_reason add is_binary bool NOT NULL default False;',
+]
+
+# this will calculate the last possible version of the database
+lastversion = len(_fixes)
+
+def rollupdate(current_version):
+ """ function responsible to coordinates all the updates
+ need current_version as integer
+ """
+ ret = None
+ if current_version < lastversion:
+ for i in range(current_version, lastversion):
+ try:
+ if type(_fixes[i]) == str:
+ connection.cursor().execute(_fixes[i])
+ else:
+ _fixes[i]()
+ except:
+ logger.error("Failed to perform db update %s" % (_fixes[i]), exc_info=1)
+            # since the array starts at 0 but versions start at 1, we add 1 to the count
+ ret = InternalDatabaseVersion.objects.create(version=i+1)
+ return ret
+ else:
+ return None
+
+def dosync():
+ """Function to do the syncronisation for the models"""
+ # try to detect if it's a fresh new database
+ try:
+ cursor = connection.cursor()
+        # If this table is ever renamed, don't forget to update this check
+ cursor.execute("Select * from reports_client")
+ # if we get here with no error then the database has existing tables
+ fresh = False
+ except:
+ logger.debug("there was an error while detecting the freshness of the database")
+        # we should get here if the database is new
+ fresh = True
+
+    # ensure database connections are closed so that the management commands can do their job right
+ try:
+ cursor.close()
+ connection.close()
+ except:
+ # ignore any errors from missing/invalid dbs
+ pass
+ # Do the syncdb according to the django version
+ if "call_command" in dir(django.core.management):
+ # this is available since django 1.0 alpha.
+    # not yet tested for full functionality
+ django.core.management.call_command("syncdb", interactive=False, verbosity=0)
+ if fresh:
+ django.core.management.call_command("loaddata", 'initial_version.xml', verbosity=0)
+ elif "syncdb" in dir(django.core.management):
+        # this exists only for django 0.96.*
+ django.core.management.syncdb(interactive=False, verbosity=0)
+ if fresh:
+ logger.debug("loading the initial_version fixtures")
+ django.core.management.load_data(fixture_labels=['initial_version'], verbosity=0)
+ else:
+ logger.warning("Don't forget to run syncdb")
+
+
+def update_database():
+    '''Work out where we are in the database model revisions and update accordingly.'''
+    try:
+ logger.debug("Running upgrade of models to the new one")
+ dosync()
+ know_version = InternalDatabaseVersion.objects.order_by('-version')
+ if not know_version:
+ logger.debug("No version, creating initial version")
+ know_version = InternalDatabaseVersion.objects.create(version=0)
+ else:
+ know_version = know_version[0]
+ logger.debug("Presently at %s" % know_version)
+ if know_version.version < lastversion:
+ new_version = rollupdate(know_version.version)
+ if new_version:
+ logger.debug("upgraded to %s" % new_version)
+ except:
+ logger.error("Error while updating the database")
+ for x in traceback.format_exc().splitlines():
+ logger.error(x)
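Editor's note: the version bookkeeping above is positional: InternalDatabaseVersion records how many entries of _fixes have been applied, so version i+1 corresponds to _fixes[i], and appending a new string or callable to the list bumps the target schema version by one. A minimal sketch of driving an upgrade, e.g. from a maintenance script (the import path is as defined above):

    # Sync the models and apply any pending entries from _fixes.
    from Bcfg2.Server.Reports import updatefix
    updatefix.update_database()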
diff --git a/build/lib/Bcfg2/Server/Reports/urls.py b/build/lib/Bcfg2/Server/Reports/urls.py
new file mode 100644
index 000000000..d7ff1eee5
--- /dev/null
+++ b/build/lib/Bcfg2/Server/Reports/urls.py
@@ -0,0 +1,14 @@
+from django.conf.urls.defaults import *
+from django.http import HttpResponsePermanentRedirect
+
+handler500 = 'Bcfg2.Server.Reports.reports.views.server_error'
+
+urlpatterns = patterns('',
+ (r'^', include('Bcfg2.Server.Reports.reports.urls'))
+)
+
+#urlpatterns += patterns("django.views",
+# url(r"media/(?P<path>.*)$", "static.serve", {
+# "document_root": '/Users/tlaszlo/svn/bcfg2/reports/site_media/',
+# })
+#)
diff --git a/build/lib/Bcfg2/Server/Reports/utils.py b/build/lib/Bcfg2/Server/Reports/utils.py
new file mode 100644
index 000000000..b74f09e74
--- /dev/null
+++ b/build/lib/Bcfg2/Server/Reports/utils.py
@@ -0,0 +1,116 @@
+"""Helper functions for reports"""
+from Bcfg2.Server.Reports.reports.models import TYPE_CHOICES
+from django.conf.urls.defaults import *
+import re
+
+"""List of filters provided by filteredUrls"""
+filter_list = ('server', 'state')
+
+class BatchFetch(object):
+ """Fetch Django objects in smaller batches to save memory"""
+
+ def __init__(self, obj, step=10000):
+ self.count = 0
+ self.block_count = 0
+ self.obj = obj
+ self.data = None
+ self.step = step
+ self.max = obj.count()
+
+ def __iter__(self):
+ return self
+
+ def next(self):
+ """Return the next object from our array and fetch from the
+ database when needed"""
+ if self.block_count + self.count - self.step == self.max:
+ raise StopIteration
+ if self.block_count == 0 or self.count == self.step:
+ # Without list() this turns into LIMIT 1 OFFSET x queries
+ self.data = list(self.obj.all()[self.block_count: \
+ (self.block_count + self.step)])
+ self.block_count += self.step
+ self.count = 0
+ self.count += 1
+ return self.data[self.count - 1]
+
+def generateUrls(fn):
+ """
+ Parse url tuples and send to functions.
+
+ Decorator for url generators. Handles url tuple parsing
+ before the actual function is called.
+ """
+ def url_gen(*urls):
+ results = []
+ for url_tuple in urls:
+ if isinstance(url_tuple, (list, tuple)):
+ results += fn(*url_tuple)
+ else:
+ raise ValueError("Unable to handle compiled urls")
+ return results
+ return url_gen
+
+@generateUrls
+def paginatedUrls(pattern, view, kwargs=None, name=None):
+ """
+ Takes a group of url tuples and adds paginated urls.
+
+ Extends a url tuple to include paginated urls. Currently doesn't handle url() compiled
+ patterns.
+
+ """
+ results = [(pattern, view, kwargs, name)]
+ tail = ''
+ mtail = re.search('(/+\+?\\*?\??\$?)$', pattern)
+ if mtail:
+ tail = mtail.group(1)
+ pattern = pattern[:len(pattern) - len(tail)]
+ results += [(pattern + "/(?P<page_number>\d+)" + tail, view, kwargs)]
+ results += [(pattern + "/(?P<page_number>\d+)\|(?P<page_limit>\d+)" + tail, view, kwargs)]
+ if not kwargs:
+ kwargs = dict()
+ kwargs['page_limit'] = 0
+ results += [(pattern + "/?\|(?P<page_limit>all)" + tail, view, kwargs)]
+ return results
+
+@generateUrls
+def filteredUrls(pattern, view, kwargs=None, name=None):
+ """
+ Takes a url and adds filtered urls.
+
+ Extends a url tuple to include filtered view urls. Currently doesn't
+ handle url() compiled patterns.
+ """
+ results = [(pattern, view, kwargs, name)]
+ tail = ''
+ mtail = re.search('(/+\+?\\*?\??\$?)$', pattern)
+ if mtail:
+ tail = mtail.group(1)
+ pattern = pattern[:len(pattern) - len(tail)]
+ for filter in ('/state/(?P<state>\w+)',
+ '/server/(?P<server>[\w\-\.]+)',
+ '/server/(?P<server>[\w\-\.]+)/(?P<state>[A-Za-z]+)'):
+ results += [(pattern + filter + tail, view, kwargs)]
+ return results
+
+@generateUrls
+def timeviewUrls(pattern, view, kwargs=None, name=None):
+ """
+ Takes a url and adds timeview urls
+
+ Extends a url tuple to include filtered view urls. Currently doesn't
+ handle url() compiled patterns.
+ """
+ results = [(pattern, view, kwargs, name)]
+ tail = ''
+ mtail = re.search('(/+\+?\\*?\??\$?)$', pattern)
+ if mtail:
+ tail = mtail.group(1)
+ pattern = pattern[:len(pattern) - len(tail)]
+ for filter in ('/(?P<year>\d{4})-(?P<month>\d{2})-(?P<day>\d{2})/' + \
+ '(?P<hour>\d\d)-(?P<minute>\d\d)',
+ '/(?P<year>\d{4})-(?P<month>\d{2})-(?P<day>\d{2})'):
+ results += [(pattern + filter + tail, view, kwargs)]
+ return results
+
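Editor's note: as with the timeview expansion sketched earlier, paginatedUrls rewrites one tuple into several. An illustrative expansion for a single history pattern (reconstructed from the code above):

    # paginatedUrls((r'^history/?$', view, None, 'reports_history'))
    # returns, in order:
    #   (r'^history/?$', view, None, 'reports_history')
    #   (r'^history/(?P<page_number>\d+)/?$', view, None)
    #   (r'^history/(?P<page_number>\d+)\|(?P<page_limit>\d+)/?$', view, None)
    #   (r'^history/?\|(?P<page_limit>all)/?$', view, {'page_limit': 0})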
diff --git a/build/lib/Bcfg2/Server/Snapshots/__init__.py b/build/lib/Bcfg2/Server/Snapshots/__init__.py
new file mode 100644
index 000000000..6018377cb
--- /dev/null
+++ b/build/lib/Bcfg2/Server/Snapshots/__init__.py
@@ -0,0 +1,30 @@
+__all__ = ['models', 'db_from_config', 'setup_session']
+
+import sqlalchemy
+import sqlalchemy.orm
+import ConfigParser
+
+
+def db_from_config(cfile):
+ cp = ConfigParser.ConfigParser()
+ cp.read([cfile])
+ driver = cp.get('snapshots', 'driver')
+ if driver == 'sqlite':
+ path = cp.get('snapshots', 'database')
+ return 'sqlite:///%s' % path
+ elif driver in ['mysql', 'postgres']:
+ user = cp.get('snapshots', 'user')
+ password = cp.get('snapshots', 'password')
+ host = cp.get('snapshots', 'host')
+ db = cp.get('snapshots', 'database')
+ return '%s://%s:%s@%s/%s' % (driver, user, password, host, db)
+ else:
+ raise Exception, "unsupported db driver %s" % driver
+
+
+def setup_session(cfile, debug=False):
+ engine = sqlalchemy.create_engine(db_from_config(cfile),
+ echo=debug)
+ Session = sqlalchemy.orm.sessionmaker()
+ Session.configure(bind=engine)
+ return Session()
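Editor's note: a minimal usage sketch for the two helpers above; the config path is the standard one, and the driver/database values are placeholders read from the [snapshots] section:

    # With driver = sqlite and database = /path/to/snapshots.sqlite in the
    # [snapshots] section, db_from_config() yields
    # 'sqlite:////path/to/snapshots.sqlite'.
    from Bcfg2.Server.Snapshots import setup_session
    session = setup_session('/etc/bcfg2.conf', debug=False)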
diff --git a/build/lib/Bcfg2/Server/Snapshots/model.py b/build/lib/Bcfg2/Server/Snapshots/model.py
new file mode 100644
index 000000000..cbb14be79
--- /dev/null
+++ b/build/lib/Bcfg2/Server/Snapshots/model.py
@@ -0,0 +1,278 @@
+from sqlalchemy import Table, Column, Integer, Unicode, ForeignKey, Boolean, \
+ DateTime, UnicodeText, desc
+import datetime
+import sqlalchemy.exceptions
+from sqlalchemy.orm import relation, backref
+from sqlalchemy.ext.declarative import declarative_base
+
+
+class Uniquer(object):
+ force_rt = True
+
+ @classmethod
+ def by_value(cls, session, **kwargs):
+ if cls.force_rt:
+ try:
+ return session.query(cls).filter_by(**kwargs).one()
+ except sqlalchemy.exceptions.InvalidRequestError:
+ return cls(**kwargs)
+ else:
+ return cls(**kwargs)
+
+ @classmethod
+ def from_record(cls, session, data):
+ return cls.by_value(session, **data)
+
+Base = declarative_base()
+
+
+class Administrator(Uniquer, Base):
+ __tablename__ = 'administrator'
+ id = Column(Integer, primary_key=True)
+ name = Column(Unicode(20), unique=True)
+ email = Column(Unicode(64))
+
+admin_client = Table('admin_client', Base.metadata,
+ Column('admin_id', Integer, ForeignKey('administrator.id')),
+ Column('client_id', Integer, ForeignKey('client.id')))
+
+admin_group = Table('admin_group', Base.metadata,
+ Column('admin_id', Integer, ForeignKey('administrator.id')),
+ Column('group_id', Integer, ForeignKey('group.id')))
+
+
+class Client(Uniquer, Base):
+ __tablename__ = 'client'
+ id = Column(Integer, primary_key=True)
+ name = Column(Unicode(64), unique=True)
+ admins = relation("Administrator", secondary=admin_client,
+ backref='clients')
+ active = Column(Boolean, default=True)
+ online = Column(Boolean, default=True)
+ online_ts = Column(DateTime)
+
+
+class Group(Uniquer, Base):
+ __tablename__ = 'group'
+ id = Column(Integer, primary_key=True)
+ name = Column(Unicode(32), unique=True)
+ admins = relation("Administrator", secondary=admin_group,
+ backref='groups')
+
+
+class ConnectorKeyVal(Uniquer, Base):
+ __tablename__ = 'connkeyval'
+ id = Column(Integer, primary_key=True)
+ connector = Column(Unicode(16))
+ key = Column(Unicode(32))
+ value = Column(UnicodeText)
+
+meta_group = Table('meta_group', Base.metadata,
+ Column('metadata_id', Integer, ForeignKey('metadata.id')),
+ Column('group_id', Integer, ForeignKey('group.id')))
+
+meta_conn = Table('meta_conn', Base.metadata,
+ Column('metadata_id', Integer, ForeignKey('metadata.id')),
+ Column('connkeyval_id', Integer, ForeignKey('connkeyval.id')))
+
+
+class Metadata(Base):
+ __tablename__ = 'metadata'
+ id = Column(Integer, primary_key=True)
+ client_id = Column(Integer, ForeignKey('client.id'))
+ client = relation(Client)
+ groups = relation("Group", secondary=meta_group)
+ keyvals = relation(ConnectorKeyVal, secondary=meta_conn)
+ timestamp = Column(DateTime)
+
+ @classmethod
+ def from_metadata(cls, mysession, mymetadata):
+ client = Client.by_value(mysession, name=unicode(mymetadata.hostname))
+ m = cls(client=client)
+ for group in mymetadata.groups:
+ m.groups.append(Group.by_value(mysession, name=unicode(group)))
+ for connector in mymetadata.connectors:
+ data = getattr(mymetadata, connector)
+ if not isinstance(data, dict):
+ continue
+ for key, value in data.iteritems():
+ if not isinstance(value, str):
+ continue
+ m.keyvals.append(ConnectorKeyVal.by_value(mysession,
+ connector=unicode(connector),
+ key=unicode(key),
+ value=unicode(value)))
+ return m
+
+
+class Package(Base, Uniquer):
+ __tablename__ = 'package'
+ id = Column(Integer, primary_key=True)
+ name = Column(Unicode(24))
+ type = Column(Unicode(16))
+ version = Column(Unicode(16))
+ verification_status = Column(Boolean)
+
+
+class CorrespondenceType(object):
+ mtype = Package
+
+ @classmethod
+ def from_record(cls, mysession, record):
+ (mod, corr, name, s_dict, e_dict) = record
+ if not s_dict:
+ start = None
+ else:
+ start = cls.mtype.by_value(mysession, name=name, **s_dict)
+ if s_dict != e_dict:
+ end = cls.mtype.by_value(mysession, name=name, **e_dict)
+ else:
+ end = start
+ return cls(start=start, end=end, modified=mod, correct=corr)
+
+
+class PackageCorrespondence(Base, CorrespondenceType):
+ mtype = Package
+ __tablename__ = 'package_pair'
+ id = Column(Integer, primary_key=True)
+ start_id = Column(Integer, ForeignKey('package.id'))
+ start = relation(Package, primaryjoin=start_id == Package.id)
+ end_id = Column(Integer, ForeignKey('package.id'), nullable=True)
+ end = relation(Package, primaryjoin=end_id == Package.id)
+ modified = Column(Boolean)
+ correct = Column(Boolean)
+
+package_snap = Table('package_snap', Base.metadata,
+ Column('ppair_id', Integer, ForeignKey('package_pair.id')),
+ Column('snapshot_id', Integer, ForeignKey('snapshot.id')))
+
+
+class Service(Base, Uniquer):
+ __tablename__ = 'service'
+ id = Column(Integer, primary_key=True)
+ name = Column(Unicode(16))
+ type = Column(Unicode(12))
+ status = Column(Boolean)
+
+
+class ServiceCorrespondence(Base, CorrespondenceType):
+ mtype = Service
+ __tablename__ = 'service_pair'
+ id = Column(Integer, primary_key=True)
+ start_id = Column(Integer, ForeignKey('service.id'))
+ start = relation(Service, primaryjoin=start_id == Service.id)
+ end_id = Column(Integer, ForeignKey('service.id'), nullable=True)
+ end = relation(Service, primaryjoin=end_id == Service.id)
+ modified = Column(Boolean)
+ correct = Column(Boolean)
+
+service_snap = Table('service_snap', Base.metadata,
+ Column('spair_id', Integer, ForeignKey('service_pair.id')),
+ Column('snapshot_id', Integer, ForeignKey('snapshot.id')))
+
+
+class File(Base, Uniquer):
+ __tablename__ = 'file'
+ id = Column(Integer, primary_key=True)
+ name = Column(UnicodeText)
+ type = Column(Unicode(12))
+ owner = Column(Unicode(12))
+ group = Column(Unicode(16))
+ perms = Column(Integer(5))
+ contents = Column(UnicodeText)
+
+
+class FileCorrespondence(Base, CorrespondenceType):
+ mtype = File
+ __tablename__ = 'file_pair'
+ id = Column(Integer, primary_key=True)
+ start_id = Column(Integer, ForeignKey('file.id'))
+ start = relation(File, primaryjoin=start_id == File.id)
+ end_id = Column(Integer, ForeignKey('file.id'), nullable=True)
+ end = relation(File, primaryjoin=end_id == File.id)
+ modified = Column(Boolean)
+ correct = Column(Boolean)
+
+file_snap = Table('file_snap', Base.metadata,
+ Column('fpair_id', Integer, ForeignKey('file_pair.id')),
+ Column('snapshot_id', Integer, ForeignKey('snapshot.id')))
+
+extra_pkg_snap = Table('extra_pkg_snap', Base.metadata,
+ Column('package_id', Integer, ForeignKey('package.id')),
+ Column('snapshot_id', Integer, ForeignKey('snapshot.id')))
+
+extra_file_snap = Table('extra_file_snap', Base.metadata,
+ Column('file_id', Integer, ForeignKey('file.id')),
+ Column('snapshot_id', Integer, ForeignKey('snapshot.id')))
+
+extra_service_snap = Table('extra_service_snap', Base.metadata,
+ Column('service_id', Integer, ForeignKey('service.id')),
+ Column('snapshot_id', Integer, ForeignKey('snapshot.id')))
+
+
+class Action(Base):
+ __tablename__ = 'action'
+ id = Column(Integer, primary_key=True)
+ command = Column(UnicodeText)
+ return_code = Column(Integer)
+ output = Column(UnicodeText)
+
+action_snap = Table('action_snap', Base.metadata,
+ Column('action_id', Integer, ForeignKey('action.id')),
+ Column('snapshot_id', Integer, ForeignKey('snapshot.id')))
+
+
+class Snapshot(Base):
+ __tablename__ = 'snapshot'
+ id = Column(Integer, primary_key=True)
+ correct = Column(Boolean)
+ revision = Column(Unicode(36))
+ metadata_id = Column(Integer, ForeignKey('metadata.id'))
+ client_metadata = relation(Metadata, primaryjoin=metadata_id==Metadata.id)
+ timestamp = Column(DateTime, default=datetime.datetime.now)
+ client_id = Column(Integer, ForeignKey('client.id'))
+ client = relation(Client, backref=backref('snapshots'))
+ packages = relation(PackageCorrespondence, secondary=package_snap)
+ services = relation(ServiceCorrespondence, secondary=service_snap)
+ files = relation(FileCorrespondence, secondary=file_snap)
+ actions = relation(Action, secondary=action_snap)
+ extra_packages = relation(Package, secondary=extra_pkg_snap)
+ extra_services = relation(Service, secondary=extra_service_snap)
+ extra_files = relation(File, secondary=extra_file_snap)
+
+ c_dispatch = dict([('Package', ('packages', PackageCorrespondence)),
+ ('Service', ('services', ServiceCorrespondence)),
+ ('Path', ('files', FileCorrespondence))])
+ e_dispatch = dict([('Package', ('extra_packages', Package)),
+ ('Service', ('extra_services', Service)),
+ ('Path', ('extra_files', File))])
+
+ @classmethod
+ def from_data(cls, session, correct, revision, metadata, entries, extra):
+ dbm = Metadata.from_metadata(session, metadata)
+ snap = cls(correct=correct, client_metadata=dbm, revision=revision,
+ timestamp=datetime.datetime.now(), client=dbm.client)
+ for (dispatch, data) in [(cls.c_dispatch, entries),
+ (cls.e_dispatch, extra)]:
+ for key in dispatch:
+ dest, ecls = dispatch[key]
+ for edata in data[key].values():
+ getattr(snap, dest).append(ecls.from_record(session, edata))
+ return snap
+
+ @classmethod
+ def by_client(cls, session, clientname):
+ return session.query(cls).join(cls.client_metadata, Metadata.client).filter(Client.name==clientname)
+
+ @classmethod
+ def get_current(cls, session, clientname):
+ return session.query(Snapshot).join(Snapshot.client_metadata, Metadata.client).filter(Client.name==clientname).order_by(desc(Snapshot.timestamp)).first()
+
+ @classmethod
+ def get_by_date(cls, session, clientname, timestamp):
+ return session.query(Snapshot)\
+ .join(Snapshot.client_metadata, Metadata.client)\
+ .filter(Snapshot.timestamp < timestamp)\
+ .filter(Client.name==clientname)\
+ .order_by(desc(Snapshot.timestamp))\
+ .first()
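Editor's note: a hedged usage sketch for the query helpers above (the client name is a placeholder, and the session comes from setup_session in the package __init__ shown earlier):

    from Bcfg2.Server.Snapshots import setup_session
    from Bcfg2.Server.Snapshots.model import Snapshot

    session = setup_session('/etc/bcfg2.conf')
    snap = Snapshot.get_current(session, u'client.example.com')
    if snap is not None:
        print snap.timestamp, snap.correct, len(snap.extra_packages)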
diff --git a/build/lib/Bcfg2/Server/__init__.py b/build/lib/Bcfg2/Server/__init__.py
new file mode 100644
index 000000000..25f397565
--- /dev/null
+++ b/build/lib/Bcfg2/Server/__init__.py
@@ -0,0 +1,6 @@
+# $Id$
+"""This is the set of modules for Bcfg2.Server."""
+
+__revision__ = '$Revision$'
+__all__ = ["Admin", "Core", "FileMonitor", "Plugin", "Plugins",
+ "Hostbase", "Reports", "Snapshots"]
diff --git a/build/lib/Bcfg2/Statistics.py b/build/lib/Bcfg2/Statistics.py
new file mode 100644
index 000000000..b2240db98
--- /dev/null
+++ b/build/lib/Bcfg2/Statistics.py
@@ -0,0 +1,32 @@
+
+class Statistic(object):
+ def __init__(self, name, initial_value):
+ self.name = name
+ self.min = float(initial_value)
+ self.max = float(initial_value)
+ self.ave = float(initial_value)
+ self.count = 1
+
+ def add_value(self, value):
+ if value < self.min:
+ self.min = value
+ if value > self.max:
+ self.max = value
+ self.count += 1
+ self.ave = (((self.ave * (self.count - 1)) + value) / self.count)
+
+ def get_value(self):
+ return (self.name, (self.min, self.max, self.ave, self.count))
+
+class Statistics(object):
+ def __init__(self):
+ self.data = dict()
+
+ def add_value(self, name, value):
+ if name not in self.data:
+ self.data[name] = Statistic(name, value)
+ else:
+ self.data[name].add_value(value)
+
+ def display(self):
+ return dict([value.get_value() for value in self.data.values()])
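Editor's note: Statistic.add_value() maintains a running average without storing samples: ave_new = (ave_old * (count - 1) + value) / count. A short usage sketch with illustrative values:

    stats = Statistics()
    for v in (0.5, 1.5, 4.0):
        stats.add_value('GetConfig', v)
    print stats.display()   # {'GetConfig': (0.5, 4.0, 2.0, 3)}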
diff --git a/build/lib/Bcfg2/__init__.py b/build/lib/Bcfg2/__init__.py
new file mode 100644
index 000000000..d36c0a00a
--- /dev/null
+++ b/build/lib/Bcfg2/__init__.py
@@ -0,0 +1,4 @@
+"""Base modules definition."""
+__revision__ = '$Revision$'
+
+__all__ = ['Server', 'Client', 'Component', 'Logger', 'Options', 'Proxy', 'Statistics']