Diffstat (limited to 'src/lib/Server/Plugins/Packages')
-rw-r--r--  src/lib/Server/Plugins/Packages/Apt.py             139
-rw-r--r--  src/lib/Server/Plugins/Packages/Collection.py      343
-rw-r--r--  src/lib/Server/Plugins/Packages/Pac.py             120
-rw-r--r--  src/lib/Server/Plugins/Packages/PackagesConfig.py   15
-rw-r--r--  src/lib/Server/Plugins/Packages/PackagesSources.py  97
-rw-r--r--  src/lib/Server/Plugins/Packages/Source.py           278
-rw-r--r--  src/lib/Server/Plugins/Packages/Yum.py              688
-rw-r--r--  src/lib/Server/Plugins/Packages/__init__.py         263
8 files changed, 0 insertions, 1943 deletions
diff --git a/src/lib/Server/Plugins/Packages/Apt.py b/src/lib/Server/Plugins/Packages/Apt.py
deleted file mode 100644
index f76bf7fa1..000000000
--- a/src/lib/Server/Plugins/Packages/Apt.py
+++ /dev/null
@@ -1,139 +0,0 @@
-import re
-import gzip
-from Bcfg2.Server.Plugins.Packages.Collection import Collection
-from Bcfg2.Server.Plugins.Packages.Source import Source
-from Bcfg2.Bcfg2Py3k import cPickle, file
-
-class AptCollection(Collection):
- def get_group(self, group):
- self.logger.warning("Packages: Package groups are not supported by APT")
- return []
-
-class AptSource(Source):
- basegroups = ['apt', 'debian', 'ubuntu', 'nexenta']
- ptype = 'deb'
-
- def __init__(self, basepath, xsource, config):
- Source.__init__(self, basepath, xsource, config)
- self.pkgnames = set()
-
- self.url_map = [{'rawurl': self.rawurl, 'url': self.url,
- 'version': self.version,
- 'components': self.components, 'arches': self.arches}]
-
- def save_state(self):
- cache = file(self.cachefile, 'wb')
- cPickle.dump((self.pkgnames, self.deps, self.provides),
- cache, 2)
- cache.close()
-
- def load_state(self):
- data = file(self.cachefile)
- self.pkgnames, self.deps, self.provides = cPickle.load(data)
-
- def filter_unknown(self, unknown):
- filtered = set([u for u in unknown if u.startswith('choice')])
- unknown.difference_update(filtered)
-
- def get_urls(self):
- if not self.rawurl:
- rv = []
- for part in self.components:
- for arch in self.arches:
- rv.append("%sdists/%s/%s/binary-%s/Packages.gz" %
- (self.url, self.version, part, arch))
- return rv
- else:
- return ["%sPackages.gz" % self.rawurl]
- urls = property(get_urls)
-
- def read_files(self):
- bdeps = dict()
- bprov = dict()
- if self.recommended:
- depfnames = ['Depends', 'Pre-Depends', 'Recommends']
- else:
- depfnames = ['Depends', 'Pre-Depends']
- for fname in self.files:
- if not self.rawurl:
- barch = [x
- for x in fname.split('@')
- if x.startswith('binary-')][0][7:]
- else:
- # RawURL entries assume that they only have one <Arch></Arch>
- # element and that it is the architecture of the source.
- barch = self.arches[0]
- if barch not in bdeps:
- bdeps[barch] = dict()
- bprov[barch] = dict()
- try:
- reader = gzip.GzipFile(fname)
- except:
- self.logger.error("Packages: Failed to read file %s" % fname)
- raise
- for line in reader.readlines():
- words = str(line.strip()).split(':', 1)
- if words[0] == 'Package':
-                    pkgname = words[1].strip()
- self.pkgnames.add(pkgname)
- bdeps[barch][pkgname] = []
- elif words[0] in depfnames:
- vindex = 0
- for dep in words[1].split(','):
- if '|' in dep:
-                            cdeps = [re.sub(r'\s+', '',
-                                            re.sub(r'\(.*\)', '', cdep))
-                                     for cdep in dep.split('|')]
- dyn_dname = "choice-%s-%s-%s" % (pkgname,
- barch,
- vindex)
- vindex += 1
- bdeps[barch][pkgname].append(dyn_dname)
- bprov[barch][dyn_dname] = set(cdeps)
- else:
-                            raw_dep = re.sub(r'\(.*\)', '', dep)
-                            raw_dep = raw_dep.strip()
- bdeps[barch][pkgname].append(raw_dep)
- elif words[0] == 'Provides':
- for pkg in words[1].split(','):
-                        dname = pkg.strip()
- if dname not in bprov[barch]:
- bprov[barch][dname] = set()
- bprov[barch][dname].add(pkgname)
-
- self.deps['global'] = dict()
- self.provides['global'] = dict()
- for barch in bdeps:
- self.deps[barch] = dict()
- self.provides[barch] = dict()
- for pkgname in self.pkgnames:
- pset = set()
- for barch in bdeps:
- if pkgname not in bdeps[barch]:
- bdeps[barch][pkgname] = []
- pset.add(tuple(bdeps[barch][pkgname]))
- if len(pset) == 1:
- self.deps['global'][pkgname] = pset.pop()
- else:
- for barch in bdeps:
- self.deps[barch][pkgname] = bdeps[barch][pkgname]
- provided = set()
- for bprovided in list(bprov.values()):
- provided.update(set(bprovided))
- for prov in provided:
- prset = set()
- for barch in bprov:
- if prov not in bprov[barch]:
- continue
- prset.add(tuple(bprov[barch].get(prov, ())))
- if len(prset) == 1:
- self.provides['global'][prov] = prset.pop()
- else:
- for barch in bprov:
- self.provides[barch][prov] = bprov[barch].get(prov, ())
- self.save_state()
-
- def is_package(self, _, pkg):
- return (pkg in self.pkgnames and
- pkg not in self.blacklist and
- (len(self.whitelist) == 0 or pkg in self.whitelist))
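-
-# A standalone sketch (reusing this module's re import) of how
-# read_files() handles an APT alternation like "exim4 | postfix": the
-# whole alternation becomes a synthetic "choice-*" virtual package
-# provided by each alternative. The stanza values are illustrative:
-def _parse_depends_sketch(pkgname, barch, value, bdeps, bprov):
-    vindex = 0
-    for dep in value.split(','):
-        if '|' in dep:
-            # each alternative becomes a provider of one dynamic name
-            cdeps = [re.sub(r'\s+', '', re.sub(r'\(.*\)', '', cdep))
-                     for cdep in dep.split('|')]
-            dyn_dname = "choice-%s-%s-%s" % (pkgname, barch, vindex)
-            vindex += 1
-            bdeps[barch][pkgname].append(dyn_dname)
-            bprov[barch][dyn_dname] = set(cdeps)
-        else:
-            bdeps[barch][pkgname].append(re.sub(r'\(.*\)', '', dep).strip())
-
-# bdeps = {'amd64': {'mutt': []}}; bprov = {'amd64': {}}
-# _parse_depends_sketch('mutt', 'amd64', 'exim4 | postfix, libc6 (>= 2.7)',
-#                       bdeps, bprov)
-# bdeps['amd64']['mutt'] -> ['choice-mutt-amd64-0', 'libc6']
-# bprov['amd64']['choice-mutt-amd64-0'] -> set(['exim4', 'postfix'])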
diff --git a/src/lib/Server/Plugins/Packages/Collection.py b/src/lib/Server/Plugins/Packages/Collection.py
deleted file mode 100644
index 32eeda1ec..000000000
--- a/src/lib/Server/Plugins/Packages/Collection.py
+++ /dev/null
@@ -1,343 +0,0 @@
-import copy
-import logging
-import Bcfg2.Server.Plugin
-
-logger = logging.getLogger(__name__)
-
-try:
- from hashlib import md5
-except ImportError:
- from md5 import md5
-
-# we have to cache Collection objects so that calling Packages.Refresh
-# or .Reload can tell the collection objects to clean up their cache,
-# but we don't actually use the cache to return a Collection object
-# when one is requested, because that prevents new machines from
-# working, since a Collection object gets created by
-# get_additional_data(), which is called for all clients at server
-# startup. (It would also prevent machines that change groups from
-# working properly; e.g., if you reinstall a machine with a new OS,
-# then returning a cached Collection object would give the wrong
-# sources to that client.)
-collections = dict()
-
-class Collection(Bcfg2.Server.Plugin.Debuggable):
- def __init__(self, metadata, sources, basepath, debug=False):
- """ don't call this directly; use the factory function """
- Bcfg2.Server.Plugin.Debuggable.__init__(self)
- self.debug_flag = debug
- self.metadata = metadata
- self.sources = sources
- self.basepath = basepath
- self.virt_pkgs = dict()
-
- try:
- self.config = sources[0].config
- self.cachepath = sources[0].basepath
- self.ptype = sources[0].ptype
- except IndexError:
- self.config = None
- self.cachepath = None
- self.ptype = "unknown"
-
- self.cachefile = None
-
- @property
- def cachekey(self):
- return md5(self.get_config()).hexdigest()
-
- def get_config(self):
- self.logger.error("Packages: Cannot generate config for host with "
- "multiple source types (%s)" % self.metadata.hostname)
- return ""
-
- def get_relevant_groups(self):
- groups = []
- for source in self.sources:
- groups.extend(source.get_relevant_groups(self.metadata))
- return sorted(list(set(groups)))
-
- @property
- def basegroups(self):
- groups = set()
- for source in self.sources:
- groups.update(source.basegroups)
- return list(groups)
-
- @property
- def cachefiles(self):
- cachefiles = set([self.cachefile])
- for source in self.sources:
- cachefiles.add(source.cachefile)
- return list(cachefiles)
-
- def get_group(self, group, ptype=None):
- for source in self.sources:
- pkgs = source.get_group(self.metadata, group, ptype=ptype)
- if pkgs:
- return pkgs
- self.logger.warning("Packages: '%s' is not a valid group" % group)
- return []
-
- def is_package(self, package):
- for source in self.sources:
- if source.is_package(self.metadata, package):
- return True
- return False
-
- def is_virtual_package(self, package):
- for source in self.sources:
- if source.is_virtual_package(self.metadata, package):
- return True
- return False
-
- def get_deps(self, package):
- for source in self.sources:
- if source.is_package(self.metadata, package):
- return source.get_deps(self.metadata, package)
- return []
-
- def get_provides(self, package):
- for source in self.sources:
- providers = source.get_provides(self.metadata, package)
- if providers:
- return providers
- return []
-
- def get_vpkgs(self):
- """ get virtual packages """
- vpkgs = dict()
- for source in self.sources:
- s_vpkgs = source.get_vpkgs(self.metadata)
- for name, prov_set in list(s_vpkgs.items()):
- if name not in vpkgs:
- vpkgs[name] = set(prov_set)
- else:
- vpkgs[name].update(prov_set)
- return vpkgs
-
- def filter_unknown(self, unknown):
- for source in self.sources:
- source.filter_unknown(unknown)
-
-    def magic_groups_match(self):
-        for source in self.sources:
-            if source.magic_groups_match(self.metadata):
-                return True
-        return False
-
- def build_extra_structures(self, independent):
- pass
-
- def get_additional_data(self):
- sdata = []
- for source in self.sources:
- sdata.extend(copy.deepcopy(source.url_map))
- return sdata
-
- def setup_data(self, force_update=False):
- """ do any collection-level data setup tasks """
- pass
-
- def complete(self, packagelist):
- '''Build the transitive closure of all package dependencies
-
- Arguments:
-        packagelist - set of package names
- returns => (set(packages), set(unsatisfied requirements))
- '''
-
- # setup vpkg cache
- pgrps = tuple(self.get_relevant_groups())
- if pgrps not in self.virt_pkgs:
- self.virt_pkgs[pgrps] = self.get_vpkgs()
- vpkg_cache = self.virt_pkgs[pgrps]
-
- # unclassified is set of unsatisfied requirements (may be pkg
- # for vpkg)
- unclassified = set(packagelist)
- vpkgs = set()
- both = set()
- pkgs = set(packagelist)
-
- packages = set()
- examined = set()
- unknown = set()
-
- final_pass = False
- really_done = False
- # do while unclassified or vpkgs or both or pkgs
- while unclassified or pkgs or both or final_pass:
- if really_done:
- break
- if len(unclassified) + len(pkgs) + len(both) == 0:
- # one more pass then exit
- really_done = True
-
- while unclassified:
- current = unclassified.pop()
- examined.add(current)
- is_pkg = False
- if self.is_package(current):
- is_pkg = True
-
- is_vpkg = current in vpkg_cache
-
- if is_pkg and is_vpkg:
- both.add(current)
- elif is_pkg and not is_vpkg:
- pkgs.add(current)
- elif is_vpkg and not is_pkg:
- vpkgs.add(current)
- elif not is_vpkg and not is_pkg:
- unknown.add(current)
-
- while pkgs:
- # direct packages; current can be added, and all deps
- # should be resolved
- current = pkgs.pop()
- self.debug_log("Packages: handling package requirement %s" %
- current)
- packages.add(current)
- deps = self.get_deps(current)
- newdeps = set(deps).difference(examined)
- if newdeps:
- self.debug_log("Packages: Package %s added requirements %s"
- % (current, newdeps))
- unclassified.update(newdeps)
-
- satisfied_vpkgs = set()
- for current in vpkgs:
- # virtual dependencies, satisfied if one of N in the
- # config, or can be forced if only one provider
- if len(vpkg_cache[current]) == 1:
- self.debug_log("Packages: requirement %s satisfied by %s" %
- (current, vpkg_cache[current]))
- unclassified.update(vpkg_cache[current].difference(examined))
- satisfied_vpkgs.add(current)
- else:
- satisfiers = [item for item in vpkg_cache[current]
- if item in packages]
- self.debug_log("Packages: requirement %s satisfied by %s" %
- (current, satisfiers))
- satisfied_vpkgs.add(current)
- vpkgs.difference_update(satisfied_vpkgs)
-
- satisfied_both = set()
- for current in both:
- # packages that are both have virtual providers as
- # well as a package with that name. allow use of virt
- # through explicit specification, then fall back to
- # forcing current on last pass
- satisfiers = [item for item in vpkg_cache[current]
- if item in packages]
- if satisfiers:
- self.debug_log("Packages: requirement %s satisfied by %s" %
- (current, satisfiers))
- satisfied_both.add(current)
- elif current in packagelist or final_pass:
- pkgs.add(current)
- satisfied_both.add(current)
- both.difference_update(satisfied_both)
-
- if len(unclassified) + len(pkgs) == 0:
- final_pass = True
- else:
- final_pass = False
-
- self.filter_unknown(unknown)
-
- return packages, unknown
-
- def __len__(self):
- return len(self.sources)
-
- def __getitem__(self, item):
- return self.sources[item]
-
- def __setitem__(self, item, value):
- self.sources[item] = value
-
- def __delitem__(self, item):
- del self.sources[item]
-
- def append(self, item):
- self.sources.append(item)
-
-    def count(self, item):
-        return self.sources.count(item)
-
- def index(self, item):
- return self.sources.index(item)
-
- def extend(self, items):
- self.sources.extend(items)
-
- def insert(self, index, item):
- self.sources.insert(index, item)
-
-    def pop(self, index=-1):
-        return self.sources.pop(index)
-
- def remove(self, item):
- self.sources.remove(item)
-
- def sort(self, cmp=None, key=None, reverse=False):
- self.sources.sort(cmp, key, reverse)
-
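-# A minimal sketch of the fixed-point loop complete() runs above, with
-# plain dicts standing in for the source-backed package and virtual
-# package lookups; it omits the multi-provider and "both" handling, and
-# the package names are illustrative:
-def _complete_sketch(packagelist, deps, vpkgs):
-    packages, unknown = set(), set()
-    examined, work = set(), set(packagelist)
-    while work:
-        current = work.pop()
-        if current in examined:
-            continue
-        examined.add(current)
-        if current in deps:                       # a concrete package
-            packages.add(current)
-            work.update(deps[current])
-        elif current in vpkgs and len(vpkgs[current]) == 1:
-            work.update(vpkgs[current])           # sole provider is forced
-        elif current not in vpkgs:
-            unknown.add(current)
-    return packages, unknown
-
-# _complete_sketch(['mta'], deps={'postfix': ['libc'], 'libc': []},
-#                  vpkgs={'mta': set(['postfix'])})
-# -> (set(['postfix', 'libc']), set())
-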
-def clear_cache():
- global collections
- collections = dict()
-
-def factory(metadata, sources, basepath, debug=False):
- global collections
-
- if not sources.loaded:
- # if sources.xml has not received a FAM event yet, defer;
- # instantiate a dummy Collection object
- return Collection(metadata, [], basepath)
-
- sclasses = set()
- relevant = list()
-
- for source in sources:
- if source.applies(metadata):
- relevant.append(source)
- sclasses.update([source.__class__])
-
-    if len(sclasses) > 1:
-        logger.warning("Packages: Multiple source types found for %s: %s" %
-                       (metadata.hostname,
-                        ",".join([s.__name__ for s in sclasses])))
- cclass = Collection
- elif len(sclasses) == 0:
- # you'd think this should be a warning, but it happens all the
- # freaking time if you have a) machines in your clients.xml
- # that do not have the proper groups set up yet (e.g., if you
- # have multiple Bcfg2 servers and Packages-relevant groups set
- # by probes); and b) templates that query all or multiple
- # machines (e.g., with metadata.query.all_clients())
- if debug:
- logger.error("Packages: No sources found for %s" %
- metadata.hostname)
- cclass = Collection
- else:
- stype = sclasses.pop().__name__.replace("Source", "")
- try:
- module = \
- getattr(__import__("Bcfg2.Server.Plugins.Packages.%s" %
- stype.title()).Server.Plugins.Packages,
- stype.title())
- cclass = getattr(module, "%sCollection" % stype.title())
- except ImportError:
- logger.error("Packages: Unknown source type %s" % stype)
- except AttributeError:
- logger.warning("Packages: No collection class found for %s sources"
- % stype)
-
- if debug:
- logger.error("Packages: Using %s for Collection of sources for %s" %
- (cclass.__name__, metadata.hostname))
-
- collection = cclass(metadata, relevant, basepath, debug=debug)
- collections[metadata.hostname] = collection
- return collection
-
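-# The dynamic class lookup factory() performs, isolated as a sketch:
-# given a source class name like "YumSource" it imports
-# Bcfg2.Server.Plugins.Packages.Yum and returns its YumCollection
-# class. Error handling is elided; the example name is illustrative:
-def _collection_class_sketch(source_class_name):
-    stype = source_class_name.replace("Source", "")   # e.g. "Yum"
-    module = getattr(__import__("Bcfg2.Server.Plugins.Packages.%s" %
-                                stype.title()).Server.Plugins.Packages,
-                     stype.title())
-    return getattr(module, "%sCollection" % stype.title())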
diff --git a/src/lib/Server/Plugins/Packages/Pac.py b/src/lib/Server/Plugins/Packages/Pac.py
deleted file mode 100644
index 9db6b0535..000000000
--- a/src/lib/Server/Plugins/Packages/Pac.py
+++ /dev/null
@@ -1,120 +0,0 @@
-import tarfile
-from Bcfg2.Bcfg2Py3k import cPickle, file
-from Bcfg2.Server.Plugins.Packages.Collection import Collection
-from Bcfg2.Server.Plugins.Packages.Source import Source
-
-class PacCollection(Collection):
- def get_group(self, group):
- self.logger.warning("Packages: Package groups are not supported by APT")
- return []
-
-class PacSource(Source):
- basegroups = ['arch', 'parabola']
- ptype = 'pacman'
-
- def __init__(self, basepath, xsource, config):
- Source.__init__(self, basepath, xsource, config)
- self.pkgnames = set()
-
- self.url_map = [{'rawurl': self.rawurl, 'url': self.url,
- 'version': self.version,
- 'components': self.components, 'arches': self.arches}]
-
- def save_state(self):
- cache = file(self.cachefile, 'wb')
- cPickle.dump((self.pkgnames, self.deps, self.provides),
- cache, 2)
- cache.close()
-
- def load_state(self):
- data = file(self.cachefile)
- self.pkgnames, self.deps, self.provides = cPickle.load(data)
-
- def filter_unknown(self, unknown):
- filtered = set([u for u in unknown if u.startswith('choice')])
- unknown.difference_update(filtered)
-
- def get_urls(self):
- if not self.rawurl:
- rv = []
- for part in self.components:
- for arch in self.arches:
- rv.append("%s%s/os/%s/%s.db.tar.gz" %
- (self.url, part, arch, part))
- return rv
- else:
- raise Exception("PacSource : RAWUrl not supported (yet)")
- urls = property(get_urls)
-
- def read_files(self):
- bdeps = dict()
- bprov = dict()
-
- if self.recommended:
- depfnames = ['Depends', 'Pre-Depends', 'Recommends']
- else:
- depfnames = ['Depends', 'Pre-Depends']
-
- for fname in self.files:
- if not self.rawurl:
- barch = [x for x in fname.split('@') if x in self.arches][0]
- else:
- # RawURL entries assume that they only have one <Arch></Arch>
- # element and that it is the architecture of the source.
- barch = self.arches[0]
-
- if barch not in bdeps:
- bdeps[barch] = dict()
- bprov[barch] = dict()
-            try:
-                self.debug_log("Packages: try to read %s" % fname)
-                # tarfile handles the gzip compression transparently
-                tar = tarfile.open(fname, "r")
-            except:
-                self.logger.error("Packages: Failed to read file %s" % fname)
-                raise
-
- for tarinfo in tar:
- if tarinfo.isdir():
- self.pkgnames.add(tarinfo.name.rsplit("-", 2)[0])
- self.debug_log("Packages: added %s" %
- tarinfo.name.rsplit("-", 2)[0])
- tar.close()
-
- self.deps['global'] = dict()
- self.provides['global'] = dict()
- for barch in bdeps:
- self.deps[barch] = dict()
- self.provides[barch] = dict()
- for pkgname in self.pkgnames:
- pset = set()
- for barch in bdeps:
- if pkgname not in bdeps[barch]:
- bdeps[barch][pkgname] = []
- pset.add(tuple(bdeps[barch][pkgname]))
- if len(pset) == 1:
- self.deps['global'][pkgname] = pset.pop()
- else:
- for barch in bdeps:
- self.deps[barch][pkgname] = bdeps[barch][pkgname]
- provided = set()
- for bprovided in list(bprov.values()):
- provided.update(set(bprovided))
- for prov in provided:
- prset = set()
- for barch in bprov:
- if prov not in bprov[barch]:
- continue
- prset.add(tuple(bprov[barch].get(prov, ())))
- if len(prset) == 1:
- self.provides['global'][prov] = prset.pop()
- else:
- for barch in bprov:
- self.provides[barch][prov] = bprov[barch].get(prov, ())
- self.save_state()
-
- def is_package(self, _, pkg):
- return (pkg in self.pkgnames and
- pkg not in self.blacklist and
- (len(self.whitelist) == 0 or pkg in self.whitelist))
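-
-# A sketch of the name recovery used in read_files() above: entries in
-# a pacman database tarball are directories named
-# "<pkgname>-<version>-<release>", so rsplit("-", 2) keeps dashes that
-# belong to the package name itself. The example entry is illustrative:
-def _entry_to_pkgname_sketch(entry):
-    return entry.rsplit("-", 2)[0]
-
-# _entry_to_pkgname_sketch("python2-lxml-2.3.1-1") -> "python2-lxml"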
diff --git a/src/lib/Server/Plugins/Packages/PackagesConfig.py b/src/lib/Server/Plugins/Packages/PackagesConfig.py
deleted file mode 100644
index 7950f15e6..000000000
--- a/src/lib/Server/Plugins/Packages/PackagesConfig.py
+++ /dev/null
@@ -1,15 +0,0 @@
-import Bcfg2.Server.Plugin
-
-class PackagesConfig(Bcfg2.Server.Plugin.SimpleConfig):
- _required = False
-
- def Index(self):
- """ Build local data structures """
- Bcfg2.Server.Plugin.SimpleConfig.Index(self)
-
- if hasattr(self.plugin, "sources") and self.plugin.sources.loaded:
- # only reload Packages plugin if sources have been loaded.
- # otherwise, this is getting called on server startup, and
- # we have to wait until all sources have been indexed
- # before we can call Packages.Reload()
- self.plugin.Reload()
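-
-# An illustrative Packages/packages.conf fragment covering the options
-# this plugin reads via self.config elsewhere; the values are examples,
-# not shipped defaults:
-#
-#     [global]
-#     resolver = enabled
-#     metadata = enabled
-#     magic_groups = yes
-#
-#     [yum]
-#     use_yum_libraries = 1
-#     helper = /usr/sbin/bcfg2-yum-helper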
diff --git a/src/lib/Server/Plugins/Packages/PackagesSources.py b/src/lib/Server/Plugins/Packages/PackagesSources.py
deleted file mode 100644
index da79c00e9..000000000
--- a/src/lib/Server/Plugins/Packages/PackagesSources.py
+++ /dev/null
@@ -1,97 +0,0 @@
-import os
-import sys
-import lxml.etree
-import Bcfg2.Server.Plugin
-from Bcfg2.Server.Plugins.Packages.Source import SourceInitError
-
-class PackagesSources(Bcfg2.Server.Plugin.SingleXMLFileBacked,
- Bcfg2.Server.Plugin.StructFile,
- Bcfg2.Server.Plugin.Debuggable):
- __identifier__ = None
-
- def __init__(self, filename, cachepath, fam, packages, config):
- Bcfg2.Server.Plugin.Debuggable.__init__(self)
- try:
- Bcfg2.Server.Plugin.SingleXMLFileBacked.__init__(self,
- filename,
- fam)
- except OSError:
- err = sys.exc_info()[1]
- msg = "Packages: Failed to read configuration file: %s" % err
- if not os.path.exists(self.name):
- msg += " Have you created it?"
- self.logger.error(msg)
- raise Bcfg2.Server.Plugin.PluginInitError(msg)
- Bcfg2.Server.Plugin.StructFile.__init__(self, filename)
- self.cachepath = cachepath
- self.config = config
- if not os.path.exists(self.cachepath):
- # create cache directory if needed
- try:
- os.makedirs(self.cachepath)
- except OSError:
- err = sys.exc_info()[1]
- self.logger.error("Could not create Packages cache at %s: %s" %
- (self.cachepath, err))
- self.pkg_obj = packages
- self.parsed = set()
- self.loaded = False
-
- def toggle_debug(self):
- Bcfg2.Server.Plugin.Debuggable.toggle_debug(self)
- for source in self.entries:
- source.toggle_debug()
-
- def HandleEvent(self, event=None):
- Bcfg2.Server.Plugin.SingleXMLFileBacked.HandleEvent(self, event=event)
- if event.filename != self.name:
- self.parsed.add(os.path.basename(event.filename))
-
- if sorted(list(self.parsed)) == sorted(self.extras):
- self.logger.info("Reloading Packages plugin")
- self.pkg_obj.Reload()
- self.loaded = True
-
- def Index(self):
- Bcfg2.Server.Plugin.SingleXMLFileBacked.Index(self)
- self.entries = []
- for xsource in self.xdata.findall('.//Source'):
- source = self.source_from_xml(xsource)
- if source is not None:
- self.entries.append(source)
-
- def source_from_xml(self, xsource):
- """ create a *Source object from its XML representation in
- sources.xml """
- stype = xsource.get("type")
- if stype is None:
- self.logger.error("Packages: No type specified for source, "
- "skipping")
- return None
-
- try:
- module = getattr(__import__("Bcfg2.Server.Plugins.Packages.%s" %
- stype.title()).Server.Plugins.Packages,
- stype.title())
- cls = getattr(module, "%sSource" % stype.title())
- except (ImportError, AttributeError):
- self.logger.error("Packages: Unknown source type %s" % stype)
- return None
-
- try:
- source = cls(self.cachepath, xsource, self.config)
- except SourceInitError:
- err = sys.exc_info()[1]
- self.logger.error("Packages: %s" % err)
- source = None
-
- return source
-
- def __getitem__(self, key):
- return self.entries[key]
-
- def __repr__(self):
- return "PackagesSources: %s" % repr(self.entries)
-
- def __str__(self):
- return "PackagesSources: %s" % str(self.entries)
diff --git a/src/lib/Server/Plugins/Packages/Source.py b/src/lib/Server/Plugins/Packages/Source.py
deleted file mode 100644
index 1dfeecc40..000000000
--- a/src/lib/Server/Plugins/Packages/Source.py
+++ /dev/null
@@ -1,278 +0,0 @@
-import os
-import re
-import sys
-import base64
-import Bcfg2.Server.Plugin
-from Bcfg2.Bcfg2Py3k import HTTPError, HTTPBasicAuthHandler, \
- HTTPPasswordMgrWithDefaultRealm, install_opener, build_opener, \
- urlopen, file, cPickle
-
-try:
- from hashlib import md5
-except ImportError:
- from md5 import md5
-
-def fetch_url(url):
- if '@' in url:
-        mobj = re.match(r'(\w+://)([^:]+):([^@]+)@(.*)$', url)
- if not mobj:
- raise ValueError
- user = mobj.group(2)
- passwd = mobj.group(3)
- url = mobj.group(1) + mobj.group(4)
- auth = HTTPBasicAuthHandler(HTTPPasswordMgrWithDefaultRealm())
- auth.add_password(None, url, user, passwd)
- install_opener(build_opener(auth))
- return urlopen(url).read()
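-
-# fetch_url() strips inline credentials and registers them with a
-# basic-auth opener, so both of the calls sketched below work; the host
-# and credentials are illustrative:
-#
-#     data = fetch_url("http://mirror.example.com/Packages.gz")
-#     data = fetch_url("http://user:secret@mirror.example.com/Packages.gz")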
-
-
-class SourceInitError(Exception):
- pass
-
-
-class Source(Bcfg2.Server.Plugin.Debuggable):
- mrepo_re = re.compile(r'/RPMS\.([^/]+)')
- pulprepo_re = re.compile(r'pulp/repos/([^/]+)')
- genericrepo_re = re.compile(r'https?://[^/]+/(.+?)/?$')
- basegroups = []
-
- def __init__(self, basepath, xsource, config):
- Bcfg2.Server.Plugin.Debuggable.__init__(self)
- self.basepath = basepath
- self.xsource = xsource
- self.config = config
-
-        # prefer the legacy <Version> child element, falling back to
-        # the version attribute so self.version is always set
-        try:
-            self.version = xsource.find('Version').text
-        except AttributeError:
-            self.version = xsource.get('version', '')
-
- for key, tag in [('components', 'Component'), ('arches', 'Arch'),
- ('blacklist', 'Blacklist'),
- ('whitelist', 'Whitelist')]:
- self.__dict__[key] = [item.text for item in xsource.findall(tag)]
-
- self.gpgkeys = [el.text for el in xsource.findall("GPGKey")]
-
- self.recommended = xsource.get('recommended', 'false').lower() == 'true'
-
- self.rawurl = xsource.get('rawurl', '')
- if self.rawurl and not self.rawurl.endswith("/"):
- self.rawurl += "/"
- self.url = xsource.get('url', '')
- if self.url and not self.url.endswith("/"):
- self.url += "/"
-
- # build the set of conditions to see if this source applies to
- # a given set of metadata
- self.conditions = []
- self.groups = [] # provided for some limited backwards compat
- for el in xsource.iterancestors():
- if el.tag == "Group":
- if el.get("negate", "false").lower() == "true":
- self.conditions.append(lambda m, el=el:
- el.get("name") not in m.groups)
- else:
- self.groups.append(el.get("name"))
- self.conditions.append(lambda m, el=el:
- el.get("name") in m.groups)
- elif el.tag == "Client":
- if el.get("negate", "false").lower() == "true":
- self.conditions.append(lambda m, el=el:
- el.get("name") != m.hostname)
- else:
- self.conditions.append(lambda m, el=el:
- el.get("name") == m.hostname)
-
- self.deps = dict()
- self.provides = dict()
-
- self.cachefile = os.path.join(self.basepath,
- "cache-%s" % self.cachekey)
- self.url_map = []
-
- @property
- def cachekey(self):
- return md5(cPickle.dumps([self.version, self.components, self.url,
- self.rawurl, self.arches])).hexdigest()
-
- def get_relevant_groups(self, metadata):
- return sorted(list(set([g for g in metadata.groups
- if (g in self.basegroups or
- g in self.groups or
- g in self.arches)])))
-
- def load_state(self):
- pass
-
- def setup_data(self, force_update=False):
- should_read = True
- should_download = False
- if os.path.exists(self.cachefile):
- try:
- self.load_state()
- should_read = False
- except:
- self.logger.error("Packages: Cachefile %s load failed; "
- "falling back to file read" % self.cachefile)
- if should_read:
- try:
- self.read_files()
- except:
- self.logger.error("Packages: File read failed; "
- "falling back to file download")
- should_download = True
-
- if should_download or force_update:
- try:
- self.update()
- self.read_files()
- except:
- self.logger.error("Packages: Failed to load data for Source "
- "of %s. Some Packages will be missing." %
- self.urls)
-
- def get_repo_name(self, url_map):
- # try to find a sensible name for a repo
- if url_map['component']:
- rname = url_map['component']
- else:
- name = None
-            for repo_re in (self.mrepo_re,
-                            self.pulprepo_re,
-                            self.genericrepo_re):
-                match = repo_re.search(url_map['url'])
-                if match:
-                    name = match.group(1)
-                    break
- if name is None:
- # couldn't figure out the name from the URL or URL map
-            # (which probably means it's a screwy URL), so we just
- # generate a random one
- name = base64.b64encode(os.urandom(16))[:-2]
- rname = "%s-%s" % (self.groups[0], name)
- # see yum/__init__.py in the yum source, lines 441-449, for
- # the source of this regex. yum doesn't like anything but
- # string.ascii_letters, string.digits, and [-_.:]. There
- # doesn't seem to be a reason for this, because yum.
- return re.sub(r'[^A-Za-z0-9-_.:]', '-', rname)
-
- def __str__(self):
- if self.rawurl:
- return "%s at %s" % (self.__class__.__name__, self.rawurl)
- elif self.url:
- return "%s at %s" % (self.__class__.__name__, self.url)
- else:
- return self.__class__.__name__
-
- def get_urls(self):
- return []
- urls = property(get_urls)
-
- def get_files(self):
- return [self.escape_url(url) for url in self.urls]
- files = property(get_files)
-
- def get_vpkgs(self, metadata):
- agroups = ['global'] + [a for a in self.arches
- if a in metadata.groups]
- vdict = dict()
- for agrp in agroups:
- for key, value in list(self.provides[agrp].items()):
- if key not in vdict:
- vdict[key] = set(value)
- else:
- vdict[key].update(value)
- return vdict
-
- def is_virtual_package(self, metadata, package):
- """ called to determine if a package is a virtual package.
- this is only invoked if the package is not listed in the dict
- returned by get_vpkgs """
- return False
-
- def escape_url(self, url):
- return os.path.join(self.basepath, url.replace('/', '@'))
-
- def file_init(self):
- pass
-
- def read_files(self):
- pass
-
- def filter_unknown(self, unknown):
- pass
-
- def update(self):
- for url in self.urls:
- self.logger.info("Packages: Updating %s" % url)
- fname = self.escape_url(url)
- try:
- data = fetch_url(url)
- file(fname, 'w').write(data)
- except ValueError:
- self.logger.error("Packages: Bad url string %s" % url)
- raise
- except HTTPError:
- err = sys.exc_info()[1]
- self.logger.error("Packages: Failed to fetch url %s. HTTP "
- "response code=%s" % (url, err.code))
- raise
-
- def applies(self, metadata):
- # check base groups
- if not self.magic_groups_match(metadata):
- return False
-
- # check Group/Client tags from sources.xml
- for condition in self.conditions:
- if not condition(metadata):
- return False
-
- return True
-
- def get_arches(self, metadata):
- return ['global'] + [a for a in self.arches if a in metadata.groups]
-
- def get_deps(self, metadata, pkgname):
- for arch in self.get_arches(metadata):
- if pkgname in self.deps[arch]:
- return self.deps[arch][pkgname]
- return []
-
- def get_provides(self, metadata, required):
- for arch in self.get_arches(metadata):
- if required in self.provides[arch]:
- return self.provides[arch][required]
- return []
-
- def is_package(self, metadata, _):
- return False
-
- def get_package(self, metadata, package):
- return package
-
- def get_group(self, metadata, group, ptype=None):
- return []
-
- def magic_groups_match(self, metadata):
- """ check to see if this source applies to the given host
- metadata by checking 'magic' (base) groups only, or if magic
- groups are off """
- # we always check that arch matches
- found_arch = False
- for arch in self.arches:
- if arch in metadata.groups:
- found_arch = True
- break
- if not found_arch:
- return False
-
-        if not self.config.getboolean("global", "magic_groups",
-                                      default=True):
- return True
- else:
- for group in self.basegroups:
- if group in metadata.groups:
- return True
- return False
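-
-# A sketch of why the Group/Client conditions above are built as
-# "lambda m, el=el: ..." rather than plain closures: binding el as a
-# default argument freezes the current element, where a plain closure
-# would late-bind and leave every lambda testing the loop's final
-# value. The group names here are illustrative:
-conditions = []
-for name in ["debian", "ubuntu"]:
-    conditions.append(lambda m, name=name: name in m.groups)
-# conditions[0] now tests "debian" and conditions[1] tests "ubuntu"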
diff --git a/src/lib/Server/Plugins/Packages/Yum.py b/src/lib/Server/Plugins/Packages/Yum.py
deleted file mode 100644
index e13b28251..000000000
--- a/src/lib/Server/Plugins/Packages/Yum.py
+++ /dev/null
@@ -1,688 +0,0 @@
-import os
-import sys
-import time
-import copy
-import glob
-import socket
-import random
-import re
-import logging
-import threading
-import lxml.etree
-from UserDict import DictMixin
-from subprocess import Popen, PIPE, STDOUT
-import Bcfg2.Server.Plugin
-from Bcfg2.Bcfg2Py3k import StringIO, cPickle, HTTPError, ConfigParser, file
-from Bcfg2.Server.Plugins.Packages.Collection import Collection
-from Bcfg2.Server.Plugins.Packages.Source import SourceInitError, Source, \
- fetch_url
-
-logger = logging.getLogger(__name__)
-
-try:
- from pulp.client.consumer.config import ConsumerConfig
- from pulp.client.api.repository import RepositoryAPI
- from pulp.client.api.consumer import ConsumerAPI
- from pulp.client.api import server
- has_pulp = True
-except ImportError:
- has_pulp = False
-
-try:
- import yum
- has_yum = True
-except ImportError:
- has_yum = False
- logger.info("Packages: No yum libraries found; forcing use of internal "
- "dependency resolver")
-
-try:
- import json
-except ImportError:
- import simplejson as json
-
-XP = '{http://linux.duke.edu/metadata/common}'
-RP = '{http://linux.duke.edu/metadata/rpm}'
-RPO = '{http://linux.duke.edu/metadata/repo}'
-FL = '{http://linux.duke.edu/metadata/filelists}'
-
-PULPSERVER = None
-PULPCONFIG = None
-
-
-def _setup_pulp(config):
- global PULPSERVER, PULPCONFIG
- if not has_pulp:
- logger.error("Packages: Cannot create Pulp collection: Pulp libraries not "
- "found")
- raise Bcfg2.Server.Plugin.PluginInitError
-
- if PULPSERVER is None:
- try:
- username = config.get("pulp", "username")
- password = config.get("pulp", "password")
- except ConfigParser.NoSectionError:
- logger.error("Packages: No [pulp] section found in Packages/packages.conf")
- raise Bcfg2.Server.Plugin.PluginInitError
- except ConfigParser.NoOptionError:
- err = sys.exc_info()[1]
- logger.error("Packages: Required option not found in "
- "Packages/packages.conf: %s" % err)
- raise Bcfg2.Server.Plugin.PluginInitError
-
- PULPCONFIG = ConsumerConfig()
- serveropts = PULPCONFIG.server
-
- PULPSERVER = server.PulpServer(serveropts['host'],
- int(serveropts['port']),
- serveropts['scheme'],
- serveropts['path'])
- PULPSERVER.set_basic_auth_credentials(username, password)
- server.set_active_server(PULPSERVER)
- return PULPSERVER
-
-
-class YumCollection(Collection):
- # options that are included in the [yum] section but that should
- # not be included in the temporary yum.conf we write out
- option_blacklist = ["use_yum_libraries", "helper"]
-
- def __init__(self, metadata, sources, basepath, debug=False):
- Collection.__init__(self, metadata, sources, basepath, debug=debug)
- self.keypath = os.path.join(self.basepath, "keys")
-
- if len(sources):
- config = sources[0].config
- self.use_yum = has_yum and config.getboolean("yum",
- "use_yum_libraries",
- default=False)
- else:
- self.use_yum = False
-
- if self.use_yum:
- self.cachefile = os.path.join(self.cachepath,
- "cache-%s" % self.cachekey)
- if not os.path.exists(self.cachefile):
- os.mkdir(self.cachefile)
-
- self.configdir = os.path.join(self.basepath, "yum")
- if not os.path.exists(self.configdir):
- os.mkdir(self.configdir)
- self.cfgfile = os.path.join(self.configdir,
- "%s-yum.conf" % self.cachekey)
- self.write_config()
-
- self.helper = self.config.get("yum", "helper",
- default="/usr/sbin/bcfg2-yum-helper")
- if has_pulp:
- _setup_pulp(self.config)
-
- def write_config(self):
- if not os.path.exists(self.cfgfile):
- yumconf = self.get_config(raw=True)
- yumconf.add_section("main")
-
- mainopts = dict(cachedir=self.cachefile,
- keepcache="0",
- sslverify="0",
- debuglevel="0",
- reposdir="/dev/null")
- try:
- for opt in self.config.options("yum"):
- if opt not in self.option_blacklist:
- mainopts[opt] = self.config.get("yum", opt)
- except ConfigParser.NoSectionError:
- pass
-
- for opt, val in list(mainopts.items()):
- yumconf.set("main", opt, val)
-
- yumconf.write(open(self.cfgfile, 'w'))
-
- def get_config(self, raw=False):
- config = ConfigParser.SafeConfigParser()
- for source in self.sources:
- # get_urls() loads url_map as a side-effect
- source.get_urls()
- for url_map in source.url_map:
- if url_map['arch'] in self.metadata.groups:
- basereponame = source.get_repo_name(url_map)
- reponame = basereponame
-
- added = False
- while not added:
- try:
- config.add_section(reponame)
- added = True
- except ConfigParser.DuplicateSectionError:
-                        match = re.search(r'-(\d+)$', reponame)
- if match:
- rid = int(match.group(1)) + 1
- else:
- rid = 1
- reponame = "%s-%d" % (basereponame, rid)
-
- config.set(reponame, "name", reponame)
- config.set(reponame, "baseurl", url_map['url'])
- config.set(reponame, "enabled", "1")
- if len(source.gpgkeys):
- config.set(reponame, "gpgcheck", "1")
- config.set(reponame, "gpgkey",
- " ".join(source.gpgkeys))
- else:
- config.set(reponame, "gpgcheck", "0")
-
- if len(source.blacklist):
- config.set(reponame, "exclude",
- " ".join(source.blacklist))
- if len(source.whitelist):
- config.set(reponame, "includepkgs",
- " ".join(source.whitelist))
-
- if raw:
- return config
- else:
- # configparser only writes to file, so we have to use a
- # StringIO object to get the data out as a string
- buf = StringIO()
- config.write(buf)
- return "# This config was generated automatically by the Bcfg2 " \
- "Packages plugin\n\n" + buf.getvalue()
-
- def build_extra_structures(self, independent):
- """ build list of gpg keys to be added to the specification by
- validate_structures() """
- needkeys = set()
- for source in self.sources:
- for key in source.gpgkeys:
- needkeys.add(key)
-
- if len(needkeys):
- if has_yum:
-                # this must be has_yum, not use_yum, because
- # regardless of whether the user wants to use the yum
- # resolver we want to include gpg key data
- keypkg = lxml.etree.Element('BoundPackage', name="gpg-pubkey",
- type=self.ptype, origin='Packages')
- else:
- self.logger.warning("GPGKeys were specified for yum sources in "
- "sources.xml, but no yum libraries were "
- "found")
- self.logger.warning("GPG key version/release data cannot be "
- "determined automatically")
- self.logger.warning("Install yum libraries, or manage GPG keys "
- "manually")
- keypkg = None
-
- for key in needkeys:
- # figure out the path of the key on the client
- keydir = self.config.get("global", "gpg_keypath",
- default="/etc/pki/rpm-gpg")
- remotekey = os.path.join(keydir, os.path.basename(key))
- localkey = os.path.join(self.keypath, os.path.basename(key))
- kdata = open(localkey).read()
-
- # copy the key to the client
- keypath = lxml.etree.Element("BoundPath", name=remotekey,
- encoding='ascii',
- owner='root', group='root',
- type='file', perms='0644',
- important='true')
- keypath.text = kdata
-
- # hook to add version/release info if possible
- self._add_gpg_instances(keypkg, kdata, localkey, remotekey)
- independent.append(keypath)
- if keypkg is not None:
- independent.append(keypkg)
-
- # see if there are any pulp sources to handle
- has_pulp_sources = False
- for source in self.sources:
- if source.pulp_id:
- has_pulp_sources = True
- break
-
- if has_pulp_sources:
- consumerapi = ConsumerAPI()
- consumer = self._get_pulp_consumer(consumerapi=consumerapi)
- if consumer is None:
- consumer = consumerapi.create(self.metadata.hostname,
- self.metadata.hostname)
- lxml.etree.SubElement(independent, "BoundAction",
- name="pulp-update", timing="pre",
- when="always", status="check",
- command="pulp-consumer consumer update")
-
- for source in self.sources:
- # each pulp source can only have one arch, so we don't
- # have to check the arch in url_map
- if (source.pulp_id and
- source.pulp_id not in consumer['repoids']):
- consumerapi.bind(self.metadata.hostname, source.pulp_id)
-
- crt = lxml.etree.SubElement(independent, "BoundPath",
- name="/etc/pki/consumer/cert.pem",
- type="file", owner="root",
- group="root", perms="0644")
- crt.text = consumerapi.certificate(self.metadata.hostname)
-
- def _get_pulp_consumer(self, consumerapi=None):
- if consumerapi is None:
- consumerapi = ConsumerAPI()
- consumer = None
- try:
- consumer = consumerapi.consumer(self.metadata.hostname)
- except server.ServerRequestError:
- # consumer does not exist
- pass
- except socket.error:
- err = sys.exc_info()[1]
- self.logger.error("Packages: Could not contact Pulp server: %s" %
- err)
- except:
- err = sys.exc_info()[1]
- self.logger.error("Packages: Unknown error querying Pulp server: %s"
- % err)
- return consumer
-
- def _add_gpg_instances(self, keyentry, keydata, localkey, remotekey):
- """ add gpg keys to the specification to ensure they get
- installed """
-        # this must be has_yum, not use_yum, because regardless of
- # whether the user wants to use the yum resolver we want to
- # include gpg key data
- if not has_yum:
- return
-
- try:
- kinfo = yum.misc.getgpgkeyinfo(keydata)
- version = yum.misc.keyIdToRPMVer(kinfo['keyid'])
- release = yum.misc.keyIdToRPMVer(kinfo['timestamp'])
-
- lxml.etree.SubElement(keyentry, 'Instance',
- version=version,
- release=release,
- simplefile=remotekey)
- except ValueError:
- err = sys.exc_info()[1]
- self.logger.error("Packages: Could not read GPG key %s: %s" %
- (localkey, err))
-
-    def is_package(self, package):
-        if not self.use_yum:
-            return Collection.is_package(self, package)
-        if isinstance(package, tuple):
-            if package[1] is None and package[2] == (None, None, None):
-                package = package[0]
-            else:
-                return None
-        # this should really never get called; it's just provided
-        # for API completeness
-        return self.call_helper("is_package", package)
-
- def is_virtual_package(self, package):
- if not self.use_yum:
- return Collection.is_virtual_package(self, package)
- else:
- # this should really never get called; it's just provided
- # for API completeness
- return self.call_helper("is_virtual_package", package)
-
- def get_deps(self, package):
- if not self.use_yum:
- return Collection.get_deps(self, package)
- else:
- # this should really never get called; it's just provided
- # for API completeness
- return self.call_helper("get_deps", package)
-
-    def get_provides(self, required, all=False, silent=False):
-        if not self.use_yum:
-            return Collection.get_provides(self, required)
-        else:
-            # this should really never get called; it's just provided
-            # for API completeness
-            return self.call_helper("get_provides", required)
-
- def get_group(self, group, ptype="default"):
- if not self.use_yum:
- self.logger.warning("Packages: Package groups are not supported by "
- "Bcfg2's internal Yum dependency generator")
- return []
-
- if group.startswith("@"):
- group = group[1:]
-
- pkgs = self.call_helper("get_group", dict(group=group, type=ptype))
- return pkgs
-
- def complete(self, packagelist):
- if not self.use_yum:
- return Collection.complete(self, packagelist)
-
- packages = set()
- unknown = set(packagelist)
-
- if unknown:
- result = \
- self.call_helper("complete",
- dict(packages=list(unknown),
- groups=list(self.get_relevant_groups())))
- if result and "packages" in result and "unknown" in result:
- # we stringify every package because it gets returned
- # in unicode; set.update() doesn't work if some
- # elements are unicode and other are strings. (I.e.,
- # u'foo' and 'foo' get treated as unique elements.)
- packages.update([str(p) for p in result['packages']])
- unknown = set([str(p) for p in result['unknown']])
-
- self.filter_unknown(unknown)
-
- return packages, unknown
-
- def call_helper(self, command, input=None):
- """ Make a call to bcfg2-yum-helper. The yum libs have
- horrific memory leaks, so apparently the right way to get
-        around that in long-running processes is to have a short-lived
- helper. No, seriously -- check out the yum-updatesd code.
- It's pure madness. """
- # it'd be nice if we could change this to be more verbose if
- # -v was given to bcfg2-server, but Collection objects don't
- # get the 'setup' variable, so we don't know how verbose
- # bcfg2-server is. It'd also be nice if we could tell yum to
- # log to syslog. So would a unicorn.
- cmd = [self.helper, "-c", self.cfgfile]
- if self.debug_flag:
- cmd.append("-v")
- cmd.append(command)
- self.debug_log("Packages: running %s" % " ".join(cmd))
- try:
- helper = Popen(cmd, stdin=PIPE, stdout=PIPE, stderr=PIPE)
- except OSError:
- err = sys.exc_info()[1]
- self.logger.error("Packages: Failed to execute %s: %s" %
- (" ".join(cmd), err))
- return None
-
- if input:
- idata = json.dumps(input)
- (stdout, stderr) = helper.communicate(idata)
- else:
- (stdout, stderr) = helper.communicate()
- rv = helper.wait()
- if rv:
- self.logger.error("Packages: error running bcfg2-yum-helper "
- "(returned %d): %s" % (rv, stderr))
- elif self.debug_flag:
- self.debug_log("Packages: debug info from bcfg2-yum-helper: %s" %
- stderr)
- try:
- return json.loads(stdout)
- except ValueError:
- err = sys.exc_info()[1]
- self.logger.error("Packages: error reading bcfg2-yum-helper "
- "output: %s" % err)
- return None
-
- def setup_data(self, force_update=False):
- if not self.use_yum:
- return Collection.setup_data(self, force_update)
-
- if force_update:
- # we call this twice: one to clean up data from the old
- # config, and once to clean up data from the new config
- self.call_helper("clean")
-
- os.unlink(self.cfgfile)
- self.write_config()
-
- if force_update:
- self.call_helper("clean")
-
-
-class YumSource(Source):
- basegroups = ['yum', 'redhat', 'centos', 'fedora']
- ptype = 'yum'
-
- def __init__(self, basepath, xsource, config):
- Source.__init__(self, basepath, xsource, config)
- self.pulp_id = None
- if has_pulp and xsource.get("pulp_id"):
- self.pulp_id = xsource.get("pulp_id")
-
- _setup_pulp(self.config)
- repoapi = RepositoryAPI()
- try:
- self.repo = repoapi.repository(self.pulp_id)
- self.gpgkeys = [os.path.join(PULPCONFIG.cds['keyurl'], key)
- for key in repoapi.listkeys(self.pulp_id)]
- except server.ServerRequestError:
- err = sys.exc_info()[1]
- if err[0] == 401:
- msg = "Packages: Error authenticating to Pulp: %s" % err[1]
- elif err[0] == 404:
- msg = "Packages: Pulp repo id %s not found: %s" % \
- (self.pulp_id, err[1])
- else:
- msg = "Packages: Error %d fetching pulp repo %s: %s" % \
- (err[0], self.pulp_id, err[1])
- raise SourceInitError(msg)
- except socket.error:
- err = sys.exc_info()[1]
- raise SourceInitError("Could not contact Pulp server: %s" % err)
- except:
- err = sys.exc_info()[1]
- raise SourceInitError("Unknown error querying Pulp server: %s" %
- err)
- self.rawurl = "%s/%s" % (PULPCONFIG.cds['baseurl'],
- self.repo['relative_path'])
- self.arches = [self.repo['arch']]
-
- if not self.rawurl:
- self.baseurl = self.url + "%(version)s/%(component)s/%(arch)s/"
- else:
- self.baseurl = self.rawurl
- self.packages = dict()
- self.deps = dict([('global', dict())])
- self.provides = dict([('global', dict())])
- self.filemap = dict([(x, dict())
- for x in ['global'] + self.arches])
- self.needed_paths = set()
- self.file_to_arch = dict()
-
- self.use_yum = has_yum and config.getboolean("yum", "use_yum_libraries",
- default=False)
-
- def save_state(self):
- if not self.use_yum:
- cache = file(self.cachefile, 'wb')
- cPickle.dump((self.packages, self.deps, self.provides,
- self.filemap, self.url_map), cache, 2)
- cache.close()
-
-
- def load_state(self):
- if not self.use_yum:
- data = file(self.cachefile)
- (self.packages, self.deps, self.provides,
- self.filemap, self.url_map) = cPickle.load(data)
-
- def get_urls(self):
- surls = list()
- self.url_map = []
- for arch in self.arches:
- if self.url:
- usettings = [{'version':self.version, 'component':comp,
- 'arch':arch}
- for comp in self.components]
- else: # rawurl given
- usettings = [{'version':self.version, 'component':None,
- 'arch':arch}]
-
- for setting in usettings:
- setting['url'] = self.baseurl % setting
- self.url_map.append(copy.deepcopy(setting))
- surls.append((arch, [setting['url'] for setting in usettings]))
- urls = []
- for (sarch, surl_list) in surls:
- for surl in surl_list:
- urls.extend(self._get_urls_from_repodata(surl, sarch))
- return urls
- urls = property(get_urls)
-
- def _get_urls_from_repodata(self, url, arch):
- if self.use_yum:
- return [url]
-
- rmdurl = '%srepodata/repomd.xml' % url
- try:
- repomd = fetch_url(rmdurl)
- xdata = lxml.etree.XML(repomd)
- except ValueError:
- self.logger.error("Packages: Bad url string %s" % rmdurl)
- return []
- except HTTPError:
- err = sys.exc_info()[1]
- self.logger.error("Packages: Failed to fetch url %s. code=%s" %
- (rmdurl, err.code))
- return []
- except lxml.etree.XMLSyntaxError:
- err = sys.exc_info()[1]
- self.logger.error("Packages: Failed to process metadata at %s: %s" %
- (rmdurl, err))
- return []
-
- urls = []
- for elt in xdata.findall(RPO + 'data'):
- if elt.get('type') in ['filelists', 'primary']:
- floc = elt.find(RPO + 'location')
- fullurl = url + floc.get('href')
- urls.append(fullurl)
- self.file_to_arch[self.escape_url(fullurl)] = arch
- return urls
-
- def read_files(self):
- # we have to read primary.xml first, and filelists.xml afterwards;
- primaries = list()
- filelists = list()
- for fname in self.files:
- if fname.endswith('primary.xml.gz'):
- primaries.append(fname)
- elif fname.endswith('filelists.xml.gz'):
- filelists.append(fname)
-
- for fname in primaries:
- farch = self.file_to_arch[fname]
- fdata = lxml.etree.parse(fname).getroot()
- self.parse_primary(fdata, farch)
- for fname in filelists:
- farch = self.file_to_arch[fname]
- fdata = lxml.etree.parse(fname).getroot()
- self.parse_filelist(fdata, farch)
-
- # merge data
- sdata = list(self.packages.values())
- try:
- self.packages['global'] = copy.deepcopy(sdata.pop())
- except IndexError:
- logger.error("Packages: No packages in repo")
- while sdata:
- self.packages['global'] = \
- self.packages['global'].intersection(sdata.pop())
-
- for key in self.packages:
- if key == 'global':
- continue
- self.packages[key] = \
- self.packages[key].difference(self.packages['global'])
- self.save_state()
-
- def parse_filelist(self, data, arch):
- if arch not in self.filemap:
- self.filemap[arch] = dict()
- for pkg in data.findall(FL + 'package'):
- for fentry in pkg.findall(FL + 'file'):
- if fentry.text in self.needed_paths:
- if fentry.text in self.filemap[arch]:
- self.filemap[arch][fentry.text].add(pkg.get('name'))
- else:
- self.filemap[arch][fentry.text] = \
- set([pkg.get('name')])
-
- def parse_primary(self, data, arch):
- if arch not in self.packages:
- self.packages[arch] = set()
- if arch not in self.deps:
- self.deps[arch] = dict()
- if arch not in self.provides:
- self.provides[arch] = dict()
- for pkg in data.getchildren():
- if not pkg.tag.endswith('package'):
- continue
- pkgname = pkg.find(XP + 'name').text
- self.packages[arch].add(pkgname)
-
- pdata = pkg.find(XP + 'format')
- self.deps[arch][pkgname] = set()
- pre = pdata.find(RP + 'requires')
- if pre is not None:
- for entry in pre.getchildren():
- self.deps[arch][pkgname].add(entry.get('name'))
- if entry.get('name').startswith('/'):
- self.needed_paths.add(entry.get('name'))
- pro = pdata.find(RP + 'provides')
-            if pro is not None:
- for entry in pro.getchildren():
- prov = entry.get('name')
- if prov not in self.provides[arch]:
- self.provides[arch][prov] = list()
- self.provides[arch][prov].append(pkgname)
-
- def is_package(self, metadata, item):
- arch = [a for a in self.arches if a in metadata.groups]
- if not arch:
- return False
- return ((item in self.packages['global'] or
- item in self.packages[arch[0]]) and
- item not in self.blacklist and
- (len(self.whitelist) == 0 or item in self.whitelist))
-
- def get_vpkgs(self, metadata):
- if self.use_yum:
- return dict()
-
- rv = Source.get_vpkgs(self, metadata)
- for arch, fmdata in list(self.filemap.items()):
- if arch not in metadata.groups and arch != 'global':
- continue
- for filename, pkgs in list(fmdata.items()):
- rv[filename] = pkgs
- return rv
-
-    def filter_unknown(self, unknown):
-        if self.use_yum:
-            filtered = set()
-            for unk in unknown:
-                try:
-                    if unk.startswith('rpmlib'):
-                        filtered.add(unk)
-                except AttributeError:
-                    try:
-                        if unk[0].startswith('rpmlib'):
-                            filtered.add(unk)
-                    except (IndexError, AttributeError):
-                        pass
-        else:
-            filtered = set([u for u in unknown if u.startswith('rpmlib')])
- unknown.difference_update(filtered)
-
- def setup_data(self, force_update=False):
- if not self.use_yum:
- Source.setup_data(self, force_update=force_update)
-
- def get_repo_name(self, url_map):
- if self.pulp_id:
- return self.pulp_id
- else:
- return Source.get_repo_name(self, url_map)
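-
-# A minimal sketch of the JSON-over-pipes protocol call_helper() speaks
-# with bcfg2-yum-helper above: one command per invocation, payload on
-# stdin, result on stdout. The helper path and payload shown are
-# illustrative:
-def _call_helper_sketch(helper, cfgfile, command, payload):
-    proc = Popen([helper, "-c", cfgfile, command],
-                 stdin=PIPE, stdout=PIPE, stderr=PIPE)
-    stdout, stderr = proc.communicate(json.dumps(payload))
-    return json.loads(stdout)
-
-# _call_helper_sketch("/usr/sbin/bcfg2-yum-helper", "/tmp/bcfg2-yum.conf",
-#                     "complete", dict(packages=["bash"], groups=[]))
-# -> {"packages": [...], "unknown": [...]}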
diff --git a/src/lib/Server/Plugins/Packages/__init__.py b/src/lib/Server/Plugins/Packages/__init__.py
deleted file mode 100644
index da5832e90..000000000
--- a/src/lib/Server/Plugins/Packages/__init__.py
+++ /dev/null
@@ -1,263 +0,0 @@
-import os
-import sys
-import time
-import copy
-import glob
-import shutil
-import lxml.etree
-import Bcfg2.Logger
-import Bcfg2.Server.Plugin
-from Bcfg2.Bcfg2Py3k import ConfigParser, urlopen
-from Bcfg2.Server.Plugins.Packages import Collection
-from Bcfg2.Server.Plugins.Packages.PackagesSources import PackagesSources
-from Bcfg2.Server.Plugins.Packages.PackagesConfig import PackagesConfig
-
-class Packages(Bcfg2.Server.Plugin.Plugin,
- Bcfg2.Server.Plugin.StructureValidator,
- Bcfg2.Server.Plugin.Generator,
- Bcfg2.Server.Plugin.Connector):
- name = 'Packages'
- conflicts = ['Pkgmgr']
- experimental = True
- __rmi__ = Bcfg2.Server.Plugin.Plugin.__rmi__ + ['Refresh', 'Reload']
-
- def __init__(self, core, datastore):
- Bcfg2.Server.Plugin.Plugin.__init__(self, core, datastore)
- Bcfg2.Server.Plugin.StructureValidator.__init__(self)
- Bcfg2.Server.Plugin.Generator.__init__(self)
- Bcfg2.Server.Plugin.Connector.__init__(self)
-
- self.sentinels = set()
- self.cachepath = os.path.join(self.data, 'cache')
- self.keypath = os.path.join(self.data, 'keys')
- if not os.path.exists(self.keypath):
- # create key directory if needed
- os.makedirs(self.keypath)
-
- # set up config files
- self.config = PackagesConfig(self)
- self.sources = PackagesSources(os.path.join(self.data, "sources.xml"),
- self.cachepath, core.fam, self,
- self.config)
-
- def toggle_debug(self):
- Bcfg2.Server.Plugin.Plugin.toggle_debug(self)
- self.sources.toggle_debug()
-
- @property
- def disableResolver(self):
- try:
- return not self.config.getboolean("global", "resolver")
- except (ConfigParser.NoSectionError, ConfigParser.NoOptionError):
- return False
- except ValueError:
- # for historical reasons we also accept "enabled" and
- # "disabled", which are not handled according to the
- # Python docs but appear to be handled properly by
- # ConfigParser in at least some versions
- return self.config.get("global", "resolver",
- default="enabled").lower() == "disabled"
-
- @property
- def disableMetaData(self):
- try:
-            return not self.config.getboolean("global", "metadata")
- except (ConfigParser.NoSectionError, ConfigParser.NoOptionError):
- return False
- except ValueError:
- # for historical reasons we also accept "enabled" and
- # "disabled"
- return self.config.get("global", "metadata",
- default="enabled").lower() == "disabled"
-
- def create_config(self, entry, metadata):
- """ create yum/apt config for the specified host """
- attrib = {'encoding': 'ascii',
- 'owner': 'root',
- 'group': 'root',
- 'type': 'file',
- 'perms': '0644'}
-
- collection = self._get_collection(metadata)
- entry.text = collection.get_config()
- for (key, value) in list(attrib.items()):
- entry.attrib.__setitem__(key, value)
-
- def HandleEntry(self, entry, metadata):
- if entry.tag == 'Package':
- collection = self._get_collection(metadata)
- entry.set('version', 'auto')
- entry.set('type', collection.ptype)
- elif entry.tag == 'Path':
- if (entry.get("name") == self.config.get("global", "yum_config",
- default="") or
- entry.get("name") == self.config.get("global", "apt_config",
- default="")):
- self.create_config(entry, metadata)
-
- def HandlesEntry(self, entry, metadata):
- if entry.tag == 'Package':
-            if self.config.getboolean("global", "magic_groups",
-                                      default=True):
- collection = self._get_collection(metadata)
- if collection.magic_groups_match():
- return True
- else:
- return True
- elif entry.tag == 'Path':
- # managed entries for yum/apt configs
- if (entry.get("name") == self.config.get("global", "yum_config",
- default="") or
- entry.get("name") == self.config.get("global", "apt_config",
- default="")):
- return True
- return False
-
- def validate_structures(self, metadata, structures):
- '''Ensure client configurations include all needed prerequisites
-
- Arguments:
- metadata - client metadata instance
- structures - a list of structure-stage entry combinations
- '''
- collection = self._get_collection(metadata)
- indep = lxml.etree.Element('Independent')
- self._build_packages(metadata, indep, structures,
- collection=collection)
- collection.build_extra_structures(indep)
- structures.append(indep)
-
- def _build_packages(self, metadata, independent, structures,
- collection=None):
- """ build list of packages that need to be included in the
- specification by validate_structures() """
- if self.disableResolver:
- # Config requests no resolver
- return
-
- if collection is None:
- collection = self._get_collection(metadata)
- # initial is the set of packages that are explicitly specified
- # in the configuration
- initial = set()
- # base is the set of initial packages with groups expanded
- base = set()
- to_remove = []
- for struct in structures:
- for pkg in struct.xpath('//Package | //BoundPackage'):
- if pkg.get("name"):
- initial.add(pkg.get("name"))
- elif pkg.get("group"):
- try:
- if pkg.get("type"):
- gpkgs = collection.get_group(pkg.get("group"),
- ptype=pkg.get("type"))
- else:
- gpkgs = collection.get_group(pkg.get("group"))
- base.update(gpkgs)
-                except TypeError:
- self.logger.error("Could not resolve group %s" %
- pkg.get("group"))
- to_remove.append(pkg)
- else:
- self.logger.error("Packages: Malformed Package: %s" %
- lxml.etree.tostring(pkg))
- base.update(initial)
- for el in to_remove:
- el.getparent().remove(el)
-
- packages, unknown = collection.complete(base)
- if unknown:
- self.logger.info("Packages: Got %d unknown entries" % len(unknown))
- self.logger.info("Packages: %s" % list(unknown))
- newpkgs = list(packages.difference(initial))
- self.debug_log("Packages: %d initial, %d complete, %d new" %
- (len(initial), len(packages), len(newpkgs)))
- newpkgs.sort()
- for pkg in newpkgs:
- lxml.etree.SubElement(independent, 'BoundPackage', name=pkg,
- version='auto', type=collection.ptype,
- origin='Packages')
-
- def Refresh(self):
- '''Packages.Refresh() => True|False\nReload configuration
- specification and download sources\n'''
- self._load_config(force_update=True)
- return True
-
- def Reload(self):
-        '''Packages.Reload() => True|False\nReload configuration
- specification and sources\n'''
- self._load_config()
- return True
-
- def _load_config(self, force_update=False):
- '''
- Load the configuration data and setup sources
-
- Keyword args:
- force_update Force downloading repo data
- '''
- self._load_sources(force_update)
- self._load_gpg_keys(force_update)
-
- def _load_sources(self, force_update):
- """ Load sources from the config """
- self.sentinels = set()
- cachefiles = set()
-
- for collection in list(Collection.collections.values()):
- cachefiles.update(collection.cachefiles)
- if not self.disableMetaData:
- collection.setup_data(force_update)
- self.sentinels.update(collection.basegroups)
-
- Collection.clear_cache()
-
- for source in self.sources:
- cachefiles.add(source.cachefile)
- if not self.disableMetaData:
- source.setup_data(force_update)
-
- for cfile in glob.glob(os.path.join(self.cachepath, "cache-*")):
- if cfile not in cachefiles:
- try:
- if os.path.isdir(cfile):
- shutil.rmtree(cfile)
- else:
- os.unlink(cfile)
- except OSError:
- err = sys.exc_info()[1]
- self.logger.error("Packages: Could not remove cache file "
- "%s: %s" % (cfile, err))
-
- def _load_gpg_keys(self, force_update):
- """ Load gpg keys from the config """
- keyfiles = []
- keys = []
- for source in self.sources:
- for key in source.gpgkeys:
- localfile = os.path.join(self.keypath,
- os.path.basename(key.rstrip("/")))
- if localfile not in keyfiles:
- keyfiles.append(localfile)
- if ((force_update and key not in keys) or
- not os.path.exists(localfile)):
- self.logger.info("Packages: Downloading and parsing %s" % key)
- response = urlopen(key)
- open(localfile, 'w').write(response.read())
- keys.append(key)
-
- for kfile in glob.glob(os.path.join(self.keypath, "*")):
- if kfile not in keyfiles:
- os.unlink(kfile)
-
- def _get_collection(self, metadata):
- return Collection.factory(metadata, self.sources, self.data,
- debug=self.debug_flag)
-
- def get_additional_data(self, metadata):
- collection = self._get_collection(metadata)
- return dict(sources=collection.get_additional_data())
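-
-# A sketch of the entries _build_packages() appends to the Independent
-# bucket for each resolved dependency; the package name and type below
-# are illustrative:
-indep = lxml.etree.Element('Independent')
-lxml.etree.SubElement(indep, 'BoundPackage', name="libc6",
-                      version='auto', type="deb", origin='Packages')
-# lxml.etree.tostring(indep) ->
-# '<Independent><BoundPackage name="libc6" version="auto" type="deb"
-#  origin="Packages"/></Independent>'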