Diffstat (limited to 'src')
-rw-r--r--  src/lib/Bcfg2Py3k.py                               |    5
-rw-r--r--  src/lib/Client/Frame.py                            |   37
-rw-r--r--  src/lib/Client/Tools/APT.py                        |    1
-rw-r--r--  src/lib/Client/Tools/DebInit.py                    |    4
-rw-r--r--  src/lib/Client/Tools/POSIX.py                      |   14
-rw-r--r--  src/lib/Client/Tools/YUMng.py                      |   15
-rw-r--r--  src/lib/Client/Tools/__init__.py                   |    3
-rw-r--r--  src/lib/Options.py                                 |    9
-rw-r--r--  src/lib/SSLServer.py                               |   29
-rw-r--r--  src/lib/Server/Admin/Init.py                       |  137
-rw-r--r--  src/lib/Server/Admin/__init__.py                   |    5
-rw-r--r--  src/lib/Server/Core.py                             |    2
-rw-r--r--  src/lib/Server/Lint/Bundles.py                     |    6
-rw-r--r--  src/lib/Server/Lint/Comments.py                    |    2
-rw-r--r--  src/lib/Server/Lint/MergeFiles.py                  |    9
-rw-r--r--  src/lib/Server/Lint/RequiredAttrs.py               |  123
-rw-r--r--  src/lib/Server/Lint/Validate.py                    |   18
-rw-r--r--  src/lib/Server/Lint/__init__.py                    |    2
-rw-r--r--  src/lib/Server/Plugin.py                           |   49
-rw-r--r--  src/lib/Server/Plugins/Cfg.py                      |    2
-rw-r--r--  src/lib/Server/Plugins/Defaults.py                 |   51
-rw-r--r--  src/lib/Server/Plugins/GroupPatterns.py            |   21
-rw-r--r--  src/lib/Server/Plugins/Packages.py                 | 1320
-rw-r--r--  src/lib/Server/Plugins/Packages/Apt.py             |  142
-rw-r--r--  src/lib/Server/Plugins/Packages/Collection.py      |  336
-rw-r--r--  src/lib/Server/Plugins/Packages/Pac.py             |  122
-rw-r--r--  src/lib/Server/Plugins/Packages/PackagesConfig.py  |   28
-rw-r--r--  src/lib/Server/Plugins/Packages/PackagesSources.py |   66
-rw-r--r--  src/lib/Server/Plugins/Packages/Source.py          |  262
-rw-r--r--  src/lib/Server/Plugins/Packages/Yum.py             |  950
-rw-r--r--  src/lib/Server/Plugins/Packages/__init__.py        |  226
-rw-r--r--  src/lib/Server/Plugins/Pkgmgr.py                   |    5
-rw-r--r--  src/lib/Server/Plugins/SGenshi.py                  |   26
-rw-r--r--  src/lib/Server/Reports/settings.py                 |   14
-rwxr-xr-x  src/sbin/bcfg2-admin                               |    2
35 files changed, 2481 insertions(+), 1562 deletions(-)
diff --git a/src/lib/Bcfg2Py3k.py b/src/lib/Bcfg2Py3k.py
index c9e48a49b..606379d1f 100644
--- a/src/lib/Bcfg2Py3k.py
+++ b/src/lib/Bcfg2Py3k.py
@@ -79,3 +79,8 @@ if sys.hexversion >= 0x03000000:
else:
def fprint(s, f):
print >> f, s
+
+if sys.hexversion >= 0x03000000:
+ from io import FileIO as file
+else:
+ file = file
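A minimal sketch of the new shim in use (the path read here is made up; on 2.x the import yields the file builtin, on 3.x io.FileIO, so reads return bytes there):

    from Bcfg2.Bcfg2Py3k import file

    # Python 3 removed the file builtin; the shim papers over that.
    data = file("/tmp/example.txt").read()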
diff --git a/src/lib/Client/Frame.py b/src/lib/Client/Frame.py
index dec3b42c2..eca8960c1 100644
--- a/src/lib/Client/Frame.py
+++ b/src/lib/Client/Frame.py
@@ -172,23 +172,23 @@ class Frame:
# Need to process decision stuff early so that dryrun mode works with it
self.whitelist = [entry for entry in self.states \
if not self.states[entry]]
- if self.setup['decision'] == 'whitelist':
- dwl = self.setup['decision_list']
- w_to_rem = [e for e in self.whitelist \
- if not matches_white_list(e, dwl)]
- if w_to_rem:
- self.logger.info("In whitelist mode: suppressing installation of:")
- self.logger.info(["%s:%s" % (e.tag, e.get('name')) for e in w_to_rem])
- self.whitelist = [x for x in self.whitelist \
- if x not in w_to_rem]
-
- elif self.setup['decision'] == 'blacklist':
- b_to_rem = [e for e in self.whitelist \
- if not passes_black_list(e, self.setup['decision_list'])]
- if b_to_rem:
- self.logger.info("In blacklist mode: suppressing installation of:")
- self.logger.info(["%s:%s" % (e.tag, e.get('name')) for e in b_to_rem])
- self.whitelist = [x for x in self.whitelist if x not in b_to_rem]
+ if not self.setup['file']:
+ if self.setup['decision'] == 'whitelist':
+ dwl = self.setup['decision_list']
+ w_to_rem = [e for e in self.whitelist \
+ if not matches_white_list(e, dwl)]
+ if w_to_rem:
+ self.logger.info("In whitelist mode: suppressing installation of:")
+ self.logger.info(["%s:%s" % (e.tag, e.get('name')) for e in w_to_rem])
+ self.whitelist = [x for x in self.whitelist \
+ if x not in w_to_rem]
+ elif self.setup['decision'] == 'blacklist':
+ b_to_rem = [e for e in self.whitelist \
+ if not passes_black_list(e, self.setup['decision_list'])]
+ if b_to_rem:
+ self.logger.info("In blacklist mode: suppressing installation of:")
+ self.logger.info(["%s:%s" % (e.tag, e.get('name')) for e in b_to_rem])
+ self.whitelist = [x for x in self.whitelist if x not in b_to_rem]
# take care of important entries first
if not self.dryrun and not self.setup['bundle']:
@@ -206,7 +206,8 @@ class Frame:
continue
try:
self.states[cfile] = tl[0].InstallPath(cfile)
- tl[0].modified.append(cfile)
+ if self.states[cfile]:
+ tl[0].modified.append(cfile)
except:
self.logger.error("Unexpected tool failure",
exc_info=1)
diff --git a/src/lib/Client/Tools/APT.py b/src/lib/Client/Tools/APT.py
index d268fe9f4..338ec98fd 100644
--- a/src/lib/Client/Tools/APT.py
+++ b/src/lib/Client/Tools/APT.py
@@ -8,6 +8,7 @@ warnings.filterwarnings("ignore", "apt API not stable yet",
warnings.filterwarnings("ignore", "Accessed deprecated property Package.installedVersion, please see the Version class for alternatives.", DeprecationWarning)
warnings.filterwarnings("ignore", "Accessed deprecated property Package.candidateVersion, please see the Version class for alternatives.", DeprecationWarning)
warnings.filterwarnings("ignore", "Deprecated, please use 'is_installed' instead", DeprecationWarning)
+warnings.filterwarnings("ignore", "Deprecated, please use 'mark_delete()' instead", DeprecationWarning)
warnings.filterwarnings("ignore", "Attribute 'IsUpgradable' of the 'apt_pkg.DepCache' object is deprecated, use 'is_upgradable' instead.", DeprecationWarning)
warnings.filterwarnings("ignore", "Attribute 'VersionList' of the 'apt_pkg.Package' object is deprecated, use 'version_list' instead.", DeprecationWarning)
warnings.filterwarnings("ignore", "Attribute 'VerStr' of the 'apt_pkg.Version' object is deprecated, use 'ver_str' instead.", DeprecationWarning)
diff --git a/src/lib/Client/Tools/DebInit.py b/src/lib/Client/Tools/DebInit.py
index d6ce16c52..022332602 100644
--- a/src/lib/Client/Tools/DebInit.py
+++ b/src/lib/Client/Tools/DebInit.py
@@ -35,10 +35,12 @@ class DebInit(Bcfg2.Client.Tools.SvcTool):
if entry.get('sequence'):
if (deb_version in DEBIAN_OLD_STYLE_BOOT_SEQUENCE or
- deb_version.startswith('5')):
+ deb_version.startswith('5') or
+ os.path.exists('/etc/init.d/.legacy-bootordering')):
start_sequence = int(entry.get('sequence'))
kill_sequence = 100 - start_sequence
else:
+ start_sequence = None
self.logger.warning("Your debian version boot sequence is "
"dependency based \"sequence\" attribute "
"will be ignored.")
diff --git a/src/lib/Client/Tools/POSIX.py b/src/lib/Client/Tools/POSIX.py
index a7a0c4f63..372d4d9e4 100644
--- a/src/lib/Client/Tools/POSIX.py
+++ b/src/lib/Client/Tools/POSIX.py
@@ -115,13 +115,6 @@ class POSIX(Bcfg2.Client.Tools.Tool):
setup.parse([])
ppath = setup['ppath']
max_copies = setup['max_copies']
- """
- Python uses the OS mknod(2) implementation which modifies the mode
- based on the umask of the running process (at least on some Linuxes
- that were tested). We set this to zero so that POSIX-related paths
- will be created as specified in the Bcfg2 configuration.
- """
- os.umask(0)
def canInstall(self, entry):
"""Check if entry is complete for installation."""
@@ -257,6 +250,13 @@ class POSIX(Bcfg2.Client.Tools.Tool):
os.mknod(entry.get('name'), mode, device)
else:
os.mknod(entry.get('name'), mode)
+ """
+ Python uses the OS mknod(2) implementation which modifies the
+ mode based on the umask of the running process. Therefore, the
+ following chmod(2) call is needed to make sure the permissions
+ are set as specified by the user.
+ """
+ os.chmod(entry.get('name'), mode)
os.chown(entry.get('name'), normUid(entry), normGid(entry))
return True
except KeyError:
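The reasoning in the new comment can be shown in isolation; a sketch assuming a 022 umask (path and mode are hypothetical):

    import os
    import stat

    path = "/tmp/example-fifo"
    os.mknod(path, 0o666 | stat.S_IFIFO)
    # With a 022 umask, mknod(2) leaves the node at 0644, not 0666.
    os.chmod(path, 0o666)  # force the configured bits regardless of umask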
diff --git a/src/lib/Client/Tools/YUMng.py b/src/lib/Client/Tools/YUMng.py
index 24605ca44..04174b3a1 100644
--- a/src/lib/Client/Tools/YUMng.py
+++ b/src/lib/Client/Tools/YUMng.py
@@ -147,6 +147,14 @@ class YUMng(Bcfg2.Client.Tools.PkgTool):
def __init__(self, logger, setup, config):
self.yb = yum.YumBase()
+
+ if setup['debug']:
+ self.yb.preconf.debuglevel = 3
+ elif setup['verbose']:
+ self.yb.preconf.debuglevel = 2
+ else:
+ self.yb.preconf.debuglevel = 1
+
Bcfg2.Client.Tools.PkgTool.__init__(self, logger, setup, config)
self.ignores = [entry.get('name') for struct in config \
for entry in struct \
@@ -488,9 +496,10 @@ class YUMng(Bcfg2.Client.Tools.PkgTool):
package_fail = True
stat['version_fail'] = True
# Just choose the first pkg for the error message
- self.logger.info(" Wrong version installed. "\
- "Want %s, but have %s" % (nevraString(nevra),
- nevraString(POs[0])))
+ self.logger.info(" %s: Wrong version installed. "
+ "Want %s, but have %s" % (entry.get("name"),
+ nevraString(nevra),
+ nevraString(POs[0])))
qtext_versions.append("U(%s)" % str(POs[0]))
continue
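The preconf assignment sits before PkgTool.__init__, presumably because preconf settings only take effect before YumBase lazily configures itself; a sketch of the mapping (levels per the hunk above):

    import yum

    yb = yum.YumBase()
    yb.preconf.debuglevel = 2  # 1 = default, 2 = verbose (-v), 3 = debug (-d)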
diff --git a/src/lib/Client/Tools/__init__.py b/src/lib/Client/Tools/__init__.py
index 88609c2f6..9d0c69892 100644
--- a/src/lib/Client/Tools/__init__.py
+++ b/src/lib/Client/Tools/__init__.py
@@ -110,7 +110,8 @@ class Tool:
try:
func = getattr(self, "Install%s" % (entry.tag))
states[entry] = func(entry)
- self.modified.append(entry)
+ if states[entry]:
+ self.modified.append(entry)
except:
self.logger.error("Unexpected failure of install method for entry type %s" \
% (entry.tag), exc_info=1)
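This is the same guard added to Frame.py above: an entry is recorded as modified only when its installer reports success. A self-contained sketch (the entry and installer are stand-ins):

    def install(entry):
        return False  # stand-in for an install method that failed

    states, modified = {}, []
    entry = "Path:/etc/motd"
    states[entry] = install(entry)
    if states[entry]:  # previously appended unconditionally
        modified.append(entry)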
diff --git a/src/lib/Options.py b/src/lib/Options.py
index 6b3110107..fcd9107a9 100644
--- a/src/lib/Options.py
+++ b/src/lib/Options.py
@@ -201,7 +201,8 @@ INSTALL_PREFIX = Option('Installation location', cf=('server', 'prefix'),
default=DEFAULT_INSTALL_PREFIX, odesc='</path>')
SENDMAIL_PATH = Option('Path to sendmail', cf=('reports', 'sendmailpath'),
default='/usr/lib/sendmail')
-INTERACTIVE = Option('Prompt the user for each change', default=False,
+INTERACTIVE = Option('Run interactively, prompting the user for each change',
+ default=False,
cmd='-I', )
ENCODING = Option('Encoding of cfg files',
default='UTF-8',
@@ -253,7 +254,6 @@ SERVER_REPOSITORY = Option('Server repository path', '/var/lib/bcfg2',
SERVER_PLUGINS = Option('Server plugin list', cf=('server', 'plugins'),
# default server plugins
default=[
- 'Base',
'Bundler',
'Cfg',
'Metadata',
@@ -313,7 +313,10 @@ CLIENT_DRYRUN = Option('Do not actually change the system',
CLIENT_EXTRA_DISPLAY = Option('enable extra entry output',
default=False, cmd='-e', )
CLIENT_PARANOID = Option('Make automatic backups of config files',
- default=False, cmd='-P', cf=('client', 'paranoid'))
+ default=False,
+ cmd='-P',
+ cook=get_bool,
+ cf=('client', 'paranoid'))
CLIENT_DRIVERS = Option('Specify tool driver set', cmd='-D',
cf=('client', 'drivers'),
odesc="<driver1,driver2>", cook=list_split,
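Without a cook function, CLIENT_PARANOID received the raw config-file string, so paranoid = false was truthy. A sketch of what a get_bool-style cook has to do (the real Bcfg2.Options.get_bool may differ):

    def get_bool(val):
        # bool("false") is True, so the string must be interpreted
        if val.lower() in ("1", "yes", "true", "on"):
            return True
        if val.lower() in ("0", "no", "false", "off"):
            return False
        raise ValueError("not a boolean: %r" % val)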
diff --git a/src/lib/SSLServer.py b/src/lib/SSLServer.py
index 21bf48d3e..6d053b802 100644
--- a/src/lib/SSLServer.py
+++ b/src/lib/SSLServer.py
@@ -47,11 +47,10 @@ class XMLRPCDispatcher (SimpleXMLRPCServer.SimpleXMLRPCDispatcher):
params = (address, ) + params
response = self.instance._dispatch(method, params, self.funcs)
# py3k compatibility
- if isinstance(response, bool) or isinstance(response, str) \
- or isinstance(response, list):
- response = (response, )
- else:
+ if type(response) not in [bool, str, list, dict]:
response = (response.decode('utf-8'), )
+ else:
+ response = (response, )
raw_response = xmlrpclib.dumps(response, methodresponse=1,
allow_none=self.allow_none,
encoding=self.encoding)
@@ -289,21 +288,27 @@ class XMLRPCRequestHandler (SimpleXMLRPCServer.SimpleXMLRPCRequestHandler):
except:
(type, msg) = sys.exc_info()[:2]
if str(type) == 'socket.error' and msg[0] == 32:
- self.logger.warning("Connection dropped from %s" % self.client_address[0])
+ self.logger.warning("Connection dropped from %s" %
+ self.client_address[0])
elif str(type) == 'socket.error' and msg[0] == 104:
- self.logger.warning("Connection reset by peer: %s" % self.client_address[0])
+ self.logger.warning("Connection reset by peer: %s" %
+ self.client_address[0])
elif str(type) == 'ssl.SSLError':
- self.logger.warning("SSLError handling client %s: %s" % \
- (self.client_address[0], msg))
+ self.logger.warning("SSLError handling client %s: %s" %
+ (self.client_address[0], msg))
else:
- self.logger.error("Error sending response (%s): %s" % \
- (type, msg))
+ self.logger.error("Error sending response (%s): %s" %
+ (type, msg))
def finish(self):
# shut down the connection
if not self.wfile.closed:
- self.wfile.flush()
- self.wfile.close()
+ try:
+ self.wfile.flush()
+ self.wfile.close()
+ except socket.error:
+ err = sys.exc_info()[1]
+ self.logger.warning("Error closing connection: %s" % err)
self.rfile.close()
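The dispatcher change adds dict to the pass-through types because xmlrpclib.dumps only accepts a tuple of values, and a dict response previously fell into the decode branch and failed. A sketch (py2 module name; py3k spells it xmlrpc.client):

    import xmlrpclib

    # every response value is wrapped as a one-tuple before marshalling
    raw = xmlrpclib.dumps(({"ok": True}, ), methodresponse=1)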
diff --git a/src/lib/Server/Admin/Init.py b/src/lib/Server/Admin/Init.py
index 9a6ad9de9..aba6bbd32 100644
--- a/src/lib/Server/Admin/Init.py
+++ b/src/lib/Server/Admin/Init.py
@@ -36,9 +36,9 @@ web_debug = True
[communication]
protocol = %s
password = %s
-certificate = %s/%s
-key = %s/%s
-ca = %s/%s
+certificate = %s
+key = %s
+ca = %s
[components]
bcfg2 = %s
@@ -103,12 +103,15 @@ plugin_list = ['Account',
'TGenshi']
# Default list of plugins to use
-default_plugins = ['Bundler',
- 'Cfg',
- 'Metadata',
- 'Pkgmgr',
- 'Rules',
- 'SSHbase']
+default_plugins = Bcfg2.Options.SERVER_PLUGINS.default
+
+
+def get_input(prompt):
+ """py3k compatible function to get input"""
+ try:
+ return raw_input(prompt)
+ except NameError:
+ return input(prompt)
def gen_password(length):
@@ -144,12 +147,7 @@ def create_key(hostname, keypath, certpath, country, state, location):
def create_conf(confpath, confdata, keypath):
# Don't overwrite existing bcfg2.conf file
if os.path.exists(confpath):
- # py3k compatibility
- try:
- result = raw_input("\nWarning: %s already exists. "
- "Overwrite? [y/N]: " % confpath)
- except NameError:
- result = input("\nWarning: %s already exists. "
+ result = get_input("\nWarning: %s already exists. "
"Overwrite? [y/N]: " % confpath)
if result not in ['Y', 'y']:
print("Leaving %s unchanged" % confpath)
@@ -211,13 +209,8 @@ class Init(Bcfg2.Server.Admin.Mode):
def _prompt_hostname(self):
"""Ask for the server hostname."""
- # py3k compatibility
- try:
- data = raw_input("What is the server's hostname [%s]: " %
- socket.getfqdn())
- except NameError:
- data = input("What is the server's hostname [%s]: " %
- socket.getfqdn())
+ data = get_input("What is the server's hostname [%s]: " %
+ socket.getfqdn())
if data != '':
self.shostname = data
else:
@@ -225,36 +218,21 @@ class Init(Bcfg2.Server.Admin.Mode):
def _prompt_config(self):
"""Ask for the configuration file path."""
- # py3k compatibility
- try:
- newconfig = raw_input("Store Bcfg2 configuration in [%s]: " %
- self.configfile)
- except NameError:
- newconfig = input("Store Bcfg2 configuration in [%s]: " %
- self.configfile)
+ newconfig = get_input("Store Bcfg2 configuration in [%s]: " %
+ self.configfile)
if newconfig != '':
- self.configfile = newconfig
+ self.configfile = os.path.abspath(newconfig)
def _prompt_repopath(self):
"""Ask for the repository path."""
while True:
- # py3k compatibility
- try:
- newrepo = raw_input("Location of Bcfg2 repository [%s]: " %
- self.repopath)
- except NameError:
- newrepo = input("Location of Bcfg2 repository [%s]: " %
- self.repopath)
+ newrepo = get_input("Location of Bcfg2 repository [%s]: " %
+ self.repopath)
if newrepo != '':
- self.repopath = newrepo
+ self.repopath = os.path.abspath(newrepo)
if os.path.isdir(self.repopath):
- # py3k compatibility
- try:
- response = raw_input("Directory %s exists. Overwrite? [y/N]:" \
- % self.repopath)
- except NameError:
- response = input("Directory %s exists. Overwrite? [y/N]:" \
- % self.repopath)
+ response = get_input("Directory %s exists. Overwrite? [y/N]:" \
+ % self.repopath)
if response.lower().strip() == 'y':
break
else:
@@ -270,13 +248,8 @@ class Init(Bcfg2.Server.Admin.Mode):
def _prompt_server(self):
"""Ask for the server name."""
- # py3k compatibility
- try:
- newserver = raw_input("Input the server location [%s]: " %
- self.server_uri)
- except NameError:
- newserver = input("Input the server location [%s]: " %
- self.server_uri)
+ newserver = get_input("Input the server location [%s]: " %
+ self.server_uri)
if newserver != '':
self.server_uri = newserver
@@ -288,32 +261,19 @@ class Init(Bcfg2.Server.Admin.Mode):
prompt += ': '
while True:
try:
- # py3k compatibility
- try:
- osidx = int(raw_input(prompt))
- except NameError:
- osidx = int(input(prompt))
+ osidx = int(get_input(prompt))
self.os_sel = os_list[osidx - 1][1]
break
except ValueError:
continue
def _prompt_plugins(self):
- # py3k compatibility
- try:
- default = raw_input("Use default plugins? (%s) [Y/n]: " %
- ''.join(default_plugins)).lower()
- except NameError:
- default = input("Use default plugins? (%s) [Y/n]: " %
+ default = get_input("Use default plugins? (%s) [Y/n]: " %
''.join(default_plugins)).lower()
if default != 'y' and default != '':
while True:
plugins_are_valid = True
- # py3k compatibility
- try:
- plug_str = raw_input("Specify plugins: ")
- except NameError:
- plug_str = input("Specify plugins: ")
+ plug_str = get_input("Specify plugins: ")
plugins = plug_str.split(',')
for plugin in plugins:
plugin = plugin.strip()
@@ -327,42 +287,26 @@ class Init(Bcfg2.Server.Admin.Mode):
"""Ask for the key details (country, state, and location)."""
print("The following questions affect SSL certificate generation.")
print("If no data is provided, the default values are used.")
- # py3k compatibility
- try:
- newcountry = raw_input("Country name (2 letter code) for certificate: ")
- except NameError:
- newcountry = input("Country name (2 letter code) for certificate: ")
+ newcountry = get_input("Country name (2 letter code) for certificate: ")
if newcountry != '':
if len(newcountry) == 2:
self.country = newcountry
else:
while len(newcountry) != 2:
- # py3k compatibility
- try:
- newcountry = raw_input("2 letter country code (eg. US): ")
- except NameError:
- newcountry = input("2 letter country code (eg. US): ")
+ newcountry = get_input("2 letter country code (eg. US): ")
if len(newcountry) == 2:
self.country = newcountry
break
else:
self.country = 'US'
- # py3k compatibility
- try:
- newstate = raw_input("State or Province Name (full name) for certificate: ")
- except NameError:
- newstate = input("State or Province Name (full name) for certificate: ")
+ newstate = get_input("State or Province Name (full name) for certificate: ")
if newstate != '':
self.state = newstate
else:
self.state = 'Illinois'
- # py3k compatibility
- try:
- newlocation = raw_input("Locality Name (eg, city) for certificate: ")
- except NameError:
- newlocation = input("Locality Name (eg, city) for certificate: ")
+ newlocation = get_input("Locality Name (eg, city) for certificate: ")
if newlocation != '':
self.location = newlocation
else:
@@ -389,26 +333,27 @@ class Init(Bcfg2.Server.Admin.Mode):
def init_repo(self):
"""Setup a new repo and create the content of the configuration file."""
- keypath = os.path.dirname(os.path.abspath(self.configfile))
+ keypath = os.path.dirname(self.configfile)
+ kpath = os.path.join(keypath, 'bcfg2.key')
+ cpath = os.path.join(keypath, 'bcfg2.crt')
+
confdata = config % (self.repopath,
- ','.join(self.opts['plugins']),
+ ','.join(self.plugins),
self.opts['sendmail'],
self.opts['proto'],
self.password,
- keypath, 'bcfg2.crt',
- keypath, 'bcfg2.key',
- keypath, 'bcfg2.crt',
+ cpath,
+ kpath,
+ cpath,
self.server_uri)
# Create the configuration file and SSL key
create_conf(self.configfile, confdata, keypath)
- kpath = keypath + '/bcfg2.key'
- cpath = keypath + '/bcfg2.crt'
create_key(self.shostname, kpath, cpath, self.country,
self.state, self.location)
# Create the repository
- path = "%s/%s" % (self.repopath, 'etc')
+ path = os.path.join(self.repopath, 'etc')
try:
os.makedirs(path)
self._init_plugins()
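The consolidated helper and the new abspath calls combine like this; a sketch with a made-up default:

    import os

    def get_input(prompt):
        """py3k compatible function to get input"""
        try:
            return raw_input(prompt)  # Python 2
        except NameError:
            return input(prompt)      # Python 3

    # relative answers are now anchored to the working directory
    repopath = os.path.abspath(get_input("Repository path: ")
                               or "var/lib/bcfg2")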
diff --git a/src/lib/Server/Admin/__init__.py b/src/lib/Server/Admin/__init__.py
index 41c485d6c..96d9703ba 100644
--- a/src/lib/Server/Admin/__init__.py
+++ b/src/lib/Server/Admin/__init__.py
@@ -61,7 +61,10 @@ class Mode(object):
def get_repo_path(self):
"""Return repository path"""
- return self.cfp.get('server', 'repository')
+ try:
+ return self.cfp.get('server', 'repository')
+ except ConfigParser.NoSectionError:
+ self.errExit("Unable to find server section in bcfg2.conf")
def load_stats(self, client):
stats = lxml.etree.parse("%s/etc/statistics.xml" %
diff --git a/src/lib/Server/Core.py b/src/lib/Server/Core.py
index 2d735133b..daa439db1 100644
--- a/src/lib/Server/Core.py
+++ b/src/lib/Server/Core.py
@@ -313,7 +313,7 @@ class Core(Component):
except:
logger.error("error in BindStructure", exc_info=1)
self.validate_goals(meta, config)
- logger.info("Generated config for %s in %.03fs" % \
+ logger.info("Generated config for %s in %.03f seconds" % \
(client, time.time() - start))
return config
diff --git a/src/lib/Server/Lint/Bundles.py b/src/lib/Server/Lint/Bundles.py
index 67ae14fbd..472915cfd 100644
--- a/src/lib/Server/Lint/Bundles.py
+++ b/src/lib/Server/Lint/Bundles.py
@@ -10,11 +10,9 @@ class Bundles(Bcfg2.Server.Lint.ServerPlugin):
self.missing_bundles()
for bundle in self.core.plugins['Bundler'].entries.values():
if self.HandlesFile(bundle.name):
- if (Bcfg2.Server.Plugins.Bundler.have_genshi and
- type(bundle) is
+ if (not Bcfg2.Server.Plugins.Bundler.have_genshi or
+ type(bundle) is not
Bcfg2.Server.Plugins.SGenshi.SGenshiTemplateFile):
- self.sgenshi_groups(bundle)
- else:
self.bundle_names(bundle)
def missing_bundles(self):
diff --git a/src/lib/Server/Lint/Comments.py b/src/lib/Server/Lint/Comments.py
index 1ea5f295e..19fae1b08 100644
--- a/src/lib/Server/Lint/Comments.py
+++ b/src/lib/Server/Lint/Comments.py
@@ -57,7 +57,7 @@ class Comments(Bcfg2.Server.Lint.ServerPlugin):
try:
xdata = lxml.etree.XML(bundle.data)
rtype = "bundler"
- except AttributeError:
+ except (lxml.etree.XMLSyntaxError, AttributeError):
xdata = lxml.etree.parse(bundle.template.filepath).getroot()
rtype = "sgenshi"
diff --git a/src/lib/Server/Lint/MergeFiles.py b/src/lib/Server/Lint/MergeFiles.py
index 27e7aa99a..52fea3d9b 100644
--- a/src/lib/Server/Lint/MergeFiles.py
+++ b/src/lib/Server/Lint/MergeFiles.py
@@ -1,7 +1,6 @@
import os
from copy import deepcopy
from difflib import SequenceMatcher
-import Bcfg2.Options
import Bcfg2.Server.Lint
class MergeFiles(Bcfg2.Server.Lint.ServerPlugin):
@@ -27,10 +26,10 @@ class MergeFiles(Bcfg2.Server.Lint.ServerPlugin):
def check_probes(self):
probes = self.core.plugins['Probes'].probes.entries
for mset in self.get_similar(probes):
- self.LintError("merge-cfg",
- "The following probes are similar: %s. "
- "Consider merging them into a single probe." %
- ", ".join([p for p in mset]))
+ self.LintError("merge-cfg",
+ "The following probes are similar: %s. "
+ "Consider merging them into a single probe." %
+ ", ".join([p for p in mset]))
def get_similar(self, entries):
if "threshold" in self.config:
diff --git a/src/lib/Server/Lint/RequiredAttrs.py b/src/lib/Server/Lint/RequiredAttrs.py
index 9f00a4e24..55206d2ba 100644
--- a/src/lib/Server/Lint/RequiredAttrs.py
+++ b/src/lib/Server/Lint/RequiredAttrs.py
@@ -1,7 +1,7 @@
import os.path
import lxml.etree
import Bcfg2.Server.Lint
-import Bcfg2.Server.Plugins.Packages
+from Bcfg2.Server.Plugins.Packages import Apt, Yum
class RequiredAttrs(Bcfg2.Server.Lint.ServerPlugin):
""" verify attributes for configuration entries (as defined in
@@ -10,41 +10,54 @@ class RequiredAttrs(Bcfg2.Server.Lint.ServerPlugin):
def __init__(self, *args, **kwargs):
Bcfg2.Server.Lint.ServerPlugin.__init__(self, *args, **kwargs)
self.required_attrs = {
- 'device': ['name', 'owner', 'group', 'dev_type'],
- 'directory': ['name', 'owner', 'group', 'perms'],
- 'file': ['name', 'owner', 'group', 'perms'],
- 'hardlink': ['name', 'to'],
- 'symlink': ['name', 'to'],
- 'ignore': ['name'],
- 'nonexistent': ['name'],
- 'permissions': ['name', 'owner', 'group', 'perms'],
- 'vcs': ['vcstype', 'revision', 'sourceurl']}
+ 'Path': {
+ 'device': ['name', 'owner', 'group', 'dev_type'],
+ 'directory': ['name', 'owner', 'group', 'perms'],
+ 'file': ['name', 'owner', 'group', 'perms', '__text__'],
+ 'hardlink': ['name', 'to'],
+ 'symlink': ['name', 'to'],
+ 'ignore': ['name'],
+ 'nonexistent': ['name'],
+ 'permissions': ['name', 'owner', 'group', 'perms'],
+ 'vcs': ['vcstype', 'revision', 'sourceurl']},
+ 'Service': {
+ 'chkconfig': ['name'],
+ 'deb': ['name'],
+ 'rc-update': ['name'],
+ 'smf': ['name', 'FMRI'],
+ 'upstart': ['name']},
+ 'Action': ['name', 'timing', 'when', 'status', 'command'],
+ 'Package': ['name']}
def Run(self):
- self.check_rules()
- self.check_bundles()
self.check_packages()
+ if "Defaults" in self.core.plugins:
+ self.logger.info("Defaults plugin enabled; skipping required "
+ "attribute checks")
+ else:
+ self.check_rules()
+ self.check_bundles()
def check_packages(self):
""" check package sources for Source entries with missing attrs """
if 'Packages' in self.core.plugins:
for source in self.core.plugins['Packages'].sources:
- if isinstance(source, Bcfg2.Server.Plugins.Packages.PulpSource):
- if not source.id:
+ if isinstance(source, Yum.YumSource):
+ if (not source.pulp_id and not source.url and
+ not source.rawurl):
self.LintError("required-attrs-missing",
- "The required attribute id is missing "
- "from a Pulp source: %s" %
- self.RenderXML(source.xsource))
- else:
- if not source.url and not source.rawurl:
- self.LintError("required-attrs-missing",
- "A %s source must have either a url or "
- "rawurl attribute: %s" %
+ "A %s source must have either a url, "
+ "rawurl, or pulp_id attribute: %s" %
(source.ptype,
self.RenderXML(source.xsource)))
+ elif not source.url and not source.rawurl:
+ self.LintError("required-attrs-missing",
+ "A %s source must have either a url or "
+ "rawurl attribute: %s" %
+ (source.ptype,
+ self.RenderXML(source.xsource)))
- if (not isinstance(source,
- Bcfg2.Server.Plugins.Packages.APTSource) and
+ if (not isinstance(source, Apt.AptSource) and
source.recommended):
self.LintError("extra-attrs",
"The recommended attribute is not "
@@ -67,25 +80,37 @@ class RequiredAttrs(Bcfg2.Server.Lint.ServerPlugin):
for bundle in self.core.plugins['Bundler'].entries.values():
try:
xdata = lxml.etree.XML(bundle.data)
- except AttributeError:
+ except (lxml.etree.XMLSyntaxError, AttributeError):
xdata = lxml.etree.parse(bundle.template.filepath).getroot()
- for path in xdata.xpath("//BoundPath"):
+ for path in xdata.xpath("//*[substring(name(), 1, 5) = 'Bound']"):
self.check_entry(path, bundle.name)
def check_entry(self, entry, filename):
""" generic entry check """
if self.HandlesFile(filename):
- pathname = entry.get('name')
- pathtype = entry.get('type')
- pathset = set(entry.attrib.keys())
- try:
- required_attrs = set(self.required_attrs[pathtype] + ['type'])
- except KeyError:
- self.LintError("unknown-path-type",
- "Unknown path type %s: %s" %
- (pathtype, self.RenderXML(entry)))
- return
+ name = entry.get('name')
+ tag = entry.tag
+ if tag.startswith("Bound"):
+ tag = tag[5:]
+ if tag not in self.required_attrs:
+ self.LintError("unknown-entry-tag",
+ "Unknown entry tag '%s': %s" %
+ (entry.tag, self.RenderXML(entry)))
+
+ if isinstance(self.required_attrs[tag], dict):
+ etype = entry.get('type')
+ if etype in self.required_attrs[tag]:
+ required_attrs = set(self.required_attrs[tag][etype] +
+ ['type'])
+ else:
+ self.LintError("unknown-entry-type",
+ "Unknown %s type %s: %s" %
+ (tag, etype, self.RenderXML(entry)))
+ return
+ else:
+ required_attrs = set(self.required_attrs[tag])
+ attrs = set(entry.attrib.keys())
if 'dev_type' in required_attrs:
dev_type = entry.get('dev_type')
@@ -93,17 +118,21 @@ class RequiredAttrs(Bcfg2.Server.Lint.ServerPlugin):
# check if major/minor are specified
required_attrs |= set(['major', 'minor'])
- if pathtype == 'file' and not entry.text:
- self.LintError("required-attrs-missing",
- "Text missing for %s %s in %s: %s" %
- (entry.tag, pathname, filename,
- self.RenderXML(entry)))
+ if '__text__' in required_attrs:
+ required_attrs.remove('__text__')
+ if (not entry.text and
+ not entry.get('empty', 'false').lower() == 'true'):
+ self.LintError("required-attrs-missing",
+ "Text missing for %s %s in %s: %s" %
+ (entry.tag, name, filename,
+ self.RenderXML(entry)))
- if not pathset.issuperset(required_attrs):
+ if not attrs.issuperset(required_attrs):
self.LintError("required-attrs-missing",
- "The required attributes %s are missing for %s %sin %s:\n%s" %
- (",".join([attr
- for attr in
- required_attrs.difference(pathset)]),
- entry.tag, pathname, filename,
+ "The following required attribute(s) are "
+ "missing for %s %s in %s: %s\n%s" %
+ (entry.tag, name, filename,
+ ", ".join([attr
+ for attr in
+ required_attrs.difference(attrs)]),
self.RenderXML(entry)))
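The attribute table is now two-level: Path and Service map their type attribute to an attribute list, while Action and Package stay flat. A sketch of the lookup check_entry performs (abridged table; __text__ handling omitted):

    required_attrs = {"Path": {"symlink": ["name", "to"]},
                      "Package": ["name"]}

    def missing_attrs(tag, etype, attrib):
        spec = required_attrs[tag]
        if isinstance(spec, dict):  # keyed by the type attribute
            required = set(spec[etype] + ["type"])
        else:
            required = set(spec)
        return required - set(attrib)

    print(missing_attrs("Path", "symlink", {"name": "/tmp/x"}))
    # -> set(['to', 'type'])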
diff --git a/src/lib/Server/Lint/Validate.py b/src/lib/Server/Lint/Validate.py
index ebf621c22..19fd61d25 100644
--- a/src/lib/Server/Lint/Validate.py
+++ b/src/lib/Server/Lint/Validate.py
@@ -5,7 +5,6 @@ import os
from subprocess import Popen, PIPE, STDOUT
import sys
-import Bcfg2.Options
import Bcfg2.Server.Lint
class Validate(Bcfg2.Server.Lint.ServerlessPlugin):
@@ -21,6 +20,7 @@ class Validate(Bcfg2.Server.Lint.ServerlessPlugin):
"%s/Pkgmgr/*.xml":"%s/pkglist.xsd",
"%s/Base/*.xml":"%s/base.xsd",
"%s/Rules/*.xml":"%s/rules.xsd",
+ "%s/Defaults/*.xml":"%s/defaults.xsd",
"%s/etc/report-configuration.xml":"%s/report-configuration.xsd",
"%s/Svcmgr/*.xml":"%s/services.xsd",
"%s/Deps/*.xml":"%s/deps.xsd",
@@ -45,21 +45,21 @@ class Validate(Bcfg2.Server.Lint.ServerlessPlugin):
if filelist:
# avoid loading schemas for empty file lists
+ schemafile = schemaname % schemadir
try:
- schema = lxml.etree.XMLSchema(lxml.etree.parse(schemaname %
- schemadir))
+ schema = lxml.etree.XMLSchema(lxml.etree.parse(schemafile))
except IOError:
e = sys.exc_info()[1]
- self.LintError("input-output-error", e.message)
+ self.LintError("input-output-error", str(e))
continue
- except:
+ except lxml.etree.XMLSchemaParseError:
+ e = sys.exc_info()[1]
self.LintError("schema-failed-to-parse",
- "Failed to process schema %s" %
- (schemaname % schemadir))
+ "Failed to process schema %s: %s" %
+ (schemafile, e))
continue
for filename in filelist:
- self.validate(filename, schemaname % schemadir,
- schema=schema)
+ self.validate(filename, schemafile, schema=schema)
self.check_properties()
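The narrowed exception handling distinguishes unreadable schemas from unparsable ones; a standalone sketch using the same py2.4/py3k-safe exception access (the schema file name is hypothetical):

    import sys
    import lxml.etree

    try:
        schema = lxml.etree.XMLSchema(lxml.etree.parse("defaults.xsd"))
    except IOError:
        e = sys.exc_info()[1]
        print("input-output-error: %s" % e)
    except lxml.etree.XMLSchemaParseError:
        e = sys.exc_info()[1]
        print("schema-failed-to-parse: %s" % e)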
diff --git a/src/lib/Server/Lint/__init__.py b/src/lib/Server/Lint/__init__.py
index f15c90557..f47059ac4 100644
--- a/src/lib/Server/Lint/__init__.py
+++ b/src/lib/Server/Lint/__init__.py
@@ -107,7 +107,7 @@ class ErrorHandler (object):
"duplicate-package":"error",
"multiple-default-groups":"error",
"required-infoxml-attrs-missing":"error",
- "unknown-path-type":"error",
+ "unknown-entry-type":"error",
"required-attrs-missing":"error",
"extra-attrs":"warning",
"schema-failed-to-parse":"warning",
diff --git a/src/lib/Server/Plugin.py b/src/lib/Server/Plugin.py
index bf55ad271..1a6399d48 100644
--- a/src/lib/Server/Plugin.py
+++ b/src/lib/Server/Plugin.py
@@ -365,7 +365,8 @@ class FileBacked(object):
self.data = BUILTIN_FILE_TYPE(self.name).read()
self.Index()
except IOError:
- logger.error("Failed to read file %s" % (self.name))
+ err = sys.exc_info()[1]
+ logger.error("Failed to read file %s: %s" % (self.name, err))
def Index(self):
"""Update local data structures based on current file state"""
@@ -518,11 +519,10 @@ class DirectoryBacked(object):
if ((event.filename[-1] == '~') or
(event.filename[:2] == '.#') or
(event.filename[-4:] == '.swp') or
- (event.filename in ['SCCS', '.svn', '4913'])):
+ (event.filename in ['SCCS', '.svn', '4913']) or
+ (not self.patterns.match(event.filename))):
return
if action in ['exists', 'created']:
- if not self.patterns.match(event.filename):
- return
self.add_entry(relpath, event)
elif action == 'changed':
if relpath in self.entries:
@@ -572,7 +572,38 @@ class SingleXMLFileBacked(XMLFileBacked):
"""This object is a coherent cache for an independent XML file."""
def __init__(self, filename, fam):
XMLFileBacked.__init__(self, filename)
- fam.AddMonitor(filename, self)
+ self.extras = []
+ self.fam = fam
+ self.fam.AddMonitor(filename, self)
+
+ def Index(self):
+ """Build local data structures."""
+ try:
+ self.xdata = lxml.etree.XML(self.data, base_url=self.name)
+ except lxml.etree.XMLSyntaxError:
+ err = sys.exc_info()[1]
+ logger.error("Failed to parse %s: %s" % (self.name, err))
+ raise Bcfg2.Server.Plugin.PluginInitError
+
+ included = [ent.get('href')
+ for ent in self.xdata.findall('./{http://www.w3.org/2001/XInclude}include')]
+ if included:
+ for name in included:
+ if name not in self.extras:
+ self.fam.AddMonitor(os.path.join(os.path.dirname(self.name),
+ name),
+ self)
+ self.extras.append(name)
+ try:
+ self.xdata.getroottree().xinclude()
+ except lxml.etree.XIncludeError:
+ err = sys.exc_info()[1]
+ logger.error("XInclude failed on %s: %s" % (self.name, err))
+
+
+ self.entries = self.xdata.getchildren()
+ if self.__identifier__ is not None:
+ self.label = self.xdata.attrib[self.__identifier__]
class StructFile(XMLFileBacked):
@@ -789,10 +820,10 @@ class PrioDir(Plugin, Generator, XMLDirectoryBacked):
def get_attrs(self, entry, metadata):
""" get a list of attributes to add to the entry during the bind """
- if False in [src.Cache(metadata)
- for src in list(self.entries.values())]:
- self.logger.error("Called before data loaded")
- raise PluginExecutionError
+ for src in list(self.entries.values()):
+ if src.Cache(metadata) == False:
+ self.logger.error("Called before data loaded")
+ raise PluginExecutionError
matching = [src for src in list(self.entries.values())
if (src.cache and
entry.tag in src.cache[1] and
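The lxml side of the new SingleXMLFileBacked.Index reduces to the following; file names are made up, and as in the hunk above an XInclude failure is survivable:

    import lxml.etree

    xml = ("<Top xmlns:xi='http://www.w3.org/2001/XInclude'>"
           "<xi:include href='extra.xml'/></Top>")
    xdata = lxml.etree.XML(xml, base_url="/repo/top.xml")  # hrefs resolve here
    try:
        xdata.getroottree().xinclude()  # pulls extra.xml into the tree
    except lxml.etree.XIncludeError:
        pass  # logged but not fatal in the hunk above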
diff --git a/src/lib/Server/Plugins/Cfg.py b/src/lib/Server/Plugins/Cfg.py
index 2c0c69926..beea2c747 100644
--- a/src/lib/Server/Plugins/Cfg.py
+++ b/src/lib/Server/Plugins/Cfg.py
@@ -196,7 +196,7 @@ class CfgEntrySet(Bcfg2.Server.Plugin.EntrySet):
if specific.all:
return bfname
elif specific.group:
- return "%s.G%d_%s" % (bfname, specific.prio, specific.group)
+ return "%s.G%02d_%s" % (bfname, specific.prio, specific.group)
elif specific.hostname:
return "%s.H_%s" % (bfname, specific.hostname)
diff --git a/src/lib/Server/Plugins/Defaults.py b/src/lib/Server/Plugins/Defaults.py
new file mode 100644
index 000000000..23104946e
--- /dev/null
+++ b/src/lib/Server/Plugins/Defaults.py
@@ -0,0 +1,51 @@
+"""This generator provides rule-based entry mappings."""
+__revision__ = '$Revision$'
+
+import re
+import Bcfg2.Server.Plugin
+import Bcfg2.Server.Plugins.Rules
+
+class Defaults(Bcfg2.Server.Plugins.Rules.Rules,
+ Bcfg2.Server.Plugin.StructureValidator):
+ """Set default attributes on bound entries"""
+ name = 'Defaults'
+ __version__ = '$Id$'
+ __author__ = 'bcfg-dev@mcs.anl.gov'
+
+ # Rules is a Generator that happens to implement all of the
+ # functionality we want, so we overload it, but Defaults should
+ # _not_ handle any entries; it does its stuff in the structure
+ # validation phase. so we overload Handle(s)Entry and HandleEvent
+ # to ensure that Defaults handles no entries, even though it's a
+ # Generator.
+
+ def HandlesEntry(self, entry, metadata):
+ return False
+
+ def HandleEntry(self, entry, metadata):
+        raise Bcfg2.Server.Plugin.PluginExecutionError
+
+ def HandleEvent(self, event):
+ Bcfg2.Server.Plugin.XMLDirectoryBacked.HandleEvent(self, event)
+
+ def validate_structures(self, metadata, structures):
+ """ Apply defaults """
+ for struct in structures:
+ for entry in struct.iter():
+ if entry.tag.startswith("Bound"):
+ is_bound = True
+ entry.tag = entry.tag[5:]
+ else:
+ is_bound = False
+ try:
+ try:
+ self.BindEntry(entry, metadata)
+ except Bcfg2.Server.Plugin.PluginExecutionError:
+ # either no matching defaults (which is okay),
+ # or multiple matching defaults (which is not
+ # okay, but is logged). either way, we don't
+ # care about the error.
+ pass
+ finally:
+ if is_bound:
+ entry.tag = "Bound" + entry.tag
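The tag dance in validate_structures, shown on a single entry; the set() call stands in for what BindEntry would do, and the entry is made up:

    import lxml.etree

    entry = lxml.etree.Element("BoundPath", name="/etc/motd")
    is_bound = entry.tag.startswith("Bound")
    if is_bound:
        entry.tag = entry.tag[5:]  # "Path" while defaults are applied
    entry.set("owner", entry.get("owner", "root"))  # stand-in for BindEntry
    if is_bound:
        entry.tag = "Bound" + entry.tag  # restored to "BoundPath"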
diff --git a/src/lib/Server/Plugins/GroupPatterns.py b/src/lib/Server/Plugins/GroupPatterns.py
index 7faead39a..76a628931 100644
--- a/src/lib/Server/Plugins/GroupPatterns.py
+++ b/src/lib/Server/Plugins/GroupPatterns.py
@@ -70,18 +70,16 @@ class PatternMap(object):
class PatternFile(Bcfg2.Server.Plugin.SingleXMLFileBacked):
+ __identifier__ = None
+
def __init__(self, filename, fam):
Bcfg2.Server.Plugin.SingleXMLFileBacked.__init__(self, filename, fam)
self.patterns = []
def Index(self):
+ Bcfg2.Server.Plugin.SingleXMLFileBacked.Index(self)
self.patterns = []
- try:
- parsed = lxml.etree.XML(self.data)
- except:
- Bcfg2.Server.Plugin.logger.error("Failed to read file %s" % self.name)
- return
- for entry in parsed.findall('GroupPattern'):
+ for entry in self.xdata.xpath('//GroupPattern'):
try:
groups = [g.text for g in entry.findall('Group')]
for pat_ent in entry.findall('NamePattern'):
@@ -91,9 +89,8 @@ class PatternFile(Bcfg2.Server.Plugin.SingleXMLFileBacked):
rng = range_ent.text
self.patterns.append(PatternMap(None, rng, groups))
except:
- Bcfg2.Server.Plugin.logger.error(\
- "GroupPatterns: Failed to initialize pattern %s" % \
- (entry.get('pattern')))
+ self.logger.error("GroupPatterns: Failed to initialize pattern "
+ "%s" % entry.get('pattern'))
def process_patterns(self, hostname):
ret = []
@@ -103,9 +100,9 @@ class PatternFile(Bcfg2.Server.Plugin.SingleXMLFileBacked):
if gn is not None:
ret.extend(gn)
except:
- Bcfg2.Server.Plugin.logger.error(\
- "GroupPatterns: Failed to process pattern %s for %s" % \
- (pattern.pattern, hostname), exc_info=1)
+ self.logger.error("GroupPatterns: Failed to process pattern %s "
+ "for %s" % (pattern.pattern, hostname),
+ exc_info=1)
return ret
diff --git a/src/lib/Server/Plugins/Packages.py b/src/lib/Server/Plugins/Packages.py
deleted file mode 100644
index 155b78581..000000000
--- a/src/lib/Server/Plugins/Packages.py
+++ /dev/null
@@ -1,1320 +0,0 @@
-import os
-import re
-import sys
-import copy
-import gzip
-import glob
-import base64
-import logging
-import tarfile
-import lxml.etree
-
-# Compatibility imports
-from Bcfg2.Bcfg2Py3k import cPickle
-from Bcfg2.Bcfg2Py3k import HTTPBasicAuthHandler
-from Bcfg2.Bcfg2Py3k import HTTPPasswordMgrWithDefaultRealm
-from Bcfg2.Bcfg2Py3k import HTTPError
-from Bcfg2.Bcfg2Py3k import install_opener
-from Bcfg2.Bcfg2Py3k import build_opener
-from Bcfg2.Bcfg2Py3k import urlopen
-from Bcfg2.Bcfg2Py3k import ConfigParser
-
-# py3k compatibility
-if sys.hexversion >= 0x03000000:
- from io import FileIO as BUILTIN_FILE_TYPE
-else:
- BUILTIN_FILE_TYPE = file
-
-try:
- import yum.misc
- has_yum = True
-except ImportError:
- has_yum = False
-
-try:
- import pulp.client.server
- import pulp.client.config
- import pulp.client.api.repository
- import pulp.client.api.consumer
- has_pulp = True
-except ImportError:
- has_pulp = False
-
-try:
- from hashlib import md5
-except ImportError:
- from md5 import md5
-
-import Bcfg2.Logger
-import Bcfg2.Server.Plugin
-
-# build sources.list?
-# caching for yum
-
-class NoData(Exception):
- pass
-
-
-class SomeData(Exception):
- pass
-
-logger = logging.getLogger('Packages')
-
-
-def source_from_xml(xsource, cachepath):
- """ create a *Source object from its XML representation in
- sources.xml """
- stype = xsource.get("type")
- if stype is None:
- logger.error("No type specified for source, skipping")
- return None
-
- try:
- cls = globals()["%sSource" % stype.upper()]
- except KeyError:
- logger.error("Unknown source type %s")
- return None
-
- return cls(cachepath, xsource)
-
-
-def _fetch_url(url):
- if '@' in url:
- mobj = re.match('(\w+://)([^:]+):([^@]+)@(.*)$', url)
- if not mobj:
- raise ValueError
- user = mobj.group(2)
- passwd = mobj.group(3)
- url = mobj.group(1) + mobj.group(4)
- auth = HTTPBasicAuthHandler(HTTPPasswordMgrWithDefaultRealm())
- auth.add_password(None, url, user, passwd)
- install_opener(build_opener(auth))
- return urlopen(url).read()
-
-
-class Source(object):
- basegroups = []
-
- def __init__(self, basepath, xsource):
- self.basepath = basepath
- self.xsource = xsource
-
- try:
- self.version = xsource.find('Version').text
- except AttributeError:
- pass
-
- for key, tag in [('components', 'Component'), ('arches', 'Arch'),
- ('blacklist', 'Blacklist'),
- ('whitelist', 'Whitelist')]:
- self.__dict__[key] = [item.text for item in xsource.findall(tag)]
-
- self.gpgkeys = [el.text for el in xsource.findall("GPGKey")]
-
- self.recommended = xsource.get('recommended', 'false').lower() == 'true'
- self.id = xsource.get('id')
-
- self.rawurl = xsource.get('rawurl', '')
- if self.rawurl and not self.rawurl.endswith("/"):
- self.rawurl += "/"
- self.url = xsource.get('url', '')
- if self.url and not self.url.endswith("/"):
- self.url += "/"
- self.version = xsource.get('version', '')
-
- # build the set of conditions to see if this source applies to
- # a given set of metadata
- self.conditions = []
- self.groups = [] # provided for some limited backwards compat
- for el in xsource.iterancestors():
- if el.tag == "Group":
- if el.get("negate", "false").lower() == "true":
- self.conditions.append(lambda m, el=el:
- el.get("name") not in m.groups)
- else:
- self.groups.append(el.get("name"))
- self.conditions.append(lambda m, el=el:
- el.get("name") in m.groups)
- elif el.tag == "Client":
- if el.get("negate", "false").lower() == "true":
- self.conditions.append(lambda m, el=el:
- el.get("name") != m.hostname)
- else:
- self.conditions.append(lambda m, el=el:
- el.get("name") == m.hostname)
-
- self.deps = dict()
- self.provides = dict()
-
- self.cachefile = \
- os.path.join(self.basepath,
- "cache-%s" %
- md5(cPickle.dumps([self.version, self.components,
- self.url, self.rawurl,
- self.arches])).hexdigest())
- self.url_map = []
-
- def load_state(self):
- pass
-
- def setup_data(self, force_update=False):
- should_read = True
- should_download = False
- if os.path.exists(self.cachefile):
- try:
- self.load_state()
- should_read = False
- except:
- logger.error("Cachefile %s load failed; "
- "falling back to file read" % self.cachefile)
- if should_read:
- try:
- self.read_files()
- except:
- logger.error("Packages: File read failed; "
- "falling back to file download")
- should_download = True
-
- if should_download or force_update:
- try:
- self.update()
- self.read_files()
- except:
- logger.error("Failed to update source", exc_info=1)
-
- def get_urls(self):
- return []
- urls = property(get_urls)
-
- def get_files(self):
- return [self.escape_url(url) for url in self.urls]
- files = property(get_files)
-
- def get_vpkgs(self, meta):
- agroups = ['global'] + [a for a in self.arches if a in meta.groups]
- vdict = dict()
- for agrp in agroups:
- for key, value in list(self.provides[agrp].items()):
- if key not in vdict:
- vdict[key] = set(value)
- else:
- vdict[key].update(value)
- return vdict
-
- def escape_url(self, url):
- return os.path.join(self.basepath, url.replace('/', '@'))
-
- def file_init(self):
- pass
-
- def read_files(self):
- pass
-
- def update(self):
- for url in self.urls:
- logger.info("Packages: Updating %s" % url)
- fname = self.escape_url(url)
- try:
- data = _fetch_url(url)
- except ValueError:
- logger.error("Packages: Bad url string %s" % url)
- continue
- except HTTPError:
- err = sys.exc_info()[1]
- logger.error("Packages: Failed to fetch url %s. code=%s" %
- (url, err.code))
- continue
- BUILTIN_FILE_TYPE(fname, 'w').write(data)
-
- def applies(self, metadata):
- # check base groups
- if len([g for g in self.basegroups if g in metadata.groups]) == 0:
- return False
-
- # check Group/Client tags from sources.xml
- for condition in self.conditions:
- if not condition(metadata):
- return False
-
- return True
-
- def get_arches(self, metadata):
- return ['global'] + [a for a in self.arches if a in metadata.groups]
-
- def get_deps(self, metadata, pkgname):
- for arch in self.get_arches(metadata):
- if pkgname in self.deps[arch]:
- return self.deps[arch][pkgname]
- raise NoData
-
- def get_provides(self, metadata, required):
- for arch in self.get_arches(metadata):
- if required in self.provides[arch]:
- return self.provides[arch][required]
- raise NoData
-
- def is_package(self, metadata, _):
- return False
-
-
-class YUMSource(Source):
- xp = '{http://linux.duke.edu/metadata/common}'
- rp = '{http://linux.duke.edu/metadata/rpm}'
- rpo = '{http://linux.duke.edu/metadata/repo}'
- fl = '{http://linux.duke.edu/metadata/filelists}'
- basegroups = ['yum', 'redhat', 'centos', 'fedora']
- ptype = 'yum'
-
- def __init__(self, basepath, xsource):
- Source.__init__(self, basepath, xsource)
- if not self.rawurl:
- self.baseurl = self.url + "%(version)s/%(component)s/%(arch)s/"
- else:
- self.baseurl = self.rawurl
- self.packages = dict()
- self.deps = dict([('global', dict())])
- self.provides = dict([('global', dict())])
- self.filemap = dict([(x, dict()) for x in ['global'] + self.arches])
- self.needed_paths = set()
- self.file_to_arch = dict()
-
- def save_state(self):
- cache = BUILTIN_FILE_TYPE(self.cachefile, 'wb')
- cPickle.dump((self.packages, self.deps, self.provides,
- self.filemap, self.url_map), cache, 2)
- cache.close()
-
- def load_state(self):
- data = BUILTIN_FILE_TYPE(self.cachefile)
- (self.packages, self.deps, self.provides,
- self.filemap, self.url_map) = cPickle.load(data)
-
- def get_urls(self):
- surls = list()
- self.url_map = []
- for arch in self.arches:
- if self.url:
- usettings = [{'version':self.version, 'component':comp,
- 'arch':arch}
- for comp in self.components]
- else: # rawurl given
- usettings = [{'version':self.version, 'component':None,
- 'arch':arch}]
-
- for setting in usettings:
- setting['url'] = self.baseurl % setting
- self.url_map.append(copy.deepcopy(setting))
- surls.append((arch, [setting['url'] for setting in usettings]))
- urls = []
- for (sarch, surl_list) in surls:
- for surl in surl_list:
- urls.extend(self._get_urls_from_repodata(surl, sarch))
- return urls
- urls = property(get_urls)
-
- def _get_urls_from_repodata(self, url, arch):
- rmdurl = '%srepodata/repomd.xml' % url
- try:
- repomd = _fetch_url(rmdurl)
- xdata = lxml.etree.XML(repomd)
- except ValueError:
- logger.error("Packages: Bad url string %s" % rmdurl)
- return []
- except HTTPError:
- err = sys.exc_info()[1]
- logger.error("Packages: Failed to fetch url %s. code=%s" %
- (rmdurl, err.code))
- return []
- except lxml.etree.XMLSyntaxError:
- err = sys.exc_info()[1]
- logger.error("Packages: Failed to process metadata at %s: %s" %
- (rmdurl, err))
- return []
-
- urls = []
- for elt in xdata.findall(self.rpo + 'data'):
- if elt.get('type') in ['filelists', 'primary']:
- floc = elt.find(self.rpo + 'location')
- fullurl = url + floc.get('href')
- urls.append(fullurl)
- self.file_to_arch[self.escape_url(fullurl)] = arch
- return urls
-
- def read_files(self):
- # we have to read primary.xml first, and filelists.xml afterwards;
- primaries = list()
- filelists = list()
- for fname in self.files:
- if fname.endswith('primary.xml.gz'):
- primaries.append(fname)
- elif fname.endswith('filelists.xml.gz'):
- filelists.append(fname)
-
- for fname in primaries:
- farch = self.file_to_arch[fname]
- fdata = lxml.etree.parse(fname).getroot()
- self.parse_primary(fdata, farch)
- for fname in filelists:
- farch = self.file_to_arch[fname]
- fdata = lxml.etree.parse(fname).getroot()
- self.parse_filelist(fdata, farch)
-
- # merge data
- sdata = list(self.packages.values())
- try:
- self.packages['global'] = copy.deepcopy(sdata.pop())
- except IndexError:
- logger.error("No packages in repo")
- while sdata:
- self.packages['global'] = \
- self.packages['global'].intersection(sdata.pop())
-
- for key in self.packages:
- if key == 'global':
- continue
- self.packages[key] = \
- self.packages[key].difference(self.packages['global'])
- self.save_state()
-
- def parse_filelist(self, data, arch):
- if arch not in self.filemap:
- self.filemap[arch] = dict()
- for pkg in data.findall(self.fl + 'package'):
- for fentry in pkg.findall(self.fl + 'file'):
- if fentry.text in self.needed_paths:
- if fentry.text in self.filemap[arch]:
- self.filemap[arch][fentry.text].add(pkg.get('name'))
- else:
- self.filemap[arch][fentry.text] = set([pkg.get('name')])
-
- def parse_primary(self, data, arch):
- if arch not in self.packages:
- self.packages[arch] = set()
- if arch not in self.deps:
- self.deps[arch] = dict()
- if arch not in self.provides:
- self.provides[arch] = dict()
- for pkg in data.getchildren():
- if not pkg.tag.endswith('package'):
- continue
- pkgname = pkg.find(self.xp + 'name').text
- self.packages[arch].add(pkgname)
-
- pdata = pkg.find(self.xp + 'format')
- pre = pdata.find(self.rp + 'requires')
- self.deps[arch][pkgname] = set()
- for entry in pre.getchildren():
- self.deps[arch][pkgname].add(entry.get('name'))
- if entry.get('name').startswith('/'):
- self.needed_paths.add(entry.get('name'))
- pro = pdata.find(self.rp + 'provides')
- if pro != None:
- for entry in pro.getchildren():
- prov = entry.get('name')
- if prov not in self.provides[arch]:
- self.provides[arch][prov] = list()
- self.provides[arch][prov].append(pkgname)
-
- def is_package(self, metadata, item):
- arch = [a for a in self.arches if a in metadata.groups]
- if not arch:
- return False
- return ((item in self.packages['global'] or
- item in self.packages[arch[0]]) and
- item not in self.blacklist and
- (len(self.whitelist) == 0 or item in self.whitelist))
-
- def get_vpkgs(self, metadata):
- rv = Source.get_vpkgs(self, metadata)
- for arch, fmdata in list(self.filemap.items()):
- if arch not in metadata.groups and arch != 'global':
- continue
- for filename, pkgs in list(fmdata.items()):
- rv[filename] = pkgs
- return rv
-
- def filter_unknown(self, unknown):
- filtered = set([u for u in unknown if u.startswith('rpmlib')])
- unknown.difference_update(filtered)
-
-
-class PulpSource(Source):
- basegroups = ['yum', 'redhat', 'centos', 'fedora']
- ptype = 'yum'
-
- def __init__(self, basepath, xsource):
- Source.__init__(self, basepath, xsource)
- if not has_pulp:
- logger.error("Cannot create pulp source: pulp libraries not found")
- raise Bcfg2.Server.Plugin.PluginInitError
-
- self._config = pulp.client.config.Config()
-
- self._repoapi = pulp.client.api.repository.RepositoryAPI()
- self._repo = self._repoapi.repository(self.id)
- if self._repo is None:
- logger.error("Repo id %s not found")
- else:
- self.baseurl = "%s/%s" % (self._config.cds.baseurl,
- self._repo['relative_path'])
-
- self.gpgkeys = ["%s/%s" % (self._config.cds.keyurl, key)
- for key in self._repoapi.listkeys(self.id)]
-
- self.url_map = [{'version': self.version, 'component': None,
- 'arch': self.arches[0], 'url': self.baseurl}]
-
- def save_state(self):
- cache = BUILTIN_FILE_TYPE(self.cachefile, 'wb')
- cPickle.dump((self.packages, self.deps, self.provides, self._config,
- self.filemap, self.url_map, self._repoapi, self._repo),
- cache, 2)
- cache.close()
-
- def load_state(self):
- cache = BUILTIN_FILE_TYPE(self.cachefile)
- (self.packages, self.deps, self.provides, self._config, self.filemap,
- self.url_map, self._repoapi, self._repo) = cPickle.load(cache)
- cache.close()
-
- def read_files(self):
- """ ignore the yum files; we can get this information directly
- from pulp """
- for pkg in self._repoapi.packages(self.id):
- try:
- self.packages[pkg['arch']].append(pkg['name'])
- except KeyError:
- self.packages[pkg['arch']] = [pkg['name']]
- self.save_state()
-
-
-class APTSource(Source):
- basegroups = ['apt', 'debian', 'ubuntu', 'nexenta']
- ptype = 'deb'
-
- def __init__(self, basepath, xsource):
- Source.__init__(self, basepath, xsource)
- self.pkgnames = set()
-
- self.url_map = [{'rawurl': self.rawurl, 'url': self.url,
- 'version': self.version,
- 'components': self.components, 'arches': self.arches}]
-
- def save_state(self):
- cache = BUILTIN_FILE_TYPE(self.cachefile, 'wb')
- cPickle.dump((self.pkgnames, self.deps, self.provides),
- cache, 2)
- cache.close()
-
- def load_state(self):
- data = BUILTIN_FILE_TYPE(self.cachefile)
- self.pkgnames, self.deps, self.provides = cPickle.load(data)
-
- def filter_unknown(self, unknown):
- filtered = set([u for u in unknown if u.startswith('choice')])
- unknown.difference_update(filtered)
-
- def get_urls(self):
- if not self.rawurl:
- rv = []
- for part in self.components:
- for arch in self.arches:
- rv.append("%sdists/%s/%s/binary-%s/Packages.gz" %
- (self.url, self.version, part, arch))
- return rv
- else:
- return ["%sPackages.gz" % self.rawurl]
- urls = property(get_urls)
-
- def read_files(self):
- bdeps = dict()
- bprov = dict()
- if self.recommended:
- depfnames = ['Depends', 'Pre-Depends', 'Recommends']
- else:
- depfnames = ['Depends', 'Pre-Depends']
- for fname in self.files:
- if not self.rawurl:
- barch = [x
- for x in fname.split('@')
- if x.startswith('binary-')][0][7:]
- else:
- # RawURL entries assume that they only have one <Arch></Arch>
- # element and that it is the architecture of the source.
- barch = self.arches[0]
- if barch not in bdeps:
- bdeps[barch] = dict()
- bprov[barch] = dict()
- try:
- reader = gzip.GzipFile(fname)
- except:
- print("Failed to read file %s" % fname)
- raise
- for line in reader.readlines():
- words = str(line.strip()).split(':', 1)
- if words[0] == 'Package':
- pkgname = words[1].strip().rstrip()
- self.pkgnames.add(pkgname)
- bdeps[barch][pkgname] = []
- elif words[0] in depfnames:
- vindex = 0
- for dep in words[1].split(','):
- if '|' in dep:
- cdeps = [re.sub('\s+', '',
- re.sub('\(.*\)', '', cdep))
- for cdep in dep.split('|')]
- dyn_dname = "choice-%s-%s-%s" % (pkgname,
- barch,
- vindex)
- vindex += 1
- bdeps[barch][pkgname].append(dyn_dname)
- bprov[barch][dyn_dname] = set(cdeps)
- else:
- raw_dep = re.sub('\(.*\)', '', dep)
- raw_dep = raw_dep.rstrip().strip()
- bdeps[barch][pkgname].append(raw_dep)
- elif words[0] == 'Provides':
- for pkg in words[1].split(','):
- dname = pkg.rstrip().strip()
- if dname not in bprov[barch]:
- bprov[barch][dname] = set()
- bprov[barch][dname].add(pkgname)
-
- self.deps['global'] = dict()
- self.provides['global'] = dict()
- for barch in bdeps:
- self.deps[barch] = dict()
- self.provides[barch] = dict()
- for pkgname in self.pkgnames:
- pset = set()
- for barch in bdeps:
- if pkgname not in bdeps[barch]:
- bdeps[barch][pkgname] = []
- pset.add(tuple(bdeps[barch][pkgname]))
- if len(pset) == 1:
- self.deps['global'][pkgname] = pset.pop()
- else:
- for barch in bdeps:
- self.deps[barch][pkgname] = bdeps[barch][pkgname]
- provided = set()
- for bprovided in list(bprov.values()):
- provided.update(set(bprovided))
- for prov in provided:
- prset = set()
- for barch in bprov:
- if prov not in bprov[barch]:
- continue
- prset.add(tuple(bprov[barch].get(prov, ())))
- if len(prset) == 1:
- self.provides['global'][prov] = prset.pop()
- else:
- for barch in bprov:
- self.provides[barch][prov] = bprov[barch].get(prov, ())
- self.save_state()
-
- def is_package(self, _, pkg):
- return (pkg in self.pkgnames and
- pkg not in self.blacklist and
- (len(self.whitelist) == 0 or pkg in self.whitelist))
-
-
-class PACSource(Source):
- basegroups = ['arch', 'parabola']
- ptype = 'pacman'
-
- def __init__(self, basepath, xsource):
- Source.__init__(self, basepath, xsource)
- self.pkgnames = set()
-
- self.url_map = [{'rawurl': self.rawurl, 'url': self.url,
- 'version': self.version,
- 'components': self.components, 'arches': self.arches}]
-
- def save_state(self):
- cache = BUILTIN_FILE_TYPE(self.cachefile, 'wb')
- cPickle.dump((self.pkgnames, self.deps, self.provides),
- cache, 2)
- cache.close()
-
- def load_state(self):
- data = BUILTIN_FILE_TYPE(self.cachefile)
- self.pkgnames, self.deps, self.provides = cPickle.load(data)
-
- def filter_unknown(self, unknown):
- filtered = set([u for u in unknown if u.startswith('choice')])
- unknown.difference_update(filtered)
-
- def get_urls(self):
- if not self.rawurl:
- rv = []
- for part in self.components:
- for arch in self.arches:
- rv.append("%s%s/os/%s/%s.db.tar.gz" %
- (self.url, part, arch, part))
- return rv
- else:
- raise Exception("PACSource : RAWUrl not supported (yet)")
- urls = property(get_urls)
-
- def read_files(self):
- bdeps = dict()
- bprov = dict()
-
- if self.recommended:
- depfnames = ['Depends', 'Pre-Depends', 'Recommends']
- else:
- depfnames = ['Depends', 'Pre-Depends']
-
- for fname in self.files:
- if not self.rawurl:
- barch = [x for x in fname.split('@') if x in self.arches][0]
- else:
- # RawURL entries assume that they only have one <Arch></Arch>
- # element and that it is the architecture of the source.
- barch = self.arches[0]
-
- if barch not in bdeps:
- bdeps[barch] = dict()
- bprov[barch] = dict()
- try:
- print("try to read : " + fname)
- tar = tarfile.open(fname, "r")
- reader = gzip.GzipFile(fname)
- except:
- print("Failed to read file %s" % fname)
- raise
-
- for tarinfo in tar:
- if tarinfo.isdir():
- self.pkgnames.add(tarinfo.name.rsplit("-", 2)[0])
- print("added : " + tarinfo.name.rsplit("-", 2)[0])
- tar.close()
-
- self.deps['global'] = dict()
- self.provides['global'] = dict()
- for barch in bdeps:
- self.deps[barch] = dict()
- self.provides[barch] = dict()
- for pkgname in self.pkgnames:
- pset = set()
- for barch in bdeps:
- if pkgname not in bdeps[barch]:
- bdeps[barch][pkgname] = []
- pset.add(tuple(bdeps[barch][pkgname]))
- if len(pset) == 1:
- self.deps['global'][pkgname] = pset.pop()
- else:
- for barch in bdeps:
- self.deps[barch][pkgname] = bdeps[barch][pkgname]
- provided = set()
- for bprovided in list(bprov.values()):
- provided.update(set(bprovided))
- for prov in provided:
- prset = set()
- for barch in bprov:
- if prov not in bprov[barch]:
- continue
- prset.add(tuple(bprov[barch].get(prov, ())))
- if len(prset) == 1:
- self.provides['global'][prov] = prset.pop()
- else:
- for barch in bprov:
- self.provides[barch][prov] = bprov[barch].get(prov, ())
- self.save_state()
-
- def is_package(self, _, pkg):
- return (pkg in self.pkgnames and
- pkg not in self.blacklist and
- (len(self.whitelist) == 0 or pkg in self.whitelist))
-
-
-class PackagesSources(Bcfg2.Server.Plugin.SingleXMLFileBacked,
- Bcfg2.Server.Plugin.StructFile):
- def __init__(self, filename, cachepath, fam, packages):
- Bcfg2.Server.Plugin.SingleXMLFileBacked.__init__(self, filename, fam)
- Bcfg2.Server.Plugin.StructFile.__init__(self, filename)
- self.cachepath = cachepath
- if not os.path.exists(self.cachepath):
- # create cache directory if needed
- os.makedirs(self.cachepath)
- self.extras = []
- self.fam = fam
- self.pkg_obj = packages
-
- def Index(self):
- try:
- self.xdata = lxml.etree.XML(self.data, base_url=self.name)
- except lxml.etree.XMLSyntaxError:
- err = sys.exc_info()[1]
- logger.error("Packages: Error processing sources: %s" % err)
- raise Bcfg2.Server.Plugin.PluginInitError
-
- included = [ent.get('href')
- for ent in self.xdata.findall('./{http://www.w3.org/2001/XInclude}include')]
- if included:
- for name in included:
- if name not in self.extras:
- self.add_monitor(name)
- try:
- self.xdata.getroottree().xinclude()
- except lxml.etree.XIncludeError:
- err = sys.exc_info()[1]
- logger.error("Packages: Error processing sources: %s" % err)
-
- if self.__identifier__ is not None:
- self.label = self.xdata.attrib[self.__identifier__]
-
- self.entries = []
- for xsource in self.xdata.findall('.//Source'):
- source = source_from_xml(xsource, self.cachepath)
- if source is not None:
- self.entries.append(source)
-
- self.pkg_obj.Reload()
-
- def add_monitor(self, fname):
- """Add a fam monitor for an included file"""
- self.fam.AddMonitor(os.path.join(os.path.dirname(self.name), fname),
- self)
- self.extras.append(fname)
-
-
-class PackagesConfig(Bcfg2.Server.Plugin.FileBacked,
- ConfigParser.SafeConfigParser):
- def __init__(self, filename, fam):
- Bcfg2.Server.Plugin.FileBacked.__init__(self, filename)
- ConfigParser.SafeConfigParser.__init__(self)
- # packages.conf isn't strictly necessary, so only set a
- # monitor if it exists. if it gets added, that will require a
- # server restart
- if os.path.exists(filename):
- fam.AddMonitor(filename, self)
-
- def Index(self):
- """ Build local data structures """
- for section in self.sections():
- self.remove_section(section)
- self.read(self.name)
-
-
-class Packages(Bcfg2.Server.Plugin.Plugin,
- Bcfg2.Server.Plugin.StructureValidator,
- Bcfg2.Server.Plugin.Generator,
- Bcfg2.Server.Plugin.Connector):
- name = 'Packages'
- conflicts = ['Pkgmgr']
- experimental = True
- __rmi__ = Bcfg2.Server.Plugin.Plugin.__rmi__ + ['Refresh', 'Reload']
-
- def __init__(self, core, datastore):
- Bcfg2.Server.Plugin.Plugin.__init__(self, core, datastore)
- Bcfg2.Server.Plugin.StructureValidator.__init__(self)
- Bcfg2.Server.Plugin.Generator.__init__(self)
- Bcfg2.Server.Plugin.Connector.__init__(self)
- Bcfg2.Server.Plugin.Probing.__init__(self)
-
- self.sentinels = set()
- self.virt_pkgs = dict()
- self.ptypes = dict()
- self.cachepath = os.path.join(self.data, 'cache')
- self.keypath = os.path.join(self.data, 'keys')
- if not os.path.exists(self.keypath):
- # create key directory if needed
- os.makedirs(self.keypath)
-
- # set up config files
- self.config = PackagesConfig(os.path.join(self.data, "packages.conf"),
- core.fam)
- self.sources = PackagesSources(os.path.join(self.data, "sources.xml"),
- self.cachepath, core.fam, self)
-
- @property
- def disableResolver(self):
- try:
- return self.config.get("global", "resolver").lower() == "disabled"
- except (ConfigParser.NoSectionError, ConfigParser.NoOptionError):
- return False
-
- @property
- def disableMetaData(self):
- try:
- return self.config.get("global", "metadata").lower() == "disabled"
- except (ConfigParser.NoSectionError, ConfigParser.NoOptionError):
- return False
-
- def create_apt_conf(self, entry, metadata):
- """ create apt config for the specified host """
- raise NotImplementedError
-
- def create_yum_conf(self, entry, metadata):
- """ create yum config for the specified host """
- yum_attrib = {'encoding': 'ascii',
- 'owner': 'root',
- 'group': 'root',
- 'type': 'file',
- 'perms': '0644'}
-
- stanzas = []
- reponame_re = re.compile(r'.*/(?:RPMS\.)?([^/]+)')
- for source in self.get_matching_sources(metadata):
- for url_map in source.url_map:
- if url_map['arch'] in metadata.groups:
- # try to find a sensible name for the repo
- name = None
- if source.id:
- reponame = source.id
- else:
- match = reponame_re.search(url_map['url'])
- if url_map['component']:
- name = url_map['component']
- elif match:
- name = match.group(1)
- else:
- # couldn't figure out the name from the
- # source ID, URL or URL map (which
-                            # probably means it's a screwy URL), so we
- # just generate a random one
- name = base64.b64encode(os.urandom(16))[:-2]
- reponame = "%s-%s" % (source.groups[0], name)
-
- stanza = ["[%s]" % reponame,
- "name=%s" % reponame,
- "baseurl=%s" % url_map['url'],
- "enabled=1"]
- if len(source.gpgkeys):
- stanza.append("gpgcheck=1")
- stanza.append("gpgkey=%s" %
- " ".join(source.gpgkeys))
- else:
- stanza.append("gpgcheck=0")
- stanzas.append("\n".join(stanza))
-
- entry.text = "%s\n" % "\n\n".join(stanzas)
- for (key, value) in list(yum_attrib.items()):
- entry.attrib.__setitem__(key, value)
-
- def get_relevant_groups(self, meta):
- mgrps = []
- for source in self.get_matching_sources(meta):
- mgrps.extend(list(set([g for g in meta.groups
- if (g in source.basegroups or
- g in source.groups or
- g in source.arches)])))
- mgrps.sort()
- return tuple(mgrps)
-
- def _setup_pulp(self):
- try:
- rouser = self.config.get("pulp", "rouser")
- ropass = self.config.get("pulp", "ropass")
- except ConfigParser.NoSectionError:
- logger.error("No [pulp] section found in Packages/packages.conf")
- raise Bcfg2.Server.Plugin.PluginInitError
- except ConfigParser.NoOptionError:
- err = sys.exc_info()[1]
- logger.error("Required option not found in "
- "Packages/packages.conf: %s" % err)
- raise Bcfg2.Server.Plugin.PluginInitError
-
- pulpconfig = pulp.client.config.Config()
- serveropts = pulpconfig.server
-
- self._server = pulp.client.server.PulpServer(serveropts['host'],
- int(serveropts['port']),
- serveropts['scheme'],
- serveropts['path'])
- self._server.set_basic_auth_credentials(rouser, ropass)
- pulp.client.server.set_active_server(self._server)
-
- def build_vpkgs_entry(self, meta):
- # build single entry for all matching sources
- vpkgs = dict()
- for source in self.get_matching_sources(meta):
- s_vpkgs = source.get_vpkgs(meta)
- for name, prov_set in list(s_vpkgs.items()):
- if name not in vpkgs:
- vpkgs[name] = set(prov_set)
- else:
- vpkgs[name].update(prov_set)
- return vpkgs
-
- def get_matching_sources(self, meta):
- return [s for s in self.sources if s.applies(meta)]
-
- def get_ptype(self, metadata):
- """ return the package type relevant to this client """
- if metadata.hostname not in self.ptypes:
- for source in self.sources:
- for grp in metadata.groups:
- if grp in source.basegroups:
- self.ptypes[metadata.hostname] = source.ptype
- break
- try:
- return self.ptypes[metadata.hostname]
- except KeyError:
- return None
-
- def HandleEntry(self, entry, metadata):
- if entry.tag == 'Package':
- entry.set('version', 'auto')
- entry.set('type', self.get_ptype(metadata))
- elif entry.tag == 'Path':
- if (self.config.has_option("global", "yum_config") and
- entry.get("name") == self.config.get("global", "yum_config")):
- self.create_yum_conf(entry, metadata)
- elif (self.config.has_option("global", "apt_config") and
- entry.get("name") == self.config.get("global", "apt_config")):
- self.create_apt_conf(entry, metadata)
-
- def HandlesEntry(self, entry, metadata):
- if entry.tag == 'Package':
- for grp in metadata.groups:
- if grp in self.sentinels:
- return True
- elif entry.tag == 'Path':
- # managed entries for yum/apt configs
- if ((self.config.has_option("global", "yum_config") and
- entry.get("name") == self.config.get("global",
- "yum_config")) or
- (self.config.has_option("global", "apt_config") and
- entry.get("name") == self.config.get("global", "apt_config"))):
- return True
- return False
-
- def complete(self, meta, input_requirements, debug=False):
- '''Build the transitive closure of all package dependencies
-
- Arguments:
- meta - client metadata instance
-        input_requirements - set of package names
- debug - print out debug information for the decision making process
- returns => (set(packages), set(unsatisfied requirements), package type)
- '''
- sources = self.get_matching_sources(meta)
- # reverse list so that priorities correspond to file order
- sources.reverse()
- if len(sources) == 0:
- self.logger.error("Packages: No matching sources for client %s; "
- "improper group memberships?" % meta.hostname)
- return set(), set(), 'failed'
- ptype = self.get_ptype(meta)
- if ptype is None:
- return set(), set(), 'failed'
-
- # setup vpkg cache
- pgrps = self.get_relevant_groups(meta)
- if pgrps not in self.virt_pkgs:
- self.virt_pkgs[pgrps] = self.build_vpkgs_entry(meta)
- vpkg_cache = self.virt_pkgs[pgrps]
-
- # unclassified is set of unsatisfied requirements (may be pkg for vpkg)
- unclassified = set(input_requirements)
- vpkgs = set()
- both = set()
- pkgs = set(input_requirements)
-
- packages = set()
- examined = set()
- unknown = set()
-
- final_pass = False
- really_done = False
- # do while unclassified or vpkgs or both or pkgs
- while unclassified or pkgs or both or final_pass:
- if really_done:
- break
- if len(unclassified) + len(pkgs) + len(both) == 0:
- # one more pass then exit
- really_done = True
-
- while unclassified:
- current = unclassified.pop()
- examined.add(current)
- is_pkg = False
- for source in sources:
- if source.is_package(meta, current):
- is_pkg = True
- break
-
- is_vpkg = current in vpkg_cache
-
- if is_pkg and is_vpkg:
- both.add(current)
- elif is_pkg and not is_vpkg:
- pkgs.add(current)
- elif is_vpkg and not is_pkg:
- vpkgs.add(current)
- elif not is_vpkg and not is_pkg:
- unknown.add(current)
-
- while pkgs:
- # direct packages; current can be added, and all deps
- # should be resolved
- current = pkgs.pop()
- if debug:
- self.logger.debug("Packages: handling package requirement "
- "%s" % current)
- deps = ()
- for source in sources:
- if source.is_package(meta, current):
- try:
- deps = source.get_deps(meta, current)
- break
- except:
- continue
- packages.add(current)
- newdeps = set(deps).difference(examined)
- if debug and newdeps:
- self.logger.debug("Packages: Package %s added "
- "requirements %s" % (current, newdeps))
- unclassified.update(newdeps)
-
- satisfied_vpkgs = set()
- for current in vpkgs:
-                # virtual dependencies: a sole provider is forced in
-                # immediately; otherwise the requirement is satisfied
-                # once one of its providers is already in the config
- if len(vpkg_cache[current]) == 1:
- if debug:
- self.logger.debug("Packages: requirement %s satisfied "
- "by %s" % (current,
- vpkg_cache[current]))
- unclassified.update(vpkg_cache[current].difference(examined))
- satisfied_vpkgs.add(current)
- elif [item for item in vpkg_cache[current] if item in packages]:
- if debug:
- self.logger.debug("Packages: requirement %s satisfied "
- "by %s" %
- (current,
- [item for item in vpkg_cache[current]
- if item in packages]))
- satisfied_vpkgs.add(current)
- vpkgs.difference_update(satisfied_vpkgs)
-
- satisfied_both = set()
- for current in both:
-                # names in 'both' have virtual providers as well as a
-                # real package of that name; prefer an explicitly
-                # specified provider, then fall back to forcing the
-                # real package on the last pass
- if [item for item in vpkg_cache[current] if item in packages]:
- if debug:
- self.logger.debug("Packages: requirement %s satisfied "
- "by %s" %
- (current,
- [item for item in vpkg_cache[current]
- if item in packages]))
- satisfied_both.add(current)
- elif current in input_requirements or final_pass:
- pkgs.add(current)
- satisfied_both.add(current)
- both.difference_update(satisfied_both)
-
- if len(unclassified) + len(pkgs) == 0:
- final_pass = True
- else:
- final_pass = False
-
- for source in sources:
- source.filter_unknown(unknown)
-
- return packages, unknown, ptype
-
- def validate_structures(self, metadata, structures):
- '''Ensure client configurations include all needed prerequisites
-
- Arguments:
- metadata - client metadata instance
- structures - a list of structure-stage entry combinations
- '''
- indep = lxml.etree.Element('Independent')
- self._build_packages(metadata, indep, structures)
- self._build_gpgkeys(metadata, indep)
- self._build_pulp_entries(metadata, indep)
- structures.append(indep)
-
- def _build_pulp_entries(self, metadata, independent):
- """ build list of Pulp actions that need to be included in the
- specification by validate_structures() """
- if not has_pulp:
- return
-
- # if there are no Pulp sources for this host, we don't need to
- # worry about registering it
- build_actions = False
- for source in self.get_matching_sources(metadata):
- if isinstance(source, PulpSource):
- build_actions = True
- break
-
- if not build_actions:
- self.logger.debug("No Pulp sources apply to %s, skipping Pulp "
- "registration" % metadata.hostname)
- return
-
- consumerapi = pulp.client.api.consumer.ConsumerAPI()
- try:
- consumer = consumerapi.consumer(metadata.hostname)
- except pulp.client.server.ServerRequestError:
- try:
- reguser = self.config.get("pulp", "reguser")
- regpass = self.config.get("pulp", "regpass")
- reg_cmd = ("pulp-client -u '%s' -p '%s' consumer create "
- "--id='%s'" % (reguser, regpass, metadata.hostname))
- lxml.etree.SubElement(independent, "BoundAction",
- name="pulp-register", timing="pre",
- when="always", status="check",
- command=reg_cmd)
- except ConfigParser.NoOptionError:
- err = sys.exc_info()[1]
- self.logger.error("Required option not found in "
- "Packages/packages.conf: %s. Pulp consumers "
- "will not be registered" % err)
- return
-
- for source in self.get_matching_sources(metadata):
- # each pulp source can only have one arch, so we don't
- # have to check the arch in url_map
- if source.id not in consumer['repoids']:
- bind_cmd = "pulp-client consumer bind --repoid=%s" % source.id
- lxml.etree.SubElement(independent, "BoundAction",
- name="pulp-bind-%s" % source.id,
- timing="pre", when="always",
- status="check", command=bind_cmd)
-
- def _build_packages(self, metadata, independent, structures):
- """ build list of packages that need to be included in the
- specification by validate_structures() """
- if self.disableResolver:
- # Config requests no resolver
- return
-
- initial = set([pkg.get('name')
- for struct in structures
- for pkg in struct.findall('Package') + \
- struct.findall('BoundPackage')])
- packages, unknown, ptype = self.complete(metadata, initial,
- debug=self.debug_flag)
- if unknown:
- self.logger.info("Got unknown entries")
- self.logger.info(list(unknown))
- newpkgs = list(packages.difference(initial))
- newpkgs.sort()
- for pkg in newpkgs:
- lxml.etree.SubElement(independent, 'BoundPackage', name=pkg,
- type=ptype, version='auto', origin='Packages')
-
- def _build_gpgkeys(self, metadata, independent):
- """ build list of gpg keys to be added to the specification by
- validate_structures() """
- needkeys = set()
- for source in self.get_matching_sources(metadata):
- for key in source.gpgkeys:
- needkeys.add(key)
-
- if len(needkeys):
- keypkg = lxml.etree.Element('BoundPackage', name="gpg-pubkey",
- type=self.get_ptype(metadata),
- origin='Packages')
-
- for key in needkeys:
- # figure out the path of the key on the client
- try:
- keydir = self.config.get("global", "gpg_keypath")
- except ConfigParser.NoOptionError:
- keydir = "/etc/pki/rpm-gpg"
- except ConfigParser.NoSectionError:
- keydir = "/etc/pki/rpm-gpg"
- remotekey = os.path.join(keydir, os.path.basename(key))
- localkey = os.path.join(self.keypath, os.path.basename(key))
- kdata = open(localkey).read()
-
- # copy the key to the client
- keypath = lxml.etree.Element("BoundPath", name=remotekey,
- encoding='ascii',
- owner='root', group='root',
- type='file', perms='0644',
- important='true')
- keypath.text = kdata
- independent.append(keypath)
-
- if has_yum:
- # add the key to the specification to ensure it
- # gets installed
- try:
- kinfo = yum.misc.getgpgkeyinfo(kdata)
- version = yum.misc.keyIdToRPMVer(kinfo['keyid'])
- release = yum.misc.keyIdToRPMVer(kinfo['timestamp'])
-
- lxml.etree.SubElement(keypkg, 'Instance',
- version=version,
- release=release,
- simplefile=remotekey)
- except ValueError:
- err = sys.exc_info()[1]
- self.logger.error("Could not read GPG key %s: %s" %
- (localkey, err))
- else:
- self.logger.info("Yum libraries not found; GPG keys will "
- "not be handled automatically")
- independent.append(keypkg)
-
- def Refresh(self):
- '''Packages.Refresh() => True|False\nReload configuration
- specification and download sources\n'''
- self._load_config(force_update=True)
- return True
-
- def Reload(self):
-        '''Packages.Reload() => True|False\nReload configuration
- specification and sources\n'''
- self._load_config()
- return True
-
- def _load_config(self, force_update=False):
- '''
- Load the configuration data and setup sources
-
- Keyword args:
- force_update Force downloading repo data
- '''
- self._load_sources(force_update)
- self._load_gpg_keys(force_update)
-
- def _load_sources(self, force_update):
- """ Load sources from the config """
- self.virt_pkgs = dict()
- self.sentinels = set()
-
- cachefiles = []
- for source in self.sources:
- cachefiles.append(source.cachefile)
- if not self.disableMetaData:
- source.setup_data(force_update)
- self.sentinels.update(source.basegroups)
-
- for cfile in glob.glob(os.path.join(self.cachepath, "cache-*")):
- if cfile not in cachefiles:
- os.unlink(cfile)
-
- def _load_gpg_keys(self, force_update):
- """ Load gpg keys from the config """
- keyfiles = []
- for source in self.sources:
- for key in source.gpgkeys:
- localfile = os.path.join(self.keypath, os.path.basename(key))
- if localfile not in keyfiles:
- keyfiles.append(localfile)
- if force_update or not os.path.exists(localfile):
- logger.debug("Downloading and parsing %s" % key)
- response = urlopen(key)
- open(localfile, 'w').write(response.read())
-
- for kfile in glob.glob(os.path.join(self.keypath, "*")):
- if kfile not in keyfiles:
- os.unlink(kfile)
-
- def get_additional_data(self, meta):
- sdata = []
- [sdata.extend(copy.deepcopy(src.url_map))
- for src in self.get_matching_sources(meta)]
- return dict(sources=sdata)
diff --git a/src/lib/Server/Plugins/Packages/Apt.py b/src/lib/Server/Plugins/Packages/Apt.py
new file mode 100644
index 000000000..5c80200a4
--- /dev/null
+++ b/src/lib/Server/Plugins/Packages/Apt.py
@@ -0,0 +1,142 @@
+import re
+import gzip
+import logging
+from Bcfg2.Server.Plugins.Packages.Collection import Collection
+from Bcfg2.Server.Plugins.Packages.Source import Source
+from Bcfg2.Bcfg2Py3k import cPickle, file
+
+logger = logging.getLogger("Packages")
+
+class AptCollection(Collection):
+ def get_group(self, group):
+ self.logger.warning("Package groups are not supported by APT")
+ return []
+
+class AptSource(Source):
+ basegroups = ['apt', 'debian', 'ubuntu', 'nexenta']
+ ptype = 'deb'
+
+ def __init__(self, basepath, xsource, config):
+ Source.__init__(self, basepath, xsource, config)
+ self.pkgnames = set()
+
+ self.url_map = [{'rawurl': self.rawurl, 'url': self.url,
+ 'version': self.version,
+ 'components': self.components, 'arches': self.arches}]
+
+ def save_state(self):
+ cache = file(self.cachefile, 'wb')
+ cPickle.dump((self.pkgnames, self.deps, self.provides),
+ cache, 2)
+ cache.close()
+
+ def load_state(self):
+ data = file(self.cachefile)
+ self.pkgnames, self.deps, self.provides = cPickle.load(data)
+
+ def filter_unknown(self, unknown):
+ filtered = set([u for u in unknown if u.startswith('choice')])
+ unknown.difference_update(filtered)
+
+ def get_urls(self):
+ if not self.rawurl:
+ rv = []
+ for part in self.components:
+ for arch in self.arches:
+ rv.append("%sdists/%s/%s/binary-%s/Packages.gz" %
+ (self.url, self.version, part, arch))
+ return rv
+ else:
+ return ["%sPackages.gz" % self.rawurl]
+ urls = property(get_urls)
+
+ def read_files(self):
+ bdeps = dict()
+ bprov = dict()
+ if self.recommended:
+ depfnames = ['Depends', 'Pre-Depends', 'Recommends']
+ else:
+ depfnames = ['Depends', 'Pre-Depends']
+ for fname in self.files:
+ if not self.rawurl:
+ barch = [x
+ for x in fname.split('@')
+ if x.startswith('binary-')][0][7:]
+ else:
+ # RawURL entries assume that they only have one <Arch></Arch>
+ # element and that it is the architecture of the source.
+ barch = self.arches[0]
+ if barch not in bdeps:
+ bdeps[barch] = dict()
+ bprov[barch] = dict()
+ try:
+ reader = gzip.GzipFile(fname)
+ except:
+ print("Failed to read file %s" % fname)
+ raise
+ for line in reader.readlines():
+ words = str(line.strip()).split(':', 1)
+ if words[0] == 'Package':
+ pkgname = words[1].strip().rstrip()
+ self.pkgnames.add(pkgname)
+ bdeps[barch][pkgname] = []
+ elif words[0] in depfnames:
+ vindex = 0
+ for dep in words[1].split(','):
+ if '|' in dep:
+                            cdeps = [re.sub(r'\s+', '',
+                                            re.sub(r'\(.*\)', '', cdep))
+ for cdep in dep.split('|')]
+ dyn_dname = "choice-%s-%s-%s" % (pkgname,
+ barch,
+ vindex)
+ vindex += 1
+ bdeps[barch][pkgname].append(dyn_dname)
+ bprov[barch][dyn_dname] = set(cdeps)
+ else:
+                            raw_dep = re.sub(r'\(.*\)', '', dep)
+ raw_dep = raw_dep.rstrip().strip()
+ bdeps[barch][pkgname].append(raw_dep)
+ elif words[0] == 'Provides':
+ for pkg in words[1].split(','):
+ dname = pkg.rstrip().strip()
+ if dname not in bprov[barch]:
+ bprov[barch][dname] = set()
+ bprov[barch][dname].add(pkgname)
+
+ self.deps['global'] = dict()
+ self.provides['global'] = dict()
+ for barch in bdeps:
+ self.deps[barch] = dict()
+ self.provides[barch] = dict()
+ for pkgname in self.pkgnames:
+ pset = set()
+ for barch in bdeps:
+ if pkgname not in bdeps[barch]:
+ bdeps[barch][pkgname] = []
+ pset.add(tuple(bdeps[barch][pkgname]))
+ if len(pset) == 1:
+ self.deps['global'][pkgname] = pset.pop()
+ else:
+ for barch in bdeps:
+ self.deps[barch][pkgname] = bdeps[barch][pkgname]
+ provided = set()
+ for bprovided in list(bprov.values()):
+ provided.update(set(bprovided))
+ for prov in provided:
+ prset = set()
+ for barch in bprov:
+ if prov not in bprov[barch]:
+ continue
+ prset.add(tuple(bprov[barch].get(prov, ())))
+ if len(prset) == 1:
+ self.provides['global'][prov] = prset.pop()
+ else:
+ for barch in bprov:
+ self.provides[barch][prov] = bprov[barch].get(prov, ())
+ self.save_state()
+
+ def is_package(self, _, pkg):
+ return (pkg in self.pkgnames and
+ pkg not in self.blacklist and
+ (len(self.whitelist) == 0 or pkg in self.whitelist))
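+
+# A sketch (editorial) of what read_files() builds from a hypothetical
+# Packages.gz stanza:
+#
+#   Package: foo
+#   Depends: libbar (>= 1.0), baz | qux
+#   Provides: foo-virtual
+#
+# roughly yields, for the file's architecture <arch>:
+#
+#   pkgnames              -> {'foo'}
+#   bdeps[<arch>]['foo']  -> ['libbar', 'choice-foo-<arch>-0']
+#   bprov[<arch>]         -> {'choice-foo-<arch>-0': {'baz', 'qux'},
+#                             'foo-virtual': {'foo'}}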
diff --git a/src/lib/Server/Plugins/Packages/Collection.py b/src/lib/Server/Plugins/Packages/Collection.py
new file mode 100644
index 000000000..aed85fe77
--- /dev/null
+++ b/src/lib/Server/Plugins/Packages/Collection.py
@@ -0,0 +1,336 @@
+import copy
+import logging
+
+try:
+ from hashlib import md5
+except ImportError:
+    from md5 import md5
+
+logger = logging.getLogger("Packages")
+
+collections = dict()
+
+class Collection(object):
+ def __init__(self, metadata, sources, basepath):
+        """ don't call this directly; use the factory() function """
+ self.metadata = metadata
+ self.sources = sources
+ self.logger = logging.getLogger("Packages")
+ self.basepath = basepath
+ self.virt_pkgs = dict()
+
+ try:
+ self.config = sources[0].config
+ self.cachepath = sources[0].basepath
+ self.ptype = sources[0].ptype
+ except IndexError:
+ self.config = None
+ self.cachepath = None
+ self.ptype = "unknown"
+
+ self.cachefile = None
+
+ @property
+ def cachekey(self):
+ return md5(self.get_config()).hexdigest()
+
+ def get_config(self):
+ self.logger.error("Cannot generate config for host with multiple "
+ "source types (%s)" % self.metadata.hostname)
+ return ""
+
+ def get_relevant_groups(self):
+ groups = []
+ for source in self.sources:
+ groups.extend(source.get_relevant_groups(self.metadata))
+ return sorted(list(set(groups)))
+
+ @property
+ def basegroups(self):
+ groups = set()
+ for source in self.sources:
+ groups.update(source.basegroups)
+ return list(groups)
+
+ @property
+ def cachefiles(self):
+ cachefiles = set([self.cachefile])
+ for source in self.sources:
+ cachefiles.add(source.cachefile)
+ return list(cachefiles)
+
+ def get_group(self, group):
+ for source in self.sources:
+ pkgs = source.get_group(self.metadata, group)
+ if pkgs:
+ return pkgs
+ self.logger.warning("'%s' is not a valid group" % group)
+ return []
+
+ def is_package(self, package):
+ for source in self.sources:
+ if source.is_package(self.metadata, package):
+ return True
+ return False
+
+ def is_virtual_package(self, package):
+ for source in self.sources:
+ if source.is_virtual_package(self.metadata, package):
+ return True
+ return False
+
+ def get_deps(self, package):
+ for source in self.sources:
+ if source.is_package(self.metadata, package):
+ return source.get_deps(self.metadata, package)
+ return []
+
+ def get_provides(self, package):
+ for source in self.sources:
+ providers = source.get_provides(self.metadata, package)
+ if providers:
+ return providers
+ return []
+
+ def get_vpkgs(self):
+ """ get virtual packages """
+ vpkgs = dict()
+ for source in self.sources:
+ s_vpkgs = source.get_vpkgs(self.metadata)
+ for name, prov_set in list(s_vpkgs.items()):
+ if name not in vpkgs:
+ vpkgs[name] = set(prov_set)
+ else:
+ vpkgs[name].update(prov_set)
+ return vpkgs
+
+ def filter_unknown(self, unknown):
+ for source in self.sources:
+ source.filter_unknown(unknown)
+
+ def magic_groups_match(self):
+ for source in self.sources:
+ if source.magic_groups_match(self.metadata):
+                return True
+        return False
+
+ def build_extra_structures(self, independent):
+ pass
+
+ def get_additional_data(self):
+ sdata = []
+ for source in self.sources:
+ sdata.extend(copy.deepcopy(source.url_map))
+ return sdata
+
+ def setup_data(self, force_update=False):
+ """ do any collection-level data setup tasks """
+ for source in self.sources:
+ source.setup_data(force_update)
+
+ def complete(self, packagelist):
+ '''Build the transitive closure of all package dependencies
+
+ Arguments:
+        packagelist - set of package names
+ returns => (set(packages), set(unsatisfied requirements))
+ '''
+
+ # setup vpkg cache
+ pgrps = tuple(self.get_relevant_groups())
+ if pgrps not in self.virt_pkgs:
+ self.virt_pkgs[pgrps] = self.get_vpkgs()
+ vpkg_cache = self.virt_pkgs[pgrps]
+
+ # unclassified is set of unsatisfied requirements (may be pkg
+ # for vpkg)
+ unclassified = set(packagelist)
+ vpkgs = set()
+ both = set()
+ pkgs = set(packagelist)
+
+ packages = set()
+ examined = set()
+ unknown = set()
+
+ final_pass = False
+ really_done = False
+ # do while unclassified or vpkgs or both or pkgs
+ while unclassified or pkgs or both or final_pass:
+ if really_done:
+ break
+ if len(unclassified) + len(pkgs) + len(both) == 0:
+ # one more pass then exit
+ really_done = True
+
+ while unclassified:
+ current = unclassified.pop()
+ examined.add(current)
+ is_pkg = False
+ if self.is_package(current):
+ is_pkg = True
+
+ is_vpkg = current in vpkg_cache
+
+ if is_pkg and is_vpkg:
+ both.add(current)
+ elif is_pkg and not is_vpkg:
+ pkgs.add(current)
+ elif is_vpkg and not is_pkg:
+ vpkgs.add(current)
+ elif not is_vpkg and not is_pkg:
+ unknown.add(current)
+
+ while pkgs:
+ # direct packages; current can be added, and all deps
+ # should be resolved
+ current = pkgs.pop()
+ self.logger.debug("Packages: handling package requirement %s" %
+ current)
+ packages.add(current)
+ deps = self.get_deps(current)
+ newdeps = set(deps).difference(examined)
+ if newdeps:
+ self.logger.debug("Packages: Package %s added "
+ "requirements %s" % (current, newdeps))
+ unclassified.update(newdeps)
+
+ satisfied_vpkgs = set()
+ for current in vpkgs:
+                # virtual dependencies: a sole provider is forced in
+                # immediately; otherwise the requirement is satisfied
+                # once one of its providers is already in the config
+ if len(vpkg_cache[current]) == 1:
+ self.logger.debug("Packages: requirement %s satisfied by "
+ "%s" % (current,
+ vpkg_cache[current]))
+ unclassified.update(vpkg_cache[current].difference(examined))
+ satisfied_vpkgs.add(current)
+                else:
+                    satisfiers = [item for item in vpkg_cache[current]
+                                  if item in packages]
+                    if satisfiers:
+                        self.logger.debug("Packages: requirement %s "
+                                          "satisfied by %s" %
+                                          (current, satisfiers))
+                        satisfied_vpkgs.add(current)
+ vpkgs.difference_update(satisfied_vpkgs)
+
+ satisfied_both = set()
+ for current in both:
+                # names in 'both' have virtual providers as well as a
+                # real package of that name; prefer an explicitly
+                # specified provider, then fall back to forcing the
+                # real package on the last pass
+ satisfiers = [item for item in vpkg_cache[current]
+ if item in packages]
+ if satisfiers:
+ self.logger.debug("Packages: requirement %s satisfied by "
+ "%s" % (current, satisfiers))
+ satisfied_both.add(current)
+ elif current in packagelist or final_pass:
+ pkgs.add(current)
+ satisfied_both.add(current)
+ both.difference_update(satisfied_both)
+
+ if len(unclassified) + len(pkgs) == 0:
+ final_pass = True
+ else:
+ final_pass = False
+
+ self.filter_unknown(unknown)
+
+ return packages, unknown
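+
+    # Worked example (editorial): if package foo depends on virtual
+    # package vbaz, and bar is vbaz's only provider, complete(['foo'])
+    # resolves vbaz to bar, then chases bar's own dependencies, and
+    # returns ({'foo', 'bar'}, set()).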
+
+ def __len__(self):
+ return len(self.sources)
+
+ def __getitem__(self, item):
+ return self.sources[item]
+
+ def __setitem__(self, item, value):
+ self.sources[item] = value
+
+ def __delitem__(self, item):
+ del self.sources[item]
+
+ def append(self, item):
+ self.sources.append(item)
+
+    def count(self, item):
+        return self.sources.count(item)
+
+ def index(self, item):
+ return self.sources.index(item)
+
+ def extend(self, items):
+ self.sources.extend(items)
+
+ def insert(self, index, item):
+ self.sources.insert(index, item)
+
+    def pop(self, index=-1):
+        return self.sources.pop(index)
+
+ def remove(self, item):
+ self.sources.remove(item)
+
+ def reverse(self):
+ self.sources.reverse()
+
+ def sort(self, cmp=None, key=None, reverse=False):
+ self.sources.sort(cmp, key, reverse)
+
+def clear_cache():
+ global collections
+ collections = dict()
+
+def factory(metadata, sources, basepath):
+ global collections
+
+ if not sources.loaded:
+ # if sources.xml has not received a FAM event yet, defer;
+ # instantiate a dummy Collection object, but do not cache it
+ # in collections
+ return Collection(metadata, [], basepath)
+
+ sclasses = set()
+ relevant = list()
+
+ for source in sources:
+ if source.applies(metadata):
+ relevant.append(source)
+ sclasses.update([source.__class__])
+
+ # collections is a cache dict of Collection objects that is keyed
+ # off of the set of source urls that apply to each Collection
+ ckeydata = set()
+ for source in relevant:
+ ckeydata.update(source.urls)
+ ckey = tuple(sorted(list(ckeydata)))
+ if ckey not in collections:
+ if len(sclasses) > 1:
+            logger.warning("Multiple source types found for %s: %s" %
+                           (metadata.hostname,
+                            ",".join([s.__name__ for s in sclasses])))
+ cclass = Collection
+ elif len(sclasses) == 0:
+ logger.warning("No sources found for %s" % metadata.hostname)
+ cclass = Collection
+ else:
+ stype = sclasses.pop().__name__.replace("Source", "")
+ try:
+ module = \
+ getattr(__import__("Bcfg2.Server.Plugins.Packages.%s" %
+ stype.title()).Server.Plugins.Packages,
+ stype.title())
+ cclass = getattr(module, "%sCollection" % stype.title())
+ except ImportError:
+ logger.error("Unknown source type %s" % stype)
+ except AttributeError:
+ logger.warning("No collection class found for %s sources" %
+ stype)
+
+ logger.debug("Using %s for Collection of sources for %s" %
+ (cclass.__name__, metadata.hostname))
+
+ collection = cclass(metadata, relevant, basepath)
+ # reverse so that file order determines precedence
+ collection.reverse()
+ collections[ckey] = collection
+ return collections[ckey]
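+
+# Usage sketch (editorial): two clients whose applicable sources share
+# the same URL set get the same cached Collection back; clear_cache()
+# (presumably called when sources are reloaded) empties the cache:
+#
+#   coll = factory(metadata, sources, basepath)    # built and cached
+#   coll is factory(metadata, sources, basepath)   # -> True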
diff --git a/src/lib/Server/Plugins/Packages/Pac.py b/src/lib/Server/Plugins/Packages/Pac.py
new file mode 100644
index 000000000..8b75c1e1d
--- /dev/null
+++ b/src/lib/Server/Plugins/Packages/Pac.py
@@ -0,0 +1,122 @@
+import gzip
+import tarfile
+import logging
+from Bcfg2.Bcfg2Py3k import cPickle, file
+from Bcfg2.Server.Plugins.Packages.Collection import Collection
+from Bcfg2.Server.Plugins.Packages.Source import Source
+
+logger = logging.getLogger("Packages")
+
+class PacCollection(Collection):
+ def get_group(self, group):
+        self.logger.warning("Package groups are not supported by pacman")
+ return []
+
+class PacSource(Source):
+ basegroups = ['arch', 'parabola']
+ ptype = 'pacman'
+
+ def __init__(self, basepath, xsource, config):
+ Source.__init__(self, basepath, xsource, config)
+ self.pkgnames = set()
+
+ self.url_map = [{'rawurl': self.rawurl, 'url': self.url,
+ 'version': self.version,
+ 'components': self.components, 'arches': self.arches}]
+
+ def save_state(self):
+ cache = file(self.cachefile, 'wb')
+ cPickle.dump((self.pkgnames, self.deps, self.provides),
+ cache, 2)
+ cache.close()
+
+ def load_state(self):
+ data = file(self.cachefile)
+ self.pkgnames, self.deps, self.provides = cPickle.load(data)
+
+ def filter_unknown(self, unknown):
+ filtered = set([u for u in unknown if u.startswith('choice')])
+ unknown.difference_update(filtered)
+
+ def get_urls(self):
+ if not self.rawurl:
+ rv = []
+ for part in self.components:
+ for arch in self.arches:
+ rv.append("%s%s/os/%s/%s.db.tar.gz" %
+ (self.url, part, arch, part))
+ return rv
+ else:
+            raise Exception("PacSource: rawurl is not supported (yet)")
+ urls = property(get_urls)
+
+ def read_files(self):
+ bdeps = dict()
+ bprov = dict()
+
+ if self.recommended:
+ depfnames = ['Depends', 'Pre-Depends', 'Recommends']
+ else:
+ depfnames = ['Depends', 'Pre-Depends']
+
+ for fname in self.files:
+ if not self.rawurl:
+ barch = [x for x in fname.split('@') if x in self.arches][0]
+ else:
+ # RawURL entries assume that they only have one <Arch></Arch>
+ # element and that it is the architecture of the source.
+ barch = self.arches[0]
+
+ if barch not in bdeps:
+ bdeps[barch] = dict()
+ bprov[barch] = dict()
+ try:
+ print("try to read : " + fname)
+ tar = tarfile.open(fname, "r")
+ reader = gzip.GzipFile(fname)
+ except:
+ print("Failed to read file %s" % fname)
+ raise
+
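+            # note (editorial): only package names are harvested from
+            # the db tarball below; bdeps/bprov stay empty, so pacman
+            # sources contribute no dependency data to the resolver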
+ for tarinfo in tar:
+ if tarinfo.isdir():
+ self.pkgnames.add(tarinfo.name.rsplit("-", 2)[0])
+ print("added : " + tarinfo.name.rsplit("-", 2)[0])
+ tar.close()
+
+ self.deps['global'] = dict()
+ self.provides['global'] = dict()
+ for barch in bdeps:
+ self.deps[barch] = dict()
+ self.provides[barch] = dict()
+ for pkgname in self.pkgnames:
+ pset = set()
+ for barch in bdeps:
+ if pkgname not in bdeps[barch]:
+ bdeps[barch][pkgname] = []
+ pset.add(tuple(bdeps[barch][pkgname]))
+ if len(pset) == 1:
+ self.deps['global'][pkgname] = pset.pop()
+ else:
+ for barch in bdeps:
+ self.deps[barch][pkgname] = bdeps[barch][pkgname]
+ provided = set()
+ for bprovided in list(bprov.values()):
+ provided.update(set(bprovided))
+ for prov in provided:
+ prset = set()
+ for barch in bprov:
+ if prov not in bprov[barch]:
+ continue
+ prset.add(tuple(bprov[barch].get(prov, ())))
+ if len(prset) == 1:
+ self.provides['global'][prov] = prset.pop()
+ else:
+ for barch in bprov:
+ self.provides[barch][prov] = bprov[barch].get(prov, ())
+ self.save_state()
+
+ def is_package(self, _, pkg):
+ return (pkg in self.pkgnames and
+ pkg not in self.blacklist and
+ (len(self.whitelist) == 0 or pkg in self.whitelist))
diff --git a/src/lib/Server/Plugins/Packages/PackagesConfig.py b/src/lib/Server/Plugins/Packages/PackagesConfig.py
new file mode 100644
index 000000000..1bb250007
--- /dev/null
+++ b/src/lib/Server/Plugins/Packages/PackagesConfig.py
@@ -0,0 +1,28 @@
+import os
+import logging
+from Bcfg2.Bcfg2Py3k import ConfigParser
+from Bcfg2.Server.Plugins.Packages import *
+
+logger = logging.getLogger('Packages')
+
+class PackagesConfig(Bcfg2.Server.Plugin.FileBacked,
+ ConfigParser.SafeConfigParser):
+ def __init__(self, filename, fam, packages):
+ Bcfg2.Server.Plugin.FileBacked.__init__(self, filename)
+ ConfigParser.SafeConfigParser.__init__(self)
+
+ self.fam = fam
+ # packages.conf isn't strictly necessary, so only set a
+ # monitor if it exists. if it gets added, that will require a
+ # server restart
+ if os.path.exists(self.name):
+ self.fam.AddMonitor(self.name, self)
+
+ self.pkg_obj = packages
+
+ def Index(self):
+ """ Build local data structures """
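+        # drop existing sections first so that options removed from
+        # packages.conf on disk also vanish from the live parser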
+ for section in self.sections():
+ self.remove_section(section)
+ self.read(self.name)
+ self.pkg_obj.Reload()
diff --git a/src/lib/Server/Plugins/Packages/PackagesSources.py b/src/lib/Server/Plugins/Packages/PackagesSources.py
new file mode 100644
index 000000000..5f82deb1f
--- /dev/null
+++ b/src/lib/Server/Plugins/Packages/PackagesSources.py
@@ -0,0 +1,66 @@
+import os
+import sys
+import lxml.etree
+import logging
+import Bcfg2.Server.Plugin
+
+logger = logging.getLogger("Packages")
+
+
+class PackagesSources(Bcfg2.Server.Plugin.SingleXMLFileBacked,
+ Bcfg2.Server.Plugin.StructFile):
+ __identifier__ = None
+
+ def __init__(self, filename, cachepath, fam, packages, config):
+ try:
+ Bcfg2.Server.Plugin.SingleXMLFileBacked.__init__(self,
+ filename,
+ fam)
+ except OSError:
+ err = sys.exc_info()[1]
+ msg = "Packages: Failed to read configuration file: %s" % err
+ if not os.path.exists(self.name):
+ msg += " Have you created it?"
+ logger.error(msg)
+ raise Bcfg2.Server.Plugin.PluginInitError(msg)
+ Bcfg2.Server.Plugin.StructFile.__init__(self, filename)
+ self.cachepath = cachepath
+ self.config = config
+ if not os.path.exists(self.cachepath):
+ # create cache directory if needed
+ os.makedirs(self.cachepath)
+ self.pkg_obj = packages
+ self.loaded = False
+
+ def Index(self):
+ Bcfg2.Server.Plugin.SingleXMLFileBacked.Index(self)
+ self.entries = []
+ for xsource in self.xdata.findall('.//Source'):
+ source = self.source_from_xml(xsource)
+ if source is not None:
+ self.entries.append(source)
+
+ self.pkg_obj.Reload()
+ self.loaded = True
+
+ def source_from_xml(self, xsource):
+ """ create a *Source object from its XML representation in
+ sources.xml """
+ stype = xsource.get("type")
+ if stype is None:
+ logger.error("No type specified for source, skipping")
+ return None
+
+ try:
+ module = getattr(__import__("Bcfg2.Server.Plugins.Packages.%s" %
+ stype.title()).Server.Plugins.Packages,
+ stype.title())
+ cls = getattr(module, "%sSource" % stype.title())
+ except (ImportError, AttributeError):
+ logger.error("Unknown source type %s" % stype)
+ return None
+
+ return cls(self.cachepath, xsource, self.config)
+
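+    # Editorial example: <Source type="yum"> resolves, via
+    # stype.title(), to Bcfg2.Server.Plugins.Packages.Yum.YumSource.
+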
+ def __getitem__(self, key):
+ return self.entries[key]
diff --git a/src/lib/Server/Plugins/Packages/Source.py b/src/lib/Server/Plugins/Packages/Source.py
new file mode 100644
index 000000000..255f3ea7a
--- /dev/null
+++ b/src/lib/Server/Plugins/Packages/Source.py
@@ -0,0 +1,262 @@
+import os
+import re
+import sys
+import base64
+import logging
+from Bcfg2.Bcfg2Py3k import HTTPError, HTTPBasicAuthHandler, \
+ HTTPPasswordMgrWithDefaultRealm, install_opener, build_opener, \
+ urlopen, file, cPickle
+
+try:
+ from hashlib import md5
+except ImportError:
+    from md5 import md5
+
+logger = logging.getLogger('Packages')
+
+def fetch_url(url):
+ if '@' in url:
+        mobj = re.match(r'(\w+://)([^:]+):([^@]+)@(.*)$', url)
+ if not mobj:
+ raise ValueError
+ user = mobj.group(2)
+ passwd = mobj.group(3)
+ url = mobj.group(1) + mobj.group(4)
+ auth = HTTPBasicAuthHandler(HTTPPasswordMgrWithDefaultRealm())
+ auth.add_password(None, url, user, passwd)
+ install_opener(build_opener(auth))
+ return urlopen(url).read()
+
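+# Example (editorial): credentials embedded in a URL are stripped and
+# installed as an HTTP basic-auth handler before fetching, so
+#
+#   fetch_url('http://user:secret@example.com/repo/Packages.gz')
+#
+# requests http://example.com/repo/Packages.gz as user 'user'.
+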
+
+class Source(object):
+ reponame_re = re.compile(r'.*/(?:RPMS\.)?([^/]+)')
+ basegroups = []
+
+ def __init__(self, basepath, xsource, config):
+ self.basepath = basepath
+ self.xsource = xsource
+ self.config = config
+
+ try:
+ self.version = xsource.find('Version').text
+ except AttributeError:
+ pass
+
+ for key, tag in [('components', 'Component'), ('arches', 'Arch'),
+ ('blacklist', 'Blacklist'),
+ ('whitelist', 'Whitelist')]:
+ self.__dict__[key] = [item.text for item in xsource.findall(tag)]
+
+ self.gpgkeys = [el.text for el in xsource.findall("GPGKey")]
+
+ self.recommended = xsource.get('recommended', 'false').lower() == 'true'
+
+ self.rawurl = xsource.get('rawurl', '')
+ if self.rawurl and not self.rawurl.endswith("/"):
+ self.rawurl += "/"
+ self.url = xsource.get('url', '')
+ if self.url and not self.url.endswith("/"):
+ self.url += "/"
+ self.version = xsource.get('version', '')
+
+ # build the set of conditions to see if this source applies to
+ # a given set of metadata
+ self.conditions = []
+ self.groups = [] # provided for some limited backwards compat
+ for el in xsource.iterancestors():
+ if el.tag == "Group":
+ if el.get("negate", "false").lower() == "true":
+ self.conditions.append(lambda m, el=el:
+ el.get("name") not in m.groups)
+ else:
+ self.groups.append(el.get("name"))
+ self.conditions.append(lambda m, el=el:
+ el.get("name") in m.groups)
+ elif el.tag == "Client":
+ if el.get("negate", "false").lower() == "true":
+ self.conditions.append(lambda m, el=el:
+ el.get("name") != m.hostname)
+ else:
+ self.conditions.append(lambda m, el=el:
+ el.get("name") == m.hostname)
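+        # note (editorial): el=el binds each ancestor element when the
+        # lambda is defined; a bare closure over el would evaluate
+        # every condition against the last element iterated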
+
+ self.deps = dict()
+ self.provides = dict()
+
+ self.cachefile = os.path.join(self.basepath,
+ "cache-%s" % self.cachekey)
+ self.url_map = []
+
+ @property
+ def cachekey(self):
+ return md5(cPickle.dumps([self.version, self.components, self.url,
+ self.rawurl, self.arches])).hexdigest()
+
+ def get_relevant_groups(self, metadata):
+ return sorted(list(set([g for g in metadata.groups
+ if (g in self.basegroups or
+ g in self.groups or
+ g in self.arches)])))
+
+ def load_state(self):
+ pass
+
+ def setup_data(self, force_update=False):
+ should_read = True
+ should_download = False
+ if os.path.exists(self.cachefile):
+ try:
+ self.load_state()
+ should_read = False
+ except:
+ logger.error("Cachefile %s load failed; "
+ "falling back to file read" % self.cachefile)
+ if should_read:
+ try:
+ self.read_files()
+ except:
+ logger.error("Packages: File read failed; "
+ "falling back to file download")
+ should_download = True
+
+ if should_download or force_update:
+ try:
+ self.update()
+ self.read_files()
+ except:
+ logger.error("Failed to update source", exc_info=1)
+
+ def get_repo_name(self, url_map):
+ # try to find a sensible name for a repo
+ match = self.reponame_re.search(url_map['url'])
+ if url_map['component']:
+ return url_map['component']
+ elif match:
+ return match.group(1)
+ else:
+ # couldn't figure out the name from the URL or URL map
+            # (which probably means it's a screwy URL), so we just
+ # generate a random one
+ name = base64.b64encode(os.urandom(16))[:-2]
+ return "%s-%s" % (self.groups[0], name)
+
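+    # Editorial examples: a url_map with component 'updates' yields
+    # 'updates'; a URL ending in '.../RPMS.extras/' yields 'extras';
+    # failing both, a random '<group>-<base64>' name is generated.
+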
+ def __str__(self):
+ if self.rawurl:
+ return "%s at %s" % (self.__class__.__name__, self.rawurl)
+ elif self.url:
+ return "%s at %s" % (self.__class__.__name__, self.url)
+ else:
+ return self.__class__.__name__
+
+ def get_urls(self):
+ return []
+ urls = property(get_urls)
+
+ def get_files(self):
+ return [self.escape_url(url) for url in self.urls]
+ files = property(get_files)
+
+ def get_vpkgs(self, metadata):
+ agroups = ['global'] + [a for a in self.arches
+ if a in metadata.groups]
+ vdict = dict()
+ for agrp in agroups:
+ for key, value in list(self.provides[agrp].items()):
+ if key not in vdict:
+ vdict[key] = set(value)
+ else:
+ vdict[key].update(value)
+ return vdict
+
+ def is_virtual_package(self, metadata, package):
+ """ called to determine if a package is a virtual package.
+ this is only invoked if the package is not listed in the dict
+ returned by get_vpkgs """
+ return False
+
+ def escape_url(self, url):
+ return os.path.join(self.basepath, url.replace('/', '@'))
+
+ def file_init(self):
+ pass
+
+ def read_files(self):
+ pass
+
+ def filter_unknown(self, unknown):
+ pass
+
+ def update(self):
+ for url in self.urls:
+ logger.info("Packages: Updating %s" % url)
+ fname = self.escape_url(url)
+ try:
+ data = fetch_url(url)
+ except ValueError:
+ logger.error("Packages: Bad url string %s" % url)
+ continue
+ except HTTPError:
+ err = sys.exc_info()[1]
+ logger.error("Packages: Failed to fetch url %s. code=%s" %
+ (url, err.code))
+ continue
+ file(fname, 'w').write(data)
+
+ def applies(self, metadata):
+ # check base groups
+ if not self.magic_groups_match(metadata):
+ return False
+
+ # check Group/Client tags from sources.xml
+ for condition in self.conditions:
+ if not condition(metadata):
+ return False
+
+ return True
+
+ def get_arches(self, metadata):
+ return ['global'] + [a for a in self.arches if a in metadata.groups]
+
+ def get_deps(self, metadata, pkgname):
+ for arch in self.get_arches(metadata):
+ if pkgname in self.deps[arch]:
+ return self.deps[arch][pkgname]
+ return []
+
+ def get_provides(self, metadata, required):
+ for arch in self.get_arches(metadata):
+ if required in self.provides[arch]:
+ return self.provides[arch][required]
+ return []
+
+ def is_package(self, metadata, _):
+ return False
+
+ def get_package(self, metadata, package):
+ return package
+
+ def get_group(self, metadata, package):
+ return []
+
+ def magic_groups_match(self, metadata):
+        """ check whether this source applies to the given host
+        metadata by checking 'magic' (base) groups only; an arch match
+        is always required, and by itself suffices when magic groups
+        are disabled in the config """
+ # we always check that arch matches
+ found_arch = False
+ for arch in self.arches:
+ if arch in metadata.groups:
+ found_arch = True
+ break
+ if not found_arch:
+ return False
+
+ if (self.config.has_section("global") and
+ self.config.has_option("global", "magic_groups") and
+            not self.config.getboolean("global", "magic_groups")):
+ return True
+ else:
+ for group in self.basegroups:
+ if group in metadata.groups:
+ return True
+ return False
diff --git a/src/lib/Server/Plugins/Packages/Yum.py b/src/lib/Server/Plugins/Packages/Yum.py
new file mode 100644
index 000000000..fa0dc527e
--- /dev/null
+++ b/src/lib/Server/Plugins/Packages/Yum.py
@@ -0,0 +1,950 @@
+import os
+import sys
+import time
+import copy
+import glob
+import socket
+import random
+import logging
+import threading
+import lxml.etree
+from UserDict import DictMixin
+import Bcfg2.Server.Plugin
+from Bcfg2.Bcfg2Py3k import StringIO, cPickle, HTTPError, ConfigParser, file
+from Bcfg2.Server.Plugins.Packages.Collection import Collection
+from Bcfg2.Server.Plugins.Packages.Source import Source, fetch_url
+
+logger = logging.getLogger("Packages")
+
+try:
+ from pulp.client.consumer.config import ConsumerConfig
+ from pulp.client.api.repository import RepositoryAPI
+ from pulp.client.api.consumer import ConsumerAPI
+ from pulp.client.api import server
+ has_pulp = True
+except ImportError:
+ has_pulp = False
+
+try:
+ import yum
+ has_yum = True
+except ImportError:
+ has_yum = False
+ logger.info("No yum libraries found; forcing use of internal dependency "
+ "resolver")
+
+XP = '{http://linux.duke.edu/metadata/common}'
+RP = '{http://linux.duke.edu/metadata/rpm}'
+RPO = '{http://linux.duke.edu/metadata/repo}'
+FL = '{http://linux.duke.edu/metadata/filelists}'
+
+PULPSERVER = None
+PULPCONFIG = None
+
+def _setup_pulp(config):
+ global PULPSERVER, PULPCONFIG
+ if not has_pulp:
+ logger.error("Cannot create Pulp collection: Pulp libraries not "
+ "found")
+ raise Bcfg2.Server.Plugin.PluginInitError
+
+ if PULPSERVER is None:
+ try:
+ username = config.get("pulp", "username")
+ password = config.get("pulp", "password")
+ except ConfigParser.NoSectionError:
+ logger.error("No [pulp] section found in Packages/packages.conf")
+ raise Bcfg2.Server.Plugin.PluginInitError
+ except ConfigParser.NoOptionError:
+ err = sys.exc_info()[1]
+ logger.error("Required option not found in "
+ "Packages/packages.conf: %s" % err)
+ raise Bcfg2.Server.Plugin.PluginInitError
+
+ PULPCONFIG = ConsumerConfig()
+ serveropts = PULPCONFIG.server
+
+ PULPSERVER = server.PulpServer(serveropts['host'],
+ int(serveropts['port']),
+ serveropts['scheme'],
+ serveropts['path'])
+ PULPSERVER.set_basic_auth_credentials(username, password)
+ server.set_active_server(PULPSERVER)
+ return PULPSERVER
+
+
+class CacheItem(object):
+ def __init__(self, value, expiration=None):
+        self.value = value
+        self.expiration = None
+        if expiration:
+            self.expiration = time.time() + expiration
+
+ def expired(self):
+ if self.expiration:
+ return time.time() > self.expiration
+ else:
+ return False
+
+
+class Cache(DictMixin):
+ def __init__(self, expiration=None, tidy=None):
+ """ params:
+ - expiration: How many seconds a cache entry stays alive for.
+ Specify None for no expiration.
+ - tidy: How frequently to tidy the cache (remove all expired
+ entries). Without this, entries are only expired as they
+          are accessed. The cache will be tidied once every <tidy>
+          accesses to cache data; a sensible value might be, e.g.,
+ 10000. Specify 0 to fully tidy the cache every access; this
+ makes the cache much slower, but also smaller in memory.
+ Specify None to never tidy the cache; this makes the cache
+ faster, but potentially much larger in memory, especially if
+ cache items are accessed infrequently."""
+ self.cache = dict()
+ self.expiration = expiration
+ self.tidy = tidy
+ self.access_count = 0
+
+ def __getitem__(self, key):
+ self._expire(key)
+ if key in self.cache:
+ return self.cache[key].value
+ else:
+ raise KeyError(key)
+
+ def __setitem__(self, key, value):
+ self.cache[key] = CacheItem(value, self.expiration)
+
+ def __delitem__(self, key):
+ del self.cache[key]
+
+ def __contains__(self, key):
+        self._expire(key)
+ return key in self.cache
+
+ def keys(self):
+ return self.cache.keys()
+
+    def __iter__(self):
+        for k in self.cache.keys():
+            yield k
+
+ def iteritems(self):
+ for k in self:
+ try:
+ yield (k, self[k])
+ except KeyError:
+ pass
+
+ def _expire(self, *args):
+ if args:
+ self.access_count += 1
+            if self.tidy is not None and self.access_count >= self.tidy:
+                self.access_count = 0
+                candidates = self.cache.items()
+            else:
+                candidates = [(k, self.cache[k]) for k in args
+                              if k in self.cache]
+ else:
+ candidates = self.cache.items()
+
+ expire = []
+ for key, item in candidates:
+ if item.expired():
+ expire.append(key)
+ for key in expire:
+ del self.cache[key]
+
+ def clear(self):
+ self.cache = dict()
+
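+# Usage sketch (editorial): entries expire lazily when touched, and the
+# whole cache is swept once every <tidy> accesses:
+#
+#   cache = Cache(expiration=300, tidy=1000)
+#   cache['foo'] = deps      # stays valid for 300 seconds
+#   'foo' in cache           # re-checks expiration before answering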
+
+class YumCollection(Collection):
+ def __init__(self, metadata, sources, basepath):
+ Collection.__init__(self, metadata, sources, basepath)
+ self.keypath = os.path.join(self.basepath, "keys")
+
+ if len(sources):
+ config = sources[0].config
+ self.use_yum = has_yum
+ try:
+ self.use_yum &= config.getboolean("yum", "use_yum_libraries")
+ except (ConfigParser.NoOptionError, ConfigParser.NoSectionError):
+ self.use_yum = False
+ else:
+ self.use_yum = False
+
+ if self.use_yum:
+ self._yb = None
+ self.cachefile = os.path.join(self.cachepath,
+ "cache-%s" % self.cachekey)
+ if not os.path.exists(self.cachefile):
+ os.mkdir(self.cachefile)
+
+ self.configdir = os.path.join(self.basepath, "yum")
+ if not os.path.exists(self.configdir):
+ os.mkdir(self.configdir)
+ self.cfgfile = os.path.join(self.configdir,
+ "%s-yum.conf" % self.cachekey)
+ if self.config.has_option("yum", "metadata_expire"):
+ cache_expire = self.config.getint("yum", "metadata_expire")
+ else:
+ cache_expire = 21600
+
+ self.pkgs_cache = Cache(expiration=cache_expire)
+ self.deps_cache = Cache(expiration=cache_expire)
+ self.vpkgs_cache = Cache(expiration=cache_expire)
+ self.group_cache = Cache(expiration=cache_expire)
+ self.pkgset_cache = Cache(expiration=cache_expire)
+
+ if has_pulp:
+ _setup_pulp(self.config)
+
+ @property
+ def yumbase(self):
+ """ if we try to access a Yum SQLitePackageSack object in a
+ different thread from the one it was created in, we get a
+ nasty error. but I can't find a way to detect when a new
+ thread is started (which happens for every new client
+ connection, I think), so this property creates a new YumBase
+ object if the old YumBase object was created in a different
+ thread than the current one. (I definitely don't want to
+ create a new YumBase object every time it's used, because that
+ involves writing a temp file, at least for now.) """
+ if not self.use_yum:
+ self._yb = None
+ self._yb_thread = None
+ elif (self._yb is None or
+ self._yb_thread != threading.current_thread().ident):
+ self._yb = yum.YumBase()
+ self._yb_thread = threading.current_thread().ident
+
+ if not os.path.exists(self.cfgfile):
+ # todo: detect yum version. Supposedly very new
+ # versions of yum have better support for
+ # reconfiguring on the fly using the RepoStorage API
+ yumconf = self.get_config(raw=True)
+ yumconf.add_section("main")
+
+ mainopts = dict(cachedir=self.cachefile,
+ keepcache="0",
+ sslverify="0",
+ reposdir="/dev/null")
+ try:
+ for opt in self.config.options("yum"):
+ if opt != "use_yum_libraries":
+ mainopts[opt] = self.config.get("yum", opt)
+ except ConfigParser.NoSectionError:
+ pass
+
+ for opt, val in list(mainopts.items()):
+ yumconf.set("main", opt, val)
+
+ yumconf.write(open(self.cfgfile, 'w'))
+
+            # it'd be nice if we could change this to be more verbose
+            # if -v was given, but Collection objects don't get the
+            # setup object. It'd also be nice if we could tell yum to
+            # log to syslog, but so would a unicorn.
+ self._yb.preconf.debuglevel = 1
+ self._yb.preconf.fn = self.cfgfile
+ return self._yb
+
+ def get_config(self, raw=False):
+ config = ConfigParser.SafeConfigParser()
+ for source in self.sources:
+ # get_urls() loads url_map as a side-effect
+ source.get_urls()
+ for url_map in source.url_map:
+ if url_map['arch'] in self.metadata.groups:
+ reponame = source.get_repo_name(url_map)
+ config.add_section(reponame)
+ config.set(reponame, "name", reponame)
+ config.set(reponame, "baseurl", url_map['url'])
+ config.set(reponame, "enabled", "1")
+ if len(source.gpgkeys):
+ config.set(reponame, "gpgcheck", "1")
+ config.set(reponame, "gpgkey",
+ " ".join(source.gpgkeys))
+ else:
+ config.set(reponame, "gpgcheck", "0")
+
+ if len(source.blacklist):
+ config.set(reponame, "exclude",
+ " ".join(source.blacklist))
+ if len(source.whitelist):
+ config.set(reponame, "includepkgs",
+ " ".join(source.whitelist))
+
+ if raw:
+ return config
+ else:
+ # configparser only writes to file, so we have to use a
+ # StringIO object to get the data out as a string
+ buf = StringIO()
+ config.write(buf)
+ return "# This config was generated automatically by the Bcfg2 " \
+ "Packages plugin\n\n" + buf.getvalue()
+
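+    # Example output (editorial; repo name and URLs hypothetical):
+    #
+    #   [centos-updates]
+    #   name=centos-updates
+    #   baseurl=http://mirror.example.com/updates/
+    #   enabled=1
+    #   gpgcheck=1
+    #   gpgkey=http://mirror.example.com/RPM-GPG-KEY
+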
+ def build_extra_structures(self, independent):
+ """ build list of gpg keys to be added to the specification by
+ validate_structures() """
+ needkeys = set()
+ for source in self.sources:
+ for key in source.gpgkeys:
+ needkeys.add(key)
+
+ if len(needkeys):
+ keypkg = lxml.etree.Element('BoundPackage', name="gpg-pubkey",
+ type=self.ptype, origin='Packages')
+
+ for key in needkeys:
+ # figure out the path of the key on the client
+ try:
+ keydir = self.config.get("global", "gpg_keypath")
+ except (ConfigParser.NoOptionError,
+ ConfigParser.NoSectionError):
+ keydir = "/etc/pki/rpm-gpg"
+ remotekey = os.path.join(keydir, os.path.basename(key))
+ localkey = os.path.join(self.keypath, os.path.basename(key))
+ kdata = open(localkey).read()
+
+ # copy the key to the client
+ keypath = lxml.etree.Element("BoundPath", name=remotekey,
+ encoding='ascii',
+ owner='root', group='root',
+ type='file', perms='0644',
+ important='true')
+ keypath.text = kdata
+
+ # hook to add version/release info if possible
+ self._add_gpg_instances(keypkg, kdata, localkey, remotekey)
+ independent.append(keypath)
+ independent.append(keypkg)
+
+ # see if there are any pulp sources to handle
+ has_pulp_sources = False
+ for source in self.sources:
+ if source.pulp_id:
+ has_pulp_sources = True
+ break
+
+ if has_pulp_sources:
+ consumerapi = ConsumerAPI()
+ consumer = self._get_pulp_consumer(consumerapi=consumerapi)
+ if consumer is None:
+ consumer = consumerapi.create(self.metadata.hostname,
+ self.metadata.hostname)
+ lxml.etree.SubElement(independent, "BoundAction",
+ name="pulp-update", timing="pre",
+ when="always", status="check",
+ command="pulp-consumer consumer update")
+
+ for source in self.sources:
+ # each pulp source can only have one arch, so we don't
+ # have to check the arch in url_map
+ if (source.pulp_id and
+ source.pulp_id not in consumer['repoids']):
+ consumerapi.bind(self.metadata.hostname, source.pulp_id)
+
+ crt = lxml.etree.SubElement(independent, "BoundPath",
+ name="/etc/pki/consumer/cert.pem",
+ type="file", owner="root",
+ group="root", perms="0644")
+ crt.text = consumerapi.certificate(self.metadata.hostname)
+
+ def _get_pulp_consumer(self, consumerapi=None):
+ if consumerapi is None:
+ consumerapi = ConsumerAPI()
+ consumer = None
+ try:
+ consumer = consumerapi.consumer(self.metadata.hostname)
+ except server.ServerRequestError:
+ # consumer does not exist
+ pass
+ except socket.error:
+ err = sys.exc_info()[1]
+ logger.error("Could not contact Pulp server: %s" % err)
+ except:
+ err = sys.exc_info()[1]
+ logger.error("Unknown error querying Pulp server: %s" % err)
+ return consumer
+
+ def _add_gpg_instances(self, keyentry, keydata, localkey, remotekey):
+ """ add gpg keys to the specification to ensure they get
+ installed """
+ if self.use_yum:
+ try:
+ kinfo = yum.misc.getgpgkeyinfo(keydata)
+ version = yum.misc.keyIdToRPMVer(kinfo['keyid'])
+ release = yum.misc.keyIdToRPMVer(kinfo['timestamp'])
+
+ lxml.etree.SubElement(keyentry, 'Instance',
+ version=version,
+ release=release,
+ simplefile=remotekey)
+ except ValueError:
+ err = sys.exc_info()[1]
+ self.logger.error("Could not read GPG key %s: %s" %
+ (localkey, err))
+
+ def is_package(self, package):
+ if not self.use_yum:
+ return Collection.is_package(self, package)
+
+ if isinstance(package, tuple):
+ if package[1] is None and package[2] == (None, None, None):
+ package = package[0]
+ else:
+ return None
+
+ try:
+ return self.pkgs_cache[package]
+ except KeyError:
+ pass
+
+ self.pkgs_cache[package] = bool(self.get_package_object(package,
+ silent=True))
+ return self.pkgs_cache[package]
+
+ def is_virtual_package(self, package):
+ if self.use_yum:
+ try:
+ return bool(self.vpkgs_cache[package])
+ except KeyError:
+ return bool(self.get_provides(package, silent=True))
+ else:
+ return Collection.is_virtual_package(self, package)
+
+ def get_package_object(self, package, silent=False):
+ """ package objects cannot be cached since they are sqlite
+ objects, so they can't be reused between threads. """
+ try:
+ matches = self.yumbase.pkgSack.returnNewestByName(name=package)
+ except yum.Errors.PackageSackError:
+ if not silent:
+ self.logger.warning("Packages: Package '%s' not found" %
+ self.get_package_name(package))
+ matches = []
+ except yum.Errors.RepoError:
+ err = sys.exc_info()[1]
+ self.logger.error("Packages: Temporary failure loading metadata "
+ "for '%s': %s" %
+ (self.get_package_name(package), err))
+ matches = []
+
+ pkgs = self._filter_arch(matches)
+ if pkgs:
+ return pkgs[0]
+ else:
+ return None
+
+ def get_deps(self, package):
+ if not self.use_yum:
+ return Collection.get_deps(self, package)
+
+ try:
+ return self.deps_cache[package]
+ except KeyError:
+ pass
+
+ pkg = self.get_package_object(package)
+ deps = []
+ if pkg:
+ deps = set(pkg.requires)
+ # filter out things the package itself provides
+ deps.difference_update([dep for dep in deps
+ if pkg.checkPrco('provides', dep)])
+ else:
+ self.logger.error("Packages: No package available: %s" %
+ self.get_package_name(package))
+ self.deps_cache[package] = deps
+ return self.deps_cache[package]
+
+ def get_provides(self, required, all=False, silent=False):
+ if not self.use_yum:
+ return Collection.get_provides(self, required)
+
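+ # yum represents requirements as "prco tuples" of the form
+ # (name, flags, (epoch, version, release)); e.g., a versioned
+ # requirement might look like ("foo", "GE", (None, "1.0", None)).
+ # normalize a bare package name into an unversioned tuple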
+ if not isinstance(required, tuple):
+ required = (required, None, (None, None, None))
+
+ try:
+ return self.vpkgs_cache[required]
+ except KeyError:
+ pass
+
+ try:
+ prov = \
+ self.yumbase.whatProvides(*required).returnNewestByNameArch()
+ except yum.Errors.NoMoreMirrorsRepoError:
+ err = sys.exc_info()[1]
+ self.logger.error("Packages: Temporary failure loading metadata "
+ "for '%s': %s" %
+ (self.get_package_name(required),
+ err))
+ # cache an empty list rather than None so that later
+ # lookups of this requirement still return an iterable
+ self.vpkgs_cache[required] = []
+ return self.vpkgs_cache[required]
+
+ if prov and not all:
+ prov = self._filter_provides(required, prov)
+ elif not prov and not silent:
+ self.logger.error("Packages: No package provides %s" %
+ self.get_package_name(required))
+ self.vpkgs_cache[required] = prov
+ return self.vpkgs_cache[required]
+
+ def get_group(self, group):
+ if not self.use_yum:
+ self.logger.warning("Package groups are not supported by Bcfg2's "
+ "internal Yum dependency generator")
+ return []
+
+ if group.startswith("@"):
+ group = group[1:]
+
+ try:
+ return self.group_cache[group]
+ except KeyError:
+ pass
+
+ try:
+ if self.yumbase.comps.has_group(group):
+ pkgs = self.yumbase.comps.return_group(group).packages
+ else:
+ self.logger.warning("Packages: '%s' is not a valid group" %
+ group)
+ pkgs = []
+ except yum.Errors.GroupsError:
+ err = sys.exc_info()[1]
+ self.logger.warning("Packages: %s" % err)
+ pkgs = []
+
+ self.group_cache[group] = pkgs
+ return self.group_cache[group]
+
+ def _filter_provides(self, package, providers):
+ providers = self._filter_arch(providers)
+ if len(providers) > 1:
+ # go through each provider and make sure it's the newest
+ # package of its name available. If we have multiple
+ # providers, avoid installing old packages.
+ #
+ # For instance: on Fedora 14,
+ # perl-Sub-WrapPackages-2.0-2.fc14 erroneously provided
+ # perl(lib), which should not have been provided;
+ # perl(lib) is provided by the "perl" package. The bogus
+ # provide was removed in perl-Sub-WrapPackages-2.0-4.fc14,
+ # but if we just queried to resolve the "perl(lib)"
+ # dependency, we'd get both packages. By performing this
+ # check, we learn that there's a newer
+ # perl-Sub-WrapPackages available, so it can't be the best
+ # provider of perl(lib).
+ rv = []
+ for pkg in providers:
+ if self.get_package_object(pkg.name) == pkg:
+ rv.append(pkg)
+ else:
+ rv = providers
+ return [p.name for p in rv]
+
+ def _filter_arch(self, packages):
+ groups = set(list(self.get_relevant_groups()) + ["noarch"])
+ matching = [pkg for pkg in packages if pkg.arch in groups]
+ if matching:
+ return matching
+ else:
+ # no packages match architecture; we'll assume that the
+ # user knows what s/he is doing and this is a multiarch
+ # box.
+ return packages
+
+ def get_package_name(self, package):
+ """ get the name of a package or virtual package from the
+ internal representation used by this Collection class """
+ if self.use_yum and isinstance(package, tuple):
+ return yum.misc.prco_tuple_to_string(package)
+ else:
+ return str(package)
+
+ def complete(self, packagelist):
+ if not self.use_yum:
+ return Collection.complete(self, packagelist)
+
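+ # the sorted, pickled package list makes a stable, hashable
+ # key, so entire resolver runs can be memoized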
+ cachekey = cPickle.dumps(sorted(packagelist))
+ try:
+ return self.pkgset_cache[cachekey]
+ except KeyError:
+ pass
+
+ packages = set()
+ pkgs = set(packagelist)
+ requires = set()
+ satisfied = set()
+ unknown = set()
+ final_pass = False
+
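+ # worklist algorithm: 'pkgs' holds package names still to be
+ # examined; 'requires' holds unresolved requirements. each pass
+ # drains 'pkgs' (queueing new requirements), then tries to
+ # satisfy 'requires' with providing packages. when a full pass
+ # makes no progress, final_pass forces a decision: ambiguous
+ # providers are all included, and anything left is unknown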
+ while requires or pkgs:
+ # infinite loop protection
+ start_reqs = len(requires)
+
+ while pkgs:
+ package = pkgs.pop()
+ if package in packages:
+ continue
+
+ if not self.is_package(package):
+ # try this package out as a requirement
+ requires.add((package, None, (None, None, None)))
+ continue
+
+ packages.add(package)
+ reqs = set(self.get_deps(package)).difference(satisfied)
+ if reqs:
+ requires.update(reqs)
+
+ reqs_satisfied = set()
+ for req in requires:
+ if req in satisfied:
+ reqs_satisfied.add(req)
+ continue
+
+ if req[1] is None and self.is_package(req[0]):
+ if req[0] not in packages:
+ pkgs.add(req[0])
+ reqs_satisfied.add(req)
+ continue
+
+ self.logger.debug("Packages: Handling requirement '%s'" %
+ self.get_package_name(req))
+ providers = list(set(self.get_provides(req)))
+ if len(providers) > 1:
+ # hopefully one of the providing packages is already
+ # included
+ best = [p for p in providers if p in packages]
+ if best:
+ providers = best
+ else:
+ # pick a provider whose name matches the requirement
+ best = [p for p in providers if p == req[0]]
+ if len(best) == 1:
+ providers = best
+ elif not final_pass:
+ # found no "best" package, so defer
+ providers = None
+ # else: found no "best" package, but it's the
+ # final pass, so include them all
+
+ if providers:
+ self.logger.debug("Packages: Requirement '%s' satisfied "
+ "by %s" %
+ (self.get_package_name(req),
+ ",".join([self.get_package_name(p)
+ for p in providers])))
+ newpkgs = set(providers).difference(packages)
+ if newpkgs:
+ for package in newpkgs:
+ if self.is_package(package):
+ pkgs.add(package)
+ else:
+ unknown.add(package)
+ reqs_satisfied.add(req)
+ elif providers is not None:
+ # nothing provided this requirement at all
+ unknown.add(req)
+ reqs_satisfied.add(req)
+ # else, defer
+ requires.difference_update(reqs_satisfied)
+
+ # infinite loop protection
+ if len(requires) == start_reqs and len(pkgs) == 0:
+ final_pass = True
+
+ if final_pass and requires:
+ unknown.update(requires)
+ requires = set()
+
+ self.filter_unknown(unknown)
+ unknown = [self.get_package_name(p) for p in unknown]
+
+ self.pkgset_cache[cachekey] = (packages, unknown)
+
+ return packages, unknown
+
+ def setup_data(self, force_update=False):
+ if not self.use_yum:
+ return Collection.setup_data(self, force_update)
+
+ for cfile in glob.glob(os.path.join(self.configdir, "*-yum.conf")):
+ os.unlink(cfile)
+ self._yb = None
+
+ self.pkgs_cache.clear()
+ self.deps_cache.clear()
+ self.vpkgs_cache.clear()
+ self.group_cache.clear()
+ self.pkgset_cache.clear()
+
+ if force_update:
+ for mdtype in ["Headers", "Packages", "Sqlite", "Metadata",
+ "ExpireCache"]:
+ # for reasons that are entirely obvious, all of the
+ # yum API clean* methods return a tuple of 0 (zero,
+ # always zero) and a list containing a single message
+ # about how many files were deleted. so useful.
+ # thanks, yum.
+ self.logger.info("Packages: %s" %
+ getattr(self.yumbase,
+ "clean%s" % mdtype)()[1][0])
+
+
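+# YumSource parses repository metadata (repomd.xml, primary.xml and
+# filelists.xml) itself; when use_yum_libraries is enabled, most of
+# this work is short-circuited and left to yum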
+class YumSource(Source):
+ basegroups = ['yum', 'redhat', 'centos', 'fedora']
+ ptype = 'yum'
+
+ def __init__(self, basepath, xsource, config):
+ Source.__init__(self, basepath, xsource, config)
+ self.pulp_id = None
+ if has_pulp and xsource.get("pulp_id"):
+ self.pulp_id = xsource.get("pulp_id")
+
+ _setup_pulp(self.config)
+ repoapi = RepositoryAPI()
+ try:
+ self.repo = repoapi.repository(self.pulp_id)
+ self.gpgkeys = ["%s/%s" % (PULPCONFIG.cds['keyurl'], key)
+ for key in repoapi.listkeys(self.pulp_id)]
+ except server.ServerRequestError:
+ err = sys.exc_info()[1]
+ if err[0] == 401:
+ msg = "Error authenticating to Pulp: %s" % err[1]
+ elif err[0] == 404:
+ msg = "Pulp repo id %s not found: %s" % (self.pulp_id,
+ err[1])
+ else:
+ msg = "Error %d fetching pulp repo %s: %s" % (err[0],
+ self.pulp_id,
+ err[1])
+ logger.error(msg)
+ raise Bcfg2.Server.Plugin.PluginInitError
+ except socket.error:
+ err = sys.exc_info()[1]
+ logger.error("Could not contact Pulp server: %s" % err)
+ raise Bcfg2.Server.Plugin.PluginInitError
+ except:
+ err = sys.exc_info()[1]
+ logger.error("Unknown error querying Pulp server: %s" % err)
+ raise Bcfg2.Server.Plugin.PluginInitError
+ self.rawurl = "%s/%s" % (PULPCONFIG.cds['baseurl'],
+ self.repo['relative_path'])
+ self.arches = [self.repo['arch']]
+
+ if not self.rawurl:
+ self.baseurl = self.url + "%(version)s/%(component)s/%(arch)s/"
+ else:
+ self.baseurl = self.rawurl
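+ # per-arch package and dependency data; the special 'global'
+ # key holds entries common to every architecture (computed in
+ # read_files())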
+ self.packages = dict()
+ self.deps = dict([('global', dict())])
+ self.provides = dict([('global', dict())])
+ self.filemap = dict([(x, dict())
+ for x in ['global'] + self.arches])
+ self.needed_paths = set()
+ self.file_to_arch = dict()
+
+ self.use_yum = has_yum
+ try:
+ self.use_yum &= config.getboolean("yum", "use_yum_libraries")
+ except (ConfigParser.NoOptionError, ConfigParser.NoSectionError):
+ self.use_yum = False
+
+ def save_state(self):
+ if not self.use_yum:
+ cache = file(self.cachefile, 'wb')
+ cPickle.dump((self.packages, self.deps, self.provides,
+ self.filemap, self.url_map), cache, 2)
+ cache.close()
+
+ def load_state(self):
+ if not self.use_yum:
+ data = file(self.cachefile)
+ (self.packages, self.deps, self.provides,
+ self.filemap, self.url_map) = cPickle.load(data)
+
+ def get_urls(self):
+ surls = list()
+ self.url_map = []
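+ # expand the baseurl template once per architecture and
+ # component; e.g. (hypothetical) url "http://example.com/fedora/"
+ # with version "14", component "os", and arch "x86_64" yields
+ # "http://example.com/fedora/14/os/x86_64/"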
+ for arch in self.arches:
+ if self.url:
+ usettings = [{'version':self.version, 'component':comp,
+ 'arch':arch}
+ for comp in self.components]
+ else: # rawurl given
+ usettings = [{'version':self.version, 'component':None,
+ 'arch':arch}]
+
+ for setting in usettings:
+ setting['url'] = self.baseurl % setting
+ self.url_map.append(copy.deepcopy(setting))
+ surls.append((arch, [setting['url'] for setting in usettings]))
+ urls = []
+ for (sarch, surl_list) in surls:
+ for surl in surl_list:
+ urls.extend(self._get_urls_from_repodata(surl, sarch))
+ return urls
+ urls = property(get_urls)
+
+ def _get_urls_from_repodata(self, url, arch):
+ if self.use_yum:
+ return [url]
+
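+ # repomd.xml is the repository's index; it lists the locations
+ # of the primary and filelists metadata files we actually want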
+ rmdurl = '%srepodata/repomd.xml' % url
+ try:
+ repomd = fetch_url(rmdurl)
+ xdata = lxml.etree.XML(repomd)
+ except ValueError:
+ logger.error("Packages: Bad url string %s" % rmdurl)
+ return []
+ except HTTPError:
+ err = sys.exc_info()[1]
+ logger.error("Packages: Failed to fetch url %s. code=%s" %
+ (rmdurl, err.code))
+ return []
+ except lxml.etree.XMLSyntaxError:
+ err = sys.exc_info()[1]
+ logger.error("Packages: Failed to process metadata at %s: %s" %
+ (rmdurl, err))
+ return []
+
+ urls = []
+ for elt in xdata.findall(RPO + 'data'):
+ if elt.get('type') in ['filelists', 'primary']:
+ floc = elt.find(RPO + 'location')
+ fullurl = url + floc.get('href')
+ urls.append(fullurl)
+ self.file_to_arch[self.escape_url(fullurl)] = arch
+ return urls
+
+ def read_files(self):
+ # we have to read primary.xml first and filelists.xml
+ # afterwards, because parse_primary() populates
+ # self.needed_paths, which parse_filelist() consults
+ primaries = list()
+ filelists = list()
+ for fname in self.files:
+ if fname.endswith('primary.xml.gz'):
+ primaries.append(fname)
+ elif fname.endswith('filelists.xml.gz'):
+ filelists.append(fname)
+
+ for fname in primaries:
+ farch = self.file_to_arch[fname]
+ fdata = lxml.etree.parse(fname).getroot()
+ self.parse_primary(fdata, farch)
+ for fname in filelists:
+ farch = self.file_to_arch[fname]
+ fdata = lxml.etree.parse(fname).getroot()
+ self.parse_filelist(fdata, farch)
+
+ # merge data: 'global' gets the intersection of all per-arch
+ # package sets; each per-arch set then keeps only the packages
+ # unique to that arch
+ sdata = list(self.packages.values())
+ try:
+ self.packages['global'] = copy.deepcopy(sdata.pop())
+ except IndexError:
+ logger.error("No packages in repo")
+ while sdata:
+ self.packages['global'] = \
+ self.packages['global'].intersection(sdata.pop())
+
+ for key in self.packages:
+ if key == 'global':
+ continue
+ self.packages[key] = \
+ self.packages[key].difference(self.packages['global'])
+ self.save_state()
+
+ def parse_filelist(self, data, arch):
+ if arch not in self.filemap:
+ self.filemap[arch] = dict()
+ for pkg in data.findall(FL + 'package'):
+ for fentry in pkg.findall(FL + 'file'):
+ if fentry.text in self.needed_paths:
+ if fentry.text in self.filemap[arch]:
+ self.filemap[arch][fentry.text].add(pkg.get('name'))
+ else:
+ self.filemap[arch][fentry.text] = \
+ set([pkg.get('name')])
+
+ def parse_primary(self, data, arch):
+ if arch not in self.packages:
+ self.packages[arch] = set()
+ if arch not in self.deps:
+ self.deps[arch] = dict()
+ if arch not in self.provides:
+ self.provides[arch] = dict()
+ for pkg in data.getchildren():
+ if not pkg.tag.endswith('package'):
+ continue
+ pkgname = pkg.find(XP + 'name').text
+ self.packages[arch].add(pkgname)
+
+ pdata = pkg.find(XP + 'format')
+ pre = pdata.find(RP + 'requires')
+ self.deps[arch][pkgname] = set()
+ # packages with no dependencies have no <requires> element
+ if pre is not None:
+ for entry in pre.getchildren():
+ self.deps[arch][pkgname].add(entry.get('name'))
+ if entry.get('name').startswith('/'):
+ self.needed_paths.add(entry.get('name'))
+ pro = pdata.find(RP + 'provides')
+ if pro is not None:
+ for entry in pro.getchildren():
+ prov = entry.get('name')
+ if prov not in self.provides[arch]:
+ self.provides[arch][prov] = list()
+ self.provides[arch][prov].append(pkgname)
+
+ def is_package(self, metadata, item):
+ arch = [a for a in self.arches if a in metadata.groups]
+ if not arch:
+ return False
+ return ((item in self.packages['global'] or
+ item in self.packages[arch[0]]) and
+ item not in self.blacklist and
+ (len(self.whitelist) == 0 or item in self.whitelist))
+
+ def get_vpkgs(self, metadata):
+ if self.use_yum:
+ return dict()
+
+ rv = Source.get_vpkgs(self, metadata)
+ for arch, fmdata in list(self.filemap.items()):
+ if arch not in metadata.groups and arch != 'global':
+ continue
+ for filename, pkgs in list(fmdata.items()):
+ rv[filename] = pkgs
+ return rv
+
+ def filter_unknown(self, unknown):
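+ # rpmlib(...) pseudo-dependencies are capabilities of rpm
+ # itself, not of any package, so they are never real unknowns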
+ if self.use_yum:
+ filtered = set()
+ for unk in unknown:
+ try:
+ if unk.startswith('rpmlib'):
+ filtered.add(unk)
+ except AttributeError:
+ try:
+ if unk[0].startswith('rpmlib'):
+ filtered.add(unk)
+ except (IndexError, AttributeError):
+ pass
+ else:
+ filtered = set([u for u in unknown if u.startswith('rpmlib')])
+ unknown.difference_update(filtered)
+
+ def setup_data(self, force_update=False):
+ if not self.use_yum:
+ Source.setup_data(self, force_update=force_update)
+
+ def get_repo_name(self, url_map):
+ if self.pulp_id:
+ return self.pulp_id
+ else:
+ return Source.get_repo_name(self, url_map)
diff --git a/src/lib/Server/Plugins/Packages/__init__.py b/src/lib/Server/Plugins/Packages/__init__.py
new file mode 100644
index 000000000..1132543f1
--- /dev/null
+++ b/src/lib/Server/Plugins/Packages/__init__.py
@@ -0,0 +1,226 @@
+import os
+import sys
+import time
+import copy
+import glob
+import shutil
+import logging
+import lxml.etree
+import Bcfg2.Logger
+import Bcfg2.Server.Plugin
+from Bcfg2.Bcfg2Py3k import ConfigParser, urlopen
+from Bcfg2.Server.Plugins.Packages import Collection
+from Bcfg2.Server.Plugins.Packages.PackagesSources import PackagesSources
+from Bcfg2.Server.Plugins.Packages.PackagesConfig import PackagesConfig
+
+logger = logging.getLogger('Packages')
+
+class Packages(Bcfg2.Server.Plugin.Plugin,
+ Bcfg2.Server.Plugin.StructureValidator,
+ Bcfg2.Server.Plugin.Generator,
+ Bcfg2.Server.Plugin.Connector):
+ name = 'Packages'
+ conflicts = ['Pkgmgr']
+ experimental = True
+ __rmi__ = Bcfg2.Server.Plugin.Plugin.__rmi__ + ['Refresh', 'Reload']
+
+ def __init__(self, core, datastore):
+ Bcfg2.Server.Plugin.Plugin.__init__(self, core, datastore)
+ Bcfg2.Server.Plugin.StructureValidator.__init__(self)
+ Bcfg2.Server.Plugin.Generator.__init__(self)
+ Bcfg2.Server.Plugin.Connector.__init__(self)
+
+ self.sentinels = set()
+ self.cachepath = os.path.join(self.data, 'cache')
+ self.keypath = os.path.join(self.data, 'keys')
+ if not os.path.exists(self.keypath):
+ # create key directory if needed
+ os.makedirs(self.keypath)
+
+ # set up config files
+ self.config = PackagesConfig(os.path.join(self.data, "packages.conf"),
+ core.fam, self)
+ self.sources = PackagesSources(os.path.join(self.data, "sources.xml"),
+ self.cachepath, core.fam, self,
+ self.config)
+
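+ # both properties read packages.conf; e.g. (hypothetical):
+ #   [global]
+ #   resolver = disabled
+ #   metadata = disabled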
+ @property
+ def disableResolver(self):
+ try:
+ return self.config.get("global", "resolver").lower() == "disabled"
+ except (ConfigParser.NoSectionError, ConfigParser.NoOptionError):
+ return False
+
+ @property
+ def disableMetaData(self):
+ try:
+ return self.config.get("global", "metadata").lower() == "disabled"
+ except (ConfigParser.NoSectionError, ConfigParser.NoOptionError):
+ return False
+
+ def create_config(self, entry, metadata):
+ """ create yum/apt config for the specified host """
+ attrib = {'encoding': 'ascii',
+ 'owner': 'root',
+ 'group': 'root',
+ 'type': 'file',
+ 'perms': '0644'}
+
+ collection = Collection.factory(metadata, self.sources, self.data)
+ entry.text = collection.get_config()
+ for (key, value) in list(attrib.items()):
+ entry.attrib.__setitem__(key, value)
+
+ def HandleEntry(self, entry, metadata):
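+ # Package entries get version='auto' and the collection's
+ # package type filled in; Path entries naming the configured
+ # yum_config or apt_config file get a generated repo config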
+ if entry.tag == 'Package':
+ collection = Collection.factory(metadata, self.sources, self.data)
+ entry.set('version', 'auto')
+ entry.set('type', collection.ptype)
+ elif entry.tag == 'Path':
+ if (self.config.has_section("global") and
+ ((self.config.has_option("global", "yum_config") and
+ entry.get("name") == self.config.get("global",
+ "yum_config")) or
+ (self.config.has_option("global", "apt_config") and
+ entry.get("name") == self.config.get("global",
+ "apt_config")))):
+ self.create_config(entry, metadata)
+
+ def HandlesEntry(self, entry, metadata):
+ if entry.tag == 'Package':
+ collection = Collection.factory(metadata, self.sources, self.data)
+ if collection.magic_groups_match():
+ return True
+ elif entry.tag == 'Path':
+ # managed entries for yum/apt configs
+ if (self.config.has_section("global") and
+ ((self.config.has_option("global", "yum_config") and
+ entry.get("name") == self.config.get("global",
+ "yum_config")) or
+ (self.config.has_option("global", "apt_config") and
+ entry.get("name") == self.config.get("global",
+ "apt_config")))):
+ return True
+ return False
+
+ def validate_structures(self, metadata, structures):
+ '''Ensure client configurations include all needed prerequisites
+
+ Arguments:
+ metadata - client metadata instance
+ structures - a list of structure-stage entry combinations
+ '''
+ collection = Collection.factory(metadata, self.sources, self.data)
+ indep = lxml.etree.Element('Independent')
+ self._build_packages(metadata, indep, structures,
+ collection=collection)
+ collection.build_extra_structures(indep)
+ structures.append(indep)
+
+ def _build_packages(self, metadata, independent, structures,
+ collection=None):
+ """ build list of packages that need to be included in the
+ specification by validate_structures() """
+ if self.disableResolver:
+ # Config requests no resolver
+ return
+
+ if collection is None:
+ collection = Collection.factory(metadata, self.sources, self.data)
+ initial = set()
+ to_remove = []
+ for struct in structures:
+ for pkg in struct.xpath('//Package | //BoundPackage'):
+ if pkg.get("name"):
+ initial.add(pkg.get("name"))
+ elif pkg.get("group"):
+ initial.update(collection.get_group(pkg.get("group")))
+ to_remove.append(pkg)
+ else:
+ self.logger.error("Malformed Package: %s" %
+ lxml.etree.tostring(pkg))
+ for el in to_remove:
+ el.getparent().remove(el)
+
+ packages, unknown = collection.complete(initial)
+ if unknown:
+ self.logger.info("Got %d unknown entries" % len(unknown))
+ self.logger.info(list(unknown))
+ newpkgs = list(packages.difference(initial))
+ self.logger.debug("%d initial, %d complete, %d new" %
+ (len(initial), len(packages), len(newpkgs)))
+ newpkgs.sort()
+ for pkg in newpkgs:
+ lxml.etree.SubElement(independent, 'BoundPackage', name=pkg,
+ version='auto', type=collection.ptype,
+ origin='Packages')
+
+ def Refresh(self):
+ '''Packages.Refresh() => True|False\nReload configuration
+ specification and download sources\n'''
+ self._load_config(force_update=True)
+ return True
+
+ def Reload(self):
+ '''Packages.Reload() => True|False\nReload configuration
+ specification and sources\n'''
+ self._load_config()
+ return True
+
+ def _load_config(self, force_update=False):
+ '''
+ Load the configuration data and set up sources
+
+ Keyword args:
+ force_update Force downloading repo data
+ '''
+ self._load_sources(force_update)
+ self._load_gpg_keys(force_update)
+
+ def _load_sources(self, force_update):
+ """ Load sources from the config """
+ self.sentinels = set()
+ cachefiles = []
+
+ for collection in list(Collection.collections.values()):
+ cachefiles.extend(collection.cachefiles)
+ if not self.disableMetaData:
+ collection.setup_data(force_update)
+ self.sentinels.update(collection.basegroups)
+
+ Collection.clear_cache()
+
+ for cfile in glob.glob(os.path.join(self.cachepath, "cache-*")):
+ if cfile not in cachefiles:
+ try:
+ if os.path.isdir(cfile):
+ shutil.rmtree(cfile)
+ else:
+ os.unlink(cfile)
+ except OSError:
+ err = sys.exc_info()[1]
+ logger.error("Packages: Could not remove cache file %s: %s"
+ % (cfile, err))
+
+ def _load_gpg_keys(self, force_update):
+ """ Load gpg keys from the config """
+ keyfiles = []
+ keys = []
+ for source in self.sources:
+ for key in source.gpgkeys:
+ localfile = os.path.join(self.keypath, os.path.basename(key))
+ if localfile not in keyfiles:
+ keyfiles.append(localfile)
+ if ((force_update and key not in keys) or
+ not os.path.exists(localfile)):
+ self.logger.info("Downloading and parsing %s" % key)
+ response = urlopen(key)
+ open(localfile, 'w').write(response.read())
+ keys.append(key)
+
+ for kfile in glob.glob(os.path.join(self.keypath, "*")):
+ if kfile not in keyfiles:
+ os.unlink(kfile)
+
+ def get_additional_data(self, metadata):
+ collection = Collection.factory(metadata, self.sources, self.data)
+ return dict(sources=collection.get_additional_data())
diff --git a/src/lib/Server/Plugins/Pkgmgr.py b/src/lib/Server/Plugins/Pkgmgr.py
index b96e7ea7d..bf674d0d0 100644
--- a/src/lib/Server/Plugins/Pkgmgr.py
+++ b/src/lib/Server/Plugins/Pkgmgr.py
@@ -4,6 +4,7 @@ __revision__ = '$Revision$'
import logging
import re
import Bcfg2.Server.Plugin
+import lxml.etree
logger = logging.getLogger('Bcfg2.Plugins.Pkgmgr')
@@ -45,9 +46,9 @@ class PNode(Bcfg2.Server.Plugin.INode):
'encap': re.compile('^(?P<name>[\w-]+)-(?P<version>[\w\d\.+-]+).encap.*$')}
ignore = ['Package']
- def Match(self, metadata, data):
+ def Match(self, metadata, data, entry=lxml.etree.Element("None")):
"""Return a dictionary of package mappings."""
- if self.predicate(metadata):
+ if self.predicate(metadata, entry):
for key in self.contents:
try:
data[key].update(self.contents[key])
diff --git a/src/lib/Server/Plugins/SGenshi.py b/src/lib/Server/Plugins/SGenshi.py
index efd981956..3745834a8 100644
--- a/src/lib/Server/Plugins/SGenshi.py
+++ b/src/lib/Server/Plugins/SGenshi.py
@@ -5,6 +5,7 @@ import genshi.input
import genshi.template
import lxml.etree
import logging
+import copy
import sys
import Bcfg2.Server.Plugin
@@ -13,28 +14,45 @@ import Bcfg2.Server.Plugins.TGenshi
logger = logging.getLogger('Bcfg2.Plugins.SGenshi')
-class SGenshiTemplateFile(Bcfg2.Server.Plugins.TGenshi.TemplateFile):
+class SGenshiTemplateFile(Bcfg2.Server.Plugins.TGenshi.TemplateFile,
+ Bcfg2.Server.Plugin.StructFile):
+ def __init__(self, name, specific, encoding):
+ Bcfg2.Server.Plugins.TGenshi.TemplateFile.__init__(self, name,
+ specific, encoding)
+ Bcfg2.Server.Plugin.StructFile.__init__(self, name)
def get_xml_value(self, metadata):
if not hasattr(self, 'template'):
logger.error("No parsed template information for %s" % (self.name))
raise Bcfg2.Server.Plugin.PluginExecutionError
try:
- stream = self.template.generate(metadata=metadata,).filter( \
+ stream = self.template.generate(metadata=metadata).filter( \
Bcfg2.Server.Plugins.TGenshi.removecomment)
- data = stream.render('xml', strip_whitespace=False)
- return lxml.etree.XML(data)
+ data = lxml.etree.XML(stream.render('xml', strip_whitespace=False))
+ bundlename = self.name.split('/')[-1][:-4]  # strip the ".xml" extension
+ bundle = lxml.etree.Element('Bundle', name=bundlename)
+ for item in self.Match(metadata, data):
+ bundle.append(copy.deepcopy(item))
+ return bundle
except LookupError:
lerror = sys.exc_info()[1]
logger.error('Genshi lookup error: %s' % lerror)
except genshi.template.TemplateError:
terror = sys.exc_info()[1]
logger.error('Genshi template error: %s' % terror)
+ raise
except genshi.input.ParseError:
perror = sys.exc_info()[1]
logger.error('Genshi parse error: %s' % perror)
raise
+ def Match(self, metadata, xdata):
+ """Return matching fragments of parsed template."""
+ rv = []
+ for child in xdata.getchildren():
+ rv.extend(self._match(child, metadata))
+ logger.debug("File %s got %d match(es)" % (self.name, len(rv)))
+ return rv
class SGenshiEntrySet(Bcfg2.Server.Plugin.EntrySet):
diff --git a/src/lib/Server/Reports/settings.py b/src/lib/Server/Reports/settings.py
index 869f09f1f..128658ff1 100644
--- a/src/lib/Server/Reports/settings.py
+++ b/src/lib/Server/Reports/settings.py
@@ -1,10 +1,14 @@
import django
+import sys
# Compatibility import
from Bcfg2.Bcfg2Py3k import ConfigParser
# Django settings for bcfg2 reports project.
c = ConfigParser.ConfigParser()
-c.read(['/etc/bcfg2.conf', '/etc/bcfg2-web.conf'])
+if len(c.read(['/etc/bcfg2.conf', '/etc/bcfg2-web.conf'])) == 0:
+ print("Please check that bcfg2.conf or bcfg2-web.conf exists "
+ "and is readable by your web server.")
+ sys.exit(1)
try:
dset = c.get('statistics', 'web_debug')
@@ -23,8 +27,12 @@ ADMINS = (
)
MANAGERS = ADMINS
-
-db_engine = c.get('statistics', 'database_engine')
+try:
+ db_engine = c.get('statistics', 'database_engine')
+except ConfigParser.NoSectionError:
+ e = sys.exc_info()[1]
+ print("Failed to determine database engine: %s" % e)
+ sys.exit(1)
db_name = ''
if c.has_option('statistics', 'database_name'):
db_name = c.get('statistics', 'database_name')
diff --git a/src/sbin/bcfg2-admin b/src/sbin/bcfg2-admin
index 0056a97aa..09117a3f4 100755
--- a/src/sbin/bcfg2-admin
+++ b/src/sbin/bcfg2-admin
@@ -64,7 +64,7 @@ def main():
if setup['args'][0] in get_modes():
modname = setup['args'][0].capitalize()
- if len(setup['args']) == 1 or setup['args'][1] == 'help':
+ if len(setup['args']) > 1 and setup['args'][1] == 'help':
print(mode_import(modname).__longhelp__)
raise SystemExit(0)
try: