author    Sol Jerome <sol.jerome@gmail.com>  2013-09-06 10:38:55 -0500
committer Sol Jerome <sol.jerome@gmail.com>  2013-09-06 10:38:55 -0500
commit    ba4f63898809aebbfcdeb1546ccd2b53c7f3f7c2 (patch)
tree      39348beb4f6a366b6a4c91d9cb4dbc7101799814
parent    d4931951305e93c976f139ef174adbe29b89ebcf (diff)
download  bcfg2-ba4f63898809aebbfcdeb1546ccd2b53c7f3f7c2.tar.gz
          bcfg2-ba4f63898809aebbfcdeb1546ccd2b53c7f3f7c2.tar.bz2
          bcfg2-ba4f63898809aebbfcdeb1546ccd2b53c7f3f7c2.zip
Pylint/PEP8 fixes
Signed-off-by: Sol Jerome <sol.jerome@gmail.com>
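
Most of this patch is mechanical continuation-line cleanup of the kind sketched below. The snippet is a simplified, hypothetical illustration (the dict-based `config` and the entry names are made up for the example); it mirrors the change to `self.ignores` in src/lib/Bcfg2/Client/Tools/APT.py, where trailing backslashes inside a bracketed expression are dropped in favour of the implicit line continuation that the pylint/pep8 checkers prefer.

    # Hypothetical stand-in for the Bcfg2 client configuration structure.
    config = [
        [{'tag': 'Path', 'type': 'ignore', 'name': '/etc/motd'},
         {'tag': 'Package', 'type': None, 'name': 'apt'}],
    ]

    # Before: redundant backslash continuations inside brackets.
    ignores_old = [entry['name'] for struct in config \
                   for entry in struct \
                   if entry['tag'] == 'Path' and \
                   entry['type'] == 'ignore']

    # After: rely on the implicit continuation provided by the brackets.
    ignores_new = [entry['name'] for struct in config
                   for entry in struct
                   if entry['tag'] == 'Path' and
                   entry['type'] == 'ignore']

    assert ignores_old == ignores_new == ['/etc/motd']

Both forms are equivalent at runtime; the patch only changes layout, plus a few targeted suppressions (e.g. the "# nopep8" markers on the deprecated has_key() calls below).
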
-rw-r--r--  src/lib/Bcfg2/Client/Tools/APT.py                    110
-rw-r--r--  src/lib/Bcfg2/Client/Tools/Action.py                   8
-rw-r--r--  src/lib/Bcfg2/Client/Tools/Chkconfig.py                2
-rw-r--r--  src/lib/Bcfg2/Client/Tools/DebInit.py                  4
-rw-r--r--  src/lib/Bcfg2/Client/Tools/MacPorts.py                 2
-rw-r--r--  src/lib/Bcfg2/Client/Tools/POSIX/Device.py             2
-rw-r--r--  src/lib/Bcfg2/Client/Tools/POSIX/Nonexistent.py        4
-rw-r--r--  src/lib/Bcfg2/Client/Tools/POSIX/__init__.py           6
-rw-r--r--  src/lib/Bcfg2/Client/Tools/POSIX/base.py               5
-rw-r--r--  src/lib/Bcfg2/Client/Tools/Portage.py                  2
-rw-r--r--  src/lib/Bcfg2/Client/Tools/RPM.py                   1019
-rw-r--r--  src/lib/Bcfg2/Client/Tools/SELinux.py                  2
-rw-r--r--  src/lib/Bcfg2/Client/Tools/SYSV.py                     2
-rw-r--r--  src/lib/Bcfg2/Client/Tools/VCS.py                     21
-rw-r--r--  src/lib/Bcfg2/Client/Tools/YUM.py                      6
-rw-r--r--  src/lib/Bcfg2/Client/Tools/__init__.py                 2
-rw-r--r--  src/lib/Bcfg2/Client/__init__.py                      14
-rw-r--r--  src/lib/Bcfg2/Server/Admin.py                          4
-rw-r--r--  src/lib/Bcfg2/Server/CherrypyCore.py                   2
-rw-r--r--  src/lib/Bcfg2/Server/Core.py                           6
-rwxr-xr-x  src/lib/Bcfg2/Server/Encryption.py                     2
-rw-r--r--  src/lib/Bcfg2/Server/FileMonitor/Inotify.py            2
-rw-r--r--  src/lib/Bcfg2/Server/FileMonitor/__init__.py           2
-rw-r--r--  src/lib/Bcfg2/Server/Info.py                          10
-rwxr-xr-x  src/lib/Bcfg2/Server/Lint/Genshi.py                    6
-rw-r--r--  src/lib/Bcfg2/Server/Lint/MergeFiles.py                4
-rw-r--r--  src/lib/Bcfg2/Server/Lint/Metadata.py                  2
-rw-r--r--  src/lib/Bcfg2/Server/Lint/Pkgmgr.py                    2
-rw-r--r--  src/lib/Bcfg2/Server/Lint/RequiredAttrs.py             8
-rw-r--r--  src/lib/Bcfg2/Server/Lint/Validate.py                  3
-rw-r--r--  src/lib/Bcfg2/Server/Lint/__init__.py                  4
-rw-r--r--  src/lib/Bcfg2/Server/Plugin/helpers.py                 2
-rw-r--r--  src/lib/Bcfg2/Server/Plugins/ACL.py                    4
-rw-r--r--  src/lib/Bcfg2/Server/Plugins/Bzr.py                    2
-rw-r--r--  src/lib/Bcfg2/Server/Plugins/Cfg/__init__.py           2
-rw-r--r--  src/lib/Bcfg2/Server/Plugins/Deps.py                  10
-rw-r--r--  src/lib/Bcfg2/Server/Plugins/FileProbes.py             2
-rw-r--r--  src/lib/Bcfg2/Server/Plugins/GroupPatterns.py          2
-rw-r--r--  src/lib/Bcfg2/Server/Plugins/Ldap.py                  34
-rw-r--r--  src/lib/Bcfg2/Server/Plugins/Metadata.py              14
-rw-r--r--  src/lib/Bcfg2/Server/Plugins/Packages/Yum.py           2
-rw-r--r--  src/lib/Bcfg2/Server/Plugins/Packages/__init__.py      4
-rw-r--r--  src/lib/Bcfg2/Server/Plugins/Pkgmgr.py                 4
-rw-r--r--  src/lib/Bcfg2/Server/Plugins/Probes.py                 4
-rw-r--r--  src/lib/Bcfg2/Server/Plugins/Svn.py                    2
-rw-r--r--  src/lib/Bcfg2/Server/Reports/reports/models.py        30
-rw-r--r--  src/lib/Bcfg2/Server/Reports/updatefix.py              40
-rw-r--r--  src/lib/Bcfg2/Server/SSLServer.py                      6
48 files changed, 810 insertions, 622 deletions
diff --git a/src/lib/Bcfg2/Client/Tools/APT.py b/src/lib/Bcfg2/Client/Tools/APT.py
index 5f14b43ed..cf4e7c7ea 100644
--- a/src/lib/Bcfg2/Client/Tools/APT.py
+++ b/src/lib/Bcfg2/Client/Tools/APT.py
@@ -16,14 +16,15 @@ class APT(Bcfg2.Client.Tools.Tool):
options = Bcfg2.Client.Tools.Tool.options + [
Bcfg2.Options.PathOption(
- cf=('APT', 'install_path'), default='/usr', dest='apt_install_path',
- help='Apt tools install path'),
+ cf=('APT', 'install_path'),
+ default='/usr', dest='apt_install_path',
+ help='Apt tools install path'),
Bcfg2.Options.PathOption(
cf=('APT', 'var_path'), default='/var', dest='apt_var_path',
help='Apt tools var path'),
Bcfg2.Options.PathOption(
- cf=('APT', 'etc_path'), default='/etc', dest='apt_etc_path',
- help='System etc path')]
+ cf=('APT', 'etc_path'), default='/etc', dest='apt_etc_path',
+ help='System etc path')]
__execs__ = []
__handles__ = [('Package', 'deb'), ('Path', 'ignore')]
@@ -49,14 +50,14 @@ class APT(Bcfg2.Client.Tools.Tool):
if not Bcfg2.Options.setup.debug:
self.pkgcmd += '-q=2 '
self.pkgcmd += '-y install %s'
- self.ignores = [entry.get('name') for struct in config \
- for entry in struct \
- if entry.tag == 'Path' and \
+ self.ignores = [entry.get('name') for struct in config
+ for entry in struct
+ if entry.tag == 'Path' and
entry.get('type') == 'ignore']
- self.__important__ = self.__important__ + \
- [
+ self.__important__ = self.__important__ + [
"%s/cache/debconf/config.dat" % Bcfg2.Options.setup.apt_var_path,
- "%s/cache/debconf/templates.dat" % Bcfg2.Options.setup.apt_var_path,
+ "%s/cache/debconf/templates.dat" %
+ Bcfg2.Options.setup.apt_var_path,
'/etc/passwd', '/etc/group',
'%s/apt/apt.conf' % Bcfg2.Options.setup.apt_etc_path,
'%s/dpkg/dpkg.cfg' % Bcfg2.Options.setup.apt_etc_path] + \
@@ -64,10 +65,11 @@ class APT(Bcfg2.Client.Tools.Tool):
for entry in struct
if (entry.tag == 'Path' and
entry.get('name').startswith(
- '%s/apt/sources.list' % Bcfg2.Options.setup.apt_etc_path))]
- self.nonexistent = [
- entry.get('name') for struct in config for entry in struct
- if entry.tag == 'Path' and entry.get('type') == 'nonexistent']
+ '%s/apt/sources.list' %
+ Bcfg2.Options.setup.apt_etc_path))]
+ self.nonexistent = [entry.get('name') for struct in config
+ for entry in struct if entry.tag == 'Path'
+ and entry.get('type') == 'nonexistent']
os.environ["DEBIAN_FRONTEND"] = 'noninteractive'
self.actions = {}
if Bcfg2.Options.setup.kevlar and not Bcfg2.Options.setup.dry_run:
@@ -96,16 +98,16 @@ class APT(Bcfg2.Client.Tools.Tool):
else:
extras = [(p.name, p.installedVersion) for p in self.pkg_cache
if p.isInstalled and p.name not in packages]
- return [Bcfg2.Client.XML.Element('Package', name=name, \
- type='deb', version=version) \
- for (name, version) in extras]
+ return [Bcfg2.Client.XML.Element('Package', name=name,
+ type='deb', version=version)
+ for (name, version) in extras]
def VerifyDebsums(self, entry, modlist):
output = \
self.cmd.run("%s -as %s" %
(self.debsums, entry.get('name'))).stdout.splitlines()
if len(output) == 1 and "no md5sums for" in output[0]:
- self.logger.info("Package %s has no md5sums. Cannot verify" % \
+ self.logger.info("Package %s has no md5sums. Cannot verify" %
entry.get('name'))
entry.set('qtext',
"Reinstall Package %s-%s to setup md5sums? (y/N) " %
@@ -125,11 +127,11 @@ class APT(Bcfg2.Client.Tools.Tool):
# these files should not exist
continue
elif "is not installed" in item or "missing file" in item:
- self.logger.error("Package %s is not fully installed" \
- % entry.get('name'))
+ self.logger.error("Package %s is not fully installed" %
+ entry.get('name'))
else:
- self.logger.error("Got Unsupported pattern %s from debsums" \
- % item)
+ self.logger.error("Got Unsupported pattern %s from debsums" %
+ item)
files.append(item)
files = list(set(files) - set(self.ignores))
# We check if there is file in the checksum to do
@@ -139,15 +141,15 @@ class APT(Bcfg2.Client.Tools.Tool):
modlist = [os.path.realpath(filename) for filename in modlist]
bad = [filename for filename in files if filename not in modlist]
if bad:
- self.logger.debug("It is suggested that you either manage these "
- "files, revert the changes, or ignore false "
- "failures:")
- self.logger.info("Package %s failed validation. Bad files are:" % \
- entry.get('name'))
+ self.logger.debug("It is suggested that you either manage "
+ "these files, revert the changes, or ignore "
+ "false failures:")
+ self.logger.info("Package %s failed validation. Bad files "
+ "are:" % entry.get('name'))
self.logger.info(bad)
entry.set('qtext',
- "Reinstall Package %s-%s to fix failing files? (y/N) " % \
- (entry.get('name'), entry.get('version')))
+ "Reinstall Package %s-%s to fix failing files? "
+ "(y/N) " % (entry.get('name'), entry.get('version')))
return False
return True
@@ -158,12 +160,12 @@ class APT(Bcfg2.Client.Tools.Tool):
(entry.attrib['name']))
return False
pkgname = entry.get('name')
- if self.pkg_cache.has_key(pkgname):
+ if self.pkg_cache.has_key(pkgname): # nopep8
if self._newapi:
is_installed = self.pkg_cache[pkgname].is_installed
else:
is_installed = self.pkg_cache[pkgname].isInstalled
- if not self.pkg_cache.has_key(pkgname) or not is_installed:
+ if not self.pkg_cache.has_key(pkgname) or not is_installed: # nopep8
self.logger.info("Package %s not installed" % (entry.get('name')))
entry.set('current_exists', 'false')
return False
@@ -177,9 +179,11 @@ class APT(Bcfg2.Client.Tools.Tool):
candidate_version = pkg.candidateVersion
if entry.get('version') == 'auto':
if self._newapi:
- is_upgradable = self.pkg_cache._depcache.is_upgradable(pkg._pkg)
+ is_upgradable = \
+ self.pkg_cache._depcache.is_upgradable(pkg._pkg)
else:
- is_upgradable = self.pkg_cache._depcache.IsUpgradable(pkg._pkg)
+ is_upgradable = \
+ self.pkg_cache._depcache.IsUpgradable(pkg._pkg)
if is_upgradable:
desiredVersion = candidate_version
else:
@@ -190,15 +194,15 @@ class APT(Bcfg2.Client.Tools.Tool):
desiredVersion = entry.get('version')
if desiredVersion != installed_version:
entry.set('current_version', installed_version)
- entry.set('qtext', "Modify Package %s (%s -> %s)? (y/N) " % \
+ entry.set('qtext', "Modify Package %s (%s -> %s)? (y/N) " %
(entry.get('name'), entry.get('current_version'),
desiredVersion))
return False
else:
# version matches
if (not Bcfg2.Options.setup.quick and
- entry.get('verify', 'true') == 'true'
- and checksums):
+ entry.get('verify', 'true') == 'true'
+ and checksums):
pkgsums = self.VerifyDebsums(entry, modlist)
return pkgsums
return True
@@ -236,35 +240,39 @@ class APT(Bcfg2.Client.Tools.Tool):
ipkgs = []
bad_pkgs = []
for pkg in packages:
- if not self.pkg_cache.has_key(pkg.get('name')):
- self.logger.error("APT has no information about package %s" % (pkg.get('name')))
+ if not self.pkg_cache.has_key(pkg.get('name')): # nopep8
+ self.logger.error("APT has no information about package %s" %
+ (pkg.get('name')))
continue
if pkg.get('version') in ['auto', 'any']:
if self._newapi:
try:
- ipkgs.append("%s=%s" % (pkg.get('name'),
- self.pkg_cache[pkg.get('name')].candidate.version))
+ cversion = \
+ self.pkg_cache[pkg.get('name')].candidate.version
+ ipkgs.append("%s=%s" % (pkg.get('name'), cversion))
except AttributeError:
- self.logger.error("Failed to find %s in apt package cache" %
- pkg.get('name'))
+ self.logger.error("Failed to find %s in apt package "
+ "cache" % pkg.get('name'))
continue
else:
- ipkgs.append("%s=%s" % (pkg.get('name'),
- self.pkg_cache[pkg.get('name')].candidateVersion))
+ cversion = self.pkg_cache[pkg.get('name')].candidateVersion
+ ipkgs.append("%s=%s" % (pkg.get('name'), cversion))
continue
if self._newapi:
- avail_vers = [x.ver_str for x in \
- self.pkg_cache[pkg.get('name')]._pkg.version_list]
+ avail_vers = [
+ x.ver_str for x in
+ self.pkg_cache[pkg.get('name')]._pkg.version_list]
else:
- avail_vers = [x.VerStr for x in \
- self.pkg_cache[pkg.get('name')]._pkg.VersionList]
+ avail_vers = [
+ x.VerStr for x in
+ self.pkg_cache[pkg.get('name')]._pkg.VersionList]
if pkg.get('version') in avail_vers:
ipkgs.append("%s=%s" % (pkg.get('name'), pkg.get('version')))
continue
else:
- self.logger.error("Package %s: desired version %s not in %s" \
- % (pkg.get('name'), pkg.get('version'),
- avail_vers))
+ self.logger.error("Package %s: desired version %s not in %s" %
+ (pkg.get('name'), pkg.get('version'),
+ avail_vers))
bad_pkgs.append(pkg.get('name'))
if bad_pkgs:
self.logger.error("Cannot find correct versions of packages:")
diff --git a/src/lib/Bcfg2/Client/Tools/Action.py b/src/lib/Bcfg2/Client/Tools/Action.py
index db1ff500e..5549b1717 100644
--- a/src/lib/Bcfg2/Client/Tools/Action.py
+++ b/src/lib/Bcfg2/Client/Tools/Action.py
@@ -15,12 +15,14 @@ class Action(Bcfg2.Client.Tools.Tool):
""" Return true if the given action is allowed to be run by
the whitelist or blacklist """
if (Bcfg2.Options.setup.decision == 'whitelist' and
- not matches_white_list(action, Bcfg2.Options.setup.decision_list)):
+ not matches_white_list(action,
+ Bcfg2.Options.setup.decision_list)):
self.logger.info("In whitelist mode: suppressing Action: %s" %
action.get('name'))
return False
if (Bcfg2.Options.setup.decision == 'blacklist' and
- not passes_black_list(action, Bcfg2.Options.setup.decision_list)):
+ not passes_black_list(action,
+ Bcfg2.Options.setup.decision_list)):
self.logger.info("In blacklist mode: suppressing Action: %s" %
action.get('name'))
return False
@@ -84,7 +86,7 @@ class Action(Bcfg2.Client.Tools.Tool):
states = dict()
for action in bundle.findall("Action"):
if (action.get('timing') in ['post', 'both'] and
- action.get('when') != 'modified'):
+ action.get('when') != 'modified'):
if not self._action_allowed(action):
continue
states[action] = self.RunAction(action)
diff --git a/src/lib/Bcfg2/Client/Tools/Chkconfig.py b/src/lib/Bcfg2/Client/Tools/Chkconfig.py
index c2c7e21c1..fab142a7c 100644
--- a/src/lib/Bcfg2/Client/Tools/Chkconfig.py
+++ b/src/lib/Bcfg2/Client/Tools/Chkconfig.py
@@ -100,7 +100,7 @@ class Chkconfig(Bcfg2.Client.Tools.SvcTool):
return bootcmdrv
buildmode = Bcfg2.Options.setup.servicemode == 'build'
if ((entry.get('status') == 'on' and not buildmode) and
- entry.get('current_status') == 'off'):
+ entry.get('current_status') == 'off'):
svccmdrv = self.start_service(entry)
elif ((entry.get('status') == 'off' or buildmode) and
entry.get('current_status') == 'on'):
diff --git a/src/lib/Bcfg2/Client/Tools/DebInit.py b/src/lib/Bcfg2/Client/Tools/DebInit.py
index 0433ac80d..53e5e7ec6 100644
--- a/src/lib/Bcfg2/Client/Tools/DebInit.py
+++ b/src/lib/Bcfg2/Client/Tools/DebInit.py
@@ -34,8 +34,8 @@ class DebInit(Bcfg2.Client.Tools.SvcTool):
if entry.get('sequence'):
if (deb_version in DEBIAN_OLD_STYLE_BOOT_SEQUENCE or
- deb_version.startswith('5') or
- os.path.exists('/etc/init.d/.legacy-bootordering')):
+ deb_version.startswith('5') or
+ os.path.exists('/etc/init.d/.legacy-bootordering')):
start_sequence = int(entry.get('sequence'))
kill_sequence = 100 - start_sequence
else:
diff --git a/src/lib/Bcfg2/Client/Tools/MacPorts.py b/src/lib/Bcfg2/Client/Tools/MacPorts.py
index c28f8c743..265171a5a 100644
--- a/src/lib/Bcfg2/Client/Tools/MacPorts.py
+++ b/src/lib/Bcfg2/Client/Tools/MacPorts.py
@@ -38,7 +38,7 @@ class MacPorts(Bcfg2.Client.Tools.PkgTool):
if entry.attrib['name'] in self.installed:
if (self.installed[entry.attrib['name']] == entry.attrib['version']
- or entry.attrib['version'] == 'any'):
+ or entry.attrib['version'] == 'any'):
#FIXME: We should be able to check this once
# http://trac.macports.org/ticket/15709 is implemented
return True
diff --git a/src/lib/Bcfg2/Client/Tools/POSIX/Device.py b/src/lib/Bcfg2/Client/Tools/POSIX/Device.py
index 9b84adad0..6237ccce2 100644
--- a/src/lib/Bcfg2/Client/Tools/POSIX/Device.py
+++ b/src/lib/Bcfg2/Client/Tools/POSIX/Device.py
@@ -13,7 +13,7 @@ class POSIXDevice(POSIXTool):
if entry.get('dev_type') in ['block', 'char']:
# check if major/minor are properly specified
if (entry.get('major') is None or
- entry.get('minor') is None):
+ entry.get('minor') is None):
return False
return True
diff --git a/src/lib/Bcfg2/Client/Tools/POSIX/Nonexistent.py b/src/lib/Bcfg2/Client/Tools/POSIX/Nonexistent.py
index f7251ca50..d67a68c8b 100644
--- a/src/lib/Bcfg2/Client/Tools/POSIX/Nonexistent.py
+++ b/src/lib/Bcfg2/Client/Tools/POSIX/Nonexistent.py
@@ -24,8 +24,8 @@ class POSIXNonexistent(POSIXTool):
for struct in self.config.getchildren():
for el in struct.getchildren():
if (el.tag == 'Path' and
- el.get('type') != 'nonexistent' and
- el.get('name').startswith(ename)):
+ el.get('type') != 'nonexistent' and
+ el.get('name').startswith(ename)):
self.logger.error('POSIX: Not removing %s. One or '
'more files in this directory are '
'specified in your configuration.' %
diff --git a/src/lib/Bcfg2/Client/Tools/POSIX/__init__.py b/src/lib/Bcfg2/Client/Tools/POSIX/__init__.py
index db0fa96ab..13b45a759 100644
--- a/src/lib/Bcfg2/Client/Tools/POSIX/__init__.py
+++ b/src/lib/Bcfg2/Client/Tools/POSIX/__init__.py
@@ -140,9 +140,9 @@ class POSIX(Bcfg2.Client.Tools.Tool):
def _paranoid_backup(self, entry):
""" Take a backup of the specified entry for paranoid mode """
if (entry.get("paranoid", 'false').lower() == 'true' and
- Bcfg2.Options.setup.paranoid and
- entry.get('current_exists', 'true') == 'true' and
- not os.path.isdir(entry.get("name"))):
+ Bcfg2.Options.setup.paranoid and
+ entry.get('current_exists', 'true') == 'true' and
+ not os.path.isdir(entry.get("name"))):
self._prune_old_backups(entry)
bkupnam = "%s_%s" % (entry.get('name').replace('/', '_'),
datetime.isoformat(datetime.now()))
diff --git a/src/lib/Bcfg2/Client/Tools/POSIX/base.py b/src/lib/Bcfg2/Client/Tools/POSIX/base.py
index bd2f8f87e..bce7ba0ca 100644
--- a/src/lib/Bcfg2/Client/Tools/POSIX/base.py
+++ b/src/lib/Bcfg2/Client/Tools/POSIX/base.py
@@ -508,7 +508,8 @@ class POSIXTool(Bcfg2.Client.Tools.Tool):
(path, attrib['current_group'], entry.get('group')))
if (wanted_mode and
- oct_mode(int(attrib['current_mode'], 8)) != oct_mode(wanted_mode)):
+ oct_mode(int(attrib['current_mode'], 8)) !=
+ oct_mode(wanted_mode)):
errors.append("Permissions for path %s are incorrect. "
"Current permissions are %s but should be %s" %
(path, attrib['current_mode'], entry.get('mode')))
@@ -533,7 +534,7 @@ class POSIXTool(Bcfg2.Client.Tools.Tool):
else:
wanted_secontext = entry.get("secontext")
if (wanted_secontext and
- attrib['current_secontext'] != wanted_secontext):
+ attrib['current_secontext'] != wanted_secontext):
errors.append("SELinux context for path %s is incorrect. "
"Current context is %s but should be %s" %
(path, attrib['current_secontext'],
diff --git a/src/lib/Bcfg2/Client/Tools/Portage.py b/src/lib/Bcfg2/Client/Tools/Portage.py
index a877b564f..a61ede820 100644
--- a/src/lib/Bcfg2/Client/Tools/Portage.py
+++ b/src/lib/Bcfg2/Client/Tools/Portage.py
@@ -66,7 +66,7 @@ class Portage(Bcfg2.Client.Tools.PkgTool):
if not Bcfg2.Options.setup.quick:
if ('verify' not in entry.attrib or
- entry.get('verify').lower() == 'true'):
+ entry.get('verify').lower() == 'true'):
# Check the package if:
# - Not running in quick mode
diff --git a/src/lib/Bcfg2/Client/Tools/RPM.py b/src/lib/Bcfg2/Client/Tools/RPM.py
index 1ebc61c93..173623f61 100644
--- a/src/lib/Bcfg2/Client/Tools/RPM.py
+++ b/src/lib/Bcfg2/Client/Tools/RPM.py
@@ -64,72 +64,72 @@ whitelist_re = re.compile('|'.join(whitelist))
# They are defined in lib/rpmcli.h
# Bit(s) for verifyFile() attributes.
#
-RPMVERIFY_NONE = 0 # /*!< */
-RPMVERIFY_MD5 = 1 # 1 << 0 # /*!< from %verify(md5) */
-RPMVERIFY_FILESIZE = 2 # 1 << 1 # /*!< from %verify(size) */
-RPMVERIFY_LINKTO = 4 # 1 << 2 # /*!< from %verify(link) */
-RPMVERIFY_USER = 8 # 1 << 3 # /*!< from %verify(user) */
-RPMVERIFY_GROUP = 16 # 1 << 4 # /*!< from %verify(group) */
-RPMVERIFY_MTIME = 32 # 1 << 5 # /*!< from %verify(mtime) */
-RPMVERIFY_MODE = 64 # 1 << 6 # /*!< from %verify(mode) */
-RPMVERIFY_RDEV = 128 # 1 << 7 # /*!< from %verify(rdev) */
-RPMVERIFY_CONTEXTS = 32768 # (1 << 15) # /*!< from --nocontexts */
-RPMVERIFY_READLINKFAIL = 268435456 # (1 << 28) # /*!< readlink failed */
-RPMVERIFY_READFAIL = 536870912 # (1 << 29) # /*!< file read failed */
-RPMVERIFY_LSTATFAIL = 1073741824 # (1 << 30) # /*!< lstat failed */
-RPMVERIFY_LGETFILECONFAIL = 2147483648 # (1 << 31) # /*!< lgetfilecon failed */
+RPMVERIFY_NONE = 0
+RPMVERIFY_MD5 = 1 # 1 << 0 # from %verify(md5)
+RPMVERIFY_FILESIZE = 2 # 1 << 1 # from %verify(size)
+RPMVERIFY_LINKTO = 4 # 1 << 2 # from %verify(link)
+RPMVERIFY_USER = 8 # 1 << 3 # from %verify(user)
+RPMVERIFY_GROUP = 16 # 1 << 4 # from %verify(group)
+RPMVERIFY_MTIME = 32 # 1 << 5 # from %verify(mtime)
+RPMVERIFY_MODE = 64 # 1 << 6 # from %verify(mode)
+RPMVERIFY_RDEV = 128 # 1 << 7 # from %verify(rdev)
+RPMVERIFY_CONTEXTS = 32768 # (1 << 15) # from --nocontexts
+RPMVERIFY_READLINKFAIL = 268435456 # (1 << 28) # readlink failed
+RPMVERIFY_READFAIL = 536870912 # (1 << 29) # file read failed
+RPMVERIFY_LSTATFAIL = 1073741824 # (1 << 30) # lstat failed
+RPMVERIFY_LGETFILECONFAIL = 2147483648 # (1 << 31) # lgetfilecon failed
RPMVERIFY_FAILURES = \
- (RPMVERIFY_LSTATFAIL|RPMVERIFY_READFAIL|RPMVERIFY_READLINKFAIL| \
- RPMVERIFY_LGETFILECONFAIL)
+ (RPMVERIFY_LSTATFAIL | RPMVERIFY_READFAIL |
+ RPMVERIFY_READLINKFAIL | RPMVERIFY_LGETFILECONFAIL)
# Bit(s) to control rpm_verify() operation.
#
-VERIFY_DEFAULT = 0, # /*!< */
-VERIFY_MD5 = 1 << 0 # /*!< from --nomd5 */
-VERIFY_SIZE = 1 << 1 # /*!< from --nosize */
-VERIFY_LINKTO = 1 << 2 # /*!< from --nolinkto */
-VERIFY_USER = 1 << 3 # /*!< from --nouser */
-VERIFY_GROUP = 1 << 4 # /*!< from --nogroup */
-VERIFY_MTIME = 1 << 5 # /*!< from --nomtime */
-VERIFY_MODE = 1 << 6 # /*!< from --nomode */
-VERIFY_RDEV = 1 << 7 # /*!< from --nodev */
+VERIFY_DEFAULT = 0, # /*!< */
+VERIFY_MD5 = 1 << 0 # /*!< from --nomd5 */
+VERIFY_SIZE = 1 << 1 # /*!< from --nosize */
+VERIFY_LINKTO = 1 << 2 # /*!< from --nolinkto */
+VERIFY_USER = 1 << 3 # /*!< from --nouser */
+VERIFY_GROUP = 1 << 4 # /*!< from --nogroup */
+VERIFY_MTIME = 1 << 5 # /*!< from --nomtime */
+VERIFY_MODE = 1 << 6 # /*!< from --nomode */
+VERIFY_RDEV = 1 << 7 # /*!< from --nodev */
# /* bits 8-14 unused, reserved for rpmVerifyAttrs */
-VERIFY_CONTEXTS = 1 << 15 # /*!< verify: from --nocontexts */
-VERIFY_FILES = 1 << 16 # /*!< verify: from --nofiles */
-VERIFY_DEPS = 1 << 17 # /*!< verify: from --nodeps */
-VERIFY_SCRIPT = 1 << 18 # /*!< verify: from --noscripts */
-VERIFY_DIGEST = 1 << 19 # /*!< verify: from --nodigest */
-VERIFY_SIGNATURE = 1 << 20 # /*!< verify: from --nosignature */
-VERIFY_PATCHES = 1 << 21 # /*!< verify: from --nopatches */
-VERIFY_HDRCHK = 1 << 22 # /*!< verify: from --nohdrchk */
-VERIFY_FOR_LIST = 1 << 23 # /*!< query: from --list */
-VERIFY_FOR_STATE = 1 << 24 # /*!< query: from --state */
-VERIFY_FOR_DOCS = 1 << 25 # /*!< query: from --docfiles */
-VERIFY_FOR_CONFIG = 1 << 26 # /*!< query: from --configfiles */
-VERIFY_FOR_DUMPFILES = 1 << 27 # /*!< query: from --dump */
+VERIFY_CONTEXTS = 1 << 15 # /*!< verify: from --nocontexts */
+VERIFY_FILES = 1 << 16 # /*!< verify: from --nofiles */
+VERIFY_DEPS = 1 << 17 # /*!< verify: from --nodeps */
+VERIFY_SCRIPT = 1 << 18 # /*!< verify: from --noscripts */
+VERIFY_DIGEST = 1 << 19 # /*!< verify: from --nodigest */
+VERIFY_SIGNATURE = 1 << 20 # /*!< verify: from --nosignature */
+VERIFY_PATCHES = 1 << 21 # /*!< verify: from --nopatches */
+VERIFY_HDRCHK = 1 << 22 # /*!< verify: from --nohdrchk */
+VERIFY_FOR_LIST = 1 << 23 # /*!< query: from --list */
+VERIFY_FOR_STATE = 1 << 24 # /*!< query: from --state */
+VERIFY_FOR_DOCS = 1 << 25 # /*!< query: from --docfiles */
+VERIFY_FOR_CONFIG = 1 << 26 # /*!< query: from --configfiles */
+VERIFY_FOR_DUMPFILES = 1 << 27 # /*!< query: from --dump */
# /* bits 28-31 used in rpmVerifyAttrs */
# Comes from C cource. lib/rpmcli.h
VERIFY_ATTRS = \
- (VERIFY_MD5 | VERIFY_SIZE | VERIFY_LINKTO | VERIFY_USER | VERIFY_GROUP | \
- VERIFY_MTIME | VERIFY_MODE | VERIFY_RDEV | VERIFY_CONTEXTS)
+ (VERIFY_MD5 | VERIFY_SIZE | VERIFY_LINKTO | VERIFY_USER | VERIFY_GROUP |
+ VERIFY_MTIME | VERIFY_MODE | VERIFY_RDEV | VERIFY_CONTEXTS)
VERIFY_ALL = \
- (VERIFY_ATTRS | VERIFY_FILES | VERIFY_DEPS | VERIFY_SCRIPT | VERIFY_DIGEST |\
- VERIFY_SIGNATURE | VERIFY_HDRCHK)
+ (VERIFY_ATTRS | VERIFY_FILES | VERIFY_DEPS | VERIFY_SCRIPT |
+ VERIFY_DIGEST | VERIFY_SIGNATURE | VERIFY_HDRCHK)
# Some masks for what checks to NOT do on these file types.
# The C code actiually resets these up for every file.
-DIR_FLAGS = ~(RPMVERIFY_MD5 | RPMVERIFY_FILESIZE | RPMVERIFY_MTIME | \
+DIR_FLAGS = ~(RPMVERIFY_MD5 | RPMVERIFY_FILESIZE | RPMVERIFY_MTIME |
RPMVERIFY_LINKTO)
# These file types all have the same mask, but hopefully this will make the
# code more readable.
FIFO_FLAGS = CHR_FLAGS = BLK_FLAGS = GHOST_FLAGS = DIR_FLAGS
-LINK_FLAGS = ~(RPMVERIFY_MD5 | RPMVERIFY_FILESIZE | RPMVERIFY_MTIME | \
+LINK_FLAGS = ~(RPMVERIFY_MD5 | RPMVERIFY_FILESIZE | RPMVERIFY_MTIME |
RPMVERIFY_MODE | RPMVERIFY_USER | RPMVERIFY_GROUP)
REG_FLAGS = ~(RPMVERIFY_LINKTO)
@@ -142,23 +142,29 @@ def s_isdev(mode):
"""
return stat.S_ISBLK(mode) | stat.S_ISCHR(mode)
+
def rpmpackagelist(rts):
"""
- Equivalent of rpm -qa. Intended for RefreshPackages() in the RPM Driver.
- Requires rpmtransactionset() to be run first to get a ts.
- Returns a list of pkgspec dicts.
+ Equivalent of rpm -qa. Intended for RefreshPackages() in the RPM Driver.
+ Requires rpmtransactionset() to be run first to get a ts.
+ Returns a list of pkgspec dicts.
- e.g. [ {'name':'foo', 'epoch':'20', 'version':'1.2', 'release':'5', 'arch':'x86_64' },
- {'name':'bar', 'epoch':'10', 'version':'5.2', 'release':'2', 'arch':'x86_64' } ]
+ e.g. [{'name':'foo', 'epoch':'20', 'version':'1.2',
+ 'release':'5', 'arch':'x86_64' },
+ {'name':'bar', 'epoch':'10', 'version':'5.2',
+ 'release':'2', 'arch':'x86_64' }]
"""
- return [{'name':header[rpm.RPMTAG_NAME],
- 'epoch':header[rpm.RPMTAG_EPOCH],
- 'version':header[rpm.RPMTAG_VERSION],
- 'release':header[rpm.RPMTAG_RELEASE],
- 'arch':header[rpm.RPMTAG_ARCH],
- 'gpgkeyid':header.sprintf("%|SIGGPG?{%{SIGGPG:pgpsig}}:{None}|").split()[-1]}
- for header in rts.dbMatch()]
+ return [
+ {'name': header[rpm.RPMTAG_NAME],
+ 'epoch': header[rpm.RPMTAG_EPOCH],
+ 'version': header[rpm.RPMTAG_VERSION],
+ 'release': header[rpm.RPMTAG_RELEASE],
+ 'arch': header[rpm.RPMTAG_ARCH],
+ 'gpgkeyid':
+ header.sprintf("%|SIGGPG?{%{SIGGPG:pgpsig}}:{None}|").split()[-1]}
+ for header in rts.dbMatch()]
+
def getindexbykeyword(index_ts, **kwargs):
"""
@@ -177,13 +183,13 @@ def getindexbykeyword(index_ts, **kwargs):
index_mi = index_ts.dbMatch()
if 'epoch' in kwargs:
- if kwargs['epoch'] != None and kwargs['epoch'] != 'None':
+ if kwargs['epoch'] is not None and kwargs['epoch'] != 'None':
kwargs['epoch'] = int(kwargs['epoch'])
else:
del(kwargs['epoch'])
- keywords = [key for key in list(kwargs.keys()) \
- if key in ('name', 'epoch', 'version', 'release', 'arch')]
+ keywords = [key for key in list(kwargs.keys())
+ if key in ('name', 'epoch', 'version', 'release', 'arch')]
keywords_len = len(keywords)
for hdr in index_mi:
match = 0
@@ -195,6 +201,7 @@ def getindexbykeyword(index_ts, **kwargs):
del index_mi
return lst
+
def getheadersbykeyword(header_ts, **kwargs):
"""
Borrowed parts of this from from Yum. Need to fix it though.
@@ -215,13 +222,13 @@ def getheadersbykeyword(header_ts, **kwargs):
header_mi = header_ts.dbMatch()
if 'epoch' in kwargs:
- if kwargs['epoch'] != None and kwargs['epoch'] != 'None':
+ if kwargs['epoch'] is not None and kwargs['epoch'] != 'None':
kwargs['epoch'] = int(kwargs['epoch'])
else:
del(kwargs['epoch'])
- keywords = [key for key in list(kwargs.keys()) \
- if key in ('name', 'epoch', 'version', 'release', 'arch')]
+ keywords = [key for key in list(kwargs.keys())
+ if key in ('name', 'epoch', 'version', 'release', 'arch')]
keywords_len = len(keywords)
for hdr in header_mi:
match = 0
@@ -233,6 +240,7 @@ def getheadersbykeyword(header_ts, **kwargs):
del header_mi
return lst
+
def prelink_md5_check(filename):
"""
Checks if a file is prelinked. If it is run it through prelink -y
@@ -254,13 +262,14 @@ def prelink_md5_check(filename):
if isprelink(plfd):
plf.close()
cmd = '/usr/sbin/prelink -y %s 2> /dev/null' \
- % (re.escape(filename))
+ % (re.escape(filename))
plf = os.popen(cmd, 'rb')
prelink = True
- elif whitelist_re.search(filename) and not blacklist_re.search(filename):
+ elif (whitelist_re.search(filename) and not
+ blacklist_re.search(filename)):
plf.close()
cmd = '/usr/sbin/prelink -y %s 2> /dev/null' \
- % (re.escape(filename))
+ % (re.escape(filename))
plf = os.popen(cmd, 'rb')
prelink = True
@@ -282,6 +291,7 @@ def prelink_md5_check(filename):
else:
return file_md5, 0
+
def prelink_size_check(filename):
"""
This check is only done if the prelink_md5_check() is not done first.
@@ -304,7 +314,7 @@ def prelink_size_check(filename):
if isprelink(plfd):
plf.close()
cmd = '/usr/sbin/prelink -y %s 2> /dev/null' \
- % (re.escape(filename))
+ % (re.escape(filename))
plf = os.popen(cmd, 'rb')
while 1:
@@ -313,10 +323,11 @@ def prelink_size_check(filename):
break
fsize += len(data)
- elif whitelist_re.search(filename) and not blacklist_re.search(filename):
+ elif (whitelist_re.search(filename) and not
+ blacklist_re.search(filename)):
plf.close()
cmd = '/usr/sbin/prelink -y %s 2> /dev/null' \
- % (re.escape(filename))
+ % (re.escape(filename))
plf = os.popen(cmd, 'rb')
while 1:
@@ -329,6 +340,7 @@ def prelink_size_check(filename):
return fsize
+
def debug_verify_flags(vflags):
"""
Decodes the verify flags bits.
@@ -360,6 +372,7 @@ def debug_verify_flags(vflags):
if vflags & RPMVERIFY_LGETFILECONFAIL:
print('RPMVERIFY_LGETFILECONFAIL')
+
def debug_file_flags(fflags):
"""
Decodes the file flags bits.
@@ -397,6 +410,7 @@ def debug_file_flags(fflags):
if fflags & rpm.RPMFILE_PUBKEY:
print('rpm.RPMFILE_PUBKEY')
+
def rpm_verify_file(fileinfo, rpmlinktos, omitmask):
"""
Verify all the files in a package.
@@ -406,8 +420,8 @@ def rpm_verify_file(fileinfo, rpmlinktos, omitmask):
flags used in the C code.
"""
- (fname, fsize, fmode, fmtime, fflags, frdev, finode, fnlink, fstate, \
- vflags, fuser, fgroup, fmd5) = fileinfo
+ (fname, fsize, fmode, fmtime, fflags, frdev, finode, fnlink, fstate,
+ vflags, fuser, fgroup, fmd5) = fileinfo
# 1. rpmtsRootDir stuff. What does it do and where to I get it from?
@@ -423,7 +437,7 @@ def rpm_verify_file(fileinfo, rpmlinktos, omitmask):
try:
lstat = os.lstat(fname)
except OSError:
- if not (fflags & (rpm.RPMFILE_MISSINGOK|rpm.RPMFILE_GHOST)):
+ if not (fflags & (rpm.RPMFILE_MISSINGOK | rpm.RPMFILE_GHOST)):
file_results.append('RPMVERIFY_LSTATFAIL')
#file_results.append(fname)
return file_results
@@ -454,10 +468,10 @@ def rpm_verify_file(fileinfo, rpmlinktos, omitmask):
prelink_size = 0
if flags & RPMVERIFY_MD5:
prelink_md5, prelink_size = prelink_md5_check(fname)
- if prelink_md5 == False:
+ if prelink_md5 is False:
file_results.append('RPMVERIFY_MD5')
file_results.append('RPMVERIFY_READFAIL')
- elif prelink_md5 != fmd5:
+ elif prelink_md5 != fmd5:
file_results.append('RPMVERIFY_MD5')
if flags & RPMVERIFY_LINKTO:
@@ -466,11 +480,11 @@ def rpm_verify_file(fileinfo, rpmlinktos, omitmask):
file_results.append('RPMVERIFY_READLINKFAIL')
file_results.append('RPMVERIFY_LINKTO')
else:
- if len(rpmlinktos) == 0 or linkto != rpmlinktos:
+ if len(rpmlinktos) == 0 or linkto != rpmlinktos:
file_results.append('RPMVERIFY_LINKTO')
if flags & RPMVERIFY_FILESIZE:
- if not (flags & RPMVERIFY_MD5): # prelink check hasn't been done.
+ if not (flags & RPMVERIFY_MD5): # prelink check hasn't been done.
prelink_size = prelink_size_check(fname)
if (prelink_size != 0): # This is a prelinked file.
if (prelink_size != fsize):
@@ -493,7 +507,7 @@ def rpm_verify_file(fileinfo, rpmlinktos, omitmask):
if flags & RPMVERIFY_RDEV:
if (stat.S_ISCHR(fmode) != stat.S_ISCHR(lstat.st_mode) or
- stat.S_ISBLK(fmode) != stat.S_ISBLK(lstat.st_mode)):
+ stat.S_ISBLK(fmode) != stat.S_ISBLK(lstat.st_mode)):
file_results.append('RPMVERIFY_RDEV')
elif (s_isdev(fmode) & s_isdev(lstat.st_mode)):
st_rdev = lstat.st_rdev
@@ -522,6 +536,7 @@ def rpm_verify_file(fileinfo, rpmlinktos, omitmask):
return file_results
+
def rpm_verify_dependencies(header):
"""
Check package dependencies. Header is an rpm.hdr.
@@ -545,6 +560,7 @@ def rpm_verify_dependencies(header):
_ts1.closeDB()
return dep_errors
+
def rpm_verify_package(vp_ts, header, verify_options):
"""
Verify a single package specified by header. Header is an rpm.hdr.
@@ -630,7 +646,7 @@ def rpm_verify_package(vp_ts, header, verify_options):
else:
file_stat.append(' ')
- file_stat.append(fileinfo[0]) # The filename.
+ file_stat.append(fileinfo[0]) # The filename.
package_results.setdefault('files', []).append(file_stat)
# Run the verify script if there is one.
@@ -642,10 +658,10 @@ def rpm_verify_package(vp_ts, header, verify_options):
# If there have been any errors, add the package nevra to the result.
if len(package_results) > 0:
- package_results.setdefault('nevra', (header[rpm.RPMTAG_NAME], \
- header[rpm.RPMTAG_EPOCH], \
- header[rpm.RPMTAG_VERSION], \
- header[rpm.RPMTAG_RELEASE], \
+ package_results.setdefault('nevra', (header[rpm.RPMTAG_NAME],
+ header[rpm.RPMTAG_EPOCH],
+ header[rpm.RPMTAG_VERSION],
+ header[rpm.RPMTAG_RELEASE],
header[rpm.RPMTAG_ARCH]))
else:
package_results = None
@@ -655,6 +671,7 @@ def rpm_verify_package(vp_ts, header, verify_options):
return package_results
+
def rpm_verify(verify_ts, verify_pkgspec, verify_options=[]):
"""
Requires rpmtransactionset() to be run first to get a ts.
@@ -669,7 +686,8 @@ def rpm_verify(verify_ts, verify_pkgspec, verify_options=[]):
Or any combination of keywords to select one or more packages to verify.
- options is a list of 'rpm --verify' options. Default is to check everything.
+ options is a list of 'rpm --verify' options.
+ Default is to check everything.
e.g.:
[ 'nodeps', 'nodigest', 'nofiles', 'noscripts', 'nosignature',
'nolinkto' 'nomd5', 'nosize', 'nouser', 'nogroup', 'nomtime',
@@ -705,6 +723,7 @@ def rpm_verify(verify_ts, verify_pkgspec, verify_options=[]):
return verify_results
+
def rpmtransactionset():
"""
A simple wrapper for rpm.TransactionSet() to keep everthiing together.
@@ -714,15 +733,15 @@ def rpmtransactionset():
ts = rpm.TransactionSet()
return ts
+
class Rpmtscallback(object):
"""
- Callback for ts.run(). Used for adding, upgrading and removing packages.
- Starting with all possible reasons codes, but bcfg2 will probably only
- make use of a few of them.
-
- Mostly just printing stuff at the moment to understand how the callback
- is used.
+ Callback for ts.run(). Used for adding, upgrading and removing packages.
+ Starting with all possible reasons codes, but bcfg2 will probably only
+ make use of a few of them.
+ Mostly just printing stuff at the moment to understand how the callback
+ is used.
"""
def __init__(self):
self.fdnos = {}
@@ -731,14 +750,14 @@ class Rpmtscallback(object):
"""
Generic rpmts call back.
"""
- if reason == rpm.RPMCALLBACK_INST_OPEN_FILE:
+ if reason == rpm.RPMCALLBACK_INST_OPEN_FILE:
pass
elif reason == rpm.RPMCALLBACK_INST_CLOSE_FILE:
pass
elif reason == rpm.RPMCALLBACK_INST_START:
pass
elif reason == rpm.RPMCALLBACK_TRANS_PROGRESS or \
- reason == rpm.RPMCALLBACK_INST_PROGRESS:
+ reason == rpm.RPMCALLBACK_INST_PROGRESS:
pass
# rpm.RPMCALLBACK_INST_PROGRESS'
elif reason == rpm.RPMCALLBACK_TRANS_START:
@@ -813,6 +832,7 @@ def rpm_erase(erase_pkgspecs, erase_flags):
del erase_ts
return erase_problems
+
def display_verify_file(file_results):
'''
Display file results similar to rpm --verify.
@@ -874,15 +894,15 @@ def display_verify_file(file_results):
print(result_string + ' ' + filetype + ' ' + filename)
sys.stdout.flush()
-#===============================================================================
+#=============================================================================
# Some options and output to assist with development and testing.
# These are not intended for normal use.
if __name__ == "__main__":
p = optparse.OptionParser()
- p.add_option('--name', action='store', \
- default=None, \
+ p.add_option('--name', action='store',
+ default=None,
help='''Package name to verify.
******************************************
@@ -890,105 +910,108 @@ if __name__ == "__main__":
******************************************
The specified operation will be carried out on all
- instances of packages that match the package specification
+ instances of packages that match the package
+ specification
(name, epoch, version, release, arch).''')
- p.add_option('--epoch', action='store', \
- default=None, \
+ p.add_option('--epoch', action='store',
+ default=None,
help='''Package epoch.''')
- p.add_option('--version', action='store', \
- default=None, \
+ p.add_option('--version', action='store',
+ default=None,
help='''Package version.''')
- p.add_option('--release', action='store', \
- default=None, \
+ p.add_option('--release', action='store',
+ default=None,
help='''Package release.''')
- p.add_option('--arch', action='store', \
- default=None, \
+ p.add_option('--arch', action='store',
+ default=None,
help='''Package arch.''')
- p.add_option('--erase', '-e', action='store_true', \
- default=None, \
- help='''****************************************************
- REMOVE PACKAGES. THERE ARE NO WARNINGS. MULTIPLE
- PACKAGES WILL BE REMOVED IF A FULL PACKAGE SPEC IS NOT
- GIVEN. E.G. IF JUST A NAME IS GIVEN ALL INSTALLED
- INSTANCES OF THAT PACKAGE WILL BE REMOVED PROVIDED
- DEPENDENCY CHECKS PASS. IF JUST AN EPOCH IS GIVEN
- ALL PACKAGE INSTANCES WITH THAT EPOCH WILL BE REMOVED.
- ****************************************************''')
-
- p.add_option('--list', '-l', action='store_true', \
+ p.add_option('--erase', '-e', action='store_true',
+ default=None,
+ help=
+ '''****************************************************
+ REMOVE PACKAGES. THERE ARE NO WARNINGS. MULTIPLE
+ PACKAGES WILL BE REMOVED IF A FULL PACKAGE SPEC IS NOT
+ GIVEN. E.G. IF JUST A NAME IS GIVEN ALL INSTALLED
+ INSTANCES OF THAT PACKAGE WILL BE REMOVED PROVIDED
+ DEPENDENCY CHECKS PASS. IF JUST AN EPOCH IS GIVEN
+ ALL PACKAGE INSTANCES WITH THAT EPOCH WILL BE REMOVED.
+ ****************************************************''')
+
+ p.add_option('--list', '-l', action='store_true',
help='''List package identity info. rpm -qa ish equivalent
intended for use in RefreshPackages().''')
- p.add_option('--verify', action='store_true', \
+ p.add_option('--verify', action='store_true',
help='''Verify Package(s). Output is only produced after all
packages has been verified. Be patient.''')
- p.add_option('--verbose', '-v', action='store_true', \
+ p.add_option('--verbose', '-v', action='store_true',
help='''Verbose output for --verify option. Output is the
same as rpm -v --verify.''')
- p.add_option('--nodeps', action='store_true', \
- default=False, \
+ p.add_option('--nodeps', action='store_true',
+ default=False,
help='Do not do dependency testing.')
- p.add_option('--nodigest', action='store_true', \
+ p.add_option('--nodigest', action='store_true',
help='Do not check package digests.')
- p.add_option('--nofiles', action='store_true', \
+ p.add_option('--nofiles', action='store_true',
help='Do not do file checks.')
- p.add_option('--noscripts', action='store_true', \
+ p.add_option('--noscripts', action='store_true',
help='Do not run verification scripts.')
- p.add_option('--nosignature', action='store_true', \
+ p.add_option('--nosignature', action='store_true',
help='Do not do package signature verification.')
- p.add_option('--nolinkto', action='store_true', \
+ p.add_option('--nolinkto', action='store_true',
help='Do not do symlink tests.')
- p.add_option('--nomd5', action='store_true', \
+ p.add_option('--nomd5', action='store_true',
help='''Do not do MD5 checksums on files. Note that this does
- not work for prelink files yet.''')
+ not work for prelink files yet.''')
- p.add_option('--nosize', action='store_true', \
- help='''Do not do file size tests. Note that this does not work
- for prelink files yet.''')
+ p.add_option('--nosize', action='store_true',
+ help='''Do not do file size tests. Note that this does not
+ work for prelink files yet.''')
- p.add_option('--nouser', action='store_true', \
+ p.add_option('--nouser', action='store_true',
help='Do not check file user ownership.')
- p.add_option('--nogroup', action='store_true', \
+ p.add_option('--nogroup', action='store_true',
help='Do not check file group ownership.')
- p.add_option('--nomtime', action='store_true', \
+ p.add_option('--nomtime', action='store_true',
help='Do not check file modification times.')
- p.add_option('--nomode', action='store_true', \
+ p.add_option('--nomode', action='store_true',
help='Do not check file modes (permissions).')
- p.add_option('--nordev', action='store_true', \
+ p.add_option('--nordev', action='store_true',
help='Do not check device node.')
- p.add_option('--notriggers', action='store_true', \
+ p.add_option('--notriggers', action='store_true',
help='Do not do not generate triggers on erase.')
- p.add_option('--repackage', action='store_true', \
+ p.add_option('--repackage', action='store_true',
help='''Do repackage on erase.i Packages are put
in /var/spool/repackage.''')
- p.add_option('--allmatches', action='store_true', \
- help='''Remove all package instances that match the
- pkgspec.
+ p.add_option('--allmatches', action='store_true',
+ help=
+ '''Remove all package instances that match the
+ pkgspec.
- ***************************************************
- NO WARNINGS ARE GIVEN. IF THERE IS NO PACKAGE SPEC
- THAT MEANS ALL PACKAGES!!!!
- ***************************************************''')
+ ***************************************************
+ NO WARNINGS ARE GIVEN. IF THERE IS NO PACKAGE SPEC
+ THAT MEANS ALL PACKAGES!!!!
+ ***************************************************''')
options, arguments = p.parse_args()
@@ -1119,7 +1142,7 @@ class RPM(Bcfg2.Client.Tools.PkgTool):
__new_req__ = {'Package': ['name'],
'Instance': ['version', 'release', 'arch']}
- __new_ireq__ = {'Package': ['uri'], \
+ __new_ireq__ = {'Package': ['uri'],
'Instance': ['simplefile']}
__gpg_req__ = {'Package': ['name', 'version']}
@@ -1138,8 +1161,8 @@ class RPM(Bcfg2.Client.Tools.PkgTool):
# create a global ignore list used when ignoring particular
# files during package verification
- self.ignores = [entry.get('name') for struct in config for entry in struct \
- if entry.get('type') == 'ignore']
+ self.ignores = [entry.get('name') for struct in config
+ for entry in struct if entry.get('type') == 'ignore']
self.instance_status = {}
self.extra_instances = []
self.modlists = {}
@@ -1174,7 +1197,7 @@ class RPM(Bcfg2.Client.Tools.PkgTool):
# Many, if not most package verifies can be caused by out of
# date prelinking.
if (os.path.isfile('/usr/sbin/prelink') and
- not Bcfg2.Options.setup.dry_run):
+ not Bcfg2.Options.setup.dry_run):
rv = self.cmd.run('/usr/sbin/prelink -a -mR')
if rv.success:
self.logger.debug('Pre-emptive prelink succeeded')
@@ -1202,7 +1225,7 @@ class RPM(Bcfg2.Client.Tools.PkgTool):
refresh_ts = rpmtransactionset()
# Don't bother with signature checks at this stage. The GPG keys might
# not be installed.
- refresh_ts.setVSFlags(rpm._RPMVSF_NODIGESTS|rpm._RPMVSF_NOSIGNATURES)
+ refresh_ts.setVSFlags(rpm._RPMVSF_NODIGESTS | rpm._RPMVSF_NOSIGNATURES)
for nevra in rpmpackagelist(refresh_ts):
self.installed.setdefault(nevra['name'], []).append(nevra)
if Bcfg2.Options.setup.debug:
@@ -1210,7 +1233,7 @@ class RPM(Bcfg2.Client.Tools.PkgTool):
for name, instances in list(self.installed.items()):
self.logger.debug(" " + name)
for inst in instances:
- self.logger.debug(" %s" %self.str_evra(inst))
+ self.logger.debug(" %s" % self.str_evra(inst))
refresh_ts.closeDB()
del refresh_ts
@@ -1240,18 +1263,19 @@ class RPM(Bcfg2.Client.Tools.PkgTool):
Constructs the text prompts for interactive mode.
"""
- instances = [inst for inst in entry if inst.tag == 'Instance' or inst.tag == 'Package']
+ instances = [inst for inst in entry if inst.tag == 'Instance' or
+ inst.tag == 'Package']
if instances == []:
# We have an old style no Instance entry. Convert it to new style.
instance = Bcfg2.Client.XML.SubElement(entry, 'Package')
for attrib in list(entry.attrib.keys()):
instance.attrib[attrib] = entry.attrib[attrib]
if (Bcfg2.Options.setup.rpm_pkg_checks and
- entry.get('pkg_checks', 'true').lower() == 'true'):
+ entry.get('pkg_checks', 'true').lower() == 'true'):
if 'any' in [entry.get('version'), pinned_version]:
version, release = 'any', 'any'
elif entry.get('version') == 'auto':
- if pinned_version != None:
+ if pinned_version is not None:
version, release = pinned_version.split('-')
else:
return False
@@ -1261,142 +1285,193 @@ class RPM(Bcfg2.Client.Tools.PkgTool):
instance.set('release', release)
if entry.get('verify', 'true') == 'false':
instance.set('verify', 'false')
- instances = [ instance ]
+ instances = [instance]
- self.logger.debug("Verifying package instances for %s" % entry.get('name'))
+ self.logger.debug("Verifying package instances for %s" %
+ entry.get('name'))
package_fail = False
qtext_versions = ''
if entry.get('name') in self.installed:
# There is at least one instance installed.
if (Bcfg2.Options.setup.rpm_pkg_checks and
- entry.get('pkg_checks', 'true').lower() == 'true'):
+ entry.get('pkg_checks', 'true').lower() == 'true'):
rpmTs = rpm.TransactionSet()
rpmHeader = None
for h in rpmTs.dbMatch(rpm.RPMTAG_NAME, entry.get('name')):
- if rpmHeader is None or rpm.versionCompare(h, rpmHeader) > 0:
+ if rpmHeader is None or \
+ rpm.versionCompare(h, rpmHeader) > 0:
rpmHeader = h
- rpmProvides = [ h['provides'] for h in \
- rpmTs.dbMatch(rpm.RPMTAG_NAME, entry.get('name')) ]
+ rpmProvides = [h['provides'] for h in
+ rpmTs.dbMatch(rpm.RPMTAG_NAME,
+ entry.get('name'))]
rpmIntersection = set(rpmHeader['provides']) & \
- set(self.installOnlyPkgs)
+ set(self.installOnlyPkgs)
if len(rpmIntersection) > 0:
# Packages that should only be installed or removed.
# e.g. kernels.
self.logger.debug(" Install only package.")
for inst in instances:
- self.instance_status.setdefault(inst, {})['installed'] = False
+ self.instance_status.setdefault(inst, {})['installed']\
+ = False
self.instance_status[inst]['version_fail'] = False
- if inst.tag == 'Package' and len(self.installed[entry.get('name')]) > 1:
- self.logger.error("WARNING: Multiple instances of package %s are installed." % \
+ if inst.tag == 'Package' and \
+ len(self.installed[entry.get('name')]) > 1:
+ self.logger.error("WARNING: Multiple instances of "
+ "package %s are installed." %
(entry.get('name')))
for pkg in self.installed[entry.get('name')]:
- if inst.get('version') == 'any' or self.pkg_vr_equal(inst, pkg) \
- or self.inst_evra_equal(inst, pkg):
+ if inst.get('version') == 'any' or \
+ self.pkg_vr_equal(inst, pkg) or \
+ self.inst_evra_equal(inst, pkg):
if inst.get('version') == 'any':
self.logger.error("got any version")
- self.logger.debug(" %s" % self.str_evra(inst))
+ self.logger.debug(" %s" %
+ self.str_evra(inst))
self.instance_status[inst]['installed'] = True
if (Bcfg2.Options.setup.rpm_pkg_verify and
- inst.get('pkg_verify', 'true').lower() == 'true'):
- flags = inst.get('verify_flags', '').split(',') + self.verify_flags
+ inst.get('pkg_verify',
+ 'true').lower() == 'true'):
+ flags = inst.get('verify_flags',
+ '').split(',') + \
+ self.verify_flags
if pkg.get('gpgkeyid', '')[-8:] not in self.gpg_keyids and \
entry.get('name') != 'gpg-pubkey':
flags += ['nosignature', 'nodigest']
- self.logger.debug('WARNING: Package %s %s requires GPG Public key with ID %s'\
- % (pkg.get('name'), self.str_evra(pkg), \
- pkg.get('gpgkeyid', '')))
- self.logger.debug(' Disabling signature check.')
+ self.logger.debug('WARNING: Package '
+ '%s %s requires GPG '
+ 'Public key with ID '
+ '%s' %
+ (pkg.get('name'),
+ self.str_evra(pkg),
+ pkg.get('gpgkeyid',
+ '')))
+ self.logger.debug(' Disabling '
+ 'signature check.')
if Bcfg2.Options.setup.quick:
if prelink_exists:
flags += ['nomd5', 'nosize']
else:
flags += ['nomd5']
- self.logger.debug(" verify_flags = %s" % flags)
+ self.logger.debug(" verify_flags = "
+ "%s" % flags)
if inst.get('verify', 'true') == 'false':
- self.instance_status[inst]['verify'] = None
+ self.instance_status[inst]['verify'] =\
+ None
else:
vp_ts = rpmtransactionset()
- self.instance_status[inst]['verify'] = \
- rpm_verify( vp_ts, pkg, flags)
+ self.instance_status[inst]['verify'] =\
+ rpm_verify(vp_ts, pkg, flags)
vp_ts.closeDB()
del vp_ts
- if self.instance_status[inst]['installed'] == False:
- self.logger.info(" Package %s %s not installed." % \
- (entry.get('name'), self.str_evra(inst)))
+ if not self.instance_status[inst]['installed']:
+ self.logger.info(" Package %s %s not "
+ "installed." %
+ (entry.get('name'),
+ self.str_evra(inst)))
- qtext_versions = qtext_versions + 'I(%s) ' % self.str_evra(inst)
+ qtext_versions = qtext_versions + 'I(%s) ' % \
+ self.str_evra(inst)
entry.set('current_exists', 'false')
else:
# Normal Packages that can be upgraded.
for inst in instances:
- self.instance_status.setdefault(inst, {})['installed'] = False
+ self.instance_status.setdefault(inst, {})['installed']\
+ = False
self.instance_status[inst]['version_fail'] = False
- # Only installed packages with the same architecture are
- # relevant.
- if inst.get('arch', None) == None:
+ # only installed packages with the same architecture
+ # are relevant.
+ if inst.get('arch', None) is None:
arch_match = self.installed[entry.get('name')]
else:
- arch_match = [pkg for pkg in self.installed[entry.get('name')] \
- if pkg.get('arch', None) == inst.get('arch', None)]
+ arch_match = [pkg for pkg in
+ self.installed[entry.get('name')]
+ if pkg.get('arch', None) ==
+ inst.get('arch', None)]
if len(arch_match) > 1:
- self.logger.error("Multiple instances of package %s installed with the same achitecture." % \
- (entry.get('name')))
+ self.logger.error("Multiple instances of package "
+ "%s installed with the same "
+ "achitecture." %
+ (entry.get('name')))
elif len(arch_match) == 1:
# There is only one installed like there should be.
# Check that it is the right version.
for pkg in arch_match:
- if inst.get('version') == 'any' or self.pkg_vr_equal(inst, pkg) or \
- self.inst_evra_equal(inst, pkg):
- self.logger.debug(" %s" % self.str_evra(inst))
- self.instance_status[inst]['installed'] = True
+ if inst.get('version') == 'any' or \
+ self.pkg_vr_equal(inst, pkg) or \
+ self.inst_evra_equal(inst, pkg):
+ self.logger.debug(" %s" %
+ self.str_evra(inst))
+ self.instance_status[inst]['installed'] = \
+ True
if (Bcfg2.Options.setup.rpm_pkg_verify and
- inst.get('pkg_verify', 'true').lower() == 'true'):
- flags = inst.get('verify_flags', '').split(',') + self.verify_flags
- if pkg.get('gpgkeyid', '')[-8:] not in self.gpg_keyids and \
- 'nosignature' not in flags:
- flags += ['nosignature', 'nodigest']
- self.logger.info('WARNING: Package %s %s requires GPG Public key with ID %s'\
- % (pkg.get('name'), self.str_evra(pkg), \
- pkg.get('gpgkeyid', '')))
- self.logger.info(' Disabling signature check.')
+ inst.get(
+ 'pkg_verify',
+ 'true').lower() == 'true'):
+ flags = inst.get('verify_flags',
+ '').split(',') + \
+ self.verify_flags
+ if pkg.get('gpgkeyid', '')[-8:] not in\
+ self.gpg_keyids and 'nosignature'\
+ not in flags:
+ flags += ['nosignature',
+ 'nodigest']
+ self.logger.info(
+ 'WARNING: Package %s %s '
+ 'requires GPG Public key with '
+ 'ID %s' % (pkg.get('name'),
+ self.str_evra(pkg),
+ pkg.get('gpgkeyid',
+ '')))
+ self.logger.info(
+ ' Disabling signature '
+ 'check.')
if Bcfg2.Options.setup.quick:
if prelink_exists:
flags += ['nomd5', 'nosize']
else:
flags += ['nomd5']
- self.logger.debug(" verify_flags = %s" % flags)
+ self.logger.debug(
+ " verify_flags = %s" %
+ flags)
- if inst.get('verify', 'true') == 'false':
+ if inst.get('verify', 'true') == \
+ 'false':
self.instance_status[inst]['verify'] = None
else:
vp_ts = rpmtransactionset()
- self.instance_status[inst]['verify'] = \
- rpm_verify( vp_ts, pkg, flags )
+ self.instance_status[inst]['verify'] = rpm_verify(vp_ts, pkg, flags)
vp_ts.closeDB()
del vp_ts
else:
# Wrong version installed.
- self.instance_status[inst]['version_fail'] = True
- self.logger.info(" Wrong version installed. Want %s, but have %s"\
- % (self.str_evra(inst), self.str_evra(pkg)))
-
- qtext_versions = qtext_versions + 'U(%s -> %s) ' % \
- (self.str_evra(pkg), self.str_evra(inst))
+ self.instance_status[inst]['version_fail']\
+ = True
+ self.logger.info(" Wrong version "
+ "installed. Want %s, but "
+ "have %s" %
+ (self.str_evra(inst),
+ self.str_evra(pkg)))
+
+ qtext_versions = qtext_versions + \
+ 'U(%s -> %s) ' % (self.str_evra(pkg),
+ self.str_evra(inst))
elif len(arch_match) == 0:
# This instance is not installed.
self.instance_status[inst]['installed'] = False
- self.logger.info(" %s is not installed." % self.str_evra(inst))
- qtext_versions = qtext_versions + 'I(%s) ' % self.str_evra(inst)
+ self.logger.info(" %s is not installed." %
+ self.str_evra(inst))
+ qtext_versions = qtext_versions + \
+ 'I(%s) ' % self.str_evra(inst)
# Check the rpm verify results.
for inst in instances:
@@ -1404,100 +1479,121 @@ class RPM(Bcfg2.Client.Tools.PkgTool):
# Dump the rpm verify results.
#****Write something to format this nicely.*****
if (Bcfg2.Options.setup.debug and
- self.instance_status[inst].get('verify', None)):
+ self.instance_status[inst].get('verify', None)):
self.logger.debug(self.instance_status[inst]['verify'])
self.instance_status[inst]['verify_fail'] = False
if self.instance_status[inst].get('verify', None):
if len(self.instance_status[inst].get('verify')) > 1:
- self.logger.info("WARNING: Verification of more than one package instance.")
+ self.logger.info("WARNING: Verification of more "
+ "than one package instance.")
for result in self.instance_status[inst]['verify']:
# Check header results
if result.get('hdr', None):
instance_fail = True
- self.instance_status[inst]['verify_fail'] = True
+ self.instance_status[inst]['verify_fail'] = \
+ True
# Check dependency results
if result.get('deps', None):
instance_fail = True
- self.instance_status[inst]['verify_fail'] = True
-
- # Check the rpm verify file results against the modlist
- # and entry and per Instance Ignores.
- ignores = [ig.get('name') for ig in entry.findall('Ignore')] + \
- [ig.get('name') for ig in inst.findall('Ignore')] + \
- self.ignores
+ self.instance_status[inst]['verify_fail'] = \
+ True
+
+ # check the rpm verify file results against
+ # the modlist and entry and per Instance Ignores.
+ ignores = [ig.get('name')
+ for ig in entry.findall('Ignore')] + \
+ [ig.get('name')
+ for ig in inst.findall('Ignore')] + \
+ self.ignores
for file_result in result.get('files', []):
if file_result[-1] not in modlist + ignores:
instance_fail = True
- self.instance_status[inst]['verify_fail'] = True
+ self.instance_status[inst]['verify_fail'] \
+ = True
else:
- self.logger.debug(" Modlist/Ignore match: %s" % \
- (file_result[-1]))
+ self.logger.debug(" Modlist/Ignore "
+ "match: %s" %
+ (file_result[-1]))
- if instance_fail == True:
- self.logger.debug("*** Instance %s failed RPM verification ***" % \
+ if instance_fail:
+ self.logger.debug("*** Instance %s failed RPM "
+ "verification ***" %
self.str_evra(inst))
- qtext_versions = qtext_versions + 'R(%s) ' % self.str_evra(inst)
+ qtext_versions = qtext_versions + \
+ 'R(%s) ' % self.str_evra(inst)
self.modlists[entry] = modlist
- # Attach status structure for return to server for reporting.
- inst.set('verify_status', str(self.instance_status[inst]))
+ # Attach status structure for reporting.
+ inst.set('verify_status',
+ str(self.instance_status[inst]))
- if self.instance_status[inst]['installed'] == False or \
- self.instance_status[inst].get('version_fail', False)== True or \
- self.instance_status[inst].get('verify_fail', False) == True:
+ version_fail = self.instance_status[inst].get(
+ 'version_fail', False)
+ verify_fail = self.instance_status[inst].get(
+ 'verify_fail', False)
+ if not self.instance_status[inst]['installed'] or \
+ version_fail or verify_fail:
package_fail = True
self.instance_status[inst]['pkg'] = entry
self.modlists[entry] = modlist
# Find Installed Instances that are not in the Config.
- extra_installed = self.FindExtraInstances(entry, self.installed[entry.get('name')])
- if extra_installed != None:
+ extra_installed = self.FindExtraInstances(
+ entry, self.installed[entry.get('name')])
+ if extra_installed is not None:
package_fail = True
self.extra_instances.append(extra_installed)
for inst in extra_installed.findall('Instance'):
- qtext_versions = qtext_versions + 'D(%s) ' % self.str_evra(inst)
- self.logger.debug("Found Extra Instances %s" % qtext_versions)
-
- if package_fail == True:
- self.logger.info(" Package %s failed verification." % \
- (entry.get('name')))
- qtext = 'Install/Upgrade/delete Package %s instance(s) - %s (y/N) ' % \
- (entry.get('name'), qtext_versions)
+ qtext_versions = qtext_versions + \
+ 'D(%s) ' % self.str_evra(inst)
+ self.logger.debug("Found Extra Instances %s" %
+ qtext_versions)
+
+ if package_fail:
+ self.logger.info(" Package %s failed verification."
+ % (entry.get('name')))
+ qtext = 'Install/Upgrade/delete Package %s instance(s) - '\
+ '%s (y/N) ' % (entry.get('name'), qtext_versions)
entry.set('qtext', qtext)
bcfg2_versions = ''
- for bcfg2_inst in [inst for inst in instances if inst.tag == 'Instance']:
- bcfg2_versions = bcfg2_versions + '(%s) ' % self.str_evra(bcfg2_inst)
+ for bcfg2_inst in [inst for inst in instances
+ if inst.tag == 'Instance']:
+ bcfg2_versions = bcfg2_versions + \
+ '(%s) ' % self.str_evra(bcfg2_inst)
if bcfg2_versions != '':
entry.set('version', bcfg2_versions)
installed_versions = ''
for installed_inst in self.installed[entry.get('name')]:
- installed_versions = installed_versions + '(%s) ' % \
- self.str_evra(installed_inst)
+ installed_versions = installed_versions + \
+ '(%s) ' % self.str_evra(installed_inst)
entry.set('current_version', installed_versions)
return False
else:
# There are no Instances of this package installed.
- self.logger.debug("Package %s has no instances installed" % (entry.get('name')))
+ self.logger.debug("Package %s has no instances installed" %
+ (entry.get('name')))
entry.set('current_exists', 'false')
bcfg2_versions = ''
for inst in instances:
- qtext_versions = qtext_versions + 'I(%s) ' % self.str_evra(inst)
+ qtext_versions = qtext_versions + \
+ 'I(%s) ' % self.str_evra(inst)
self.instance_status.setdefault(inst, {})['installed'] = False
self.modlists[entry] = modlist
self.instance_status[inst]['pkg'] = entry
if inst.tag == 'Instance':
- bcfg2_versions = bcfg2_versions + '(%s) ' % self.str_evra(inst)
+ bcfg2_versions = bcfg2_versions + \
+ '(%s) ' % self.str_evra(inst)
if bcfg2_versions != '':
entry.set('version', bcfg2_versions)
- entry.set('qtext', "Install Package %s Instance(s) %s? (y/N) " % \
+ entry.set('qtext', "Install Package %s Instance(s) %s? (y/N) " %
(entry.get('name'), qtext_versions))
return False
@@ -1517,26 +1613,31 @@ class RPM(Bcfg2.Client.Tools.PkgTool):
for pkg in packages:
for inst in pkg:
if pkg.get('name') != 'gpg-pubkey':
- pkgspec = { 'name':pkg.get('name'),
- 'epoch':inst.get('epoch', None),
- 'version':inst.get('version'),
- 'release':inst.get('release'),
- 'arch':inst.get('arch') }
+ pkgspec = {'name': pkg.get('name'),
+ 'epoch': inst.get('epoch', None),
+ 'version': inst.get('version'),
+ 'release': inst.get('release'),
+ 'arch': inst.get('arch')}
pkgspec_list.append(pkgspec)
else:
- pkgspec = { 'name':pkg.get('name'),
- 'version':inst.get('version'),
- 'release':inst.get('release')}
- self.logger.info("WARNING: gpg-pubkey package not in configuration %s %s"\
- % (pkgspec.get('name'), self.str_evra(pkgspec)))
- self.logger.info(" This package will be deleted in a future version of the RPM driver.")
+ pkgspec = {'name': pkg.get('name'),
+ 'version': inst.get('version'),
+ 'release': inst.get('release')}
+ self.logger.info("WARNING: gpg-pubkey package not in "
+ "configuration %s %s" %
+ (pkgspec.get('name'),
+ self.str_evra(pkgspec)))
+ self.logger.info(" This package will be deleted "
+ "in a future version of the RPM driver.")
#pkgspec_list.append(pkg_spec)
- erase_results = rpm_erase(pkgspec_list, Bcfg2.Options.setup.rpm_erase_flags)
+ erase_results = rpm_erase(pkgspec_list,
+ Bcfg2.Options.setup.rpm_erase_flags)
if erase_results == []:
self.modified += packages
for pkg in pkgspec_list:
- self.logger.info("Deleted %s %s" % (pkg.get('name'), self.str_evra(pkg)))
+ self.logger.info("Deleted %s %s" % (pkg.get('name'),
+ self.str_evra(pkg)))
else:
self.logger.info("Bulk erase failed with errors:")
self.logger.debug("Erase results = %s" % erase_results)
@@ -1546,32 +1647,38 @@ class RPM(Bcfg2.Client.Tools.PkgTool):
pkg_modified = False
for inst in pkg:
if pkg.get('name') != 'gpg-pubkey':
- pkgspec = { 'name':pkg.get('name'),
- 'epoch':inst.get('epoch', None),
- 'version':inst.get('version'),
- 'release':inst.get('release'),
- 'arch':inst.get('arch') }
+ pkgspec = {'name': pkg.get('name'),
+ 'epoch': inst.get('epoch', None),
+ 'version': inst.get('version'),
+ 'release': inst.get('release'),
+ 'arch': inst.get('arch')}
pkgspec_list.append(pkgspec)
else:
- pkgspec = { 'name':pkg.get('name'),
- 'version':inst.get('version'),
- 'release':inst.get('release')}
- self.logger.info("WARNING: gpg-pubkey package not in configuration %s %s"\
- % (pkgspec.get('name'), self.str_evra(pkgspec)))
- self.logger.info(" This package will be deleted in a future version of the RPM driver.")
- continue # Don't delete the gpg-pubkey packages for now.
+ pkgspec = {'name': pkg.get('name'),
+ 'version': inst.get('version'),
+ 'release': inst.get('release')}
+ self.logger.info("WARNING: gpg-pubkey package not in "
+ "configuration %s %s" %
+ (pkgspec.get('name'),
+ self.str_evra(pkgspec)))
+ self.logger.info(" This package will be "
+ "deleted in a future version of the "
+ "RPM driver.")
+ continue # don't delete the gpg-pubkey packages
erase_results = rpm_erase(
[pkgspec],
Bcfg2.Options.setup.rpm_erase_flags)
if erase_results == []:
pkg_modified = True
- self.logger.info("Deleted %s %s" % \
- (pkgspec.get('name'), self.str_evra(pkgspec)))
+ self.logger.info("Deleted %s %s" %
+ (pkgspec.get('name'),
+ self.str_evra(pkgspec)))
else:
- self.logger.error("unable to delete %s %s" % \
- (pkgspec.get('name'), self.str_evra(pkgspec)))
+ self.logger.error("unable to delete %s %s" %
+ (pkgspec.get('name'),
+ self.str_evra(pkgspec)))
self.logger.debug("Failure = %s" % erase_results)
- if pkg_modified == True:
+ if pkg_modified:
self.modified.append(pkg)
self.RefreshPackages()
@@ -1589,30 +1696,33 @@ class RPM(Bcfg2.Client.Tools.PkgTool):
if not inst_status.get('installed', False):
if (instance.get('install_missing', 'true').lower() == "true" and
- Bcfg2.Options.setup.rpm_install_missing):
+ Bcfg2.Options.setup.rpm_install_missing):
fix = True
else:
- self.logger.debug('Installed Action for %s %s is to not install' % \
+ self.logger.debug('Installed Action for %s %s is to not '
+ 'install' %
(inst_status.get('pkg').get('name'),
self.str_evra(instance)))
elif inst_status.get('version_fail', False):
if (instance.get('fix_version', 'true').lower() == "true" and
- Bcfg2.Options.setup.rpm_fix_version):
+ Bcfg2.Options.setup.rpm_fix_version):
fix = True
else:
- self.logger.debug('Version Fail Action for %s %s is to not upgrade' % \
+ self.logger.debug('Version Fail Action for %s %s is to '
+ 'not upgrade' %
(inst_status.get('pkg').get('name'),
self.str_evra(instance)))
elif inst_status.get('verify_fail', False):
if (instance.get('reinstall_broken', 'true').lower() == "true" and
- Bcfg2.Options.setup.rpm_reinstall_broken):
+ Bcfg2.Options.setup.rpm_reinstall_broken):
for inst in inst_status.get('verify'):
# This needs to be a for loop rather than a straight get()
# because the underlying routines handle multiple packages
# and return a list of results.
- self.logger.debug('reinstall_check: %s %s:%s-%s.%s' % inst.get('nevra'))
+ self.logger.debug('reinstall_check: %s %s:%s-%s.%s' %
+ inst.get('nevra'))
if inst.get("hdr", False):
fix = True
@@ -1620,7 +1730,8 @@ class RPM(Bcfg2.Client.Tools.PkgTool):
elif inst.get('files', False):
# Parse rpm verify file results
for file_result in inst.get('files', []):
- self.logger.debug('reinstall_check: file: %s' % file_result)
+ self.logger.debug('reinstall_check: file: %s' %
+ file_result)
if file_result[-2] != 'c':
fix = True
break
@@ -1629,9 +1740,10 @@ class RPM(Bcfg2.Client.Tools.PkgTool):
elif inst.get("deps", False):
fix = False
else:
- self.logger.debug('Verify Fail Action for %s %s is to not reinstall' % \
- (inst_status.get('pkg').get('name'),
- self.str_evra(instance)))
+ self.logger.debug('Verify Fail Action for %s %s is to not '
+ 'reinstall' %
+ (inst_status.get('pkg').get('name'),
+ self.str_evra(instance)))
return fix
@@ -1665,18 +1777,20 @@ class RPM(Bcfg2.Client.Tools.PkgTool):
# Can not reverify because we don't have a package entry.
if len(self.extra_instances) > 0:
if (Bcfg2.Options.setup.remove in ['all', 'packages'] and
- not Bcfg2.Options.setup.dry_run):
+ not Bcfg2.Options.setup.dry_run):
self.Remove(self.extra_instances)
else:
- self.logger.info("The following extra package instances will be removed by the '-r' option:")
+ self.logger.info("The following extra package instances will "
+ "be removed by the '-r' option:")
for pkg in self.extra_instances:
for inst in pkg:
- self.logger.info(" %s %s" % (pkg.get('name'), self.str_evra(inst)))
+ self.logger.info(" %s %s" % (pkg.get('name'),
+ self.str_evra(inst)))
# Figure out which instances of the packages actually need something
# doing to them and place in the appropriate work 'queue'.
for pkg in packages:
- for inst in [instn for instn in pkg if instn.tag \
+ for inst in [instn for instn in pkg if instn.tag
in ['Instance', 'Package']]:
if self.FixInstance(inst, self.instance_status[inst]):
if pkg.get('name') == 'gpg-pubkey':
@@ -1689,10 +1803,10 @@ class RPM(Bcfg2.Client.Tools.PkgTool):
# Fix installOnlyPackages
if len(install_only_pkgs) > 0:
self.logger.info("Attempting to install 'install only packages'")
- install_args = \
- " ".join(os.path.join(self.instance_status[inst].get('pkg').get('uri'),
- inst.get('simplefile'))
- for inst in install_only_pkgs)
+ install_args = " ".join(os.path.join(
+ self.instance_status[inst].get('pkg').get('uri'),
+ inst.get('simplefile'))
+ for inst in install_only_pkgs)
if self.cmd.run("rpm --install --quiet --oldpackage --replacepkgs "
"%s" % install_args):
# The rpm command succeeded. All packages installed.
@@ -1704,35 +1818,34 @@ class RPM(Bcfg2.Client.Tools.PkgTool):
self.logger.error("Single Pass for InstallOnlyPackages Failed")
installed_instances = []
for inst in install_only_pkgs:
- install_args = \
- os.path.join(self.instance_status[inst].get('pkg').get('uri'),
- inst.get('simplefile'))
+ pkguri = self.instance_status[inst].get('pkg').get('uri')
+ pkgname = self.instance_status[inst].get('pkg').get('name')
+ install_args = os.path.join(pkguri, inst.get('simplefile'))
if self.cmd.run("rpm --install --quiet --oldpackage "
"--replacepkgs %s" % install_args):
installed_instances.append(inst)
else:
- self.logger.debug("InstallOnlyPackage %s %s would not install." % \
- (self.instance_status[inst].get('pkg').get('name'), \
- self.str_evra(inst)))
+ self.logger.debug("InstallOnlyPackage %s %s would not "
+ "install." % (pkgname,
+ self.str_evra(inst)))
- install_pkg_set = set([self.instance_status[inst].get('pkg') \
- for inst in install_only_pkgs])
+ install_pkg_set = set([self.instance_status[inst].get('pkg')
+ for inst in install_only_pkgs])
self.RefreshPackages()
# Install GPG keys.
if len(gpg_keys) > 0:
for inst in gpg_keys:
self.logger.info("Installing GPG keys.")
- key_arg = os.path.join(self.instance_status[inst].get('pkg').get('uri'), \
- inst.get('simplefile'))
+ pkguri = self.instance_status[inst].get('pkg').get('uri')
+ pkgname = self.instance_status[inst].get('pkg').get('name')
+ key_arg = os.path.join(pkguri, inst.get('simplefile'))
if not self.cmd.run("rpm --import %s" % key_arg):
self.logger.debug("Unable to install %s-%s" %
- (self.instance_status[inst].get('pkg').get('name'),
- self.str_evra(inst)))
+ (pkgname, self.str_evra(inst)))
else:
self.logger.debug("Installed %s-%s-%s" %
- (self.instance_status[inst].get('pkg').get('name'),
- inst.get('version'),
+ (pkgname, inst.get('version'),
inst.get('release')))
self.RefreshPackages()
self.gpg_keyids = self.getinstalledgpg()
@@ -1742,9 +1855,10 @@ class RPM(Bcfg2.Client.Tools.PkgTool):
# Fix upgradeable packages.
if len(upgrade_pkgs) > 0:
self.logger.info("Attempting to upgrade packages")
- upgrade_args = " ".join([os.path.join(self.instance_status[inst].get('pkg').get('uri'), \
- inst.get('simplefile')) \
- for inst in upgrade_pkgs])
+ upgrade_args = " ".join([os.path.join(
+ self.instance_status[inst].get('pkg').get('uri'),
+ inst.get('simplefile'))
+ for inst in upgrade_pkgs])
if self.cmd.run("rpm --upgrade --quiet --oldpackage --replacepkgs "
"%s" % upgrade_args):
# The rpm command succeeded. All packages upgraded.
@@ -1758,49 +1872,56 @@ class RPM(Bcfg2.Client.Tools.PkgTool):
self.logger.error("Single Pass for Upgrading Packages Failed")
upgraded_instances = []
for inst in upgrade_pkgs:
- upgrade_args = os.path.join(self.instance_status[inst].get('pkg').get('uri'), \
- inst.get('simplefile'))
- #self.logger.debug("rpm --upgrade --quiet --oldpackage --replacepkgs %s" % \
- # upgrade_args)
+ upgrade_args = os.path.join(
+ self.instance_status[inst].get('pkg').get('uri'),
+ inst.get('simplefile'))
+ #self.logger.debug("rpm --upgrade --quiet --oldpackage "
+ # "--replacepkgs %s" % upgrade_args)
if self.cmd.run("rpm --upgrade --quiet --oldpackage "
"--replacepkgs %s" % upgrade_args):
upgraded_instances.append(inst)
else:
- self.logger.debug("Package %s %s would not upgrade." %
- (self.instance_status[inst].get('pkg').get('name'),
- self.str_evra(inst)))
+ self.logger.debug(
+ "Package %s %s would not upgrade." %
+ (self.instance_status[inst].get('pkg').get('name'),
+ self.str_evra(inst)))
- upgrade_pkg_set = set([self.instance_status[inst].get('pkg') \
- for inst in upgrade_pkgs])
+ upgrade_pkg_set = set([self.instance_status[inst].get('pkg')
+ for inst in upgrade_pkgs])
self.RefreshPackages()
if not Bcfg2.Options.setup.kevlar:
for pkg_entry in packages:
- self.logger.debug("Reverifying Failed Package %s" % (pkg_entry.get('name')))
- states[pkg_entry] = self.VerifyPackage(pkg_entry, \
- self.modlists.get(pkg_entry, []))
+ self.logger.debug("Reverifying Failed Package %s" %
+ (pkg_entry.get('name')))
+ states[pkg_entry] = self.VerifyPackage(
+ pkg_entry, self.modlists.get(pkg_entry, []))
self.modified.extend(ent for ent in packages if states[ent])
return states
+ def _log_incomplete_entry_install(self, etag, ename):
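+        """Log that entry etag:ename is missing attributes required
+        for installation."""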
+ self.logger.error("Incomplete information for entry %s:%s; "
+ "cannot install" % (etag, ename))
+ return
+
def canInstall(self, entry):
"""Test if entry has enough information to be installed."""
if not self.handlesEntry(entry):
return False
if 'failure' in entry.attrib:
- self.logger.error("Cannot install entry %s:%s with bind failure" % \
+ self.logger.error("Cannot install entry %s:%s with bind failure" %
(entry.tag, entry.get('name')))
return False
-
instances = entry.findall('Instance')
- # If the entry wasn't verifiable, then we really don't want to try and fix something
- # that we don't know is broken.
+ # If the entry wasn't verifiable, then we really don't want to try
+ # and fix something that we don't know is broken.
if not self.canVerify(entry):
- self.logger.debug("WARNING: Package %s was not verifiable, not passing to Install()" \
- % entry.get('name'))
+ self.logger.debug("WARNING: Package %s was not verifiable, not "
+ "passing to Install()" % entry.get('name'))
return False
if not instances:
@@ -1808,53 +1929,70 @@ class RPM(Bcfg2.Client.Tools.PkgTool):
if entry.get('name') == 'gpg-pubkey':
                 # gpg-pubkey packages aren't really packages, so we have to do
# something a little different.
- # Check that the Package Level has what we need for verification.
- if [attr for attr in self.__gpg_ireq__[entry.tag] if attr not in entry.attrib]:
- self.logger.error("Incomplete information for entry %s:%s; cannot install" \
- % (entry.tag, entry.get('name')))
+ # check that the Package level has
+ # what we need for verification.
+ if [attr for attr in self.__gpg_ireq__[entry.tag]
+ if attr not in entry.attrib]:
+ self._log_incomplete_entry_install(entry.tag,
+ entry.get('name'))
return False
else:
- if [attr for attr in self.__ireq__[entry.tag] if attr not in entry.attrib]:
- self.logger.error("Incomplete information for entry %s:%s; cannot install" \
- % (entry.tag, entry.get('name')))
+ if [attr for attr in self.__ireq__[entry.tag]
+ if attr not in entry.attrib]:
+ self._log_incomplete_entry_install(entry.tag,
+ entry.get('name'))
return False
else:
if entry.get('name') == 'gpg-pubkey':
                 # gpg-pubkey packages aren't really packages, so we have to do
# something a little different.
- # Check that the Package Level has what we need for verification.
- if [attr for attr in self.__new_gpg_ireq__[entry.tag] if attr not in entry.attrib]:
- self.logger.error("Incomplete information for entry %s:%s; cannot install" \
- % (entry.tag, entry.get('name')))
+ # check that the Package level has
+ # what we need for verification.
+ if [attr for attr in self.__new_gpg_ireq__[entry.tag]
+ if attr not in entry.attrib]:
+ self._log_incomplete_entry_install(entry.tag,
+ entry.get('name'))
return False
- # Check that the Instance Level has what we need for verification.
+ # check that the Instance level has
+ # what we need for verification.
for inst in instances:
- if [attr for attr in self.__new_gpg_ireq__[inst.tag] \
- if attr not in inst.attrib]:
- self.logger.error("Incomplete information for entry %s:%s; cannot install"\
- % (inst.tag, entry.get('name')))
+ if [attr for attr in self.__new_gpg_ireq__[inst.tag]
+ if attr not in inst.attrib]:
+ self._log_incomplete_entry_install(inst.tag,
+ entry.get('name'))
return False
else:
# New format with Instances.
- # Check that the Package Level has what we need for verification.
- if [attr for attr in self.__new_ireq__[entry.tag] if attr not in entry.attrib]:
- self.logger.error("Incomplete information for entry %s:%s; cannot install" \
- % (entry.tag, entry.get('name')))
- self.logger.error(" Required attributes that may not be present are %s" \
- % (self.__new_ireq__[entry.tag]))
+ # check that the Package level has
+ # what we need for verification.
+ if [attr for attr in self.__new_ireq__[entry.tag]
+ if attr not in entry.attrib]:
+ self._log_incomplete_entry_install(entry.tag,
+ entry.get('name'))
+ self.logger.error(" Required attributes that "
+ "may not be present are %s" %
+ (self.__new_ireq__[entry.tag]))
return False
- # Check that the Instance Level has what we need for verification.
+ # check that the Instance level has
+ # what we need for verification.
for inst in instances:
if inst.tag == 'Instance':
- if [attr for attr in self.__new_ireq__[inst.tag] \
- if attr not in inst.attrib]:
- self.logger.error("Incomplete information for %s of package %s; cannot install" \
- % (inst.tag, entry.get('name')))
- self.logger.error(" Required attributes that may not be present are %s" \
+ if [attr for attr in self.__new_ireq__[inst.tag]
+ if attr not in inst.attrib]:
+ self._log_incomplete_entry_install(
+ inst.tag,
+ entry.get('name'))
+ self.logger.error(" Required attributes "
+ "that may not be present are %s"
% (self.__new_ireq__[inst.tag]))
return False
return True
+ def _log_incomplete_entry_verify(self, etag, ename):
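+        """Log that entry etag:ename is missing attributes required
+        for verification."""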
+ self.logger.error("Incomplete information for entry %s:%s; "
+ "cannot verify" % (etag, ename))
+ return
+
def canVerify(self, entry):
"""
Test if entry has enough information to be verified.
@@ -1872,13 +2010,15 @@ class RPM(Bcfg2.Client.Tools.PkgTool):
return False
if 'failure' in entry.attrib:
- self.logger.error("Entry %s:%s reports bind failure: %s" % \
- (entry.tag, entry.get('name'), entry.get('failure')))
+ self.logger.error("Entry %s:%s reports bind failure: %s" %
+ (entry.tag, entry.get('name'),
+ entry.get('failure')))
return False
- # We don't want to do any checks so we don't care what the entry has in it.
+ # we don't want to do any checks so
+ # we don't care what the entry has in it.
if (not Bcfg2.Options.setup.rpm_pkg_checks or
- entry.get('pkg_checks', 'true').lower() == 'false'):
+ entry.get('pkg_checks', 'true').lower() == 'false'):
return True
instances = entry.findall('Instance')
@@ -1888,53 +2028,72 @@ class RPM(Bcfg2.Client.Tools.PkgTool):
if entry.get('name') == 'gpg-pubkey':
                 # gpg-pubkey packages aren't really packages, so we have to do
# something a little different.
- # Check that the Package Level has what we need for verification.
- if [attr for attr in self.__gpg_req__[entry.tag] if attr not in entry.attrib]:
- self.logger.error("Incomplete information for entry %s:%s; cannot verify" \
- % (entry.tag, entry.get('name')))
+ # check that the Package level has
+ # what we need for verification.
+ if [attr for attr in self.__gpg_req__[entry.tag]
+ if attr not in entry.attrib]:
+ self._log_incomplete_entry_verify(entry.tag,
+ entry.get('name'))
return False
elif entry.tag == 'Path' and entry.get('type') == 'ignore':
# ignored Paths are only relevant during failed package
# verification
pass
else:
- if [attr for attr in self.__req__[entry.tag] if attr not in entry.attrib]:
- self.logger.error("Incomplete information for entry %s:%s; cannot verify" \
- % (entry.tag, entry.get('name')))
+ if [attr for attr in self.__req__[entry.tag]
+ if attr not in entry.attrib]:
+ self._log_incomplete_entry_verify(entry.tag,
+ entry.get('name'))
return False
else:
if entry.get('name') == 'gpg-pubkey':
                 # gpg-pubkey packages aren't really packages, so we have to do
# something a little different.
- # Check that the Package Level has what we need for verification.
- if [attr for attr in self.__new_gpg_req__[entry.tag] if attr not in entry.attrib]:
- self.logger.error("Incomplete information for entry %s:%s; cannot verify" \
- % (entry.tag, entry.get('name')))
+ # check that the Package level has
+ # what we need for verification.
+ if [attr for attr in self.__new_gpg_req__[entry.tag]
+ if attr not in entry.attrib]:
+ self._log_incomplete_entry_verify(entry.tag,
+ entry.get('name'))
return False
- # Check that the Instance Level has what we need for verification.
+ # check that the Instance level has
+ # what we need for verification.
for inst in instances:
- if [attr for attr in self.__new_gpg_req__[inst.tag] \
- if attr not in inst.attrib]:
- self.logger.error("Incomplete information for entry %s:%s; cannot verify" \
- % (inst.tag, inst.get('name')))
+ if [attr for attr in self.__new_gpg_req__[inst.tag]
+ if attr not in inst.attrib]:
+ self._log_incomplete_entry_verify(inst.tag,
+ inst.get('name'))
return False
else:
- # New format with Instances, or old style modified.
- # Check that the Package Level has what we need for verification.
- if [attr for attr in self.__new_req__[entry.tag] if attr not in entry.attrib]:
- self.logger.error("Incomplete information for entry %s:%s; cannot verify" \
- % (entry.tag, entry.get('name')))
+ # new format with Instances, or old style modified.
+ # check that the Package level has
+ # what we need for verification.
+ if [attr for attr in self.__new_req__[entry.tag]
+ if attr not in entry.attrib]:
+ self._log_incomplete_entry_verify(entry.tag,
+ entry.get('name'))
return False
- # Check that the Instance Level has what we need for verification.
+ # check that the Instance level has
+ # what we need for verification.
for inst in instances:
if inst.tag == 'Instance':
- if [attr for attr in self.__new_req__[inst.tag] \
- if attr not in inst.attrib]:
- self.logger.error("Incomplete information for entry %s:%s; cannot verify" \
- % (inst.tag, inst.get('name')))
+ if [attr for attr in self.__new_req__[inst.tag]
+ if attr not in inst.attrib]:
+ self._log_incomplete_entry_verify(inst.tag,
+ inst.get('name'))
return False
return True
+ def _get_tmp_entry(self, extra_entry, inst):
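+        """Add an Instance subelement to extra_entry that mirrors the
+        version, release, and (when present) epoch and arch of inst."""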
+ tmp_entry = Bcfg2.Client.XML.SubElement(extra_entry, 'Instance',
+ version=inst.get('version'),
+ release=inst.get('release'))
+ if inst.get('epoch', None) is not None:
+ tmp_entry.set('epoch', str(inst.get('epoch')))
+        if inst.get('arch', None) is not None:
+ tmp_entry.set('arch', inst.get('arch'))
+ return
+
def FindExtra(self):
"""Find extra packages."""
packages = [entry.get('name') for entry in self.getSupportedEntries()]
@@ -1942,18 +2101,14 @@ class RPM(Bcfg2.Client.Tools.PkgTool):
for (name, instances) in list(self.installed.items()):
if name not in packages:
- extra_entry = Bcfg2.Client.XML.Element('Package', name=name, type=self.pkgtype)
+ extra_entry = Bcfg2.Client.XML.Element('Package',
+ name=name,
+ type=self.pkgtype)
for installed_inst in instances:
if Bcfg2.Options.setup.extra:
- self.logger.info("Extra Package %s %s." % \
+ self.logger.info("Extra Package %s %s." %
(name, self.str_evra(installed_inst)))
- tmp_entry = Bcfg2.Client.XML.SubElement(extra_entry, 'Instance', \
- version = installed_inst.get('version'), \
- release = installed_inst.get('release'))
- if installed_inst.get('epoch', None) != None:
- tmp_entry.set('epoch', str(installed_inst.get('epoch')))
- if installed_inst.get('arch', None) != None:
- tmp_entry.set('arch', installed_inst.get('arch'))
+ self._get_tmp_entry(extra_entry, installed_inst)
extras.append(extra_entry)
return extras
@@ -1965,8 +2120,11 @@ class RPM(Bcfg2.Client.Tools.PkgTool):
"""
name = pkg_entry.get('name')
- extra_entry = Bcfg2.Client.XML.Element('Package', name=name, type=self.pkgtype)
- instances = [inst for inst in pkg_entry if inst.tag == 'Instance' or inst.tag == 'Package']
+ extra_entry = Bcfg2.Client.XML.Element('Package',
+ name=name,
+ type=self.pkgtype)
+ instances = [inst for inst in pkg_entry if
+ inst.tag == 'Instance' or inst.tag == 'Package']
if name in self.installOnlyPkgs:
for installed_inst in installed_entry:
not_found = True
@@ -1975,36 +2133,25 @@ class RPM(Bcfg2.Client.Tools.PkgTool):
self.inst_evra_equal(inst, installed_inst):
not_found = False
break
- if not_found == True:
+ if not_found:
# Extra package.
- self.logger.info("Extra InstallOnlyPackage %s %s." % \
+ self.logger.info("Extra InstallOnlyPackage %s %s." %
(name, self.str_evra(installed_inst)))
- tmp_entry = Bcfg2.Client.XML.SubElement(extra_entry, 'Instance', \
- version = installed_inst.get('version'), \
- release = installed_inst.get('release'))
- if installed_inst.get('epoch', None) != None:
- tmp_entry.set('epoch', str(installed_inst.get('epoch')))
- if installed_inst.get('arch', None) != None:
- tmp_entry.set('arch', installed_inst.get('arch'))
+ self._get_tmp_entry(extra_entry, installed_inst)
else:
# Normal package, only check arch.
for installed_inst in installed_entry:
not_found = True
for inst in instances:
- if installed_inst.get('arch', None) == inst.get('arch', None) or\
- inst.tag == 'Package':
+ if (installed_inst.get('arch', None) ==
+ inst.get('arch', None) or
+ inst.tag == 'Package'):
not_found = False
break
if not_found:
- self.logger.info("Extra Normal Package Instance %s %s" % \
+ self.logger.info("Extra Normal Package Instance %s %s" %
(name, self.str_evra(installed_inst)))
- tmp_entry = Bcfg2.Client.XML.SubElement(extra_entry, 'Instance', \
- version = installed_inst.get('version'), \
- release = installed_inst.get('release'))
- if installed_inst.get('epoch', None) != None:
- tmp_entry.set('epoch', str(installed_inst.get('epoch')))
- if installed_inst.get('arch', None) != None:
- tmp_entry.set('arch', installed_inst.get('arch'))
+ self._get_tmp_entry(extra_entry, installed_inst)
if len(extra_entry) == 0:
extra_entry = None
@@ -2028,9 +2175,10 @@ class RPM(Bcfg2.Client.Tools.PkgTool):
Compare old style entry to installed entry. Which means ignore
the epoch and arch.
'''
- if (config_entry.tag == 'Package' and \
- config_entry.get('version') == installed_entry.get('version') and \
- config_entry.get('release') == installed_entry.get('release')):
+ if (config_entry.tag == 'Package' and
+ config_entry.get('version') == installed_entry.get('version')
+ and
+ config_entry.get('release') == installed_entry.get('release')):
return True
else:
return False
@@ -2038,18 +2186,19 @@ class RPM(Bcfg2.Client.Tools.PkgTool):
def inst_evra_equal(self, config_entry, installed_entry):
"""Compare new style instance to installed entry."""
- if config_entry.get('epoch', None) != None:
+ if config_entry.get('epoch', None) is not None:
epoch = int(config_entry.get('epoch'))
else:
epoch = None
- if (config_entry.tag == 'Instance' and \
- (epoch == installed_entry.get('epoch', 0) or \
- (epoch == 0 and installed_entry.get('epoch', 0) == None) or \
- (epoch == None and installed_entry.get('epoch', 0) == 0)) and \
- config_entry.get('version') == installed_entry.get('version') and \
- config_entry.get('release') == installed_entry.get('release') and \
- config_entry.get('arch', None) == installed_entry.get('arch', None)):
+ if (config_entry.tag == 'Instance' and
+ (epoch == installed_entry.get('epoch', 0) or
+ (epoch == 0 and installed_entry.get('epoch', 0) is None) or
+ (epoch is None and installed_entry.get('epoch', 0) == 0)) and
+ config_entry.get('version') == installed_entry.get('version') and
+ config_entry.get('release') == installed_entry.get('release') and
+ config_entry.get('arch', None) == installed_entry.get('arch',
+ None)):
return True
else:
return False
@@ -2063,9 +2212,9 @@ class RPM(Bcfg2.Client.Tools.PkgTool):
"""
init_ts = rpmtransactionset()
- init_ts.setVSFlags(rpm._RPMVSF_NODIGESTS|rpm._RPMVSF_NOSIGNATURES)
- gpg_hdrs = getheadersbykeyword(init_ts, **{'name':'gpg-pubkey'})
- keyids = [ header[rpm.RPMTAG_VERSION] for header in gpg_hdrs]
+ init_ts.setVSFlags(rpm._RPMVSF_NODIGESTS | rpm._RPMVSF_NOSIGNATURES)
+ gpg_hdrs = getheadersbykeyword(init_ts, **{'name': 'gpg-pubkey'})
+ keyids = [header[rpm.RPMTAG_VERSION] for header in gpg_hdrs]
keyids.append('None')
init_ts.closeDB()
del init_ts
diff --git a/src/lib/Bcfg2/Client/Tools/SELinux.py b/src/lib/Bcfg2/Client/Tools/SELinux.py
index ef89ef46d..7b5ff7813 100644
--- a/src/lib/Bcfg2/Client/Tools/SELinux.py
+++ b/src/lib/Bcfg2/Client/Tools/SELinux.py
@@ -225,7 +225,7 @@ class SELinuxEntryHandler(object):
match = self.custom_re.search(cmd)
if match:
if (len(self.custom_format) == 1 and
- self.custom_format[0] == "name"):
+ self.custom_format[0] == "name"):
keys.append(match.group("name"))
else:
keys.append(tuple([match.group(k)
diff --git a/src/lib/Bcfg2/Client/Tools/SYSV.py b/src/lib/Bcfg2/Client/Tools/SYSV.py
index f149be7af..5698f237a 100644
--- a/src/lib/Bcfg2/Client/Tools/SYSV.py
+++ b/src/lib/Bcfg2/Client/Tools/SYSV.py
@@ -81,7 +81,7 @@ class SYSV(Bcfg2.Client.Tools.PkgTool):
entry.get("name"))
else:
if (Bcfg2.Options.setup.quick or
- entry.attrib.get('verify', 'true') == 'false'):
+ entry.attrib.get('verify', 'true') == 'false'):
return True
rv = self.cmd.run("/usr/sbin/pkgchk -n %s" % entry.get('name'))
if rv.success:
diff --git a/src/lib/Bcfg2/Client/Tools/VCS.py b/src/lib/Bcfg2/Client/Tools/VCS.py
index aca5dbbc7..4e8ac76a4 100644
--- a/src/lib/Bcfg2/Client/Tools/VCS.py
+++ b/src/lib/Bcfg2/Client/Tools/VCS.py
@@ -88,8 +88,10 @@ class VCS(Bcfg2.Client.Tools.Tool):
return False
try:
- client, path = dulwich.client.get_transport_and_path(entry.get('sourceurl'))
- remote_refs = client.fetch_pack(path, (lambda x: None), None, None, None)
+ client, path = dulwich.client.get_transport_and_path(
+ entry.get('sourceurl'))
+ remote_refs = client.fetch_pack(path,
+ (lambda x: None), None, None, None)
if expected_rev in remote_refs:
expected_rev = remote_refs[expected_rev]
except:
@@ -119,10 +121,12 @@ class VCS(Bcfg2.Client.Tools.Tool):
dulwich.file.ensure_dir_exists(destname)
destr = dulwich.repo.Repo.init(destname)
- cl, host_path = dulwich.client.get_transport_and_path(entry.get('sourceurl'))
+ determine_wants = destr.object_store.determine_wants_all
+ cl, host_path = dulwich.client.get_transport_and_path(
+ entry.get('sourceurl'))
remote_refs = cl.fetch(host_path,
destr,
- determine_wants=destr.object_store.determine_wants_all,
+ determine_wants=determine_wants,
progress=sys.stdout.write)
if entry.get('revision') in remote_refs:
@@ -161,15 +165,18 @@ class VCS(Bcfg2.Client.Tools.Tool):
def Verifysvn(self, entry, _):
"""Verify svn repositories"""
- headrev = pysvn.Revision( pysvn.opt_revision_kind.head )
+ headrev = pysvn.Revision(pysvn.opt_revision_kind.head)
client = pysvn.Client()
try:
cur_rev = str(client.info(entry.get('name')).revision.number)
- server = client.info2(entry.get('sourceurl'), headrev, recurse=False)
+ server = client.info2(entry.get('sourceurl'),
+ headrev,
+ recurse=False)
if server:
server_rev = str(server[0][1].rev.number)
except:
- self.logger.info("Repository %s does not exist" % entry.get('name'))
+ self.logger.info("Repository %s does not exist" %
+ entry.get('name'))
return False
if entry.get('revision') == 'latest' and cur_rev == server_rev:
diff --git a/src/lib/Bcfg2/Client/Tools/YUM.py b/src/lib/Bcfg2/Client/Tools/YUM.py
index ae238174b..8bb87540c 100644
--- a/src/lib/Bcfg2/Client/Tools/YUM.py
+++ b/src/lib/Bcfg2/Client/Tools/YUM.py
@@ -266,7 +266,7 @@ class YUM(Bcfg2.Client.Tools.PkgTool):
to the newest available """
# old style entry; synthesize Instances from current installed
if (entry.get('name') not in self.yum_installed and
- entry.get('name') not in self.yum_avail):
+ entry.get('name') not in self.yum_avail):
# new entry; fall back to default
entry.set('version', 'any')
else:
@@ -320,7 +320,7 @@ class YUM(Bcfg2.Client.Tools.PkgTool):
missing = Bcfg2.Client.Tools.PkgTool.missing_attrs(self, entry)
if (entry.get('name', None) is None and
- entry.get('group', None) is None):
+ entry.get('group', None) is None):
missing += ['name', 'group']
return missing
@@ -951,7 +951,7 @@ class YUM(Bcfg2.Client.Tools.PkgTool):
continue
status = self.instance_status[inst]
if (not status.get('installed', False) and
- Bcfg2.Options.setup.yum_install_missing):
+ Bcfg2.Options.setup.yum_install_missing):
queue_pkg(pkg, inst, install_pkgs)
elif (status.get('version_fail', False) and
Bcfg2.Options.setup.yum_fix_version):
diff --git a/src/lib/Bcfg2/Client/Tools/__init__.py b/src/lib/Bcfg2/Client/Tools/__init__.py
index 4a808aa60..cd294db98 100644
--- a/src/lib/Bcfg2/Client/Tools/__init__.py
+++ b/src/lib/Bcfg2/Client/Tools/__init__.py
@@ -110,7 +110,7 @@ class Tool(object):
for struct in self.config:
for entry in struct:
if (entry.tag == 'Path' and
- entry.get('important', 'false').lower() == 'true'):
+ entry.get('important', 'false').lower() == 'true'):
self.__important__.append(entry.get('name'))
self.handled = self.getSupportedEntries()
diff --git a/src/lib/Bcfg2/Client/__init__.py b/src/lib/Bcfg2/Client/__init__.py
index 433fb570a..bae81f480 100644
--- a/src/lib/Bcfg2/Client/__init__.py
+++ b/src/lib/Bcfg2/Client/__init__.py
@@ -157,7 +157,7 @@ class Client(object):
if Bcfg2.Options.setup.bundle_quick:
if (not Bcfg2.Options.setup.only_bundles and
- not Bcfg2.Options.setup.except_bundles):
+ not Bcfg2.Options.setup.except_bundles):
self.logger.error("-Q option requires -b or -B")
raise SystemExit(1)
if Bcfg2.Options.setup.remove == 'services':
@@ -432,7 +432,7 @@ class Client(object):
lockfile.name)
if (not Bcfg2.Options.setup.file and
- not Bcfg2.Options.setup.bundle_quick):
+ not Bcfg2.Options.setup.bundle_quick):
# upload statistics
feedback = self.GenerateStats()
@@ -567,15 +567,16 @@ class Client(object):
continue
for cfile in parent.findall("./Path"):
if (cfile.get('name') not in self.__important__ or
- cfile.get('type') != 'file' or
- cfile not in self.whitelist):
+ cfile.get('type') != 'file' or
+ cfile not in self.whitelist):
continue
tools = [t for t in self.tools
if t.handlesEntry(cfile) and t.canVerify(cfile)]
if not tools:
continue
if (Bcfg2.Options.setup.interactive and not
- self.promptFilter("Install %s: %s? (y/N):", [cfile])):
+ self.promptFilter("Install %s: %s? (y/N):",
+ [cfile])):
self.whitelist.remove(cfile)
continue
try:
@@ -756,7 +757,8 @@ class Client(object):
for bundle in self.config.findall('.//Bundle'):
if (Bcfg2.Options.setup.only_bundles and
- bundle.get('name') not in Bcfg2.Options.setup.only_bundles):
+ bundle.get('name') not in
+ Bcfg2.Options.setup.only_bundles):
# prune out unspecified bundles when running with -b
continue
if bundle in mbundles:
diff --git a/src/lib/Bcfg2/Server/Admin.py b/src/lib/Bcfg2/Server/Admin.py
index 59059c240..c82a6d7fd 100644
--- a/src/lib/Bcfg2/Server/Admin.py
+++ b/src/lib/Bcfg2/Server/Admin.py
@@ -265,7 +265,7 @@ class Compare(AdminCmd):
diff = []
for line in difflib.unified_diff(lines1, lines2, **kwargs):
if (line.startswith("--- ") or line.startswith("+++ ") or
- line.startswith("@@ ")):
+ line.startswith("@@ ")):
continue
if lines is not None and len(diff) > lines:
diff.append(" ...")
@@ -363,7 +363,7 @@ class Compare(AdminCmd):
else:
el2 = elements2[elid]
if (el.getparent().get("name") !=
- el2.getparent().get("name")):
+ el2.getparent().get("name")):
self.changed(
"Element %s was in bundle %s, "
"now in bundle %s" % (elid,
diff --git a/src/lib/Bcfg2/Server/CherrypyCore.py b/src/lib/Bcfg2/Server/CherrypyCore.py
index dbfe260f7..3cb0e291b 100644
--- a/src/lib/Bcfg2/Server/CherrypyCore.py
+++ b/src/lib/Bcfg2/Server/CherrypyCore.py
@@ -72,7 +72,7 @@ class CherrypyCore(NetworkCore):
raise Exception("Unknown error processing XML-RPC request body")
if (not self.check_acls(address[0], rpcmethod) or
- not self.authenticate(cert, username, password, address)):
+ not self.authenticate(cert, username, password, address)):
raise cherrypy.HTTPError(401)
@cherrypy.expose
diff --git a/src/lib/Bcfg2/Server/Core.py b/src/lib/Bcfg2/Server/Core.py
index 69d61580f..4eb151c29 100644
--- a/src/lib/Bcfg2/Server/Core.py
+++ b/src/lib/Bcfg2/Server/Core.py
@@ -1340,9 +1340,9 @@ class NetworkCore(Core):
if self._database_available:
db_settings = Bcfg2.settings.DATABASES['default']
if (Bcfg2.Options.setup.daemon and
- Bcfg2.Options.setup.daemon_uid and
- db_settings['ENGINE'].endswith(".sqlite3") and
- not os.path.exists(db_settings['NAME'])):
+ Bcfg2.Options.setup.daemon_uid and
+ db_settings['ENGINE'].endswith(".sqlite3") and
+ not os.path.exists(db_settings['NAME'])):
# syncdb will create the sqlite database, and we're
# going to daemonize, dropping privs to a non-root
# user, so we need to chown the database after
diff --git a/src/lib/Bcfg2/Server/Encryption.py b/src/lib/Bcfg2/Server/Encryption.py
index dd3f46b07..6d6c82fab 100755
--- a/src/lib/Bcfg2/Server/Encryption.py
+++ b/src/lib/Bcfg2/Server/Encryption.py
@@ -592,7 +592,7 @@ class CLI(object):
tool.write(data)
if (Bcfg2.Options.setup.remove and
- tool.get_destination_filename(fname) != fname):
+ tool.get_destination_filename(fname) != fname):
try:
os.unlink(fname)
except IOError:
diff --git a/src/lib/Bcfg2/Server/FileMonitor/Inotify.py b/src/lib/Bcfg2/Server/FileMonitor/Inotify.py
index 39d062604..b8eb06aa1 100644
--- a/src/lib/Bcfg2/Server/FileMonitor/Inotify.py
+++ b/src/lib/Bcfg2/Server/FileMonitor/Inotify.py
@@ -149,7 +149,7 @@ class Inotify(Pseudo, pyinotify.ProcessEvent):
evt = Event(handleID, path, action)
if (ievent.wd not in self.event_filter or
- ievent.pathname in self.event_filter[ievent.wd]):
+ ievent.pathname in self.event_filter[ievent.wd]):
self.events.append(evt)
def AddMonitor(self, path, obj, handleID=None):
diff --git a/src/lib/Bcfg2/Server/FileMonitor/__init__.py b/src/lib/Bcfg2/Server/FileMonitor/__init__.py
index ae42a3429..45a8d08fa 100644
--- a/src/lib/Bcfg2/Server/FileMonitor/__init__.py
+++ b/src/lib/Bcfg2/Server/FileMonitor/__init__.py
@@ -188,7 +188,7 @@ class FileMonitor(Debuggable):
"""
for pattern in self.ignore:
if (fnmatch.fnmatch(event.filename, pattern) or
- fnmatch.fnmatch(os.path.split(event.filename)[-1], pattern)):
+ fnmatch.fnmatch(os.path.split(event.filename)[-1], pattern)):
self.debug_log("Ignoring %s" % event)
return True
return False
diff --git a/src/lib/Bcfg2/Server/Info.py b/src/lib/Bcfg2/Server/Info.py
index 3a1ed7433..dd1fb1968 100644
--- a/src/lib/Bcfg2/Server/Info.py
+++ b/src/lib/Bcfg2/Server/Info.py
@@ -207,7 +207,7 @@ dir>. This only handles file entries, and does not respect 'owner' or
for struct in client_config:
for entry in struct:
if (entry.tag == 'Path' and
- entry.get("type") not in self.blacklisted_types):
+ entry.get("type") not in self.blacklisted_types):
failure = entry.get("failure")
if failure is not None:
print("Skipping entry %s:%s with bind failure: %s" %
@@ -265,7 +265,7 @@ class BuildAllMixin(object):
""" the parent command """
for cls in self.__class__.__mro__:
if (cls != InfoCmd and cls != self.__class__ and
- issubclass(cls, InfoCmd)):
+ issubclass(cls, InfoCmd)):
return cls
def run(self, setup):
@@ -366,8 +366,8 @@ class Automatch(InfoCmd):
pfile = props.entries[setup.propertyfile]
if (not Bcfg2.Options.setup.force and
- not Bcfg2.Options.setup.automatch and
- pfile.xdata.get("automatch", "false").lower() != "true"):
+ not Bcfg2.Options.setup.automatch and
+ pfile.xdata.get("automatch", "false").lower() != "true"):
print("Automatch not enabled on %s" % setup.propertyfile)
else:
metadata = self.core.build_metadata(setup.hostname)
@@ -585,7 +585,7 @@ class Mappings(InfoCmd):
def run(self, setup):
data = [('Plugin', 'Type', 'Name')]
for generator in self.core.plugins_by_type(
- Bcfg2.Server.Plugin.Generator):
+ Bcfg2.Server.Plugin.Generator):
etypes = setup.type or list(generator.Entries.keys())
if setup.name:
interested = [(etype, [setup.name]) for etype in etypes]
diff --git a/src/lib/Bcfg2/Server/Lint/Genshi.py b/src/lib/Bcfg2/Server/Lint/Genshi.py
index e3a0004a6..a2581e70b 100755
--- a/src/lib/Bcfg2/Server/Lint/Genshi.py
+++ b/src/lib/Bcfg2/Server/Lint/Genshi.py
@@ -39,8 +39,8 @@ class Genshi(Bcfg2.Server.Lint.ServerPlugin):
for entryset in self.core.plugins['Cfg'].entries.values():
for entry in entryset.entries.values():
if (self.HandlesFile(entry.name) and
- isinstance(entry, CfgGenshiGenerator) and
- not entry.template):
+ isinstance(entry, CfgGenshiGenerator) and
+ not entry.template):
self.check_template(entry.loader, entry.name,
cls=NewTextTemplate)
@@ -49,5 +49,5 @@ class Genshi(Bcfg2.Server.Lint.ServerPlugin):
loader = TemplateLoader()
for entry in self.core.plugins['Bundler'].entries.values():
if (self.HandlesFile(entry.name) and
- entry.template is not None):
+ entry.template is not None):
self.check_template(loader, entry.name, cls=MarkupTemplate)
diff --git a/src/lib/Bcfg2/Server/Lint/MergeFiles.py b/src/lib/Bcfg2/Server/Lint/MergeFiles.py
index 972475d91..cb14ca477 100644
--- a/src/lib/Bcfg2/Server/Lint/MergeFiles.py
+++ b/src/lib/Bcfg2/Server/Lint/MergeFiles.py
@@ -87,8 +87,8 @@ class MergeFiles(Bcfg2.Server.Lint.ServerPlugin):
seqmatch = SequenceMatcher(None, fdata.data, cdata.data)
# perform progressively more expensive comparisons
if (seqmatch.real_quick_ratio() > Bcfg2.Options.setup.threshold and
- seqmatch.quick_ratio() > Bcfg2.Options.setup.threshold and
- seqmatch.ratio() > Bcfg2.Options.setup.threshold):
+ seqmatch.quick_ratio() > Bcfg2.Options.setup.threshold and
+ seqmatch.ratio() > Bcfg2.Options.setup.threshold):
rv.extend(
self._find_similar((cname, cdata), copy.copy(others)))
return rv
diff --git a/src/lib/Bcfg2/Server/Lint/Metadata.py b/src/lib/Bcfg2/Server/Lint/Metadata.py
index 3c8f2831d..bf19c25fc 100644
--- a/src/lib/Bcfg2/Server/Lint/Metadata.py
+++ b/src/lib/Bcfg2/Server/Lint/Metadata.py
@@ -142,7 +142,7 @@ class Metadata(ServerPlugin):
def default_is_profile(self):
""" Ensure that the default group is a profile group. """
if (self.metadata.default and
- not self.metadata.groups[self.metadata.default].is_profile):
+ not self.metadata.groups[self.metadata.default].is_profile):
xdata = \
self.metadata.groups_xml.xdata.xpath("//Group[@name='%s']" %
self.metadata.default)[0]
diff --git a/src/lib/Bcfg2/Server/Lint/Pkgmgr.py b/src/lib/Bcfg2/Server/Lint/Pkgmgr.py
index 6b718d8b5..3f0b9477c 100644
--- a/src/lib/Bcfg2/Server/Lint/Pkgmgr.py
+++ b/src/lib/Bcfg2/Server/Lint/Pkgmgr.py
@@ -26,7 +26,7 @@ class Pkgmgr(ServerlessPlugin):
if pkg.getparent().tag == 'Group':
grp = pkg.getparent().get('name')
if (type(grp) is not str and
- grp.getparent().tag == 'Group'):
+ grp.getparent().tag == 'Group'):
pgrp = grp.getparent().get('name')
else:
pgrp = 'none'
diff --git a/src/lib/Bcfg2/Server/Lint/RequiredAttrs.py b/src/lib/Bcfg2/Server/Lint/RequiredAttrs.py
index cf7b51ecc..5d9e229fa 100644
--- a/src/lib/Bcfg2/Server/Lint/RequiredAttrs.py
+++ b/src/lib/Bcfg2/Server/Lint/RequiredAttrs.py
@@ -138,7 +138,7 @@ class RequiredAttrs(Bcfg2.Server.Lint.ServerPlugin):
for source in self.core.plugins['Packages'].sources:
if isinstance(source, Yum.YumSource):
if (not source.pulp_id and not source.url and
- not source.rawurl):
+ not source.rawurl):
self.LintError(
"required-attrs-missing",
"A %s source must have either a url, rawurl, or "
@@ -152,7 +152,7 @@ class RequiredAttrs(Bcfg2.Server.Lint.ServerPlugin):
(source.ptype, self.RenderXML(source.xsource)))
if (not isinstance(source, Apt.AptSource) and
- source.recommended):
+ source.recommended):
self.LintError(
"extra-attrs",
"The recommended attribute is not supported on %s sources:"
@@ -180,7 +180,7 @@ class RequiredAttrs(Bcfg2.Server.Lint.ServerPlugin):
for bundle in self.core.plugins['Bundler'].entries.values():
if self.HandlesFile(bundle.name) and bundle.template is None:
for path in bundle.xdata.xpath(
- "//*[substring(name(), 1, 5) = 'Bound']"):
+ "//*[substring(name(), 1, 5) = 'Bound']"):
self.check_entry(path, bundle.name)
def check_entry(self, entry, filename):
@@ -228,7 +228,7 @@ class RequiredAttrs(Bcfg2.Server.Lint.ServerPlugin):
fmt = required_attrs['__text__']
del required_attrs['__text__']
if (not entry.text and
- not entry.get('empty', 'false').lower() == 'true'):
+ not entry.get('empty', 'false').lower() == 'true'):
self.LintError("required-attrs-missing",
"Text missing for %s %s in %s: %s" %
(tag, name, filename,
diff --git a/src/lib/Bcfg2/Server/Lint/Validate.py b/src/lib/Bcfg2/Server/Lint/Validate.py
index de7ae038a..2042382e7 100644
--- a/src/lib/Bcfg2/Server/Lint/Validate.py
+++ b/src/lib/Bcfg2/Server/Lint/Validate.py
@@ -182,7 +182,8 @@ class Validate(Bcfg2.Server.Lint.ServerlessPlugin):
fpath, fname = path.split('/**/')
self.filelists[path] = []
for root, _, files in os.walk(
- os.path.join(Bcfg2.Options.setup.repository, fpath)):
+ os.path.join(Bcfg2.Options.setup.repository,
+ fpath)):
self.filelists[path].extend([os.path.join(root, f)
for f in files
if f == fname])
diff --git a/src/lib/Bcfg2/Server/Lint/__init__.py b/src/lib/Bcfg2/Server/Lint/__init__.py
index c8cdb5be1..f918ad851 100644
--- a/src/lib/Bcfg2/Server/Lint/__init__.py
+++ b/src/lib/Bcfg2/Server/Lint/__init__.py
@@ -400,8 +400,8 @@ class CLI(object):
self.run_server_plugins()
if (self.errorhandler.errors or
- self.errorhandler.warnings or
- Bcfg2.Options.setup.verbose):
+ self.errorhandler.warnings or
+ Bcfg2.Options.setup.verbose):
print("%d errors" % self.errorhandler.errors)
print("%d warnings" % self.errorhandler.warnings)
diff --git a/src/lib/Bcfg2/Server/Plugin/helpers.py b/src/lib/Bcfg2/Server/Plugin/helpers.py
index 7a3d887fe..73ad24614 100644
--- a/src/lib/Bcfg2/Server/Plugin/helpers.py
+++ b/src/lib/Bcfg2/Server/Plugin/helpers.py
@@ -523,7 +523,7 @@ class XMLFileBacked(FileBacked):
self.extra_monitors = []
if ((create is not None or self.create not in [None, False]) and
- not os.path.exists(self.name)):
+ not os.path.exists(self.name)):
toptag = create or self.create
self.logger.warning("%s does not exist, creating" % self.name)
if hasattr(toptag, "getroottree"):
diff --git a/src/lib/Bcfg2/Server/Plugins/ACL.py b/src/lib/Bcfg2/Server/Plugins/ACL.py
index 3de3f767c..f059eb4f1 100644
--- a/src/lib/Bcfg2/Server/Plugins/ACL.py
+++ b/src/lib/Bcfg2/Server/Plugins/ACL.py
@@ -30,7 +30,7 @@ def rmi_names_equal(first, second):
return False
for i in range(len(first_parts)):
if (first_parts[i] != second_parts[i] and first_parts[i] != '*' and
- second_parts[i] != '*'):
+ second_parts[i] != '*'):
return False
return True
@@ -75,7 +75,7 @@ class IPACLFile(Bcfg2.Server.Plugin.XMLFileBacked):
return self.actions["Defer"]
for entry in self.entries:
if (ip_matches(address, entry) and
- rmi_names_equal(entry.get("method"), rmi)):
+ rmi_names_equal(entry.get("method"), rmi)):
self.debug_log("ACL: %s requests %s: Found matching IP ACL, "
"%s" % (address, rmi, entry.tag.lower()))
return self.actions[entry.tag]
diff --git a/src/lib/Bcfg2/Server/Plugins/Bzr.py b/src/lib/Bcfg2/Server/Plugins/Bzr.py
index f91cc1943..886584477 100644
--- a/src/lib/Bcfg2/Server/Plugins/Bzr.py
+++ b/src/lib/Bcfg2/Server/Plugins/Bzr.py
@@ -23,7 +23,7 @@ class Bzr(Bcfg2.Server.Plugin.Version):
working_tree = WorkingTree.open(Bcfg2.Options.setup.vcs_root)
revision = str(working_tree.branch.revno())
if (working_tree.has_changes(working_tree.basis_tree()) or
- working_tree.unknowns()):
+ working_tree.unknowns()):
revision += "+"
except errors.NotBranchError:
msg = "Failed to read Bazaar branch"
diff --git a/src/lib/Bcfg2/Server/Plugins/Cfg/__init__.py b/src/lib/Bcfg2/Server/Plugins/Cfg/__init__.py
index eea0a3456..5b32a91c0 100644
--- a/src/lib/Bcfg2/Server/Plugins/Cfg/__init__.py
+++ b/src/lib/Bcfg2/Server/Plugins/Cfg/__init__.py
@@ -690,7 +690,7 @@ class CfgEntrySet(Bcfg2.Server.Plugin.EntrySet):
rv = []
for ent in self.entries.values():
if (isinstance(ent, handler_type) and
- (not ent.__specific__ or ent.specific.matches(metadata))):
+ (not ent.__specific__ or ent.specific.matches(metadata))):
rv.append(ent)
return rv
diff --git a/src/lib/Bcfg2/Server/Plugins/Deps.py b/src/lib/Bcfg2/Server/Plugins/Deps.py
index fa821aad3..a4fe7aa91 100644
--- a/src/lib/Bcfg2/Server/Plugins/Deps.py
+++ b/src/lib/Bcfg2/Server/Plugins/Deps.py
@@ -33,7 +33,7 @@ class Deps(Bcfg2.Server.Plugin.PrioDir,
if tag.startswith('Bound'):
tag = tag[5:]
if ((tag, entry.get('name')) not in entries
- and not isinstance(entry, lxml.etree._Comment)):
+ and not isinstance(entry, lxml.etree._Comment)):
entries.append((tag, entry.get('name')))
entries.sort()
entries = tuple(entries)
@@ -74,10 +74,10 @@ class Deps(Bcfg2.Server.Plugin.PrioDir,
prio = [int(m[0].priority) for m in matching]
if prio.count(max(prio)) > 1:
raise PluginExecutionError(
- "Deps: Found conflicting dependencies with same "
- "priority for %s:%s for %s: %s" %
- (entry.tag, entry.get("name"),
- metadata.hostname, [m[0].name for m in matching]))
+ "Deps: Found conflicting dependencies with same "
+ "priority for %s:%s for %s: %s" %
+ (entry.tag, entry.get("name"),
+ metadata.hostname, [m[0].name for m in matching]))
index = prio.index(max(prio))
matching = [matching[index]]
if not matching:
diff --git a/src/lib/Bcfg2/Server/Plugins/FileProbes.py b/src/lib/Bcfg2/Server/Plugins/FileProbes.py
index 51eb6e09a..3d0336c36 100644
--- a/src/lib/Bcfg2/Server/Plugins/FileProbes.py
+++ b/src/lib/Bcfg2/Server/Plugins/FileProbes.py
@@ -87,7 +87,7 @@ class FileProbes(Bcfg2.Server.Plugin.Plugin,
# for which update is false; we can't possibly do
# anything with the data we get from such a probe
if (entry.get('update', 'false').lower() == "false" and
- not cfg.has_generator(entry, metadata)):
+ not cfg.has_generator(entry, metadata)):
continue
self.entries[metadata.hostname][path] = entry
probe = lxml.etree.Element('probe', name=path,
diff --git a/src/lib/Bcfg2/Server/Plugins/GroupPatterns.py b/src/lib/Bcfg2/Server/Plugins/GroupPatterns.py
index 90cbd083d..767ae6254 100644
--- a/src/lib/Bcfg2/Server/Plugins/GroupPatterns.py
+++ b/src/lib/Bcfg2/Server/Plugins/GroupPatterns.py
@@ -77,7 +77,7 @@ class PatternFile(Bcfg2.Server.Plugin.XMLFileBacked):
def Index(self):
Bcfg2.Server.Plugin.XMLFileBacked.Index(self)
if (self.core and
- self.core.metadata_cache_mode in ['cautious', 'aggressive']):
+ self.core.metadata_cache_mode in ['cautious', 'aggressive']):
self.core.metadata_cache.expire()
self.patterns = []
for entry in self.xdata.xpath('//GroupPattern'):
diff --git a/src/lib/Bcfg2/Server/Plugins/Ldap.py b/src/lib/Bcfg2/Server/Plugins/Ldap.py
index 6fc89b4f3..2be27c6d7 100644
--- a/src/lib/Bcfg2/Server/Plugins/Ldap.py
+++ b/src/lib/Bcfg2/Server/Plugins/Ldap.py
@@ -73,7 +73,7 @@ class Ldap(Bcfg2.Server.Plugin.Plugin, Bcfg2.Server.Plugin.Connector):
Bcfg2.Server.Plugin.Connector.__init__(self)
self.config = ConfigFile(self.data + "/config.py")
- def debug_log(self, message, flag = None):
+ def debug_log(self, message, flag=None):
if (flag is None) and self.debug_flag or flag:
self.logger.error(message)
@@ -82,37 +82,39 @@ class Ldap(Bcfg2.Server.Plugin.Plugin, Bcfg2.Server.Plugin.Connector):
try:
data = {}
self.debug_log("LdapPlugin debug: found queries " +
- str(LDAP_QUERIES))
+ str(LDAP_QUERIES))
for QueryClass in LDAP_QUERIES:
query = QueryClass()
if query.is_applicable(metadata):
self.debug_log("LdapPlugin debug: processing query '" +
- query.name + "'")
+ query.name + "'")
data[query.name] = query.get_result(metadata)
else:
self.debug_log("LdapPlugin debug: query '" + query.name +
- "' not applicable to host '" + metadata.hostname + "'")
+ "' not applicable to host '" +
+ metadata.hostname + "'")
return data
except Exception:
if hasattr(query, "name"):
logger.error("LdapPlugin error: " +
- "Exception during processing of query named '" +
- str(query.name) +
- "', query results will be empty" +
- " and may cause bind failures")
+ "Exception during processing of query named '" +
+ str(query.name) +
+ "', query results will be empty" +
+ " and may cause bind failures")
for line in traceback.format_exception(sys.exc_info()[0],
sys.exc_info()[1],
sys.exc_info()[2]):
logger.error("LdapPlugin error: " +
- line.replace("\n", ""))
+ line.replace("\n", ""))
return {}
+
class LdapConnection(object):
"""
Connection to an LDAP server.
"""
- def __init__(self, host = "localhost", port = 389,
- binddn = None, bindpw = None):
+ def __init__(self, host="localhost", port=389,
+ binddn=None, bindpw=None):
self.host = host
self.port = port
self.binddn = binddn
@@ -133,8 +135,8 @@ class LdapConnection(object):
for attempt in range(RETRY_COUNT + 1):
if attempt >= 1:
logger.error("LdapPlugin error: " +
- "LDAP server down (retry " + str(attempt) + "/" +
- str(RETRY_COUNT) + ")")
+ "LDAP server down (retry " + str(attempt) + "/" +
+ str(RETRY_COUNT) + ")")
try:
if not self.conn:
self.init_conn()
@@ -154,6 +156,7 @@ class LdapConnection(object):
def url(self):
return "ldap://" + self.host + ":" + str(self.port)
+
class LdapQuery(object):
"""
Query referencing an LdapConnection and providing several
@@ -210,9 +213,10 @@ class LdapQuery(object):
return self.result
else:
logger.error("LdapPlugin error: " +
- "No valid connection defined for query " + str(self))
+ "No valid connection defined for query " + str(self))
return None
+
class LdapSubQuery(LdapQuery):
"""
SubQueries are meant for internal use only and are not added
@@ -243,5 +247,5 @@ class LdapSubQuery(LdapQuery):
return self.process_result(metadata, **kwargs)
else:
logger.error("LdapPlugin error: " +
- "No valid connection defined for query " + str(self))
+ "No valid connection defined for query " + str(self))
return None
diff --git a/src/lib/Bcfg2/Server/Plugins/Metadata.py b/src/lib/Bcfg2/Server/Plugins/Metadata.py
index db104b27e..073424c90 100644
--- a/src/lib/Bcfg2/Server/Plugins/Metadata.py
+++ b/src/lib/Bcfg2/Server/Plugins/Metadata.py
@@ -520,7 +520,7 @@ class Metadata(Bcfg2.Server.Plugin.Metadata,
self.handlers = dict()
self.groups_xml = self._handle_file("groups.xml")
if (self._use_db and
- os.path.exists(os.path.join(self.data, "clients.xml"))):
+ os.path.exists(os.path.join(self.data, "clients.xml"))):
self.logger.warning("Metadata: database enabled but clients.xml "
"found, parsing in compatibility mode")
self.clients_xml = self._handle_file("clients.xml")
@@ -614,7 +614,7 @@ class Metadata(Bcfg2.Server.Plugin.Metadata,
elif alias:
for child in node:
if (child.tag == "Alias" and
- child.attrib["name"] == name):
+ child.attrib["name"] == name):
return node
return None
@@ -816,7 +816,7 @@ class Metadata(Bcfg2.Server.Plugin.Metadata,
if client.get('secure', 'false').lower() == 'true':
self.secure.append(clname)
if (client.get('location', 'fixed') == 'floating' or
- client.get('floating', 'false').lower() == 'true'):
+ client.get('floating', 'false').lower() == 'true'):
self.floating.append(clname)
if 'password' in client.attrib:
self.passwords[clname] = client.get('password')
@@ -1103,7 +1103,7 @@ class Metadata(Bcfg2.Server.Plugin.Metadata,
for p in self.group_membership[grpname]):
newgroups.add(grpname)
if (grpname in self.groups and
- self.groups[grpname].category):
+ self.groups[grpname].category):
categories[self.groups[grpname].category] = grpname
groups.update(newgroups)
for grpname, predicates in self.negated_groups.items():
@@ -1112,7 +1112,7 @@ class Metadata(Bcfg2.Server.Plugin.Metadata,
if any(p(client, groups, categories) for p in predicates):
removegroups.add(grpname)
if (grpname in self.groups and
- self.groups[grpname].category):
+ self.groups[grpname].category):
del categories[self.groups[grpname].category]
groups.difference_update(removegroups)
return (groups, categories)
@@ -1406,7 +1406,7 @@ class Metadata(Bcfg2.Server.Plugin.Metadata,
# next we validate the address
if (id_method != 'uuid' and
- not self.validate_client_address(client, address)):
+ not self.validate_client_address(client, address)):
return False
if id_method == 'cert' and auth_type != 'cert+password':
@@ -1555,7 +1555,7 @@ class Metadata(Bcfg2.Server.Plugin.Metadata,
for group in egroups:
for parent in group.findall('Group'):
if (parent.get('name') not in gseen and
- include_group(parent.get('name'))):
+ include_group(parent.get('name'))):
rv.append(gfmt % (parent.get('name'),
parent.get('name')))
gseen.append(parent.get("name"))
diff --git a/src/lib/Bcfg2/Server/Plugins/Packages/Yum.py b/src/lib/Bcfg2/Server/Plugins/Packages/Yum.py
index 5f66cb8a0..b98d3f419 100644
--- a/src/lib/Bcfg2/Server/Plugins/Packages/Yum.py
+++ b/src/lib/Bcfg2/Server/Plugins/Packages/Yum.py
@@ -575,7 +575,7 @@ class YumCollection(Collection):
# each pulp source can only have one arch, so we don't
# have to check the arch in url_map
if (source.pulp_id and
- source.pulp_id not in consumer['repoids']):
+ source.pulp_id not in consumer['repoids']):
try:
consumerapi.bind(self.metadata.hostname,
source.pulp_id)
diff --git a/src/lib/Bcfg2/Server/Plugins/Packages/__init__.py b/src/lib/Bcfg2/Server/Plugins/Packages/__init__.py
index 56285705a..4c685d427 100644
--- a/src/lib/Bcfg2/Server/Plugins/Packages/__init__.py
+++ b/src/lib/Bcfg2/Server/Plugins/Packages/__init__.py
@@ -342,7 +342,7 @@ class Packages(Bcfg2.Server.Plugin.Plugin,
:type collection: Bcfg2.Server.Plugins.Packages.Collection.Collection
"""
if (not Bcfg2.Options.setup.packages_metadata or
- not Bcfg2.Options.setup.packages_resolver):
+ not Bcfg2.Options.setup.packages_resolver):
# Config requests no resolver. Note that disabling
# metadata implies disabling the resolver.
for struct in structures:
@@ -494,7 +494,7 @@ class Packages(Bcfg2.Server.Plugin.Plugin,
if localfile not in keyfiles:
keyfiles.append(localfile)
if ((force_update and key not in keys) or
- not os.path.exists(localfile)):
+ not os.path.exists(localfile)):
self.logger.info("Packages: Downloading and parsing %s" %
key)
try:
diff --git a/src/lib/Bcfg2/Server/Plugins/Pkgmgr.py b/src/lib/Bcfg2/Server/Plugins/Pkgmgr.py
index c85bc7d41..c7d8986ed 100644
--- a/src/lib/Bcfg2/Server/Plugins/Pkgmgr.py
+++ b/src/lib/Bcfg2/Server/Plugins/Pkgmgr.py
@@ -97,7 +97,7 @@ class PNode(object):
self.contents['Package'] = FuzzyDict()
for pkg in data.findall('./Package'):
if ('name' in pkg.attrib and
- pkg.get('name') not in pdict['Package']):
+ pkg.get('name') not in pdict['Package']):
pdict['Package'].add(pkg.get('name'))
if pkg.get('name') is not None:
self.contents['Package'][pkg.get('name')] = {}
@@ -123,7 +123,7 @@ class PNode(object):
pkg.set('url', '%s/%s' % (pkg.get('uri'),
pkg.get('file')))
if (pkg.get('type') in self.splitters and
- pkg.get('file') is not None):
+ pkg.get('file') is not None):
mdata = \
self.splitters[pkg.get('type')].match(pkg.get('file'))
if not mdata:
diff --git a/src/lib/Bcfg2/Server/Plugins/Probes.py b/src/lib/Bcfg2/Server/Plugins/Probes.py
index 560546c70..568d1b3a0 100644
--- a/src/lib/Bcfg2/Server/Plugins/Probes.py
+++ b/src/lib/Bcfg2/Server/Plugins/Probes.py
@@ -357,7 +357,7 @@ class ProbeSet(Bcfg2.Server.Plugin.EntrySet):
def HandleEvent(self, event):
""" handle events on everything but probed.xml """
if (event.filename != self.path and
- not event.filename.endswith("probed.xml")):
+ not event.filename.endswith("probed.xml")):
return self.handle_event(event)
def get_probe_data(self, metadata):
@@ -384,7 +384,7 @@ class ProbeSet(Bcfg2.Server.Plugin.EntrySet):
probe.set('name', os.path.basename(name))
probe.set('source', self.plugin_name)
if (metadata.version_info and
- metadata.version_info > (1, 3, 1, '', 0)):
+ metadata.version_info > (1, 3, 1, '', 0)):
try:
probe.text = entry.data.decode('utf-8')
except AttributeError:
diff --git a/src/lib/Bcfg2/Server/Plugins/Svn.py b/src/lib/Bcfg2/Server/Plugins/Svn.py
index 679e38ff9..6266e9fd2 100644
--- a/src/lib/Bcfg2/Server/Plugins/Svn.py
+++ b/src/lib/Bcfg2/Server/Plugins/Svn.py
@@ -60,7 +60,7 @@ class Svn(Bcfg2.Server.Plugin.Version):
self.ssl_server_trust_prompt
if (Bcfg2.Options.setup.svn_user and
- Bcfg2.Options.setup.svn_password):
+ Bcfg2.Options.setup.svn_password):
self.client.callback_get_login = self.get_login
self.logger.debug("Svn: Initialized svn plugin with SVN directory %s" %
diff --git a/src/lib/Bcfg2/Server/Reports/reports/models.py b/src/lib/Bcfg2/Server/Reports/reports/models.py
index 73adaaaaf..c7105fdd9 100644
--- a/src/lib/Bcfg2/Server/Reports/reports/models.py
+++ b/src/lib/Bcfg2/Server/Reports/reports/models.py
@@ -51,7 +51,7 @@ class ClientManager(models.Manager):
yet been expired as of optional timestamp argument. Timestamp
should be a datetime object."""
- if timestamp == None:
+ if timestamp is None:
timestamp = datetime.now()
elif not isinstance(timestamp, datetime):
raise ValueError('Expected a datetime object')
@@ -62,8 +62,9 @@ class ClientManager(models.Manager):
except ValueError:
return self.none()
- return self.filter(Q(expiration__gt=timestamp) | Q(expiration__isnull=True),
- creation__lt=timestamp)
+ return self.filter(
+ Q(expiration__gt=timestamp) | Q(expiration__isnull=True),
+ creation__lt=timestamp)
class Client(models.Model):
@@ -99,7 +100,8 @@ class InteractiveManager(models.Manager):
if maxdate and not isinstance(maxdate, datetime):
raise ValueError('Expected a datetime object')
- return self.filter(id__in=self.get_interaction_per_client_ids(maxdate, active_only))
+ return self.filter(
+ id__in=self.get_interaction_per_client_ids(maxdate, active_only))
def get_interaction_per_client_ids(self, maxdate=None, active_only=True):
"""
@@ -114,15 +116,17 @@ class InteractiveManager(models.Manager):
cursor = connection.cursor()
cfilter = "expiration is null"
- sql = 'select reports_interaction.id, x.client_id from (select client_id, MAX(timestamp) ' + \
- 'as timer from reports_interaction'
+ sql = 'select reports_interaction.id, x.client_id ' + \
+ 'from (select client_id, MAX(timestamp) ' + \
+ 'as timer from reports_interaction'
if maxdate:
if not isinstance(maxdate, datetime):
raise ValueError('Expected a datetime object')
sql = sql + " where timestamp <= '%s' " % maxdate
cfilter = "(expiration is null or expiration > '%s') and creation <= '%s'" % (maxdate, maxdate)
sql = sql + ' GROUP BY client_id) x, reports_interaction where ' + \
- 'reports_interaction.client_id = x.client_id AND reports_interaction.timestamp = x.timer'
+ 'reports_interaction.client_id = x.client_id AND ' + \
+ 'reports_interaction.timestamp = x.timer'
if active_only:
sql = sql + " and x.client_id in (select id from reports_client where %s)" % \
cfilter
@@ -136,14 +140,16 @@ class InteractiveManager(models.Manager):
class Interaction(models.Model):
- """Models each reconfiguration operation interaction between client and server."""
+ """Models each reconfiguration operation
+ interaction between client and server."""
client = models.ForeignKey(Client, related_name="interactions")
- timestamp = models.DateTimeField(db_index=True) # Timestamp for this record
+ timestamp = models.DateTimeField(db_index=True) # record timestamp
state = models.CharField(max_length=32) # good/bad/modified/etc
- repo_rev_code = models.CharField(max_length=64) # repo revision at time of interaction
+ # repository revision at the time of the latest interaction
+ repo_rev_code = models.CharField(max_length=64)
goodcount = models.IntegerField() # of good config-items
totalcount = models.IntegerField() # of total config-items
- server = models.CharField(max_length=256) # Name of the server used for the interaction
+ server = models.CharField(max_length=256) # server used for interaction
bad_entries = models.IntegerField(default=-1)
modified_entries = models.IntegerField(default=-1)
extra_entries = models.IntegerField(default=-1)
@@ -389,5 +395,3 @@ class InteractionMetadata(models.Model):
profile = models.ForeignKey(Group, related_name="+")
groups = models.ManyToManyField(Group)
bundles = models.ManyToManyField(Bundle)
-
-
diff --git a/src/lib/Bcfg2/Server/Reports/updatefix.py b/src/lib/Bcfg2/Server/Reports/updatefix.py
index cb131c29d..c3fbcd2e9 100644
--- a/src/lib/Bcfg2/Server/Reports/updatefix.py
+++ b/src/lib/Bcfg2/Server/Reports/updatefix.py
@@ -16,9 +16,9 @@ def _merge_database_table_entries():
find_cursor = connection.cursor()
cursor.execute("""
Select name, kind from reports_bad
- union
+ union
select name, kind from reports_modified
- union
+ union
select name, kind from reports_extra
""")
# this fetch could be better done
@@ -43,20 +43,26 @@ def _merge_database_table_entries():
if entries_map.get(key, None):
entry_id = entries_map[key]
else:
- find_cursor.execute("Select id from reports_entries where name=%s and kind=%s", key)
+ find_cursor.execute("Select id from reports_entries where "
+ "name=%s and kind=%s", key)
rowe = find_cursor.fetchone()
entry_id = rowe[0]
- insert_cursor.execute("insert into reports_entries_interactions \
- (entry_id, interaction_id, reason_id, type) values (%s, %s, %s, %s)", (entry_id, row[3], row[2], row[4]))
+ insert_cursor.execute("insert into reports_entries_interactions "
+ "(entry_id, interaction_id, reason_id, type) "
+ "values (%s, %s, %s, %s)",
+ (entry_id, row[3], row[2], row[4]))
def _interactions_constraint_or_idx():
'''sqlite doesn't support alter tables.. or constraints'''
cursor = connection.cursor()
try:
- cursor.execute('alter table reports_interaction add constraint reports_interaction_20100601 unique (client_id,timestamp)')
+ cursor.execute('alter table reports_interaction '
+ 'add constraint reports_interaction_20100601 '
+ 'unique (client_id,timestamp)')
except:
- cursor.execute('create unique index reports_interaction_20100601 on reports_interaction (client_id,timestamp)')
+ cursor.execute('create unique index reports_interaction_20100601 '
+ 'on reports_interaction (client_id,timestamp)')
def _populate_interaction_entry_counts():
@@ -67,13 +73,16 @@ def _populate_interaction_entry_counts():
3: 'extra_entries'}
for type in list(count_field.keys()):
- cursor.execute("select count(type), interaction_id " +
- "from reports_entries_interactions where type = %s group by interaction_id" % type)
+ cursor.execute("select count(type), interaction_id "
+ "from reports_entries_interactions "
+ "where type = %s group by interaction_id" % type)
updates = []
for row in cursor.fetchall():
updates.append(row)
try:
- cursor.executemany("update reports_interaction set " + count_field[type] + "=%s where id = %s", updates)
+ cursor.executemany("update reports_interaction set " +
+ count_field[type] +
+ "=%s where id = %s", updates)
except Exception:
e = sys.exc_info()[1]
print(e)
@@ -106,9 +115,8 @@ _fixes = [_merge_database_table_entries,
_interactions_constraint_or_idx,
'alter table reports_reason add is_binary bool NOT NULL default False;',
'alter table reports_reason add is_sensitive bool NOT NULL default False;',
- update_noop, #_remove_table_column('reports_interaction', 'client_version'),
- "alter table reports_reason add unpruned varchar(1280) not null default 'N/A';",
-]
+ update_noop, # _remove_table_column('reports_interaction', 'client_version'),
+ "alter table reports_reason add unpruned varchar(1280) not null default 'N/A';"]
# this will calculate the last possible version of the database
lastversion = len(_fixes)
@@ -127,8 +135,10 @@ def rollupdate(current_version):
else:
_fixes[i]()
except:
- logger.error("Failed to perform db update %s" % (_fixes[i]), exc_info=1)
- # since array start at 0 but version start at 1 we add 1 to the normal count
+ logger.error("Failed to perform db update %s" % (_fixes[i]),
+ exc_info=1)
+ # since array start at 0 but version start at 1
+ # we add 1 to the normal count
ret = InternalDatabaseVersion.objects.create(version=i + 1)
return ret
else:
diff --git a/src/lib/Bcfg2/Server/SSLServer.py b/src/lib/Bcfg2/Server/SSLServer.py
index 6a3948f40..5e6846a44 100644
--- a/src/lib/Bcfg2/Server/SSLServer.py
+++ b/src/lib/Bcfg2/Server/SSLServer.py
@@ -118,7 +118,7 @@ class SSLServer(SocketServer.TCPServer, object):
self.socket.settimeout(timeout)
self.keyfile = keyfile
if (keyfile is not None and
- (keyfile == False or
+ (keyfile is False or
not os.path.exists(keyfile) or
not os.access(keyfile, os.R_OK))):
msg = "Keyfile %s does not exist or is not readable" % keyfile
@@ -126,7 +126,7 @@ class SSLServer(SocketServer.TCPServer, object):
raise Exception(msg)
self.certfile = certfile
if (certfile is not None and
- (certfile == False or
+ (certfile is False or
not os.path.exists(certfile) or
not os.access(certfile, os.R_OK))):
msg = "Certfile %s does not exist or is not readable" % certfile
@@ -134,7 +134,7 @@ class SSLServer(SocketServer.TCPServer, object):
raise Exception(msg)
self.ca = ca
if (ca is not None and
- (ca == False or
+ (ca is False or
not os.path.exists(ca) or
not os.access(ca, os.R_OK))):
msg = "CA %s does not exist or is not readable" % ca