summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--AUTHORS2
-rw-r--r--doc/appendix/guides/bootstrap.txt6
-rw-r--r--doc/appendix/guides/converging_rhel5.txt4
-rw-r--r--doc/appendix/guides/fedora.txt28
-rw-r--r--doc/appendix/guides/ubuntu.txt58
-rw-r--r--doc/client/agent.txt2
-rw-r--r--doc/client/tools/actions.txt6
-rw-r--r--doc/development/plugins.txt2
-rw-r--r--doc/help/troubleshooting.txt4
-rw-r--r--doc/server/configurationentries.txt22
-rw-r--r--doc/server/info.txt8
-rw-r--r--doc/server/plugins/generators/decisions.txt1
-rw-r--r--doc/server/plugins/generators/packages.txt239
-rw-r--r--doc/server/plugins/generators/rules.txt67
-rw-r--r--doc/server/plugins/generators/tcheetah.txt18
-rw-r--r--doc/server/plugins/generators/tgenshi/clientsxml.txt6
-rw-r--r--doc/server/plugins/generators/tgenshi/test.txt34
-rw-r--r--doc/server/plugins/grouping/metadata.txt4
-rw-r--r--doc/server/plugins/structures/bundler/index.txt69
-rw-r--r--doc/server/plugins/structures/defaults.txt31
-rw-r--r--man/bcfg2.13
-rw-r--r--man/bcfg2.conf.53
-rw-r--r--misc/bcfg2.spec2
-rw-r--r--schemas/base.xsd2
-rw-r--r--schemas/bundle.xsd10
-rw-r--r--schemas/defaults.xsd68
-rw-r--r--schemas/deps.xsd6
-rw-r--r--schemas/grouppatterns.xsd23
-rw-r--r--schemas/packages.xsd8
-rw-r--r--schemas/pathentry.xsd3
-rw-r--r--schemas/pkgtype.xsd56
-rw-r--r--schemas/rules.xsd11
-rw-r--r--schemas/servicetype.xsd35
-rw-r--r--schemas/types.xsd14
-rwxr-xr-x[-rw-r--r--]setup.py1
-rw-r--r--src/lib/Bcfg2Py3k.py5
-rw-r--r--src/lib/Client/Frame.py37
-rw-r--r--src/lib/Client/Tools/APT.py1
-rw-r--r--src/lib/Client/Tools/DebInit.py4
-rw-r--r--src/lib/Client/Tools/POSIX.py14
-rw-r--r--src/lib/Client/Tools/YUMng.py15
-rw-r--r--src/lib/Client/Tools/__init__.py3
-rw-r--r--src/lib/Options.py9
-rw-r--r--src/lib/SSLServer.py29
-rw-r--r--src/lib/Server/Admin/Init.py137
-rw-r--r--src/lib/Server/Admin/__init__.py5
-rw-r--r--src/lib/Server/Core.py2
-rw-r--r--src/lib/Server/Lint/Bundles.py6
-rw-r--r--src/lib/Server/Lint/Comments.py2
-rw-r--r--src/lib/Server/Lint/MergeFiles.py9
-rw-r--r--src/lib/Server/Lint/RequiredAttrs.py123
-rw-r--r--src/lib/Server/Lint/Validate.py18
-rw-r--r--src/lib/Server/Lint/__init__.py2
-rw-r--r--src/lib/Server/Plugin.py49
-rw-r--r--src/lib/Server/Plugins/Cfg.py2
-rw-r--r--src/lib/Server/Plugins/Defaults.py51
-rw-r--r--src/lib/Server/Plugins/GroupPatterns.py21
-rw-r--r--src/lib/Server/Plugins/Packages.py1320
-rw-r--r--src/lib/Server/Plugins/Packages/Apt.py142
-rw-r--r--src/lib/Server/Plugins/Packages/Collection.py336
-rw-r--r--src/lib/Server/Plugins/Packages/Pac.py122
-rw-r--r--src/lib/Server/Plugins/Packages/PackagesConfig.py28
-rw-r--r--src/lib/Server/Plugins/Packages/PackagesSources.py66
-rw-r--r--src/lib/Server/Plugins/Packages/Source.py262
-rw-r--r--src/lib/Server/Plugins/Packages/Yum.py950
-rw-r--r--src/lib/Server/Plugins/Packages/__init__.py226
-rw-r--r--src/lib/Server/Plugins/Pkgmgr.py5
-rw-r--r--src/lib/Server/Plugins/SGenshi.py26
-rw-r--r--src/lib/Server/Reports/settings.py14
-rwxr-xr-xsrc/sbin/bcfg2-admin2
70 files changed, 3058 insertions, 1841 deletions
diff --git a/AUTHORS b/AUTHORS
index 4fe9da9ba..f13deb691 100644
--- a/AUTHORS
+++ b/AUTHORS
@@ -1,4 +1,4 @@
-In chronological order:
+In no particular order:
- Narayan Desai <desai@mcs.anl.gov> has written most of Bcfg2,
including all parts not explicitly mentioned in this file.
diff --git a/doc/appendix/guides/bootstrap.txt b/doc/appendix/guides/bootstrap.txt
index b9b5f318e..a5133c4c1 100644
--- a/doc/appendix/guides/bootstrap.txt
+++ b/doc/appendix/guides/bootstrap.txt
@@ -34,6 +34,6 @@ you to specify the options that are normally found in the client's
bcfg2 -x password -p basic -S https://bcfg2-server:6789
The above command will add the client to ``Metadata/clients.xml`` with the
-profile *basic*. Generally, the configuration given to the client by the
-bcfg2 server in this initial run will include the ``/etc/bcfg2.conf`` file
-so that the client won't need to specify these options on future runs.
+profile *basic*. Generally, you should include ``/etc/bcfg2.conf`` in the
+configuration given to the client by the bcfg2 server in this initial run
+to avoid specifying these options on future runs.
diff --git a/doc/appendix/guides/converging_rhel5.txt b/doc/appendix/guides/converging_rhel5.txt
index d6a9d2d1c..1b52e198d 100644
--- a/doc/appendix/guides/converging_rhel5.txt
+++ b/doc/appendix/guides/converging_rhel5.txt
@@ -19,7 +19,7 @@ Unmanaged entries
* Package (top-level)
#. Enable the "Packages" plugin in ``/etc/bcfg2.conf``, and configure
- the Yum repositories in ``/var/lib/bcfg2/Packages/config.xml``.
+ the Yum repositories in ``/var/lib/bcfg2/Packages/sources.xml``.
#. If a package is unwanted, remove it::
sudo yum remove PACKAGE
@@ -29,7 +29,7 @@ Unmanaged entries
* Package (dependency)
#. Ensure the Yum repository sources configured in
- ``/var/lib/bcfg2/Packages/config.xml`` are correct.
+ ``/var/lib/bcfg2/Packages/sources.xml`` are correct.
#. Ensure the Yum repositories themselves are up-to-date with the main
package and dependencies.
#. Rebuild the Packages plugin cache::
diff --git a/doc/appendix/guides/fedora.txt b/doc/appendix/guides/fedora.txt
index 7aaa02815..9d11414ef 100644
--- a/doc/appendix/guides/fedora.txt
+++ b/doc/appendix/guides/fedora.txt
@@ -190,28 +190,32 @@ Setup the :ref:`server-plugins-generators-packages` plugin
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
First, replace **Pkgmgr** with **Packages** in the plugins
-line of ``bcfg2.conf``. Then create `Packages/` directory in
+line of ``bcfg2.conf``. Then create a `Packages/` directory in
``/var/lib/bcfg2`` ::
$ su -c 'mkdir /var/lib/bcfg2/Packages'
-Create a ``config.xml`` file for the packages in
-``/var/lib/bcfg2/Packages`` with the following content. Choose a
-mirror near your location according the `Mirror list`_ .
+Create a ``packages.conf`` in the ``/var/lib/bcfg2/Packages`` directory
+with the following contents::
+
+ [global]
+
+Create a ``sources.xml`` file for the packages in
+``/var/lib/bcfg2/Packages`` with the following content. Choose a mirror
+near your location according to the `Mirror list`_.
.. _Mirror list: http://mirrors.fedoraproject.org/publiclist/
.. code-block:: xml
<Sources>
- <YUMSource>
- <Group>fedora-13</Group>
- <URL>ftp://fedora.tu-chemnitz.de/pub/linux/fedora/linux/releases/</URL>
- <Version>13</Version>
- <Component>Fedora</Component>
- <Arch>i386</Arch>
- <Arch>x86_64</Arch>
- </YUMSource>
+ <Group name="fedora-13">
+ <Source type="yum" url="ftp://fedora.tu-chemnitz.de/pub/linux/fedora/linux/releases/" version="13">
+ <Component>Fedora</Component>
+ <Arch>i386</Arch>
+ <Arch>x86_64</Arch>
+          </Source>
+ </Group>
</Sources>
diff --git a/doc/appendix/guides/ubuntu.txt b/doc/appendix/guides/ubuntu.txt
index 6655a3b04..fe5564d19 100644
--- a/doc/appendix/guides/ubuntu.txt
+++ b/doc/appendix/guides/ubuntu.txt
@@ -152,39 +152,33 @@ Create Packages layout (as per :ref:`packages-exampleusage`) in
.. code-block:: xml
root@lucid:~# mkdir /var/lib/bcfg2/Packages
- root@lucid:~# cat /var/lib/bcfg2/Packages/config.xml
+ root@lucid:~# cat /var/lib/bcfg2/Packages/packages.conf
+ [global]
+ root@lucid:~# cat /var/lib/bcfg2/Packages/sources.xml
<Sources>
- <APTSource>
- <Group>ubuntu-lucid</Group>
- <URL>http://us.archive.ubuntu.com/ubuntu</URL>
- <Version>lucid</Version>
- <Component>main</Component>
- <Component>multiverse</Component>
- <Component>restricted</Component>
- <Component>universe</Component>
- <Arch>amd64</Arch>
- <Arch>i386</Arch>
- </APTSource>
- <APTSource>
- <Group>lucid</Group>
- <URL>http://archive.ubuntu.com/ubuntu</URL>
- <Version>lucid-updates</Version>
- <Component>main</Component>
- <Component>multiverse</Component>
- <Component>restricted</Component>
- <Component>universe</Component>
- <Arch>amd64</Arch>
- </APTSource>
- <APTSource>
- <Group>lucid</Group>
- <URL>http://security.ubuntu.com/ubuntu</URL>
- <Version>lucid-security</Version>
- <Component>main</Component>
- <Component>multiverse</Component>
- <Component>restricted</Component>
- <Component>universe</Component>
- <Arch>amd64</Arch>
- </APTSource>
+ <Group name="lucid">
+ <Source type="apt" url="http://archive.ubuntu.com/ubuntu" version="lucid">
+ <Component>main</Component>
+ <Component>multiverse</Component>
+ <Component>restricted</Component>
+ <Component>universe</Component>
+ <Arch>amd64</Arch>
+ </Source>
+ <Source type="apt" url="http://archive.ubuntu.com/ubuntu" version="lucid-updates">
+ <Component>main</Component>
+ <Component>multiverse</Component>
+ <Component>restricted</Component>
+ <Component>universe</Component>
+ <Arch>amd64</Arch>
+ </Source>
+ <Source type="apt" url="http://security.ubuntu.com/ubuntu" version="lucid-security">
+ <Component>main</Component>
+ <Component>multiverse</Component>
+ <Component>restricted</Component>
+ <Component>universe</Component>
+ <Arch>amd64</Arch>
+ </Source>
+ </Group>
</Sources>
Due to the :ref:`server-plugins-generators-packages-magic-groups`,
diff --git a/doc/client/agent.txt b/doc/client/agent.txt
index ef152133e..770c2a41c 100644
--- a/doc/client/agent.txt
+++ b/doc/client/agent.txt
@@ -24,7 +24,7 @@ is pretty easy:
The key fingerprint is:
aa:25:9b:a7:10:60:f3:eb:2b:ae:4b:1a:42:1b:63:5d desai@ubik
-#. Add this this public key to root's authorized_keys file, with several
+#. Add this public key to root's authorized_keys file, with several
commands prepended to it::
command="/usr/sbin/bcfg2 -q <other options>",no-port-forwarding,no-X11-forwarding,no-pty,no-agent-forwarding,from="<bcfg2-server ipaddr>" <pub key>
diff --git a/doc/client/tools/actions.txt b/doc/client/tools/actions.txt
index 11922b00d..49488dfbe 100644
--- a/doc/client/tools/actions.txt
+++ b/doc/client/tools/actions.txt
@@ -20,7 +20,8 @@ so they can be centrally observed. Actions look like:
name='name'
command='cmd text'
when='always|modified'
- status='ignore|check'/>
+ status='ignore|check'
+ build='true|false'/>
+-----------+------------------+-------------------------------------------+
| Attribute | Values | Meaning |
@@ -37,6 +38,9 @@ so they can be centrally observed. Actions look like:
| status | ignore, check | If the return code of the action |
| | | should be reported or not |
+-----------+------------------+-------------------------------------------+
+| build | true, false | Also execute the action in build mode |
+| | | (default: true) |
++-----------+------------------+-------------------------------------------+
Note that the status attribute tells the bcfg2 client to ignore
return status, causing failures to still not be centrally reported. If
diff --git a/doc/development/plugins.txt b/doc/development/plugins.txt
index e6a5a73a0..15b512365 100644
--- a/doc/development/plugins.txt
+++ b/doc/development/plugins.txt
@@ -175,7 +175,7 @@ Example Connector
def get_additional_data(self, metadata):
mydata = {}
- for data in self.store.entries['foo.xml'].data.get("foo", []):
+ for data in self.store.entries['foo.xml'].xdata.get("foo", []):
mydata[data] = "bar"
diff --git a/doc/help/troubleshooting.txt b/doc/help/troubleshooting.txt
index 7aeb0f247..b964154e0 100644
--- a/doc/help/troubleshooting.txt
+++ b/doc/help/troubleshooting.txt
@@ -202,7 +202,7 @@ Server Errors
| Packages: No matching | Server | None of the sources | [s12]_ |
| sources for client | | defined in the | |
| <clientname>; improper group | | Package plugin's | |
-| memberships? | | ``config.xml`` | |
+| memberships? | | ``sources.xml`` | |
| | | apply to the client | |
+------------------------------+----------+---------------------+--------------+
@@ -222,7 +222,7 @@ Server Errors
section of ``bcfg2.conf``.
.. [s8] For packages listed other than **gpg-pubkey**, this error means
that the Packages plugin is unable to find the package in any of
- the sources listed in ``Packages/config.xml``. The issue often
+ the sources listed in ``Packages/sources.xml``. The issue often
arises when the client is not in one of the groups necessary for
the Source listed. In the case of gpg-pubkey, you can safely
ignore the message as the Packages plugin has no knowledge of
diff --git a/doc/server/configurationentries.txt b/doc/server/configurationentries.txt
index fbce092b9..be9ea987c 100644
--- a/doc/server/configurationentries.txt
+++ b/doc/server/configurationentries.txt
@@ -22,17 +22,17 @@ entries used by Bcfg2.
Non-POSIX entries
=================
-+-------------+---------------------+-----------------------------+
-| TagName | Description | Attributes |
-+=============+=====================+=============================+
-| Action | Command | name, command, when, timing |
-+-------------+---------------------+-----------------------------+
-| Package | Software Packages | name, type, version, url |
-+-------------+---------------------+-----------------------------+
-| PostInstall | PostInstall command | name |
-+-------------+---------------------+-----------------------------+
-| Service | System Services | name, type, status, target |
-+-------------+---------------------+-----------------------------+
++-------------+---------------------+--------------------------------------------+
+| TagName | Description | Attributes |
++=============+=====================+============================================+
+| Action | Command | name, command, when, timing, status, build |
++-------------+---------------------+--------------------------------------------+
+| Package | Software Packages | name, type, version, url |
++-------------+---------------------+--------------------------------------------+
+| PostInstall | PostInstall command | name |
++-------------+---------------------+--------------------------------------------+
+| Service | System Services | name, type, status, target |
++-------------+---------------------+--------------------------------------------+
.. note::
diff --git a/doc/server/info.txt b/doc/server/info.txt
index c58bbd7a3..97bb92a0d 100644
--- a/doc/server/info.txt
+++ b/doc/server/info.txt
@@ -94,7 +94,9 @@ files are XML, and work similarly to those used by :ref:`Rules
The following specifies a different global set of permissions
(root/sys/0651) than on clients in group webserver or named
-"foo.example.com" (root/root/0652)::
+"foo.example.com" (root/root/0652).
+
+.. code-block:: xml
<FileInfo>
<Client name='foo.example.com'>
@@ -107,7 +109,9 @@ The following specifies a different global set of permissions
</FileInfo>
The following specifies a different set of permissions depending on
-the path of the file::
+the path of the file.
+
+.. code-block:: xml
<FileInfo>
<Path name="/etc/bcfg2-web.conf">
diff --git a/doc/server/plugins/generators/decisions.txt b/doc/server/plugins/generators/decisions.txt
index d75a9fede..acb1de6ee 100644
--- a/doc/server/plugins/generators/decisions.txt
+++ b/doc/server/plugins/generators/decisions.txt
@@ -67,7 +67,6 @@ can add comments such as this::
<!-- vim: set ft=xml : -->
-=============
Decision Mode
=============
diff --git a/doc/server/plugins/generators/packages.txt b/doc/server/plugins/generators/packages.txt
index fb145876c..2299da3ee 100644
--- a/doc/server/plugins/generators/packages.txt
+++ b/doc/server/plugins/generators/packages.txt
@@ -23,32 +23,43 @@ through those channels.
Packages is the only plugin that uses "magic groups". Most plugins
operate based on client group memberships, without any concern for the
particular names chosen for groups by the user. The Packages plugin is
-the sole exception to this rule. Packages needs to "know" two different
-sorts of facts about clients. The first is the basic OS/distro of the
-client, enabling classes of sources. The second is the architecture of
-the client, enabling sources for a given architecture. In addition to
-these magic groups, each source may also specify a non-magic group to
-limit the source's applicability to group member clients.
-
-+-----------+----------+--------------+
-| Source | OS Group | Architecture |
-+===========+==========+==============+
-| APTSource | debian | i386 |
-+-----------+----------+--------------+
-| APTSource | ubuntu | amd64 |
-+-----------+----------+--------------+
-| APTSource | nexenta | |
-+-----------+----------+--------------+
-| APTSource | apt | |
-+-----------+----------+--------------+
-| YUMSource | redhat | i386 |
-+-----------+----------+--------------+
-| YUMSource | centos | x86_64 |
-+-----------+----------+--------------+
-| YUMSource | fedora | |
-+-----------+----------+--------------+
-| YUMSource | yum | |
-+-----------+----------+--------------+
+the sole exception to this rule. Packages needs to "know" two
+different sorts of facts about clients. The first is the basic
+OS/distro of the client, enabling classes of sources. The second is
+the architecture of the client, enabling sources for a given
+architecture. In addition to these magic groups, each source may also
+specify non-magic groups to limit the source's applicability to group
+member clients.
+
++--------+----------+--------------+
+| Source | OS Group | Architecture |
++========+==========+==============+
+| Apt | debian | i386 |
++--------+----------+--------------+
+| Apt | ubuntu | amd64 |
++--------+----------+--------------+
+| Apt | nexenta | |
++--------+----------+--------------+
+| Apt | apt | |
++--------+----------+--------------+
+| Yum | redhat | i386 |
++--------+----------+--------------+
+| Yum | centos | x86_64 |
++--------+----------+--------------+
+| Yum | fedora | |
++--------+----------+--------------+
+| Yum | yum | |
++--------+----------+--------------+
+
+.. note::
+
+ .. versionadded:: 1.2.0
+
+ Magic OS groups can be disabled in Bcfg2 1.2 and greater by setting
+ ``magic_groups`` to ``0`` in ``Packages/packages.conf``. This may
+ give you greater flexibility in determining which source types to
+ use for which OSes. Magic architecture groups cannot be disabled.
+
Limiting sources to groups
==========================
@@ -113,9 +124,9 @@ plugin.
Disabling dependency resolution
-------------------------------
-.. versionadded:: 1.2.0
+.. versionadded:: 1.1.0
-Dependency resolution can now be disabled by adding this to
+Dependency resolution can be disabled by adding this to
``Packages/packages.conf`` in the ``global`` section::
[global]
@@ -126,8 +137,6 @@ All metadata processing can be disabled as well::
[global]
metadata=disabled
-.. _packages-exampleusage:
-
Blacklisting faulty dependencies
--------------------------------
@@ -136,18 +145,20 @@ Packages, please file a bug report so that we can fix the problem in
future releases. In the meantime, you can work around this issue by
blacklisting the offending Package in your Sources. The blacklist
element should immediately follow the Component section of your source
-and should look like the following:
-
-.. code-block:: xml
+and should look like the following::
<Blacklist>unwanted-packagename</Blacklist>
+If you use the built-in :ref:`Yum config generator
+<generating-client-configs>`, blacklisted packages will be added to
+the ``exclude`` list for the source.
+
Handling GPG Keys
-----------------
.. versionadded:: 1.2.0
-Packages can automatically handle GPG signing keys for Yum
+Packages can automatically handle GPG signing keys for Yum and Pulp
repositories. Simply specify the URL to the GPG key(s) for a
repository in ``sources.xml``::
@@ -163,11 +174,17 @@ With the keys specified thusly, Packages will include the keys in the
generated yum config file, and will ensure that the keys are imported
on the client.
+There is no need to specify ``<GPGKey>`` tags for :ref:`Pulp sources
+<pulp-source-support>`; that data is pulled directly from the Pulp
+REST API.
+
+.. _packages-exampleusage:
+
Example usage
=============
-Create a config.xml file in the Packages directory that looks something
-like this::
+Create a ``sources.xml`` file in the Packages directory that looks
+something like this::
<Sources>
<Group name="ubuntu-intrepid">
@@ -193,7 +210,9 @@ like this::
will notice that the default behavior for apt is to add Recommended
packages as dependencies. You can configure the Packages plugin to
add recommended packages by adding the ``recommended`` attribute,
- e.g.::
+ e.g.:
+
+ .. code-block:: xml
<Source type="apt" recommended="true" ...>
@@ -209,6 +228,7 @@ Yum sources can be similarly specified::
<Component>extras</Component>
<Arch>i386</Arch>
<Arch>x86_64</Arch>
+ <GPGKey>http://mirror.centos.org/centos/RPM-GPG-KEY-CentOS-5</GPGKey>
</Source>
</Group>
</Sources>
@@ -216,8 +236,21 @@ Yum sources can be similarly specified::
For sources with a **URL** attribute, the **Version** attribute is
also necessary.
+:ref:`Pulp sources <pulp-source-support>` are very simple to specify
+due to the amount of data that can be queried from Pulp itself::
+
+ <Sources>
+ <Group name="centos-6-x86_64">
+ <Source type="yum" pulp_id="centos-6-x86_64-os"/>
+ <Source type="yum" pulp_id="centos-6-x86_64-updates"/>
+ <Source type="yum" pulp_id="centos-6-x86_64-extras"/>
+ </Group>
+ </Sources>
+
.. note:: There is also a rawurl attribute for specifying sources that
- don't follow the conventional layout::
+ don't follow the conventional layout.
+
+ .. code-block:: xml
<Sources>
<Group name="centos5.4">
@@ -255,7 +288,7 @@ Configuration Updates
=====================
Packages will reload its configuration upon an explicit command via
-bcfg2-admin.::
+bcfg2-admin::
[0:3711] bcfg2-admin xcmd Packages.Refresh
True
@@ -272,9 +305,13 @@ will report information like::
Packages: Updating http://mirror.centos.org/centos/5/extras/x86_64/repodata/filelists.xml.gz
Packages: Updating http://mirror.centos.org/centos/5/extras/x86_64/repodata/primary.xml.gz
-Once line per file download needed. ``Packages/sources.xml`` will be reloaded
-at this time, so any source specification changes (new or modified
-sources in this file) will be reflected by the server at this point.
+One line per file download needed. ``Packages/sources.xml`` will
+be reloaded at this time, so any source specification changes (new
+or modified sources in this file) will be reflected by the server at
+this point.
+
+This process is much, much faster if you use the :ref:`native yum
+library support <native-yum-libraries>`.
Soft reload
-----------
@@ -287,6 +324,9 @@ download only missing sources.::
[0:3711] bcfg2-admin xcmd Packages.Reload
True
+This is done automatically any time ``Packages/sources.xml`` is
+updated.
+
Availability
============
@@ -317,6 +357,7 @@ need to use :ref:`BoundEntries <boundentries>`, e.g.::
type="yum" verify="false"/>
+.. _generating-client-configs:
Generating Client APT/Yum Configurations
========================================
@@ -332,7 +373,7 @@ to the yum config file you want to generate::
Then add the corresponding Path entry to your Yum bundle.
-.. versionadded:: 1.2.0
+.. versionadded:: 1.1.0
APT repository information can be generated automatically from
software sources using :doc:`./tgenshi/index` or :doc:`./tcheetah`. A
@@ -346,6 +387,99 @@ list of source urls are exposed in the client's metadata as
{% end %}\
+.. _native-yum-libraries:
+
+Using Native Yum Libraries
+==========================
+
+.. versionadded:: 1.2.0
+
+By default, Bcfg2 uses an internal implementation of Yum's dependency
+resolution and other routines so that the Bcfg2 server can be run on a
+host that does not support Yum itself. If you run the Bcfg2 server on
+a machine that does have Yum libraries, however, you can enable use of
+those native libraries in Bcfg2 by setting ``use_yum_libraries`` to
+``1`` in the ``[yum]`` section of ``Packages/packages.conf``.
+
+Benefits to this include:
+
+* Much lower memory usage by the ``bcfg2-server`` process.
+* Much faster ``Packages.Refresh`` behavior.
+* More accurate dependency resolution.
+* Support for package groups.
+
+Drawbacks include:
+
+* More disk I/O. In some cases, you may have to raise the open file
+ limit for the user who runs your Bcfg2 server process, particularly
+ if you have a lot of repositories.
+* Resolution of package dependencies is slower in some cases,
+ particularly after running ``Packages.Refresh``.
+
+Setting Yum Options
+-------------------
+
+In ``Packages/packages.conf``, any options you set in the ``[yum]``
+section other than ``use_yum_libraries`` will be passed along verbatim
+to the configuration of the Yum objects used in the Bcfg2 server. The
+following options are set by default, and should not generally be
+overridden:
+
+* ``cachedir`` is set to a hashed value unique to each distinct Yum
+ configuration. Don't set this unless you know what you're doing.
+* ``keepcache`` is set to ``0``; there is no benefit to changing this.
+* ``sslverify`` is set to ``0``; change this if you know what you're
+ doing.
+* ``reposdir`` is set to ``/dev/null`` to prevent the server's Yum
+ configuration from being read; do not change this.
+
+Package Groups
+--------------
+
+Yum package groups are supported by the native Yum libraries. To
+include a package group, use the ``group`` attribute of the
+``Package`` tag. You can use either the short group ID or the long
+group name::
+
+ <Package group="SNMP Support"/>
+ <Package group="system-management-snmp"/>
+
+.. _pulp-source-support:
+
+Pulp Support
+============
+
+.. versionadded:: 1.2.0
+
+Bcfg2 contains explicit support for repositories managed by Pulp
+(http://pulpproject.org/). Due to the amount of data about a
+repository that can be retrieved directly from Pulp, the only thing
+necessary to configure a Pulp repo is the repo ID::
+
+ <Sources>
+ <Group name="centos-6-x86_64">
+ <Source type="yum" pulp_id="centos-6-x86_64-os"/>
+ <Source type="yum" pulp_id="centos-6-x86_64-updates"/>
+ <Source type="yum" pulp_id="centos-6-x86_64-extras"/>
+ </Group>
+ </Sources>
+
+Pulp sources require some additional configuration. First, the Bcfg2
+server must have a valid ``/etc/pulp/consumer/consumer.conf`` that is
+readable by the user your Bcfg2 server runs as; the Pulp server,
+URLs, and so on, are determined from this.
+
+Secondly, in ``Packages/packages.conf`` you must set the following
+options in the ``[pulp]`` section:
+
+* ``username`` and ``password``: The username and password of a Pulp
+ user that will be used to register new clients and bind them to
+ repositories. Membership in the default ``consumer-users`` role is
+ sufficient.
+
+Bcfg2 clients using Pulp sources will be registered to the Pulp server
+as consumers, and will be bound to the appropriate repositories.
+
Debugging unexpected behavior
=============================
@@ -463,7 +597,7 @@ packages.conf
``packages.conf`` contains miscellaneous configuration options for the
Packages plugin. It understands the following directives:
-"global" section
+[global] section
----------------
* ``resolver``: Disable dependency resolution. Default is "enabled".
@@ -487,3 +621,20 @@ Packages plugin. It understands the following directives:
to "true", whereupon Packages will import the keys into the server's
key chain. Python RPM libraries must be installed for this to work.
+[yum] section
+-------------
+
+* ``use_yum_libraries``: Whether or not to use the :ref:`native yum
+ library support <native-yum-libraries>`. Default is ``0`` (false).
+
+All other options in the ``[yum]`` section will be passed along
+verbatim to the Yum configuration if you are using the native Yum
+library support.
+
+[pulp] section
+--------------
+
+* ``username`` and ``password``: The username and password of a Pulp
+ user that will be used to register new clients and bind them to
+ repositories. Membership in the default ``consumer-users`` role is
+ sufficient.
diff --git a/doc/server/plugins/generators/rules.txt b/doc/server/plugins/generators/rules.txt
index 6bc407371..925ee6419 100644
--- a/doc/server/plugins/generators/rules.txt
+++ b/doc/server/plugins/generators/rules.txt
@@ -115,29 +115,29 @@ See :ref:`client-tools-actions`
Service Tag
-----------
-+------------+-------------------------------+-----------------------------------------------------+
-| Name | Description | Values |
-+============+===============================+=====================================================+
-| mode | Per Service Mode (New in 1.0) | (manual | default | supervised | interactive_only ) |
-+------------+-------------------------------+-----------------------------------------------------+
-| name | Service name or regular | String or regex |
-| | expression | |
-+------------+-------------------------------+-----------------------------------------------------+
-| status | Should the service be on or | (on | off | ignore) |
-| | off (default: off). | |
-+------------+-------------------------------+-----------------------------------------------------+
-| target | Service command for restart | String |
-| | (default: restart) | |
-+------------+-------------------------------+-----------------------------------------------------+
-| type | Driver to use on the client | (chkconfig | deb | rc-update | smf | upstart) |
-| | to manage this service. | |
-+------------+-------------------------------+-----------------------------------------------------+
-| sequence | Order for service startup | integer |
-| | (debian services only) | |
-+------------+-------------------------------+-----------------------------------------------------+
-| parameters | Pass parameters to service | String |
-| | (Upstart services only) | |
-+------------+-------------------------------+-----------------------------------------------------+
++------------+-------------------------------+---------------------------------------------------------+
+| Name | Description | Values |
++============+===============================+=========================================================+
+| mode | Per Service Mode (New in 1.0) | (manual | default | supervised | interactive_only ) |
++------------+-------------------------------+---------------------------------------------------------+
+| name | Service name or regular | String or regex |
+| | expression | |
++------------+-------------------------------+---------------------------------------------------------+
+| status | Should the service be on or | (on | off | ignore) |
+| | off (default: off). | |
++------------+-------------------------------+---------------------------------------------------------+
+| target | Service command for restart | String |
+| | (default: restart) | |
++------------+-------------------------------+---------------------------------------------------------+
+| type | Driver to use on the client | (chkconfig | deb | rc-update | smf | upstart | |
+| | to manage this service. | systemd | freebsd | launchd ) |
++------------+-------------------------------+---------------------------------------------------------+
+| sequence | Order for service startup | integer |
+| | (debian services only) | |
++------------+-------------------------------+---------------------------------------------------------+
+| parameters | Pass parameters to service | String |
+| | (Upstart services only) | |
++------------+-------------------------------+---------------------------------------------------------+
Service mode descriptions
^^^^^^^^^^^^^^^^^^^^^^^^^
@@ -355,28 +355,11 @@ Using Regular Expressions in Rules
==================================
The ``name`` attribute in Rules supports the use of regular
-expressions to match multiple abstract configuration entries. For
-instance, to make all Service entries use the ``systemd``
-tool on Fedora 15 and the ``chkconfig`` tool on Fedora 14, you could
-do::
-
- <Rules priority="0">
- <Group name="fedora-15">
- <Service name=".*" type="systemd"/>
- </Group>
- <Group name="fedora-14">
- <Service name=".*" type="chkconfig"/>
- </Group>
- </Rules>
+expressions to match multiple abstract configuration entries.
Regular expressions are anchored at both ends, so ``<Service
name="bcfg2".../>`` will *not* match a Service named ``bcfg2-server``;
you'd have to explicitly specify ``<Service name="bcfg2.*".../>``.
Note that only one Rule can apply to any abstract entry, so you cannot
-specify multiple regexs to match the same rule. In the use case
-above, you would have to specify the services fully (except for type)
-in the bundles.
-
-Attributes specified in a bundle have precedence over attributes
-specified in Rules, so you can use Rules to set defaults.
+specify multiple regexs to match the same rule.
diff --git a/doc/server/plugins/generators/tcheetah.txt b/doc/server/plugins/generators/tcheetah.txt
index ef8bb5513..29fd7affe 100644
--- a/doc/server/plugins/generators/tcheetah.txt
+++ b/doc/server/plugins/generators/tcheetah.txt
@@ -28,7 +28,7 @@ files, ``template`` and ``info``. The template is a standard Cheetah
template with two additions:
* `self.metadata` is the client's :ref:`metadata <server-plugins-grouping-metadata-clientmetadata>`
-* `self.metadata.Properties.data` is an xml document of unstructured data
+* `self.metadata.Properties.xdata` is an xml document of unstructured data
The ``info`` file is formatted like ``:info`` files from Cfg.
@@ -47,7 +47,7 @@ self.metadata variables
self.metadata is an instance of the class ClientMetadata and documented
:ref:`here <server-plugins-grouping-metadata-clientmetadata>`.
-self.metadata.Properties.data
+self.metadata.Properties.xdata
=============================
.. note::
@@ -56,7 +56,7 @@ self.metadata.Properties.data
:ref:`server-plugins-connectors-properties` plugin in
``/etc/bcfg2.conf``.
-Properties.data is a python `ElementTree <http://codespeak.net/lxml/>`_
+Properties.xdata is a python `ElementTree <http://codespeak.net/lxml/>`_
object, loaded from the data in ``/var/lib/bcfg2/Properties/<properties
file>.xml``. That file should have a ``Properties`` node at its root.
@@ -76,13 +76,13 @@ You may use any of the ElementTree methods to access data in your
template. Several examples follow, each producing an identical result
on the host 'www.example.com'::
- $self.metadata.Properties['example.xml'].data.find('host').find('www.example.com').find('rootdev').text
- $self.metadata.Properties['example.xml'].data.find('host').find($self.metadata.hostname).find('rootdev').text
- ${self.metadata.Properties['example.xml'].data.xpath('host/www.example.com/rootdev')[0].text}
- ${self.metadata.Properties['example.xml'].data.xpath('host/' + self.metadata.hostname + '/rootdev')[0].text}
+ $self.metadata.Properties['example.xml'].xdata.find('host').find('www.example.com').find('rootdev').text
+ $self.metadata.Properties['example.xml'].xdata.find('host').find($self.metadata.hostname).find('rootdev').text
+ ${self.metadata.Properties['example.xml'].xdata.xpath('host/www.example.com/rootdev')[0].text}
+ ${self.metadata.Properties['example.xml'].xdata.xpath('host/' + self.metadata.hostname + '/rootdev')[0].text}
#set $path = 'host/' + $self.metadata.hostname + '/rootdev'
- ${self.metadata.Properties['example.xml'].data.xpath($path)[0].text}
- ${self.metadata.Properties['example.xml'].data.xpath(path)[0].text}
+ ${self.metadata.Properties['example.xml'].xdata.xpath($path)[0].text}
+ ${self.metadata.Properties['example.xml'].xdata.xpath(path)[0].text}
Other Variables
===============
diff --git a/doc/server/plugins/generators/tgenshi/clientsxml.txt b/doc/server/plugins/generators/tgenshi/clientsxml.txt
index 7305ba70d..7a8d1fcc4 100644
--- a/doc/server/plugins/generators/tgenshi/clientsxml.txt
+++ b/doc/server/plugins/generators/tgenshi/clientsxml.txt
@@ -22,7 +22,7 @@ There are two main advantages:
thing to note is how the `name` variable is handled - when
just referring to it the standard `${name}` syntax is used, but
when it is used as a variable in the expression to get the password,
- `password="${metadata.Properties['passwords.xml'].data.find('password').find('bcfg2-client').find(name).text}"`,
+ `password="${metadata.Properties['passwords.xml'].xdata.find('password').find('bcfg2-client').find(name).text}"`,
it is just referred to as `name`.
There is the disadvantage that sometimes 2 passes will be needed to get
@@ -53,7 +53,7 @@ Possible improvements:
profile="${profile}"
name="${name}"
uuid="${name}"
- password="${metadata.Properties['passwords.xml'].data.find('password').find('bcfg2-client').find(name).text}"
+ password="${metadata.Properties['passwords.xml'].xdata.find('password').find('bcfg2-client').find(name).text}"
address="${address}"
location="fixed"
secure="true"
@@ -64,7 +64,7 @@ Possible improvements:
profile="${profile}"
name="${name}"
uuid="${name}"
- password="${metadata.Properties['passwords.xml'].data.find('password').find('bcfg2-client').find(name).text}"
+ password="${metadata.Properties['passwords.xml'].xdata.find('password').find('bcfg2-client').find(name).text}"
location="floating"
secure="true"
/>\
diff --git a/doc/server/plugins/generators/tgenshi/test.txt b/doc/server/plugins/generators/tgenshi/test.txt
index dca578434..c047b88d0 100644
--- a/doc/server/plugins/generators/tgenshi/test.txt
+++ b/doc/server/plugins/generators/tgenshi/test.txt
@@ -45,8 +45,8 @@ This file just shows you what's available. It assumes a
{% end %}\
Two main ways to get the same property value:
- ${metadata.Properties['test.xml'].data.find('password').find('bcfg2').text}
- ${metadata.Properties['test.xml'].data.xpath('password/bcfg2')[0].text}
+ ${metadata.Properties['test.xml'].xdata.find('password').find('bcfg2').text}
+ ${metadata.Properties['test.xml'].xdata.xpath('password/bcfg2')[0].text}
One way to get information about metadata and properties:
@@ -55,28 +55,28 @@ This file just shows you what's available. It assumes a
${var} \
{% end %}
- dir(metadata.Properties.data):
- {% for var in dir(metadata.Properties.data) %}\
+ dir(metadata.Properties.xdata):
+ {% for var in dir(metadata.Properties.xdata) %}\
${var} \
{% end %}
- dir(metadata.Properties.data.entries):
- {% for var in dir(metadata.Properties.data.entries) %}\
+ dir(metadata.Properties.xdata.entries):
+ {% for var in dir(metadata.Properties.xdata.entries) %}\
${var} \
{% end %}
- dir(metadata.Properties.data.label):
- {% for var in dir(metadata.Properties.data.label) %}\
+ dir(metadata.Properties.xdata.label):
+ {% for var in dir(metadata.Properties.xdata.label) %}\
${var} \
{% end %}
- dir(metadata.Properties.data.name):
- {% for var in dir(metadata.Properties.data.name) %}\
+ dir(metadata.Properties.xdata.name):
+ {% for var in dir(metadata.Properties.xdata.name) %}\
${var} \
{% end %}
- dir(metadata.Properties.data.properties):
- {% for var in dir(metadata.Properties.data.properties) %}\
+ dir(metadata.Properties.xdata.properties):
+ {% for var in dir(metadata.Properties.xdata.properties) %}\
${var} \
{% end %}
@@ -113,13 +113,13 @@ this (below reformatted a little bit to fit in 80 columns)::
__weakref__ all bundles categories get_clients_by_group get_clients_by_profile
groups hostname inGrouppassword probes uuid
- dir(metadata.Properties.data):
+ dir(metadata.Properties.xdata):
HandleEvent Index __class__ __delattr__ __dict__ __doc__ __getattribute__
__hash__ __identifier__ __init__ __iter__ __module__ __new__ __reduce__
__reduce_ex__ __repr__ __setattr__ __str__ __weakref__ entries label name
properties
- dir(metadata.Properties.data.entries):
+ dir(metadata.Properties.xdata.entries):
__add__ __class__ __contains__ __delattr__ __delitem__ __delslice__ __doc__
__eq__ __ge__ __getattribute__ __getitem__ __getslice__ __gt__ __hash__
__iadd__ __imul__ __init__ __iter__ __le__ __len__ __lt__ __mul__ __ne__
@@ -127,7 +127,7 @@ this (below reformatted a little bit to fit in 80 columns)::
__setitem__ __setslice__ __str__ append count extend index insert pop remove
reverse sort
- dir(metadata.Properties.data.label):
+ dir(metadata.Properties.xdata.label):
__add__ __class__ __contains__ __delattr__ __doc__ __eq__ __ge__
__getattribute__ __getitem__ __getnewargs__ __getslice__ __gt__ __hash__
__init__ __le__ __len__ __lt__ __mod__ __mul__ __ne__ __new__ __reduce__
@@ -137,7 +137,7 @@ this (below reformatted a little bit to fit in 80 columns)::
rfind rindex rjust rpartition rsplit rstrip split splitlinesstartswith strip
swapcase title translate upper zfill
- dir(metadata.Properties.data.name):
+ dir(metadata.Properties.xdata.name):
__add__ __class__ __contains__ __delattr__ __doc__ __eq__ __ge__
__getattribute__ __getitem__ __getnewargs__ __getslice__ __gt__ __hash__
__init__ __le__ __len__ __lt__ __mod__ __mul__ __ne__ __new__ __reduce__
@@ -147,7 +147,7 @@ this (below reformatted a little bit to fit in 80 columns)::
rfind rindex rjust rpartition rsplit rstrip split splitlinesstartswith strip
swapcase title translate upper zfill
- dir(metadata.Properties.data.properties):
+ dir(metadata.Properties.xdata.properties):
__class__ __contains__ __copy__ __deepcopy__ __delattr__ __delitem__
__delslice__ __doc__ __getattribute__ __getitem__ __getslice__ __hash__
__init__ __iter__ __len__ __new__ __nonzero__ __reduce__ __reduce_ex__
diff --git a/doc/server/plugins/grouping/metadata.txt b/doc/server/plugins/grouping/metadata.txt
index 43cc6a2d7..fc8605115 100644
--- a/doc/server/plugins/grouping/metadata.txt
+++ b/doc/server/plugins/grouping/metadata.txt
@@ -281,9 +281,9 @@ This class provides query routines for the servers Metadata.
+==============================+================================================+===================+
| by_name(client) | Get ClientMetadata object for 'client' | ClientMetadata |
+------------------------------+------------------------------------------------+-------------------+
-| names_by_groups(group) | | |
+| names_by_groups(groups) | All client names in the list of 'groups' | List |
+------------------------------+------------------------------------------------+-------------------+
-| names_by_profiles(profile) | All clients names in 'profile' | List |
+| names_by_profiles(profiles) | All client names in the list of 'profiles' | List |
+------------------------------+------------------------------------------------+-------------------+
| all_clients() | All known client hostnames | List |
+------------------------------+------------------------------------------------+-------------------+
diff --git a/doc/server/plugins/structures/bundler/index.txt b/doc/server/plugins/structures/bundler/index.txt
index 0cc2a0e55..3184fe6eb 100644
--- a/doc/server/plugins/structures/bundler/index.txt
+++ b/doc/server/plugins/structures/bundler/index.txt
@@ -148,18 +148,25 @@ Use
.. warning::
- Group tags are not used inside of Genshi templates. You can get the
- same logic (and more) using Genshi conditionals.
+ ``<Group>`` and ``<Client>`` tags are allowed inside of Genshi
+ templates as of Bcfg2 1.2. However, they do not behave the same
+ as using a Genshi conditional, e.g.::
- .. code-block:: xml
-
- <py:if test="groupname in metadata.groups">
+ <py:if test="'groupname' in metadata.groups">
</py:if>
-
-Bcfg uses the Genshi API for templates, and performs a XML format
+
+ The conditional is evaluated when the template is rendered, so
+ code inside the conditional is not executed if the conditional
+ fails. A ``<Group>`` tag is evaluated *after* the template is
+ rendered, so code inside the tag is always executed. This is an
+ important distinction: if you have code that will fail on some
+ groups, you *must* use a Genshi conditional, not a ``<Group>``
+ tag. The same caveats apply to ``<Client>`` tags.
+
+Bcfg2 uses the Genshi API for templates, and performs a XML format
stream rendering of the template into an lxml entry, which is included
in the client configuration. :ref:`Client metadata <client-metadata>`
-is avilable inside of the template using the 'metadata' name. Note that
+is available inside of the template using the 'metadata' name. Note that
only the markup Genshi template format can be used, as the target output
format is XML.
@@ -210,19 +217,35 @@ and returns them in a newline delimited string.
.. code-block:: xml
- <Bundle name='networkinterfaces' xmlns:py="http://genshi.edgewall.org/">
+ <Bundle name="networkinterfaces" xmlns:py="http://genshi.edgewall.org/">
<?python
- files = $metadata.Probes["getmacs"].split("\n")
+ files = metadata.Probes["getmacs"].split("\n")
?>
- <Path py:for="file in files" name="/etc/sysconfig/network/ifcfg-eth-${file}" altsrc='/etc/ifcfg-template'/>
+ <Path py:for="file in files"
+ name="/etc/sysconfig/network/ifcfg-eth-${file}"
+ altsrc="/etc/ifcfg-template"/>
</Bundle>
.. note::
- * The use of the altsrc directive causes all ifcfg files to be handled by the same plugin and entry.
- * The <?python ?> blocks have only been available in genshi since 0.4 (http://genshi.edgewall.org/ticket/84)
+ * The use of the altsrc directive causes all ifcfg files to be
+ handled by the same plugin and entry.
+ * The <?python ?> blocks have only been available in genshi since
+ 0.4 (http://genshi.edgewall.org/ticket/84)
If you want a file to be only on a per-client basis, you can use an
-if declaration:
+if declaration.
+
+.. code-block:: xml
+
+ <Bundle name='bacula' xmlns:py="http://genshi.edgewall.org/">
+ <Path name="/etc/bacula/bconsole.conf"/>
+ <Path name="/etc/bacula/bacula-fd.conf"/>
+ <Path name="/etc/bacula/bacula-sd.conf"/>
+ <Path py:if="metadata.hostname == 'foo.bar.com'"
+ name="/etc/bacula/bacula-dir.conf"/>
+ </Bundle>
+
+or alternately
.. code-block:: xml
@@ -230,10 +253,12 @@ if declaration:
<Path name="/etc/bacula/bconsole.conf"/>
<Path name="/etc/bacula/bacula-fd.conf"/>
<Path name="/etc/bacula/bacula-sd.conf"/>
- <Path py:if="metadata.hostname == 'foo.bar.com'" name="/etc/bacula/bacula-dir.conf"/>
+ <py:if="metadata.hostname == 'foo.bar.com'">
+ <Path name="/etc/bacula/bacula-dir.conf"/>
+ </py:if>
</Bundle>
-or alternately:
+or yet another way
.. code-block:: xml
@@ -241,14 +266,16 @@ or alternately:
<Path name="/etc/bacula/bconsole.conf"/>
<Path name="/etc/bacula/bacula-fd.conf"/>
<Path name="/etc/bacula/bacula-sd.conf"/>
- <py:if test="metadata.hostname == 'foo.bar.com'">
+ <Client name="foo.bar.com">
<Path name="/etc/bacula/bacula-dir.conf"/>
- </py:if>
+ </Client>
</Bundle>
-The latter form is preferred if the if block contains multiple
-files. While this example is simple, the test in the if block can in
-fact be any python statement.
+The final form is preferred if there is no code inside the block that
+would fail on other clients.
+
+While these examples are simple, the test in the if block can in fact
+be any python statement.
.. _server-plugins-structures-bundler-index-examples:
diff --git a/doc/server/plugins/structures/defaults.txt b/doc/server/plugins/structures/defaults.txt
new file mode 100644
index 000000000..58b9feddb
--- /dev/null
+++ b/doc/server/plugins/structures/defaults.txt
@@ -0,0 +1,31 @@
+.. -*- mode: rst -*-
+
+.. _server-plugins-structures-defaults:
+
+==========
+ Defaults
+==========
+
+The Defaults plugin can be used to populate default attributes for
+entries. Defaults is *not* a Generator plugin, so it does not
+actually bind an entry; Defaults are applied after an entry has been
+bound, and only populate attributes that are not yet set.
+
+Like :ref:`server-plugins-generators-rules`, Defaults supports regular
+expressions in the name attribute.
+
+For instance, to make all Service entries use the ``systemd`` tool
+on Fedora 15 and the ``chkconfig`` tool on Fedora 14, you could do::
+
+ <Defaults priority="0">
+ <Group name="fedora-15">
+ <Service name=".*" type="systemd"/>
+ </Group>
+ <Group name="fedora-14">
+ <Service name=".*" type="chkconfig"/>
+ </Group>
+ </Defaults>
+
+If you were to specify a ``type`` attribute for a Service entry in
+Rules (or a ``type`` attribute for a BoundService entry in Bundler),
+that would take precedence over the default.
diff --git a/man/bcfg2.1 b/man/bcfg2.1
index 0ace97e8a..661153a15 100644
--- a/man/bcfg2.1
+++ b/man/bcfg2.1
@@ -98,7 +98,8 @@ specify these changes, but only install them on clients when
administrator supervision is available. Because collaborative
configuration is one of the remaining hard issues in configuration
management, these issues typically crop up in environments with several
-administrators and much configuration variety.
+administrators and much configuration variety. (This setting will be
+ignored if the -f option is also specified.)
.TP
.BR "\-n"
diff --git a/man/bcfg2.conf.5 b/man/bcfg2.conf.5
index def850b67..ba091d8b1 100644
--- a/man/bcfg2.conf.5
+++ b/man/bcfg2.conf.5
@@ -296,7 +296,8 @@ These options only affect client functionality, specified in the
.TP
.B decision
-Specify the server decision list mode (whitelist or blacklist).
+Specify the server decision list mode (whitelist or blacklist). (This
+setting will be ignored if the client is called with the -f option.)
.TP
.B drivers
diff --git a/misc/bcfg2.spec b/misc/bcfg2.spec
index 2519517d8..0c5ebbcc0 100644
--- a/misc/bcfg2.spec
+++ b/misc/bcfg2.spec
@@ -30,7 +30,7 @@ BuildRequires: %{lxmldep}
# %{rhel} wasn't set before rhel 6. so this checks for old RHEL
# %systems (and potentially very old Fedora systems, too)
-%if "%{_vendor}" == "redhat" && 0%{?rhel} < 6
+%if "%{_vendor}" == "redhat" && 0%{?rhel} <= 6
BuildRequires: python-sphinx10
# the python-sphinx10 package doesn't set sys.path correctly, so we
# have to do it for them
diff --git a/schemas/base.xsd b/schemas/base.xsd
index e8d677737..91b7ac8f5 100644
--- a/schemas/base.xsd
+++ b/schemas/base.xsd
@@ -16,7 +16,7 @@
<xsd:choice>
<xsd:element name='Group' type='ContainerType'/>
<xsd:element name='Client' type='ContainerType'/>
- <xsd:element name='Package' type='StructureEntry'/>
+ <xsd:element name='Package' type='PackageStructure'/>
<xsd:element name='Path' type='PathEntry'/>
<xsd:element name='Service' type='StructureEntry'/>
<xsd:element name='BoundPackage' type='PackageType'/>
diff --git a/schemas/bundle.xsd b/schemas/bundle.xsd
index 2dd77e9af..6b32434be 100644
--- a/schemas/bundle.xsd
+++ b/schemas/bundle.xsd
@@ -20,11 +20,12 @@
<xsd:complexType name='GroupType'>
<xsd:choice minOccurs='0' maxOccurs='unbounded'>
- <xsd:element name='Package' type='StructureEntry'>
+ <xsd:element name='Package' type='PackageStructure'>
<xsd:annotation>
<xsd:documentation>
Abstract implementation of a Package entry. The full
- specification will be included in Rules.
+ specification will be generated by a plugin such as
+ Packages.
</xsd:documentation>
</xsd:annotation>
</xsd:element>
@@ -145,11 +146,12 @@
<xsd:complexType name='BundleType'>
<xsd:choice minOccurs='0' maxOccurs='unbounded'>
- <xsd:element name='Package' type='StructureEntry'>
+ <xsd:element name='Package' type='PackageStructure'>
<xsd:annotation>
<xsd:documentation>
Abstract implementation of a Package entry. The full
- specification will be included in Rules.
+ specification will be generated by a plugin such as
+ Packages.
</xsd:documentation>
</xsd:annotation>
</xsd:element>
diff --git a/schemas/defaults.xsd b/schemas/defaults.xsd
new file mode 100644
index 000000000..27e749470
--- /dev/null
+++ b/schemas/defaults.xsd
@@ -0,0 +1,68 @@
+<xsd:schema xmlns:xsd="http://www.w3.org/2001/XMLSchema" xml:lang="en">
+
+ <xsd:annotation>
+ <xsd:documentation>
+ string enumeration definitions for bcfg2
+ Narayan Desai, Argonne National Laboratory
+ $Id$
+ </xsd:documentation>
+ </xsd:annotation>
+
+ <xsd:include schemaLocation="servicetype.xsd"/>
+ <xsd:include schemaLocation="types.xsd"/>
+ <xsd:include schemaLocation="pkgtype.xsd"/>
+
+ <xsd:complexType name="ActionType">
+ <xsd:attribute type="ActionTimingEnum" name="timing"/>
+ <xsd:attribute type="ActionWhenEnum" name="when"/>
+ <xsd:attribute type="ActionStatusEnum" name="status"/>
+ <xsd:attribute type="xsd:boolean" name="build"/>
+ <xsd:attribute type="xsd:string" name="name" use="required"/>
+ <xsd:attribute type="xsd:string" name="command"/>
+ </xsd:complexType>
+
+ <xsd:complexType name="PathType">
+ <xsd:attribute type="PathTypeEnum" name="type"/>
+ <xsd:attribute type="xsd:string" name="name" use="required"/>
+ <xsd:attribute type="xsd:string" name="dev_type"/>
+ <xsd:attribute type="xsd:string" name="major"/>
+ <xsd:attribute type="xsd:string" name="minor"/>
+ <xsd:attribute type="xsd:string" name="mode"/>
+ <xsd:attribute type="xsd:string" name="perms"/>
+ <xsd:attribute type="xsd:string" name="owner"/>
+ <xsd:attribute type="xsd:string" name="group"/>
+ <xsd:attribute type="xsd:string" name="recursive"/>
+ <xsd:attribute type="xsd:string" name="prune"/>
+ <xsd:attribute type="xsd:string" name="to"/>
+ <xsd:attribute type="xsd:string" name="vcstype"/>
+ <xsd:attribute type="xsd:string" name="revision"/>
+ <xsd:attribute type="xsd:string" name="sourceurl"/>
+ </xsd:complexType>
+
+ <xsd:complexType name="DContainerType">
+ <xsd:choice minOccurs="0" maxOccurs="unbounded">
+ <xsd:element name="Service" type="ServiceType"/>
+ <xsd:element name="Package" type="PackageType"/>
+ <xsd:element name="Path" type="PathType"/>
+ <xsd:element name="Action" type="ActionType"/>
+ <xsd:element name="Group" type="DContainerType"/>
+ <xsd:element name="Client" type="DContainerType"/>
+ </xsd:choice>
+ <xsd:attribute name="name" type="xsd:string"/>
+ <xsd:attribute name="negate" type="xsd:boolean"/>
+ </xsd:complexType>
+
+ <xsd:element name="Defaults">
+ <xsd:complexType>
+ <xsd:choice minOccurs="0" maxOccurs="unbounded">
+ <xsd:element name="Service" type="ServiceType"/>
+ <xsd:element name="Package" type="PackageType"/>
+ <xsd:element name="Path" type="PathType"/>
+ <xsd:element name="Action" type="ActionType"/>
+ <xsd:element name="Group" type="DContainerType"/>
+ <xsd:element name="Client" type="DContainerType"/>
+ </xsd:choice>
+ <xsd:attribute name="priority" type="xsd:integer" use="required"/>
+ </xsd:complexType>
+ </xsd:element>
+</xsd:schema>
diff --git a/schemas/deps.xsd b/schemas/deps.xsd
index 539d9a7e4..b1400c320 100644
--- a/schemas/deps.xsd
+++ b/schemas/deps.xsd
@@ -9,7 +9,7 @@
<xsd:complexType name='StructureEntry'>
<xsd:choice minOccurs='0' maxOccurs='unbounded'>
- <xsd:element name='Package' type='StructureEntry'/>
+ <xsd:element name='Package' type='PackageStructure'/>
<xsd:element name='Service' type='StructureEntry'/>
<xsd:element name='Path' type='StructureEntry'/>
</xsd:choice>
@@ -18,7 +18,7 @@
<xsd:complexType name='GroupType'>
<xsd:choice minOccurs='0' maxOccurs='unbounded'>
- <xsd:element name='Package' type='StructureEntry'/>
+ <xsd:element name='Package' type='PackageStructure'/>
<xsd:element name='Service' type='StructureEntry'/>
<xsd:element name='Path' type='StructureEntry'/>
<xsd:element name='Group' type='GroupType'/>
@@ -29,7 +29,7 @@
<xsd:element name='Dependencies'>
<xsd:complexType>
<xsd:choice minOccurs='0' maxOccurs='unbounded'>
- <xsd:element name='Package' type='StructureEntry'/>
+ <xsd:element name='Package' type='PackageStructure'/>
<xsd:element name='Service' type='StructureEntry'/>
<xsd:element name='Path' type='StructureEntry'/>
<xsd:element name='Group' type='GroupType'/>
diff --git a/schemas/grouppatterns.xsd b/schemas/grouppatterns.xsd
index 712d21f6c..f2bdceccd 100644
--- a/schemas/grouppatterns.xsd
+++ b/schemas/grouppatterns.xsd
@@ -7,20 +7,21 @@
</xsd:documentation>
</xsd:annotation>
- <xsd:complexType name='PatternType'>
- <xsd:choice minOccurs='1' maxOccurs='unbounded'>
+ <xsd:complexType name="PatternType">
+ <xsd:choice minOccurs="1" maxOccurs="unbounded">
<xsd:element name="NameRange" type="xsd:string"/>
<xsd:element name="NamePattern" type="xsd:string"/>
- <xsd:element name="Group" type="xsd:string" minOccurs='1'
- maxOccurs='unbounded'/>
+ <xsd:element name="Group" type="xsd:string" minOccurs="1"
+ maxOccurs="unbounded"/>
</xsd:choice>
</xsd:complexType>
- <xsd:element name='GroupPatterns'>
- <xsd:complexType>
- <xsd:choice minOccurs='1' maxOccurs='unbounded'>
- <xsd:element name='GroupPattern' type='PatternType'/>
- </xsd:choice>
- </xsd:complexType>
- </xsd:element>
+ <xsd:complexType name="GroupPatternsType">
+ <xsd:choice minOccurs="1" maxOccurs="unbounded">
+ <xsd:element name="GroupPattern" type="PatternType"/>
+ <xsd:element name="GroupPatterns" type="GroupPatternsType"/>
+ </xsd:choice>
+ </xsd:complexType>
+
+ <xsd:element name="GroupPatterns" type="GroupPatternsType"/>
</xsd:schema>
diff --git a/schemas/packages.xsd b/schemas/packages.xsd
index 8b5f31118..9f16a23c0 100644
--- a/schemas/packages.xsd
+++ b/schemas/packages.xsd
@@ -17,7 +17,7 @@
</xsd:simpleType>
<xsd:complexType name="sourceType">
- <xsd:sequence>
+ <xsd:sequence minOccurs="0" maxOccurs="unbounded">
<xsd:element name="Component" type="xsd:string" minOccurs="0"
maxOccurs="unbounded"/>
<xsd:choice>
@@ -26,14 +26,14 @@
<xsd:element name="Whitelist" type="xsd:string" minOccurs="0"
maxOccurs="unbounded"/>
</xsd:choice>
- <xsd:element name="Arch" type="xsd:string" minOccurs="1"
+ <xsd:element name="Arch" type="xsd:string" minOccurs="1"
maxOccurs="unbounded"/>
- <xsd:element name="GPGKey" type="xsd:string" minOccurs="0"
+ <xsd:element name="GPGKey" type="xsd:string" minOccurs="0"
maxOccurs="unbounded"/>
</xsd:sequence>
<xsd:attribute type="xsd:boolean" name="recommended"/>
<xsd:attribute type="sourceTypeEnum" name="type"/>
- <xsd:attribute type="xsd:string" name="id"/>
+ <xsd:attribute type="xsd:string" name="pulp_id"/>
<xsd:attribute type="xsd:string" name="url"/>
<xsd:attribute type="xsd:string" name="rawurl"/>
<xsd:attribute type="xsd:string" name="version"/>
diff --git a/schemas/pathentry.xsd b/schemas/pathentry.xsd
index 24be22612..40aa4ff2b 100644
--- a/schemas/pathentry.xsd
+++ b/schemas/pathentry.xsd
@@ -21,10 +21,13 @@
<xsd:complexType name='BoundPathEntry'>
<xsd:attribute type='xsd:string' name='name' use='required'/>
<xsd:attribute type='xsd:string' name='group' use='optional'/>
+ <xsd:attribute type='xsd:string' name='important' use='optional'/>
<xsd:attribute type='xsd:string' name='owner' use='optional'/>
+ <xsd:attribute type='xsd:string' name='paranoid' use='optional'/>
<xsd:attribute type='xsd:string' name='perms' use='optional'/>
<xsd:attribute type='xsd:string' name='prune' use='optional'/>
<xsd:attribute type='xsd:string' name='recursive' use='optional'/>
+ <xsd:attribute type='xsd:string' name='sensitive' use='optional'/>
<xsd:attribute type='xsd:string' name='to' use='optional'/>
<xsd:attribute type='xsd:string' name='type' use='optional'/>
<xsd:attributeGroup ref="py:genshiAttrs"/>
diff --git a/schemas/pkgtype.xsd b/schemas/pkgtype.xsd
index 70a466448..23768e518 100644
--- a/schemas/pkgtype.xsd
+++ b/schemas/pkgtype.xsd
@@ -13,20 +13,27 @@
<xsd:import namespace="http://genshi.edgewall.org/"
schemaLocation="genshi.xsd"/>
- <xsd:complexType name='PackageType'>
- <xsd:choice minOccurs='0' maxOccurs='unbounded'>
- <xsd:element name='Instance'>
+ <xsd:complexType name="PackageStructure">
+ <xsd:attribute type="xsd:string" name="name"/>
+ <xsd:attribute type="xsd:string" name="group"/>
+ <xsd:attribute type="xsd:string" name="verify" use="optional"/>
+ <xsd:attributeGroup ref="py:genshiAttrs"/>
+ </xsd:complexType>
+
+ <xsd:complexType name="PackageType">
+ <xsd:choice minOccurs="0" maxOccurs="unbounded">
+ <xsd:element name="Instance">
<xsd:complexType>
- <xsd:attribute name='arch' type='xsd:string'/>
- <xsd:attribute name='epoch' type='xsd:string'/>
- <xsd:attribute name='version' type='xsd:string'/>
- <xsd:attribute name='release' type='xsd:string'/>
- <xsd:attribute name='simplefile' type='xsd:string'/>
- <xsd:attribute name='pkg_verify' type='xsd:string'/>
- <xsd:attribute name='verify_flags' type='xsd:string'/>
- <xsd:attribute name='installed_action' type='xsd:string'/>
- <xsd:attribute name='version_fail_action' type='xsd:string'/>
- <xsd:attribute name='verify_fail_action' type='xsd:string'/>
+ <xsd:attribute name="arch" type="xsd:string"/>
+ <xsd:attribute name="epoch" type="xsd:string"/>
+ <xsd:attribute name="version" type="xsd:string"/>
+ <xsd:attribute name="release" type="xsd:string"/>
+ <xsd:attribute name="simplefile" type="xsd:string"/>
+ <xsd:attribute name="pkg_verify" type="xsd:boolean"/>
+ <xsd:attribute name="verify_flags" type="xsd:string"/>
+ <xsd:attribute name="installed_action" type="xsd:string"/>
+ <xsd:attribute name="version_fail_action" type="xsd:string"/>
+ <xsd:attribute name="verify_fail_action" type="xsd:string"/>
<xsd:attributeGroup ref="py:genshiAttrs"/>
</xsd:complexType>
</xsd:element>
@@ -38,17 +45,18 @@
<xsd:element ref="py:with"/>
<xsd:element ref="py:replace"/>
</xsd:choice>
- <xsd:attribute type='xsd:string' name='name' use="required"/>
- <xsd:attribute type='xsd:string' name='version'/>
- <xsd:attribute type='xsd:string' name='file'/>
- <xsd:attribute type='xsd:string' name='verify'/>
- <xsd:attribute type='xsd:string' name='simplefile'/>
- <xsd:attribute type='xsd:string' name='multiarch'/>
- <xsd:attribute type='xsd:string' name='srcs'/>
- <xsd:attribute type='xsd:string' name='type'/>
- <xsd:attribute type='xsd:string' name='bname'/>
- <xsd:attribute name='pkg_checks' type='xsd:string'/>
- <xsd:attribute name='verify_flags' type='xsd:string'/>
+ <xsd:attribute type="xsd:string" name="name"/>
+ <xsd:attribute type="xsd:string" name="group"/>
+ <xsd:attribute type="xsd:string" name="version"/>
+ <xsd:attribute type="xsd:string" name="file"/>
+ <xsd:attribute type="xsd:boolean" name="verify"/>
+ <xsd:attribute type="xsd:string" name="simplefile"/>
+ <xsd:attribute type="xsd:string" name="multiarch"/>
+ <xsd:attribute type="xsd:string" name="srcs"/>
+ <xsd:attribute type="PackageTypeEnum" name="type"/>
+ <xsd:attribute type="xsd:string" name="bname"/>
+ <xsd:attribute name="pkg_checks" type="xsd:string"/>
+ <xsd:attribute name="verify_flags" type="xsd:string"/>
<xsd:attributeGroup ref="py:genshiAttrs"/>
</xsd:complexType>
</xsd:schema>
diff --git a/schemas/rules.xsd b/schemas/rules.xsd
index 5446f9a95..193d63c99 100644
--- a/schemas/rules.xsd
+++ b/schemas/rules.xsd
@@ -16,11 +16,12 @@
schemaLocation="genshi.xsd"/>
<xsd:complexType name='ActionType'>
- <xsd:attribute type='ActionTimingEnum' name='timing' use='required'/>
- <xsd:attribute type='ActionWhenEnum' name='when' use='required'/>
- <xsd:attribute type='ActionStatusEnum' name='status' use='required'/>
- <xsd:attribute type='xsd:string' name='name' use='required'/>
- <xsd:attribute type='xsd:string' name='command' use='required'/>
+ <xsd:attribute type='ActionTimingEnum' name='timing'/>
+ <xsd:attribute type='ActionWhenEnum' name='when'/>
+ <xsd:attribute type='ActionStatusEnum' name='status'/>
+ <xsd:attribute type="xsd:boolean" name="build"/>
+ <xsd:attribute type='xsd:string' name='name'/>
+ <xsd:attribute type='xsd:string' name='command'/>
<xsd:attributeGroup ref="py:genshiAttrs"/>
</xsd:complexType>
diff --git a/schemas/servicetype.xsd b/schemas/servicetype.xsd
index 07971a427..f88260c39 100644
--- a/schemas/servicetype.xsd
+++ b/schemas/servicetype.xsd
@@ -9,15 +9,16 @@
</xsd:documentation>
</xsd:annotation>
+ <xsd:include schemaLocation="types.xsd"/>
<xsd:import namespace="http://genshi.edgewall.org/"
schemaLocation="genshi.xsd"/>
- <xsd:complexType name='ServiceType'>
- <xsd:choice minOccurs='0' maxOccurs='unbounded'>
- <xsd:element name='User'>
+ <xsd:complexType name="ServiceType">
+ <xsd:choice minOccurs="0" maxOccurs="unbounded">
+ <xsd:element name="User">
<xsd:complexType>
- <xsd:attribute name='address' type='xsd:string' use='required'/>
- <xsd:attribute name='mask' type='xsd:string' use='required'/>
+ <xsd:attribute name="address" type="xsd:string" use="required"/>
+ <xsd:attribute name="mask" type="xsd:string" use="required"/>
</xsd:complexType>
</xsd:element>
<xsd:element ref="py:def"/>
@@ -28,18 +29,18 @@
<xsd:element ref="py:with"/>
<xsd:element ref="py:replace"/>
</xsd:choice>
- <xsd:attribute name='name' type='xsd:string' use='required'/>
- <xsd:attribute name='status' type='xsd:string' use='required'/>
- <xsd:attribute name='type' type='xsd:string' use='required'/>
- <xsd:attribute name='port' type='xsd:string'/>
- <xsd:attribute name='protocol' type='xsd:string'/>
- <xsd:attribute name='mode' type='xsd:string'/>
- <xsd:attribute name='custom' type='xsd:string'/>
- <xsd:attribute name='FMRI' type='xsd:string'/>
- <xsd:attribute name='supervised' type='xsd:string'/>
- <xsd:attribute name='sequence' type='xsd:string'/>
- <xsd:attribute name='target' type='xsd:string'/>
- <xsd:attribute name='parameters' type='xsd:string'/>
+ <xsd:attribute name="name" type="xsd:string" use="required"/>
+ <xsd:attribute name="status" type="StatusEnum"/>
+ <xsd:attribute name="type" type="ServiceTypeEnum"/>
+ <xsd:attribute name="port" type="xsd:string"/>
+ <xsd:attribute name="protocol" type="xsd:string"/>
+ <xsd:attribute name="mode" type="xsd:string"/>
+ <xsd:attribute name="custom" type="xsd:string"/>
+ <xsd:attribute name="FMRI" type="xsd:string"/>
+ <xsd:attribute name="supervised" type="xsd:string"/>
+ <xsd:attribute name="sequence" type="xsd:string"/>
+ <xsd:attribute name="target" type="xsd:string"/>
+ <xsd:attribute name="parameters" type="xsd:string"/>
<xsd:attributeGroup ref="py:genshiAttrs"/>
</xsd:complexType>
diff --git a/schemas/types.xsd b/schemas/types.xsd
index 44d2d3df9..dde7a856b 100644
--- a/schemas/types.xsd
+++ b/schemas/types.xsd
@@ -39,6 +39,7 @@
<xsd:restriction base='xsd:string'>
<xsd:enumeration value='on'/>
<xsd:enumeration value='off'/>
+ <xsd:enumeration value="ignore"/>
</xsd:restriction>
</xsd:simpleType>
@@ -64,4 +65,17 @@
</xsd:restriction>
</xsd:simpleType>
+ <xsd:simpleType name="ServiceTypeEnum">
+ <xsd:restriction base="xsd:string">
+ <xsd:enumeration value="chkconfig"/>
+ <xsd:enumeration value="deb"/>
+ <xsd:enumeration value="rc-update"/>
+ <xsd:enumeration value="smf"/>
+ <xsd:enumeration value="upstart"/>
+ <xsd:enumeration value="systemd"/>
+ <xsd:enumeration value="launchd"/>
+ <xsd:enumeration value="freebsd"/>
+ </xsd:restriction>
+ </xsd:simpleType>
+
</xsd:schema>
diff --git a/setup.py b/setup.py
index d1642a896..2498bb8d5 100644..100755
--- a/setup.py
+++ b/setup.py
@@ -134,6 +134,7 @@ setup(cmdclass=cmdclass,
"Bcfg2.Server.Hostbase.hostbase",
"Bcfg2.Server.Lint",
"Bcfg2.Server.Plugins",
+ "Bcfg2.Server.Plugins.Packages",
"Bcfg2.Server.Reports",
"Bcfg2.Server.Reports.reports",
"Bcfg2.Server.Reports.reports.templatetags",
diff --git a/src/lib/Bcfg2Py3k.py b/src/lib/Bcfg2Py3k.py
index c9e48a49b..606379d1f 100644
--- a/src/lib/Bcfg2Py3k.py
+++ b/src/lib/Bcfg2Py3k.py
@@ -79,3 +79,8 @@ if sys.hexversion >= 0x03000000:
else:
def fprint(s, f):
print >> f, s
+
+if sys.hexversion >= 0x03000000:
+ from io import FileIO as file
+else:
+ file = file
diff --git a/src/lib/Client/Frame.py b/src/lib/Client/Frame.py
index dec3b42c2..eca8960c1 100644
--- a/src/lib/Client/Frame.py
+++ b/src/lib/Client/Frame.py
@@ -172,23 +172,23 @@ class Frame:
# Need to process decision stuff early so that dryrun mode works with it
self.whitelist = [entry for entry in self.states \
if not self.states[entry]]
- if self.setup['decision'] == 'whitelist':
- dwl = self.setup['decision_list']
- w_to_rem = [e for e in self.whitelist \
- if not matches_white_list(e, dwl)]
- if w_to_rem:
- self.logger.info("In whitelist mode: suppressing installation of:")
- self.logger.info(["%s:%s" % (e.tag, e.get('name')) for e in w_to_rem])
- self.whitelist = [x for x in self.whitelist \
- if x not in w_to_rem]
-
- elif self.setup['decision'] == 'blacklist':
- b_to_rem = [e for e in self.whitelist \
- if not passes_black_list(e, self.setup['decision_list'])]
- if b_to_rem:
- self.logger.info("In blacklist mode: suppressing installation of:")
- self.logger.info(["%s:%s" % (e.tag, e.get('name')) for e in b_to_rem])
- self.whitelist = [x for x in self.whitelist if x not in b_to_rem]
+ if not self.setup['file']:
+ if self.setup['decision'] == 'whitelist':
+ dwl = self.setup['decision_list']
+ w_to_rem = [e for e in self.whitelist \
+ if not matches_white_list(e, dwl)]
+ if w_to_rem:
+ self.logger.info("In whitelist mode: suppressing installation of:")
+ self.logger.info(["%s:%s" % (e.tag, e.get('name')) for e in w_to_rem])
+ self.whitelist = [x for x in self.whitelist \
+ if x not in w_to_rem]
+ elif self.setup['decision'] == 'blacklist':
+ b_to_rem = [e for e in self.whitelist \
+ if not passes_black_list(e, self.setup['decision_list'])]
+ if b_to_rem:
+ self.logger.info("In blacklist mode: suppressing installation of:")
+ self.logger.info(["%s:%s" % (e.tag, e.get('name')) for e in b_to_rem])
+ self.whitelist = [x for x in self.whitelist if x not in b_to_rem]
# take care of important entries first
if not self.dryrun and not self.setup['bundle']:
@@ -206,7 +206,8 @@ class Frame:
continue
try:
self.states[cfile] = tl[0].InstallPath(cfile)
- tl[0].modified.append(cfile)
+ if self.states[cfile]:
+ tl[0].modified.append(cfile)
except:
self.logger.error("Unexpected tool failure",
exc_info=1)
diff --git a/src/lib/Client/Tools/APT.py b/src/lib/Client/Tools/APT.py
index d268fe9f4..338ec98fd 100644
--- a/src/lib/Client/Tools/APT.py
+++ b/src/lib/Client/Tools/APT.py
@@ -8,6 +8,7 @@ warnings.filterwarnings("ignore", "apt API not stable yet",
warnings.filterwarnings("ignore", "Accessed deprecated property Package.installedVersion, please see the Version class for alternatives.", DeprecationWarning)
warnings.filterwarnings("ignore", "Accessed deprecated property Package.candidateVersion, please see the Version class for alternatives.", DeprecationWarning)
warnings.filterwarnings("ignore", "Deprecated, please use 'is_installed' instead", DeprecationWarning)
+warnings.filterwarnings("ignore", "Deprecated, please use 'mark_delete()' instead", DeprecationWarning)
warnings.filterwarnings("ignore", "Attribute 'IsUpgradable' of the 'apt_pkg.DepCache' object is deprecated, use 'is_upgradable' instead.", DeprecationWarning)
warnings.filterwarnings("ignore", "Attribute 'VersionList' of the 'apt_pkg.Package' object is deprecated, use 'version_list' instead.", DeprecationWarning)
warnings.filterwarnings("ignore", "Attribute 'VerStr' of the 'apt_pkg.Version' object is deprecated, use 'ver_str' instead.", DeprecationWarning)
diff --git a/src/lib/Client/Tools/DebInit.py b/src/lib/Client/Tools/DebInit.py
index d6ce16c52..022332602 100644
--- a/src/lib/Client/Tools/DebInit.py
+++ b/src/lib/Client/Tools/DebInit.py
@@ -35,10 +35,12 @@ class DebInit(Bcfg2.Client.Tools.SvcTool):
if entry.get('sequence'):
if (deb_version in DEBIAN_OLD_STYLE_BOOT_SEQUENCE or
- deb_version.startswith('5')):
+ deb_version.startswith('5') or
+ os.path.exists('/etc/init.d/.legacy-bootordering')):
start_sequence = int(entry.get('sequence'))
kill_sequence = 100 - start_sequence
else:
+ start_sequence = None
self.logger.warning("Your debian version boot sequence is "
"dependency based \"sequence\" attribute "
"will be ignored.")
diff --git a/src/lib/Client/Tools/POSIX.py b/src/lib/Client/Tools/POSIX.py
index a7a0c4f63..372d4d9e4 100644
--- a/src/lib/Client/Tools/POSIX.py
+++ b/src/lib/Client/Tools/POSIX.py
@@ -115,13 +115,6 @@ class POSIX(Bcfg2.Client.Tools.Tool):
setup.parse([])
ppath = setup['ppath']
max_copies = setup['max_copies']
- """
- Python uses the OS mknod(2) implementation which modifies the mode
- based on the umask of the running process (at least on some Linuxes
- that were tested). We set this to zero so that POSIX-related paths
- will be created as specified in the Bcfg2 configuration.
- """
- os.umask(0)
def canInstall(self, entry):
"""Check if entry is complete for installation."""
@@ -257,6 +250,13 @@ class POSIX(Bcfg2.Client.Tools.Tool):
os.mknod(entry.get('name'), mode, device)
else:
os.mknod(entry.get('name'), mode)
+ """
+ Python uses the OS mknod(2) implementation which modifies the
+ mode based on the umask of the running process. Therefore, the
+ following chmod(2) call is needed to make sure the permissions
+ are set as specified by the user.
+ """
+ os.chmod(entry.get('name'), mode)
os.chown(entry.get('name'), normUid(entry), normGid(entry))
return True
except KeyError:
diff --git a/src/lib/Client/Tools/YUMng.py b/src/lib/Client/Tools/YUMng.py
index 24605ca44..04174b3a1 100644
--- a/src/lib/Client/Tools/YUMng.py
+++ b/src/lib/Client/Tools/YUMng.py
@@ -147,6 +147,14 @@ class YUMng(Bcfg2.Client.Tools.PkgTool):
def __init__(self, logger, setup, config):
self.yb = yum.YumBase()
+
+ if setup['debug']:
+ self.yb.preconf.debuglevel = 3
+ elif setup['verbose']:
+ self.yb.preconf.debuglevel = 2
+ else:
+ self.yb.preconf.debuglevel = 1
+
Bcfg2.Client.Tools.PkgTool.__init__(self, logger, setup, config)
self.ignores = [entry.get('name') for struct in config \
for entry in struct \
@@ -488,9 +496,10 @@ class YUMng(Bcfg2.Client.Tools.PkgTool):
package_fail = True
stat['version_fail'] = True
# Just chose the first pkg for the error message
- self.logger.info(" Wrong version installed. "\
- "Want %s, but have %s" % (nevraString(nevra),
- nevraString(POs[0])))
+ self.logger.info(" %s: Wrong version installed. "
+ "Want %s, but have %s" % (entry.get("name"),
+ nevraString(nevra),
+ nevraString(POs[0])))
qtext_versions.append("U(%s)" % str(POs[0]))
continue
diff --git a/src/lib/Client/Tools/__init__.py b/src/lib/Client/Tools/__init__.py
index 88609c2f6..9d0c69892 100644
--- a/src/lib/Client/Tools/__init__.py
+++ b/src/lib/Client/Tools/__init__.py
@@ -110,7 +110,8 @@ class Tool:
try:
func = getattr(self, "Install%s" % (entry.tag))
states[entry] = func(entry)
- self.modified.append(entry)
+ if states[entry]:
+ self.modified.append(entry)
except:
self.logger.error("Unexpected failure of install method for entry type %s" \
% (entry.tag), exc_info=1)
diff --git a/src/lib/Options.py b/src/lib/Options.py
index 6b3110107..fcd9107a9 100644
--- a/src/lib/Options.py
+++ b/src/lib/Options.py
@@ -201,7 +201,8 @@ INSTALL_PREFIX = Option('Installation location', cf=('server', 'prefix'),
default=DEFAULT_INSTALL_PREFIX, odesc='</path>')
SENDMAIL_PATH = Option('Path to sendmail', cf=('reports', 'sendmailpath'),
default='/usr/lib/sendmail')
-INTERACTIVE = Option('Prompt the user for each change', default=False,
+INTERACTIVE = Option('Run interactively, prompting the user for each change',
+ default=False,
cmd='-I', )
ENCODING = Option('Encoding of cfg files',
default='UTF-8',
@@ -253,7 +254,6 @@ SERVER_REPOSITORY = Option('Server repository path', '/var/lib/bcfg2',
SERVER_PLUGINS = Option('Server plugin list', cf=('server', 'plugins'),
# default server plugins
default=[
- 'Base',
'Bundler',
'Cfg',
'Metadata',
@@ -313,7 +313,10 @@ CLIENT_DRYRUN = Option('Do not actually change the system',
CLIENT_EXTRA_DISPLAY = Option('enable extra entry output',
default=False, cmd='-e', )
CLIENT_PARANOID = Option('Make automatic backups of config files',
- default=False, cmd='-P', cf=('client', 'paranoid'))
+ default=False,
+ cmd='-P',
+ cook=get_bool,
+ cf=('client', 'paranoid'))
CLIENT_DRIVERS = Option('Specify tool driver set', cmd='-D',
cf=('client', 'drivers'),
odesc="<driver1,driver2>", cook=list_split,
diff --git a/src/lib/SSLServer.py b/src/lib/SSLServer.py
index 21bf48d3e..6d053b802 100644
--- a/src/lib/SSLServer.py
+++ b/src/lib/SSLServer.py
@@ -47,11 +47,10 @@ class XMLRPCDispatcher (SimpleXMLRPCServer.SimpleXMLRPCDispatcher):
params = (address, ) + params
response = self.instance._dispatch(method, params, self.funcs)
# py3k compatibility
- if isinstance(response, bool) or isinstance(response, str) \
- or isinstance(response, list):
- response = (response, )
- else:
+ if type(response) not in [bool, str, list, dict]:
response = (response.decode('utf-8'), )
+ else:
+ response = (response, )
raw_response = xmlrpclib.dumps(response, methodresponse=1,
allow_none=self.allow_none,
encoding=self.encoding)
@@ -289,21 +288,27 @@ class XMLRPCRequestHandler (SimpleXMLRPCServer.SimpleXMLRPCRequestHandler):
except:
(type, msg) = sys.exc_info()[:2]
if str(type) == 'socket.error' and msg[0] == 32:
- self.logger.warning("Connection dropped from %s" % self.client_address[0])
+ self.logger.warning("Connection dropped from %s" %
+ self.client_address[0])
elif str(type) == 'socket.error' and msg[0] == 104:
- self.logger.warning("Connection reset by peer: %s" % self.client_address[0])
+ self.logger.warning("Connection reset by peer: %s" %
+ self.client_address[0])
elif str(type) == 'ssl.SSLError':
- self.logger.warning("SSLError handling client %s: %s" % \
- (self.client_address[0], msg))
+ self.logger.warning("SSLError handling client %s: %s" %
+ (self.client_address[0], msg))
else:
- self.logger.error("Error sending response (%s): %s" % \
- (type, msg))
+ self.logger.error("Error sending response (%s): %s" %
+ (type, msg))
def finish(self):
# shut down the connection
if not self.wfile.closed:
- self.wfile.flush()
- self.wfile.close()
+ try:
+ self.wfile.flush()
+ self.wfile.close()
+ except socket.error:
+ err = sys.exc_info()[1]
+ self.logger.warning("Error closing connection: %s" % err)
self.rfile.close()
diff --git a/src/lib/Server/Admin/Init.py b/src/lib/Server/Admin/Init.py
index 9a6ad9de9..aba6bbd32 100644
--- a/src/lib/Server/Admin/Init.py
+++ b/src/lib/Server/Admin/Init.py
@@ -36,9 +36,9 @@ web_debug = True
[communication]
protocol = %s
password = %s
-certificate = %s/%s
-key = %s/%s
-ca = %s/%s
+certificate = %s
+key = %s
+ca = %s
[components]
bcfg2 = %s
@@ -103,12 +103,15 @@ plugin_list = ['Account',
'TGenshi']
# Default list of plugins to use
-default_plugins = ['Bundler',
- 'Cfg',
- 'Metadata',
- 'Pkgmgr',
- 'Rules',
- 'SSHbase']
+default_plugins = Bcfg2.Options.SERVER_PLUGINS.default
+
+
+def get_input(prompt):
+ """py3k compatible function to get input"""
+ try:
+ return raw_input(prompt)
+ except NameError:
+ return input(prompt)
def gen_password(length):
@@ -144,12 +147,7 @@ def create_key(hostname, keypath, certpath, country, state, location):
def create_conf(confpath, confdata, keypath):
# Don't overwrite existing bcfg2.conf file
if os.path.exists(confpath):
- # py3k compatibility
- try:
- result = raw_input("\nWarning: %s already exists. "
- "Overwrite? [y/N]: " % confpath)
- except NameError:
- result = input("\nWarning: %s already exists. "
+ result = get_input("\nWarning: %s already exists. "
"Overwrite? [y/N]: " % confpath)
if result not in ['Y', 'y']:
print("Leaving %s unchanged" % confpath)
@@ -211,13 +209,8 @@ class Init(Bcfg2.Server.Admin.Mode):
def _prompt_hostname(self):
"""Ask for the server hostname."""
- # py3k compatibility
- try:
- data = raw_input("What is the server's hostname [%s]: " %
- socket.getfqdn())
- except NameError:
- data = input("What is the server's hostname [%s]: " %
- socket.getfqdn())
+ data = get_input("What is the server's hostname [%s]: " %
+ socket.getfqdn())
if data != '':
self.shostname = data
else:
@@ -225,36 +218,21 @@ class Init(Bcfg2.Server.Admin.Mode):
def _prompt_config(self):
"""Ask for the configuration file path."""
- # py3k compatibility
- try:
- newconfig = raw_input("Store Bcfg2 configuration in [%s]: " %
- self.configfile)
- except NameError:
- newconfig = input("Store Bcfg2 configuration in [%s]: " %
- self.configfile)
+ newconfig = get_input("Store Bcfg2 configuration in [%s]: " %
+ self.configfile)
if newconfig != '':
- self.configfile = newconfig
+ self.configfile = os.path.abspath(newconfig)
def _prompt_repopath(self):
"""Ask for the repository path."""
while True:
- # py3k compatibility
- try:
- newrepo = raw_input("Location of Bcfg2 repository [%s]: " %
- self.repopath)
- except NameError:
- newrepo = input("Location of Bcfg2 repository [%s]: " %
- self.repopath)
+ newrepo = get_input("Location of Bcfg2 repository [%s]: " %
+ self.repopath)
if newrepo != '':
- self.repopath = newrepo
+ self.repopath = os.path.abspath(newrepo)
if os.path.isdir(self.repopath):
- # py3k compatibility
- try:
- response = raw_input("Directory %s exists. Overwrite? [y/N]:" \
- % self.repopath)
- except NameError:
- response = input("Directory %s exists. Overwrite? [y/N]:" \
- % self.repopath)
+ response = get_input("Directory %s exists. Overwrite? [y/N]:" \
+ % self.repopath)
if response.lower().strip() == 'y':
break
else:
@@ -270,13 +248,8 @@ class Init(Bcfg2.Server.Admin.Mode):
def _prompt_server(self):
"""Ask for the server name."""
- # py3k compatibility
- try:
- newserver = raw_input("Input the server location [%s]: " %
- self.server_uri)
- except NameError:
- newserver = input("Input the server location [%s]: " %
- self.server_uri)
+ newserver = get_input("Input the server location [%s]: " %
+ self.server_uri)
if newserver != '':
self.server_uri = newserver
@@ -288,32 +261,19 @@ class Init(Bcfg2.Server.Admin.Mode):
prompt += ': '
while True:
try:
- # py3k compatibility
- try:
- osidx = int(raw_input(prompt))
- except NameError:
- osidx = int(input(prompt))
+ osidx = int(get_input(prompt))
self.os_sel = os_list[osidx - 1][1]
break
except ValueError:
continue
def _prompt_plugins(self):
- # py3k compatibility
- try:
- default = raw_input("Use default plugins? (%s) [Y/n]: " %
- ''.join(default_plugins)).lower()
- except NameError:
- default = input("Use default plugins? (%s) [Y/n]: " %
+ default = get_input("Use default plugins? (%s) [Y/n]: " %
''.join(default_plugins)).lower()
if default != 'y' or default != '':
while True:
plugins_are_valid = True
- # py3k compatibility
- try:
- plug_str = raw_input("Specify plugins: ")
- except NameError:
- plug_str = input("Specify plugins: ")
+ plug_str = get_input("Specify plugins: ")
plugins = plug_str.split(',')
for plugin in plugins:
plugin = plugin.strip()
@@ -327,42 +287,26 @@ class Init(Bcfg2.Server.Admin.Mode):
"""Ask for the key details (country, state, and location)."""
print("The following questions affect SSL certificate generation.")
print("If no data is provided, the default values are used.")
- # py3k compatibility
- try:
- newcountry = raw_input("Country name (2 letter code) for certificate: ")
- except NameError:
- newcountry = input("Country name (2 letter code) for certificate: ")
+ newcountry = get_input("Country name (2 letter code) for certificate: ")
if newcountry != '':
if len(newcountry) == 2:
self.country = newcountry
else:
while len(newcountry) != 2:
- # py3k compatibility
- try:
- newcountry = raw_input("2 letter country code (eg. US): ")
- except NameError:
- newcountry = input("2 letter country code (eg. US): ")
+ newcountry = get_input("2 letter country code (eg. US): ")
if len(newcountry) == 2:
self.country = newcountry
break
else:
self.country = 'US'
- # py3k compatibility
- try:
- newstate = raw_input("State or Province Name (full name) for certificate: ")
- except NameError:
- newstate = input("State or Province Name (full name) for certificate: ")
+ newstate = get_input("State or Province Name (full name) for certificate: ")
if newstate != '':
self.state = newstate
else:
self.state = 'Illinois'
- # py3k compatibility
- try:
- newlocation = raw_input("Locality Name (eg, city) for certificate: ")
- except NameError:
- newlocation = input("Locality Name (eg, city) for certificate: ")
+ newlocation = get_input("Locality Name (eg, city) for certificate: ")
if newlocation != '':
self.location = newlocation
else:
@@ -389,26 +333,27 @@ class Init(Bcfg2.Server.Admin.Mode):
def init_repo(self):
"""Setup a new repo and create the content of the configuration file."""
- keypath = os.path.dirname(os.path.abspath(self.configfile))
+ keypath = os.path.dirname(self.configfile)
+ kpath = os.path.join(keypath, 'bcfg2.key')
+ cpath = os.path.join(keypath, 'bcfg2.crt')
+
confdata = config % (self.repopath,
- ','.join(self.opts['plugins']),
+ ','.join(self.plugins),
self.opts['sendmail'],
self.opts['proto'],
self.password,
- keypath, 'bcfg2.crt',
- keypath, 'bcfg2.key',
- keypath, 'bcfg2.crt',
+ cpath,
+ kpath,
+ cpath,
self.server_uri)
# Create the configuration file and SSL key
create_conf(self.configfile, confdata, keypath)
- kpath = keypath + '/bcfg2.key'
- cpath = keypath + '/bcfg2.crt'
create_key(self.shostname, kpath, cpath, self.country,
self.state, self.location)
# Create the repository
- path = "%s/%s" % (self.repopath, 'etc')
+ path = os.path.join(self.repopath, 'etc')
try:
os.makedirs(path)
self._init_plugins()
diff --git a/src/lib/Server/Admin/__init__.py b/src/lib/Server/Admin/__init__.py
index 41c485d6c..96d9703ba 100644
--- a/src/lib/Server/Admin/__init__.py
+++ b/src/lib/Server/Admin/__init__.py
@@ -61,7 +61,10 @@ class Mode(object):
def get_repo_path(self):
"""Return repository path"""
- return self.cfp.get('server', 'repository')
+ try:
+ return self.cfp.get('server', 'repository')
+ except ConfigParser.NoSectionError:
+ self.errExit("Unable to find server section in bcfg2.conf")
def load_stats(self, client):
stats = lxml.etree.parse("%s/etc/statistics.xml" %
diff --git a/src/lib/Server/Core.py b/src/lib/Server/Core.py
index 2d735133b..daa439db1 100644
--- a/src/lib/Server/Core.py
+++ b/src/lib/Server/Core.py
@@ -313,7 +313,7 @@ class Core(Component):
except:
logger.error("error in BindStructure", exc_info=1)
self.validate_goals(meta, config)
- logger.info("Generated config for %s in %.03fs" % \
+ logger.info("Generated config for %s in %.03f seconds" % \
(client, time.time() - start))
return config
diff --git a/src/lib/Server/Lint/Bundles.py b/src/lib/Server/Lint/Bundles.py
index 67ae14fbd..472915cfd 100644
--- a/src/lib/Server/Lint/Bundles.py
+++ b/src/lib/Server/Lint/Bundles.py
@@ -10,11 +10,9 @@ class Bundles(Bcfg2.Server.Lint.ServerPlugin):
self.missing_bundles()
for bundle in self.core.plugins['Bundler'].entries.values():
if self.HandlesFile(bundle.name):
- if (Bcfg2.Server.Plugins.Bundler.have_genshi and
- type(bundle) is
+ if (not Bcfg2.Server.Plugins.Bundler.have_genshi or
+ type(bundle) is not
Bcfg2.Server.Plugins.SGenshi.SGenshiTemplateFile):
- self.sgenshi_groups(bundle)
- else:
self.bundle_names(bundle)
def missing_bundles(self):
diff --git a/src/lib/Server/Lint/Comments.py b/src/lib/Server/Lint/Comments.py
index 1ea5f295e..19fae1b08 100644
--- a/src/lib/Server/Lint/Comments.py
+++ b/src/lib/Server/Lint/Comments.py
@@ -57,7 +57,7 @@ class Comments(Bcfg2.Server.Lint.ServerPlugin):
try:
xdata = lxml.etree.XML(bundle.data)
rtype = "bundler"
- except AttributeError:
+ except (lxml.etree.XMLSyntaxError, AttributeError):
xdata = lxml.etree.parse(bundle.template.filepath).getroot()
rtype = "sgenshi"
diff --git a/src/lib/Server/Lint/MergeFiles.py b/src/lib/Server/Lint/MergeFiles.py
index 27e7aa99a..52fea3d9b 100644
--- a/src/lib/Server/Lint/MergeFiles.py
+++ b/src/lib/Server/Lint/MergeFiles.py
@@ -1,7 +1,6 @@
import os
from copy import deepcopy
from difflib import SequenceMatcher
-import Bcfg2.Options
import Bcfg2.Server.Lint
class MergeFiles(Bcfg2.Server.Lint.ServerPlugin):
@@ -27,10 +26,10 @@ class MergeFiles(Bcfg2.Server.Lint.ServerPlugin):
def check_probes(self):
probes = self.core.plugins['Probes'].probes.entries
for mset in self.get_similar(probes):
- self.LintError("merge-cfg",
- "The following probes are similar: %s. "
- "Consider merging them into a single probe." %
- ", ".join([p for p in mset]))
+ self.LintError("merge-cfg",
+ "The following probes are similar: %s. "
+ "Consider merging them into a single probe." %
+ ", ".join([p for p in mset]))
def get_similar(self, entries):
if "threshold" in self.config:
diff --git a/src/lib/Server/Lint/RequiredAttrs.py b/src/lib/Server/Lint/RequiredAttrs.py
index 9f00a4e24..55206d2ba 100644
--- a/src/lib/Server/Lint/RequiredAttrs.py
+++ b/src/lib/Server/Lint/RequiredAttrs.py
@@ -1,7 +1,7 @@
import os.path
import lxml.etree
import Bcfg2.Server.Lint
-import Bcfg2.Server.Plugins.Packages
+from Bcfg2.Server.Plugins.Packages import Apt, Yum
class RequiredAttrs(Bcfg2.Server.Lint.ServerPlugin):
""" verify attributes for configuration entries (as defined in
@@ -10,41 +10,54 @@ class RequiredAttrs(Bcfg2.Server.Lint.ServerPlugin):
def __init__(self, *args, **kwargs):
Bcfg2.Server.Lint.ServerPlugin.__init__(self, *args, **kwargs)
self.required_attrs = {
- 'device': ['name', 'owner', 'group', 'dev_type'],
- 'directory': ['name', 'owner', 'group', 'perms'],
- 'file': ['name', 'owner', 'group', 'perms'],
- 'hardlink': ['name', 'to'],
- 'symlink': ['name', 'to'],
- 'ignore': ['name'],
- 'nonexistent': ['name'],
- 'permissions': ['name', 'owner', 'group', 'perms'],
- 'vcs': ['vcstype', 'revision', 'sourceurl']}
+ 'Path': {
+ 'device': ['name', 'owner', 'group', 'dev_type'],
+ 'directory': ['name', 'owner', 'group', 'perms'],
+ 'file': ['name', 'owner', 'group', 'perms', '__text__'],
+ 'hardlink': ['name', 'to'],
+ 'symlink': ['name', 'to'],
+ 'ignore': ['name'],
+ 'nonexistent': ['name'],
+ 'permissions': ['name', 'owner', 'group', 'perms'],
+ 'vcs': ['vcstype', 'revision', 'sourceurl']},
+ 'Service': {
+ 'chkconfig': ['name'],
+ 'deb': ['name'],
+ 'rc-update': ['name'],
+ 'smf': ['name', 'FMRI'],
+ 'upstart': ['name']},
+ 'Action': ['name', 'timing', 'when', 'status', 'command'],
+ 'Package': ['name']}
def Run(self):
- self.check_rules()
- self.check_bundles()
self.check_packages()
+ if "Defaults" in self.core.plugins:
+ self.logger.info("Defaults plugin enabled; skipping required "
+ "attribute checks")
+ else:
+ self.check_rules()
+ self.check_bundles()
def check_packages(self):
""" check package sources for Source entries with missing attrs """
if 'Packages' in self.core.plugins:
for source in self.core.plugins['Packages'].sources:
- if isinstance(source, Bcfg2.Server.Plugins.Packages.PulpSource):
- if not source.id:
+ if isinstance(source, Yum.YumSource):
+ if (not source.pulp_id and not source.url and
+ not source.rawurl):
self.LintError("required-attrs-missing",
- "The required attribute id is missing "
- "from a Pulp source: %s" %
- self.RenderXML(source.xsource))
- else:
- if not source.url and not source.rawurl:
- self.LintError("required-attrs-missing",
- "A %s source must have either a url or "
- "rawurl attribute: %s" %
+ "A %s source must have either a url, "
+ "rawurl, or pulp_id attribute: %s" %
(source.ptype,
self.RenderXML(source.xsource)))
+ elif not source.url and not source.rawurl:
+ self.LintError("required-attrs-missing",
+ "A %s source must have either a url or "
+ "rawurl attribute: %s" %
+ (source.ptype,
+ self.RenderXML(source.xsource)))
- if (not isinstance(source,
- Bcfg2.Server.Plugins.Packages.APTSource) and
+ if (not isinstance(source, Apt.AptSource) and
source.recommended):
self.LintError("extra-attrs",
"The recommended attribute is not "
@@ -67,25 +80,37 @@ class RequiredAttrs(Bcfg2.Server.Lint.ServerPlugin):
for bundle in self.core.plugins['Bundler'].entries.values():
try:
xdata = lxml.etree.XML(bundle.data)
- except AttributeError:
+ except (lxml.etree.XMLSyntaxError, AttributeError):
xdata = lxml.etree.parse(bundle.template.filepath).getroot()
- for path in xdata.xpath("//BoundPath"):
+ for path in xdata.xpath("//*[substring(name(), 1, 5) = 'Bound']"):
self.check_entry(path, bundle.name)
def check_entry(self, entry, filename):
""" generic entry check """
if self.HandlesFile(filename):
- pathname = entry.get('name')
- pathtype = entry.get('type')
- pathset = set(entry.attrib.keys())
- try:
- required_attrs = set(self.required_attrs[pathtype] + ['type'])
- except KeyError:
- self.LintError("unknown-path-type",
- "Unknown path type %s: %s" %
- (pathtype, self.RenderXML(entry)))
- return
+ name = entry.get('name')
+ tag = entry.tag
+ if tag.startswith("Bound"):
+ tag = tag[5:]
+ if tag not in self.required_attrs:
+ self.LintError("unknown-entry-tag",
+ "Unknown entry tag '%s': %s" %
+ (entry.tag, self.RenderXML(entry)))
+
+ if isinstance(self.required_attrs[tag], dict):
+ etype = entry.get('type')
+ if etype in self.required_attrs[tag]:
+ required_attrs = set(self.required_attrs[tag][etype] +
+ ['type'])
+ else:
+ self.LintError("unknown-entry-type",
+ "Unknown %s type %s: %s" %
+ (tag, etype, self.RenderXML(entry)))
+ return
+ else:
+ required_attrs = set(self.required_attrs[tag])
+ attrs = set(entry.attrib.keys())
if 'dev_type' in required_attrs:
dev_type = entry.get('dev_type')
@@ -93,17 +118,21 @@ class RequiredAttrs(Bcfg2.Server.Lint.ServerPlugin):
# check if major/minor are specified
required_attrs |= set(['major', 'minor'])
- if pathtype == 'file' and not entry.text:
- self.LintError("required-attrs-missing",
- "Text missing for %s %s in %s: %s" %
- (entry.tag, pathname, filename,
- self.RenderXML(entry)))
+ if '__text__' in required_attrs:
+ required_attrs.remove('__text__')
+ if (not entry.text and
+ not entry.get('empty', 'false').lower() == 'true'):
+ self.LintError("required-attrs-missing",
+ "Text missing for %s %s in %s: %s" %
+ (entry.tag, name, filename,
+ self.RenderXML(entry)))
- if not pathset.issuperset(required_attrs):
+ if not attrs.issuperset(required_attrs):
self.LintError("required-attrs-missing",
- "The required attributes %s are missing for %s %sin %s:\n%s" %
- (",".join([attr
- for attr in
- required_attrs.difference(pathset)]),
- entry.tag, pathname, filename,
+ "The following required attribute(s) are "
+ "missing for %s %s in %s: %s\n%s" %
+ (entry.tag, name, filename,
+ ", ".join([attr
+ for attr in
+ required_attrs.difference(attrs)]),
self.RenderXML(entry)))
diff --git a/src/lib/Server/Lint/Validate.py b/src/lib/Server/Lint/Validate.py
index ebf621c22..19fd61d25 100644
--- a/src/lib/Server/Lint/Validate.py
+++ b/src/lib/Server/Lint/Validate.py
@@ -5,7 +5,6 @@ import os
from subprocess import Popen, PIPE, STDOUT
import sys
-import Bcfg2.Options
import Bcfg2.Server.Lint
class Validate(Bcfg2.Server.Lint.ServerlessPlugin):
@@ -21,6 +20,7 @@ class Validate(Bcfg2.Server.Lint.ServerlessPlugin):
"%s/Pkgmgr/*.xml":"%s/pkglist.xsd",
"%s/Base/*.xml":"%s/base.xsd",
"%s/Rules/*.xml":"%s/rules.xsd",
+ "%s/Defaults/*.xml":"%s/defaults.xsd",
"%s/etc/report-configuration.xml":"%s/report-configuration.xsd",
"%s/Svcmgr/*.xml":"%s/services.xsd",
"%s/Deps/*.xml":"%s/deps.xsd",
@@ -45,21 +45,21 @@ class Validate(Bcfg2.Server.Lint.ServerlessPlugin):
if filelist:
# avoid loading schemas for empty file lists
+ schemafile = schemaname % schemadir
try:
- schema = lxml.etree.XMLSchema(lxml.etree.parse(schemaname %
- schemadir))
+ schema = lxml.etree.XMLSchema(lxml.etree.parse(schemafile))
except IOError:
e = sys.exc_info()[1]
- self.LintError("input-output-error", e.message)
+ self.LintError("input-output-error", str(e))
continue
- except:
+ except lxml.etree.XMLSchemaParseError:
+ e = sys.exc_info()[1]
self.LintError("schema-failed-to-parse",
- "Failed to process schema %s" %
- (schemaname % schemadir))
+ "Failed to process schema %s: %s" %
+ (schemafile, e))
continue
for filename in filelist:
- self.validate(filename, schemaname % schemadir,
- schema=schema)
+ self.validate(filename, schemafile, schema=schema)
self.check_properties()
diff --git a/src/lib/Server/Lint/__init__.py b/src/lib/Server/Lint/__init__.py
index f15c90557..f47059ac4 100644
--- a/src/lib/Server/Lint/__init__.py
+++ b/src/lib/Server/Lint/__init__.py
@@ -107,7 +107,7 @@ class ErrorHandler (object):
"duplicate-package":"error",
"multiple-default-groups":"error",
"required-infoxml-attrs-missing":"error",
- "unknown-path-type":"error",
+ "unknown-entry-type":"error",
"required-attrs-missing":"error",
"extra-attrs":"warning",
"schema-failed-to-parse":"warning",
diff --git a/src/lib/Server/Plugin.py b/src/lib/Server/Plugin.py
index bf55ad271..1a6399d48 100644
--- a/src/lib/Server/Plugin.py
+++ b/src/lib/Server/Plugin.py
@@ -365,7 +365,8 @@ class FileBacked(object):
self.data = BUILTIN_FILE_TYPE(self.name).read()
self.Index()
except IOError:
- logger.error("Failed to read file %s" % (self.name))
+ err = sys.exc_info()[1]
+ logger.error("Failed to read file %s: %s" % (self.name, err))
def Index(self):
"""Update local data structures based on current file state"""
@@ -518,11 +519,10 @@ class DirectoryBacked(object):
if ((event.filename[-1] == '~') or
(event.filename[:2] == '.#') or
(event.filename[-4:] == '.swp') or
- (event.filename in ['SCCS', '.svn', '4913'])):
+ (event.filename in ['SCCS', '.svn', '4913']) or
+ (not self.patterns.match(event.filename))):
return
if action in ['exists', 'created']:
- if not self.patterns.match(event.filename):
- return
self.add_entry(relpath, event)
elif action == 'changed':
if relpath in self.entries:
@@ -572,7 +572,38 @@ class SingleXMLFileBacked(XMLFileBacked):
"""This object is a coherent cache for an independent XML file."""
def __init__(self, filename, fam):
XMLFileBacked.__init__(self, filename)
- fam.AddMonitor(filename, self)
+ self.extras = []
+ self.fam = fam
+ self.fam.AddMonitor(filename, self)
+
+ def Index(self):
+ """Build local data structures."""
+ try:
+ self.xdata = lxml.etree.XML(self.data, base_url=self.name)
+ except lxml.etree.XMLSyntaxError:
+ err = sys.exc_info()[1]
+ logger.error("Failed to parse %s: %s" % (self.name, err))
+ raise Bcfg2.Server.Plugin.PluginInitError
+
+ included = [ent.get('href')
+ for ent in self.xdata.findall('./{http://www.w3.org/2001/XInclude}include')]
+ if included:
+ for name in included:
+ if name not in self.extras:
+ self.fam.AddMonitor(os.path.join(os.path.dirname(self.name),
+ name),
+ self)
+ self.extras.append(name)
+ try:
+ self.xdata.getroottree().xinclude()
+ except lxml.etree.XIncludeError:
+ err = sys.exc_info()[1]
+ logger.error("XInclude failed on %s: %s" % (self.name, err))
+
+
+ self.entries = self.xdata.getchildren()
+ if self.__identifier__ is not None:
+ self.label = self.xdata.attrib[self.__identifier__]
class StructFile(XMLFileBacked):
@@ -789,10 +820,10 @@ class PrioDir(Plugin, Generator, XMLDirectoryBacked):
def get_attrs(self, entry, metadata):
""" get a list of attributes to add to the entry during the bind """
- if False in [src.Cache(metadata)
- for src in list(self.entries.values())]:
- self.logger.error("Called before data loaded")
- raise PluginExecutionError
+ for src in list(self.entries.values()):
+ if src.Cache(metadata) == False:
+ self.logger.error("Called before data loaded")
+ raise PluginExecutionError
matching = [src for src in list(self.entries.values())
if (src.cache and
entry.tag in src.cache[1] and
diff --git a/src/lib/Server/Plugins/Cfg.py b/src/lib/Server/Plugins/Cfg.py
index 2c0c69926..beea2c747 100644
--- a/src/lib/Server/Plugins/Cfg.py
+++ b/src/lib/Server/Plugins/Cfg.py
@@ -196,7 +196,7 @@ class CfgEntrySet(Bcfg2.Server.Plugin.EntrySet):
if specific.all:
return bfname
elif specific.group:
- return "%s.G%d_%s" % (bfname, specific.prio, specific.group)
+ return "%s.G%02d_%s" % (bfname, specific.prio, specific.group)
elif specific.hostname:
return "%s.H_%s" % (bfname, specific.hostname)
diff --git a/src/lib/Server/Plugins/Defaults.py b/src/lib/Server/Plugins/Defaults.py
new file mode 100644
index 000000000..23104946e
--- /dev/null
+++ b/src/lib/Server/Plugins/Defaults.py
@@ -0,0 +1,51 @@
+"""This generator provides rule-based entry mappings."""
+__revision__ = '$Revision$'
+
+import re
+import Bcfg2.Server.Plugin
+import Bcfg2.Server.Plugins.Rules
+
+class Defaults(Bcfg2.Server.Plugins.Rules.Rules,
+ Bcfg2.Server.Plugin.StructureValidator):
+ """Set default attributes on bound entries"""
+ name = 'Defaults'
+ __version__ = '$Id$'
+ __author__ = 'bcfg-dev@mcs.anl.gov'
+
+ # Rules is a Generator that happens to implement all of the
+ # functionality we want, so we overload it, but Defaults should
+ # _not_ handle any entries; it does its stuff in the structure
+ # validation phase. so we overload Handle(s)Entry and HandleEvent
+ # to ensure that Defaults handles no entries, even though it's a
+ # Generator.
+
+ def HandlesEntry(self, entry, metadata):
+ return False
+
+ def HandleEntry(self, entry, metadata):
+ raise PluginExecutionError
+
+ def HandleEvent(self, event):
+ Bcfg2.Server.Plugin.XMLDirectoryBacked.HandleEvent(self, event)
+
+ def validate_structures(self, metadata, structures):
+ """ Apply defaults """
+ for struct in structures:
+ for entry in struct.iter():
+ if entry.tag.startswith("Bound"):
+ is_bound = True
+ entry.tag = entry.tag[5:]
+ else:
+ is_bound = False
+ try:
+ try:
+ self.BindEntry(entry, metadata)
+ except Bcfg2.Server.Plugin.PluginExecutionError:
+ # either no matching defaults (which is okay),
+ # or multiple matching defaults (which is not
+ # okay, but is logged). either way, we don't
+ # care about the error.
+ pass
+ finally:
+ if is_bound:
+ entry.tag = "Bound" + entry.tag
diff --git a/src/lib/Server/Plugins/GroupPatterns.py b/src/lib/Server/Plugins/GroupPatterns.py
index 7faead39a..76a628931 100644
--- a/src/lib/Server/Plugins/GroupPatterns.py
+++ b/src/lib/Server/Plugins/GroupPatterns.py
@@ -70,18 +70,16 @@ class PatternMap(object):
class PatternFile(Bcfg2.Server.Plugin.SingleXMLFileBacked):
+ __identifier__ = None
+
def __init__(self, filename, fam):
Bcfg2.Server.Plugin.SingleXMLFileBacked.__init__(self, filename, fam)
self.patterns = []
def Index(self):
+ Bcfg2.Server.Plugin.SingleXMLFileBacked.Index(self)
self.patterns = []
- try:
- parsed = lxml.etree.XML(self.data)
- except:
- Bcfg2.Server.Plugin.logger.error("Failed to read file %s" % self.name)
- return
- for entry in parsed.findall('GroupPattern'):
+ for entry in self.xdata.xpath('//GroupPattern'):
try:
groups = [g.text for g in entry.findall('Group')]
for pat_ent in entry.findall('NamePattern'):
@@ -91,9 +89,8 @@ class PatternFile(Bcfg2.Server.Plugin.SingleXMLFileBacked):
rng = range_ent.text
self.patterns.append(PatternMap(None, rng, groups))
except:
- Bcfg2.Server.Plugin.logger.error(\
- "GroupPatterns: Failed to initialize pattern %s" % \
- (entry.get('pattern')))
+ self.logger.error("GroupPatterns: Failed to initialize pattern "
+ "%s" % entry.get('pattern'))
def process_patterns(self, hostname):
ret = []
@@ -103,9 +100,9 @@ class PatternFile(Bcfg2.Server.Plugin.SingleXMLFileBacked):
if gn is not None:
ret.extend(gn)
except:
- Bcfg2.Server.Plugin.logger.error(\
- "GroupPatterns: Failed to process pattern %s for %s" % \
- (pattern.pattern, hostname), exc_info=1)
+ self.logger.error("GroupPatterns: Failed to process pattern %s "
+ "for %s" % (pattern.pattern, hostname),
+ exc_info=1)
return ret
diff --git a/src/lib/Server/Plugins/Packages.py b/src/lib/Server/Plugins/Packages.py
deleted file mode 100644
index 155b78581..000000000
--- a/src/lib/Server/Plugins/Packages.py
+++ /dev/null
@@ -1,1320 +0,0 @@
-import os
-import re
-import sys
-import copy
-import gzip
-import glob
-import base64
-import logging
-import tarfile
-import lxml.etree
-
-# Compatibility imports
-from Bcfg2.Bcfg2Py3k import cPickle
-from Bcfg2.Bcfg2Py3k import HTTPBasicAuthHandler
-from Bcfg2.Bcfg2Py3k import HTTPPasswordMgrWithDefaultRealm
-from Bcfg2.Bcfg2Py3k import HTTPError
-from Bcfg2.Bcfg2Py3k import install_opener
-from Bcfg2.Bcfg2Py3k import build_opener
-from Bcfg2.Bcfg2Py3k import urlopen
-from Bcfg2.Bcfg2Py3k import ConfigParser
-
-# py3k compatibility
-if sys.hexversion >= 0x03000000:
- from io import FileIO as BUILTIN_FILE_TYPE
-else:
- BUILTIN_FILE_TYPE = file
-
-try:
- import yum.misc
- has_yum = True
-except ImportError:
- has_yum = False
-
-try:
- import pulp.client.server
- import pulp.client.config
- import pulp.client.api.repository
- import pulp.client.api.consumer
- has_pulp = True
-except ImportError:
- has_pulp = False
-
-try:
- from hashlib import md5
-except ImportError:
- from md5 import md5
-
-import Bcfg2.Logger
-import Bcfg2.Server.Plugin
-
-# build sources.list?
-# caching for yum
-
-class NoData(Exception):
- pass
-
-
-class SomeData(Exception):
- pass
-
-logger = logging.getLogger('Packages')
-
-
-def source_from_xml(xsource, cachepath):
- """ create a *Source object from its XML representation in
- sources.xml """
- stype = xsource.get("type")
- if stype is None:
- logger.error("No type specified for source, skipping")
- return None
-
- try:
- cls = globals()["%sSource" % stype.upper()]
- except KeyError:
- logger.error("Unknown source type %s")
- return None
-
- return cls(cachepath, xsource)
-
-
-def _fetch_url(url):
- if '@' in url:
- mobj = re.match('(\w+://)([^:]+):([^@]+)@(.*)$', url)
- if not mobj:
- raise ValueError
- user = mobj.group(2)
- passwd = mobj.group(3)
- url = mobj.group(1) + mobj.group(4)
- auth = HTTPBasicAuthHandler(HTTPPasswordMgrWithDefaultRealm())
- auth.add_password(None, url, user, passwd)
- install_opener(build_opener(auth))
- return urlopen(url).read()
-
-
-class Source(object):
- basegroups = []
-
- def __init__(self, basepath, xsource):
- self.basepath = basepath
- self.xsource = xsource
-
- try:
- self.version = xsource.find('Version').text
- except AttributeError:
- pass
-
- for key, tag in [('components', 'Component'), ('arches', 'Arch'),
- ('blacklist', 'Blacklist'),
- ('whitelist', 'Whitelist')]:
- self.__dict__[key] = [item.text for item in xsource.findall(tag)]
-
- self.gpgkeys = [el.text for el in xsource.findall("GPGKey")]
-
- self.recommended = xsource.get('recommended', 'false').lower() == 'true'
- self.id = xsource.get('id')
-
- self.rawurl = xsource.get('rawurl', '')
- if self.rawurl and not self.rawurl.endswith("/"):
- self.rawurl += "/"
- self.url = xsource.get('url', '')
- if self.url and not self.url.endswith("/"):
- self.url += "/"
- self.version = xsource.get('version', '')
-
- # build the set of conditions to see if this source applies to
- # a given set of metadata
- self.conditions = []
- self.groups = [] # provided for some limited backwards compat
- for el in xsource.iterancestors():
- if el.tag == "Group":
- if el.get("negate", "false").lower() == "true":
- self.conditions.append(lambda m, el=el:
- el.get("name") not in m.groups)
- else:
- self.groups.append(el.get("name"))
- self.conditions.append(lambda m, el=el:
- el.get("name") in m.groups)
- elif el.tag == "Client":
- if el.get("negate", "false").lower() == "true":
- self.conditions.append(lambda m, el=el:
- el.get("name") != m.hostname)
- else:
- self.conditions.append(lambda m, el=el:
- el.get("name") == m.hostname)
-
- self.deps = dict()
- self.provides = dict()
-
- self.cachefile = \
- os.path.join(self.basepath,
- "cache-%s" %
- md5(cPickle.dumps([self.version, self.components,
- self.url, self.rawurl,
- self.arches])).hexdigest())
- self.url_map = []
-
- def load_state(self):
- pass
-
- def setup_data(self, force_update=False):
- should_read = True
- should_download = False
- if os.path.exists(self.cachefile):
- try:
- self.load_state()
- should_read = False
- except:
- logger.error("Cachefile %s load failed; "
- "falling back to file read" % self.cachefile)
- if should_read:
- try:
- self.read_files()
- except:
- logger.error("Packages: File read failed; "
- "falling back to file download")
- should_download = True
-
- if should_download or force_update:
- try:
- self.update()
- self.read_files()
- except:
- logger.error("Failed to update source", exc_info=1)
-
- def get_urls(self):
- return []
- urls = property(get_urls)
-
- def get_files(self):
- return [self.escape_url(url) for url in self.urls]
- files = property(get_files)
-
- def get_vpkgs(self, meta):
- agroups = ['global'] + [a for a in self.arches if a in meta.groups]
- vdict = dict()
- for agrp in agroups:
- for key, value in list(self.provides[agrp].items()):
- if key not in vdict:
- vdict[key] = set(value)
- else:
- vdict[key].update(value)
- return vdict
-
- def escape_url(self, url):
- return os.path.join(self.basepath, url.replace('/', '@'))
-
- def file_init(self):
- pass
-
- def read_files(self):
- pass
-
- def update(self):
- for url in self.urls:
- logger.info("Packages: Updating %s" % url)
- fname = self.escape_url(url)
- try:
- data = _fetch_url(url)
- except ValueError:
- logger.error("Packages: Bad url string %s" % url)
- continue
- except HTTPError:
- err = sys.exc_info()[1]
- logger.error("Packages: Failed to fetch url %s. code=%s" %
- (url, err.code))
- continue
- BUILTIN_FILE_TYPE(fname, 'w').write(data)
-
- def applies(self, metadata):
- # check base groups
- if len([g for g in self.basegroups if g in metadata.groups]) == 0:
- return False
-
- # check Group/Client tags from sources.xml
- for condition in self.conditions:
- if not condition(metadata):
- return False
-
- return True
-
- def get_arches(self, metadata):
- return ['global'] + [a for a in self.arches if a in metadata.groups]
-
- def get_deps(self, metadata, pkgname):
- for arch in self.get_arches(metadata):
- if pkgname in self.deps[arch]:
- return self.deps[arch][pkgname]
- raise NoData
-
- def get_provides(self, metadata, required):
- for arch in self.get_arches(metadata):
- if required in self.provides[arch]:
- return self.provides[arch][required]
- raise NoData
-
- def is_package(self, metadata, _):
- return False
-
-
-class YUMSource(Source):
- xp = '{http://linux.duke.edu/metadata/common}'
- rp = '{http://linux.duke.edu/metadata/rpm}'
- rpo = '{http://linux.duke.edu/metadata/repo}'
- fl = '{http://linux.duke.edu/metadata/filelists}'
- basegroups = ['yum', 'redhat', 'centos', 'fedora']
- ptype = 'yum'
-
- def __init__(self, basepath, xsource):
- Source.__init__(self, basepath, xsource)
- if not self.rawurl:
- self.baseurl = self.url + "%(version)s/%(component)s/%(arch)s/"
- else:
- self.baseurl = self.rawurl
- self.packages = dict()
- self.deps = dict([('global', dict())])
- self.provides = dict([('global', dict())])
- self.filemap = dict([(x, dict()) for x in ['global'] + self.arches])
- self.needed_paths = set()
- self.file_to_arch = dict()
-
- def save_state(self):
- cache = BUILTIN_FILE_TYPE(self.cachefile, 'wb')
- cPickle.dump((self.packages, self.deps, self.provides,
- self.filemap, self.url_map), cache, 2)
- cache.close()
-
- def load_state(self):
- data = BUILTIN_FILE_TYPE(self.cachefile)
- (self.packages, self.deps, self.provides,
- self.filemap, self.url_map) = cPickle.load(data)
-
- def get_urls(self):
- surls = list()
- self.url_map = []
- for arch in self.arches:
- if self.url:
- usettings = [{'version':self.version, 'component':comp,
- 'arch':arch}
- for comp in self.components]
- else: # rawurl given
- usettings = [{'version':self.version, 'component':None,
- 'arch':arch}]
-
- for setting in usettings:
- setting['url'] = self.baseurl % setting
- self.url_map.append(copy.deepcopy(setting))
- surls.append((arch, [setting['url'] for setting in usettings]))
- urls = []
- for (sarch, surl_list) in surls:
- for surl in surl_list:
- urls.extend(self._get_urls_from_repodata(surl, sarch))
- return urls
- urls = property(get_urls)
-
- def _get_urls_from_repodata(self, url, arch):
- rmdurl = '%srepodata/repomd.xml' % url
- try:
- repomd = _fetch_url(rmdurl)
- xdata = lxml.etree.XML(repomd)
- except ValueError:
- logger.error("Packages: Bad url string %s" % rmdurl)
- return []
- except HTTPError:
- err = sys.exc_info()[1]
- logger.error("Packages: Failed to fetch url %s. code=%s" %
- (rmdurl, err.code))
- return []
- except lxml.etree.XMLSyntaxError:
- err = sys.exc_info()[1]
- logger.error("Packages: Failed to process metadata at %s: %s" %
- (rmdurl, err))
- return []
-
- urls = []
- for elt in xdata.findall(self.rpo + 'data'):
- if elt.get('type') in ['filelists', 'primary']:
- floc = elt.find(self.rpo + 'location')
- fullurl = url + floc.get('href')
- urls.append(fullurl)
- self.file_to_arch[self.escape_url(fullurl)] = arch
- return urls
-
- def read_files(self):
- # we have to read primary.xml first, and filelists.xml afterwards;
- primaries = list()
- filelists = list()
- for fname in self.files:
- if fname.endswith('primary.xml.gz'):
- primaries.append(fname)
- elif fname.endswith('filelists.xml.gz'):
- filelists.append(fname)
-
- for fname in primaries:
- farch = self.file_to_arch[fname]
- fdata = lxml.etree.parse(fname).getroot()
- self.parse_primary(fdata, farch)
- for fname in filelists:
- farch = self.file_to_arch[fname]
- fdata = lxml.etree.parse(fname).getroot()
- self.parse_filelist(fdata, farch)
-
- # merge data
- sdata = list(self.packages.values())
- try:
- self.packages['global'] = copy.deepcopy(sdata.pop())
- except IndexError:
- logger.error("No packages in repo")
- while sdata:
- self.packages['global'] = \
- self.packages['global'].intersection(sdata.pop())
-
- for key in self.packages:
- if key == 'global':
- continue
- self.packages[key] = \
- self.packages[key].difference(self.packages['global'])
- self.save_state()
-
- def parse_filelist(self, data, arch):
- if arch not in self.filemap:
- self.filemap[arch] = dict()
- for pkg in data.findall(self.fl + 'package'):
- for fentry in pkg.findall(self.fl + 'file'):
- if fentry.text in self.needed_paths:
- if fentry.text in self.filemap[arch]:
- self.filemap[arch][fentry.text].add(pkg.get('name'))
- else:
- self.filemap[arch][fentry.text] = set([pkg.get('name')])
-
- def parse_primary(self, data, arch):
- if arch not in self.packages:
- self.packages[arch] = set()
- if arch not in self.deps:
- self.deps[arch] = dict()
- if arch not in self.provides:
- self.provides[arch] = dict()
- for pkg in data.getchildren():
- if not pkg.tag.endswith('package'):
- continue
- pkgname = pkg.find(self.xp + 'name').text
- self.packages[arch].add(pkgname)
-
- pdata = pkg.find(self.xp + 'format')
- pre = pdata.find(self.rp + 'requires')
- self.deps[arch][pkgname] = set()
- for entry in pre.getchildren():
- self.deps[arch][pkgname].add(entry.get('name'))
- if entry.get('name').startswith('/'):
- self.needed_paths.add(entry.get('name'))
- pro = pdata.find(self.rp + 'provides')
- if pro != None:
- for entry in pro.getchildren():
- prov = entry.get('name')
- if prov not in self.provides[arch]:
- self.provides[arch][prov] = list()
- self.provides[arch][prov].append(pkgname)
-
- def is_package(self, metadata, item):
- arch = [a for a in self.arches if a in metadata.groups]
- if not arch:
- return False
- return ((item in self.packages['global'] or
- item in self.packages[arch[0]]) and
- item not in self.blacklist and
- (len(self.whitelist) == 0 or item in self.whitelist))
-
- def get_vpkgs(self, metadata):
- rv = Source.get_vpkgs(self, metadata)
- for arch, fmdata in list(self.filemap.items()):
- if arch not in metadata.groups and arch != 'global':
- continue
- for filename, pkgs in list(fmdata.items()):
- rv[filename] = pkgs
- return rv
-
- def filter_unknown(self, unknown):
- filtered = set([u for u in unknown if u.startswith('rpmlib')])
- unknown.difference_update(filtered)
-
-
-class PulpSource(Source):
- basegroups = ['yum', 'redhat', 'centos', 'fedora']
- ptype = 'yum'
-
- def __init__(self, basepath, xsource):
- Source.__init__(self, basepath, xsource)
- if not has_pulp:
- logger.error("Cannot create pulp source: pulp libraries not found")
- raise Bcfg2.Server.Plugin.PluginInitError
-
- self._config = pulp.client.config.Config()
-
- self._repoapi = pulp.client.api.repository.RepositoryAPI()
- self._repo = self._repoapi.repository(self.id)
- if self._repo is None:
- logger.error("Repo id %s not found")
- else:
- self.baseurl = "%s/%s" % (self._config.cds.baseurl,
- self._repo['relative_path'])
-
- self.gpgkeys = ["%s/%s" % (self._config.cds.keyurl, key)
- for key in self._repoapi.listkeys(self.id)]
-
- self.url_map = [{'version': self.version, 'component': None,
- 'arch': self.arches[0], 'url': self.baseurl}]
-
- def save_state(self):
- cache = BUILTIN_FILE_TYPE(self.cachefile, 'wb')
- cPickle.dump((self.packages, self.deps, self.provides, self._config,
- self.filemap, self.url_map, self._repoapi, self._repo),
- cache, 2)
- cache.close()
-
- def load_state(self):
- cache = BUILTIN_FILE_TYPE(self.cachefile)
- (self.packages, self.deps, self.provides, self._config, self.filemap,
- self.url_map, self._repoapi, self._repo) = cPickle.load(cache)
- cache.close()
-
- def read_files(self):
- """ ignore the yum files; we can get this information directly
- from pulp """
- for pkg in self._repoapi.packages(self.id):
- try:
- self.packages[pkg['arch']].append(pkg['name'])
- except KeyError:
- self.packages[pkg['arch']] = [pkg['name']]
- self.save_state()
-
-
-class APTSource(Source):
- basegroups = ['apt', 'debian', 'ubuntu', 'nexenta']
- ptype = 'deb'
-
- def __init__(self, basepath, xsource):
- Source.__init__(self, basepath, xsource)
- self.pkgnames = set()
-
- self.url_map = [{'rawurl': self.rawurl, 'url': self.url,
- 'version': self.version,
- 'components': self.components, 'arches': self.arches}]
-
- def save_state(self):
- cache = BUILTIN_FILE_TYPE(self.cachefile, 'wb')
- cPickle.dump((self.pkgnames, self.deps, self.provides),
- cache, 2)
- cache.close()
-
- def load_state(self):
- data = BUILTIN_FILE_TYPE(self.cachefile)
- self.pkgnames, self.deps, self.provides = cPickle.load(data)
-
- def filter_unknown(self, unknown):
- filtered = set([u for u in unknown if u.startswith('choice')])
- unknown.difference_update(filtered)
-
- def get_urls(self):
- if not self.rawurl:
- rv = []
- for part in self.components:
- for arch in self.arches:
- rv.append("%sdists/%s/%s/binary-%s/Packages.gz" %
- (self.url, self.version, part, arch))
- return rv
- else:
- return ["%sPackages.gz" % self.rawurl]
- urls = property(get_urls)
-
- def read_files(self):
- bdeps = dict()
- bprov = dict()
- if self.recommended:
- depfnames = ['Depends', 'Pre-Depends', 'Recommends']
- else:
- depfnames = ['Depends', 'Pre-Depends']
- for fname in self.files:
- if not self.rawurl:
- barch = [x
- for x in fname.split('@')
- if x.startswith('binary-')][0][7:]
- else:
- # RawURL entries assume that they only have one <Arch></Arch>
- # element and that it is the architecture of the source.
- barch = self.arches[0]
- if barch not in bdeps:
- bdeps[barch] = dict()
- bprov[barch] = dict()
- try:
- reader = gzip.GzipFile(fname)
- except:
- print("Failed to read file %s" % fname)
- raise
- for line in reader.readlines():
- words = str(line.strip()).split(':', 1)
- if words[0] == 'Package':
- pkgname = words[1].strip().rstrip()
- self.pkgnames.add(pkgname)
- bdeps[barch][pkgname] = []
- elif words[0] in depfnames:
- vindex = 0
- for dep in words[1].split(','):
- if '|' in dep:
- cdeps = [re.sub('\s+', '',
- re.sub('\(.*\)', '', cdep))
- for cdep in dep.split('|')]
- dyn_dname = "choice-%s-%s-%s" % (pkgname,
- barch,
- vindex)
- vindex += 1
- bdeps[barch][pkgname].append(dyn_dname)
- bprov[barch][dyn_dname] = set(cdeps)
- else:
- raw_dep = re.sub('\(.*\)', '', dep)
- raw_dep = raw_dep.rstrip().strip()
- bdeps[barch][pkgname].append(raw_dep)
- elif words[0] == 'Provides':
- for pkg in words[1].split(','):
- dname = pkg.rstrip().strip()
- if dname not in bprov[barch]:
- bprov[barch][dname] = set()
- bprov[barch][dname].add(pkgname)
-
- self.deps['global'] = dict()
- self.provides['global'] = dict()
- for barch in bdeps:
- self.deps[barch] = dict()
- self.provides[barch] = dict()
- for pkgname in self.pkgnames:
- pset = set()
- for barch in bdeps:
- if pkgname not in bdeps[barch]:
- bdeps[barch][pkgname] = []
- pset.add(tuple(bdeps[barch][pkgname]))
- if len(pset) == 1:
- self.deps['global'][pkgname] = pset.pop()
- else:
- for barch in bdeps:
- self.deps[barch][pkgname] = bdeps[barch][pkgname]
- provided = set()
- for bprovided in list(bprov.values()):
- provided.update(set(bprovided))
- for prov in provided:
- prset = set()
- for barch in bprov:
- if prov not in bprov[barch]:
- continue
- prset.add(tuple(bprov[barch].get(prov, ())))
- if len(prset) == 1:
- self.provides['global'][prov] = prset.pop()
- else:
- for barch in bprov:
- self.provides[barch][prov] = bprov[barch].get(prov, ())
- self.save_state()
-
- def is_package(self, _, pkg):
- return (pkg in self.pkgnames and
- pkg not in self.blacklist and
- (len(self.whitelist) == 0 or pkg in self.whitelist))
-
-
-class PACSource(Source):
- basegroups = ['arch', 'parabola']
- ptype = 'pacman'
-
- def __init__(self, basepath, xsource):
- Source.__init__(self, basepath, xsource)
- self.pkgnames = set()
-
- self.url_map = [{'rawurl': self.rawurl, 'url': self.url,
- 'version': self.version,
- 'components': self.components, 'arches': self.arches}]
-
- def save_state(self):
- cache = BUILTIN_FILE_TYPE(self.cachefile, 'wb')
- cPickle.dump((self.pkgnames, self.deps, self.provides),
- cache, 2)
- cache.close()
-
- def load_state(self):
- data = BUILTIN_FILE_TYPE(self.cachefile)
- self.pkgnames, self.deps, self.provides = cPickle.load(data)
-
- def filter_unknown(self, unknown):
- filtered = set([u for u in unknown if u.startswith('choice')])
- unknown.difference_update(filtered)
-
- def get_urls(self):
- if not self.rawurl:
- rv = []
- for part in self.components:
- for arch in self.arches:
- rv.append("%s%s/os/%s/%s.db.tar.gz" %
- (self.url, part, arch, part))
- return rv
- else:
- raise Exception("PACSource : RAWUrl not supported (yet)")
- urls = property(get_urls)
-
- def read_files(self):
- bdeps = dict()
- bprov = dict()
-
- if self.recommended:
- depfnames = ['Depends', 'Pre-Depends', 'Recommends']
- else:
- depfnames = ['Depends', 'Pre-Depends']
-
- for fname in self.files:
- if not self.rawurl:
- barch = [x for x in fname.split('@') if x in self.arches][0]
- else:
- # RawURL entries assume that they only have one <Arch></Arch>
- # element and that it is the architecture of the source.
- barch = self.arches[0]
-
- if barch not in bdeps:
- bdeps[barch] = dict()
- bprov[barch] = dict()
- try:
- print("try to read : " + fname)
- tar = tarfile.open(fname, "r")
- reader = gzip.GzipFile(fname)
- except:
- print("Failed to read file %s" % fname)
- raise
-
- for tarinfo in tar:
- if tarinfo.isdir():
- self.pkgnames.add(tarinfo.name.rsplit("-", 2)[0])
- print("added : " + tarinfo.name.rsplit("-", 2)[0])
- tar.close()
-
- self.deps['global'] = dict()
- self.provides['global'] = dict()
- for barch in bdeps:
- self.deps[barch] = dict()
- self.provides[barch] = dict()
- for pkgname in self.pkgnames:
- pset = set()
- for barch in bdeps:
- if pkgname not in bdeps[barch]:
- bdeps[barch][pkgname] = []
- pset.add(tuple(bdeps[barch][pkgname]))
- if len(pset) == 1:
- self.deps['global'][pkgname] = pset.pop()
- else:
- for barch in bdeps:
- self.deps[barch][pkgname] = bdeps[barch][pkgname]
- provided = set()
- for bprovided in list(bprov.values()):
- provided.update(set(bprovided))
- for prov in provided:
- prset = set()
- for barch in bprov:
- if prov not in bprov[barch]:
- continue
- prset.add(tuple(bprov[barch].get(prov, ())))
- if len(prset) == 1:
- self.provides['global'][prov] = prset.pop()
- else:
- for barch in bprov:
- self.provides[barch][prov] = bprov[barch].get(prov, ())
- self.save_state()
-
- def is_package(self, _, pkg):
- return (pkg in self.pkgnames and
- pkg not in self.blacklist and
- (len(self.whitelist) == 0 or pkg in self.whitelist))
-
-
-class PackagesSources(Bcfg2.Server.Plugin.SingleXMLFileBacked,
- Bcfg2.Server.Plugin.StructFile):
- def __init__(self, filename, cachepath, fam, packages):
- Bcfg2.Server.Plugin.SingleXMLFileBacked.__init__(self, filename, fam)
- Bcfg2.Server.Plugin.StructFile.__init__(self, filename)
- self.cachepath = cachepath
- if not os.path.exists(self.cachepath):
- # create cache directory if needed
- os.makedirs(self.cachepath)
- self.extras = []
- self.fam = fam
- self.pkg_obj = packages
-
- def Index(self):
- try:
- self.xdata = lxml.etree.XML(self.data, base_url=self.name)
- except lxml.etree.XMLSyntaxError:
- err = sys.exc_info()[1]
- logger.error("Packages: Error processing sources: %s" % err)
- raise Bcfg2.Server.Plugin.PluginInitError
-
- included = [ent.get('href')
- for ent in self.xdata.findall('./{http://www.w3.org/2001/XInclude}include')]
- if included:
- for name in included:
- if name not in self.extras:
- self.add_monitor(name)
- try:
- self.xdata.getroottree().xinclude()
- except lxml.etree.XIncludeError:
- err = sys.exc_info()[1]
- logger.error("Packages: Error processing sources: %s" % err)
-
- if self.__identifier__ is not None:
- self.label = self.xdata.attrib[self.__identifier__]
-
- self.entries = []
- for xsource in self.xdata.findall('.//Source'):
- source = source_from_xml(xsource, self.cachepath)
- if source is not None:
- self.entries.append(source)
-
- self.pkg_obj.Reload()
-
- def add_monitor(self, fname):
- """Add a fam monitor for an included file"""
- self.fam.AddMonitor(os.path.join(os.path.dirname(self.name), fname),
- self)
- self.extras.append(fname)
-
-
-class PackagesConfig(Bcfg2.Server.Plugin.FileBacked,
- ConfigParser.SafeConfigParser):
- def __init__(self, filename, fam):
- Bcfg2.Server.Plugin.FileBacked.__init__(self, filename)
- ConfigParser.SafeConfigParser.__init__(self)
- # packages.conf isn't strictly necessary, so only set a
- # monitor if it exists. if it gets added, that will require a
- # server restart
- if os.path.exists(filename):
- fam.AddMonitor(filename, self)
-
- def Index(self):
- """ Build local data structures """
- for section in self.sections():
- self.remove_section(section)
- self.read(self.name)
-
-
-class Packages(Bcfg2.Server.Plugin.Plugin,
- Bcfg2.Server.Plugin.StructureValidator,
- Bcfg2.Server.Plugin.Generator,
- Bcfg2.Server.Plugin.Connector):
- name = 'Packages'
- conflicts = ['Pkgmgr']
- experimental = True
- __rmi__ = Bcfg2.Server.Plugin.Plugin.__rmi__ + ['Refresh', 'Reload']
-
- def __init__(self, core, datastore):
- Bcfg2.Server.Plugin.Plugin.__init__(self, core, datastore)
- Bcfg2.Server.Plugin.StructureValidator.__init__(self)
- Bcfg2.Server.Plugin.Generator.__init__(self)
- Bcfg2.Server.Plugin.Connector.__init__(self)
- Bcfg2.Server.Plugin.Probing.__init__(self)
-
- self.sentinels = set()
- self.virt_pkgs = dict()
- self.ptypes = dict()
- self.cachepath = os.path.join(self.data, 'cache')
- self.keypath = os.path.join(self.data, 'keys')
- if not os.path.exists(self.keypath):
- # create key directory if needed
- os.makedirs(self.keypath)
-
- # set up config files
- self.config = PackagesConfig(os.path.join(self.data, "packages.conf"),
- core.fam)
- self.sources = PackagesSources(os.path.join(self.data, "sources.xml"),
- self.cachepath, core.fam, self)
-
- @property
- def disableResolver(self):
- try:
- return self.config.get("global", "resolver").lower() == "disabled"
- except (ConfigParser.NoSectionError, ConfigParser.NoOptionError):
- return False
-
- @property
- def disableMetaData(self):
- try:
- return self.config.get("global", "metadata").lower() == "disabled"
- except (ConfigParser.NoSectionError, ConfigParser.NoOptionError):
- return False
-
- def create_apt_conf(self, entry, metadata):
- """ create apt config for the specified host """
- raise NotImplementedError
-
- def create_yum_conf(self, entry, metadata):
- """ create yum config for the specified host """
- yum_attrib = {'encoding': 'ascii',
- 'owner': 'root',
- 'group': 'root',
- 'type': 'file',
- 'perms': '0644'}
-
- stanzas = []
- reponame_re = re.compile(r'.*/(?:RPMS\.)?([^/]+)')
- for source in self.get_matching_sources(metadata):
- for url_map in source.url_map:
- if url_map['arch'] in metadata.groups:
- # try to find a sensible name for the repo
- name = None
- if source.id:
- reponame = source.id
- else:
- match = reponame_re.search(url_map['url'])
- if url_map['component']:
- name = url_map['component']
- elif match:
- name = match.group(1)
- else:
- # couldn't figure out the name from the
- # source ID, URL or URL map (which
- # probably means its a screwy URL), so we
- # just generate a random one
- name = base64.b64encode(os.urandom(16))[:-2]
- reponame = "%s-%s" % (source.groups[0], name)
-
- stanza = ["[%s]" % reponame,
- "name=%s" % reponame,
- "baseurl=%s" % url_map['url'],
- "enabled=1"]
- if len(source.gpgkeys):
- stanza.append("gpgcheck=1")
- stanza.append("gpgkey=%s" %
- " ".join(source.gpgkeys))
- else:
- stanza.append("gpgcheck=0")
- stanzas.append("\n".join(stanza))
-
- entry.text = "%s\n" % "\n\n".join(stanzas)
- for (key, value) in list(yum_attrib.items()):
- entry.attrib.__setitem__(key, value)
-
- def get_relevant_groups(self, meta):
- mgrps = []
- for source in self.get_matching_sources(meta):
- mgrps.extend(list(set([g for g in meta.groups
- if (g in source.basegroups or
- g in source.groups or
- g in source.arches)])))
- mgrps.sort()
- return tuple(mgrps)
-
- def _setup_pulp(self):
- try:
- rouser = self.config.get("pulp", "rouser")
- ropass = self.config.get("pulp", "ropass")
- except ConfigParser.NoSectionError:
- logger.error("No [pulp] section found in Packages/packages.conf")
- raise Bcfg2.Server.Plugin.PluginInitError
- except ConfigParser.NoOptionError:
- err = sys.exc_info()[1]
- logger.error("Required option not found in "
- "Packages/packages.conf: %s" % err)
- raise Bcfg2.Server.Plugin.PluginInitError
-
- pulpconfig = pulp.client.config.Config()
- serveropts = pulpconfig.server
-
- self._server = pulp.client.server.PulpServer(serveropts['host'],
- int(serveropts['port']),
- serveropts['scheme'],
- serveropts['path'])
- self._server.set_basic_auth_credentials(rouser, ropass)
- pulp.client.server.set_active_server(self._server)
-
- def build_vpkgs_entry(self, meta):
- # build single entry for all matching sources
- vpkgs = dict()
- for source in self.get_matching_sources(meta):
- s_vpkgs = source.get_vpkgs(meta)
- for name, prov_set in list(s_vpkgs.items()):
- if name not in vpkgs:
- vpkgs[name] = set(prov_set)
- else:
- vpkgs[name].update(prov_set)
- return vpkgs
-
- def get_matching_sources(self, meta):
- return [s for s in self.sources if s.applies(meta)]
-
- def get_ptype(self, metadata):
- """ return the package type relevant to this client """
- if metadata.hostname not in self.ptypes:
- for source in self.sources:
- for grp in metadata.groups:
- if grp in source.basegroups:
- self.ptypes[metadata.hostname] = source.ptype
- break
- try:
- return self.ptypes[metadata.hostname]
- except KeyError:
- return None
-
- def HandleEntry(self, entry, metadata):
- if entry.tag == 'Package':
- entry.set('version', 'auto')
- entry.set('type', self.get_ptype(metadata))
- elif entry.tag == 'Path':
- if (self.config.has_option("global", "yum_config") and
- entry.get("name") == self.config.get("global", "yum_config")):
- self.create_yum_conf(entry, metadata)
- elif (self.config.has_option("global", "apt_config") and
- entry.get("name") == self.config.get("global", "apt_config")):
- self.create_apt_conf(entry, metadata)
-
- def HandlesEntry(self, entry, metadata):
- if entry.tag == 'Package':
- for grp in metadata.groups:
- if grp in self.sentinels:
- return True
- elif entry.tag == 'Path':
- # managed entries for yum/apt configs
- if ((self.config.has_option("global", "yum_config") and
- entry.get("name") == self.config.get("global",
- "yum_config")) or
- (self.config.has_option("global", "apt_config") and
- entry.get("name") == self.config.get("global", "apt_config"))):
- return True
- return False
-
- def complete(self, meta, input_requirements, debug=False):
- '''Build the transitive closure of all package dependencies
-
- Arguments:
- meta - client metadata instance
- packages - set of package names
- debug - print out debug information for the decision making process
- returns => (set(packages), set(unsatisfied requirements), package type)
- '''
- sources = self.get_matching_sources(meta)
- # reverse list so that priorities correspond to file order
- sources.reverse()
- if len(sources) == 0:
- self.logger.error("Packages: No matching sources for client %s; "
- "improper group memberships?" % meta.hostname)
- return set(), set(), 'failed'
- ptype = self.get_ptype(meta)
- if ptype is None:
- return set(), set(), 'failed'
-
- # setup vpkg cache
- pgrps = self.get_relevant_groups(meta)
- if pgrps not in self.virt_pkgs:
- self.virt_pkgs[pgrps] = self.build_vpkgs_entry(meta)
- vpkg_cache = self.virt_pkgs[pgrps]
-
- # unclassified is set of unsatisfied requirements (may be pkg for vpkg)
- unclassified = set(input_requirements)
- vpkgs = set()
- both = set()
- pkgs = set(input_requirements)
-
- packages = set()
- examined = set()
- unknown = set()
-
- final_pass = False
- really_done = False
- # do while unclassified or vpkgs or both or pkgs
- while unclassified or pkgs or both or final_pass:
- if really_done:
- break
- if len(unclassified) + len(pkgs) + len(both) == 0:
- # one more pass then exit
- really_done = True
-
- while unclassified:
- current = unclassified.pop()
- examined.add(current)
- is_pkg = False
- for source in sources:
- if source.is_package(meta, current):
- is_pkg = True
- break
-
- is_vpkg = current in vpkg_cache
-
- if is_pkg and is_vpkg:
- both.add(current)
- elif is_pkg and not is_vpkg:
- pkgs.add(current)
- elif is_vpkg and not is_pkg:
- vpkgs.add(current)
- elif not is_vpkg and not is_pkg:
- unknown.add(current)
-
- while pkgs:
- # direct packages; current can be added, and all deps
- # should be resolved
- current = pkgs.pop()
- if debug:
- self.logger.debug("Packages: handling package requirement "
- "%s" % current)
- deps = ()
- for source in sources:
- if source.is_package(meta, current):
- try:
- deps = source.get_deps(meta, current)
- break
- except:
- continue
- packages.add(current)
- newdeps = set(deps).difference(examined)
- if debug and newdeps:
- self.logger.debug("Packages: Package %s added "
- "requirements %s" % (current, newdeps))
- unclassified.update(newdeps)
-
- satisfied_vpkgs = set()
- for current in vpkgs:
- # virtual dependencies, satisfied if one of N in the
- # config, or can be forced if only one provider
- if len(vpkg_cache[current]) == 1:
- if debug:
- self.logger.debug("Packages: requirement %s satisfied "
- "by %s" % (current,
- vpkg_cache[current]))
- unclassified.update(vpkg_cache[current].difference(examined))
- satisfied_vpkgs.add(current)
- elif [item for item in vpkg_cache[current] if item in packages]:
- if debug:
- self.logger.debug("Packages: requirement %s satisfied "
- "by %s" %
- (current,
- [item for item in vpkg_cache[current]
- if item in packages]))
- satisfied_vpkgs.add(current)
- vpkgs.difference_update(satisfied_vpkgs)
-
- satisfied_both = set()
- for current in both:
- # packages that are both have virtual providers as
- # well as a package with that name. allow use of virt
- # through explicit specification, then fall back to
- # forcing current on last pass
- if [item for item in vpkg_cache[current] if item in packages]:
- if debug:
- self.logger.debug("Packages: requirement %s satisfied "
- "by %s" %
- (current,
- [item for item in vpkg_cache[current]
- if item in packages]))
- satisfied_both.add(current)
- elif current in input_requirements or final_pass:
- pkgs.add(current)
- satisfied_both.add(current)
- both.difference_update(satisfied_both)
-
- if len(unclassified) + len(pkgs) == 0:
- final_pass = True
- else:
- final_pass = False
-
- for source in sources:
- source.filter_unknown(unknown)
-
- return packages, unknown, ptype
-
- def validate_structures(self, metadata, structures):
- '''Ensure client configurations include all needed prerequisites
-
- Arguments:
- metadata - client metadata instance
- structures - a list of structure-stage entry combinations
- '''
- indep = lxml.etree.Element('Independent')
- self._build_packages(metadata, indep, structures)
- self._build_gpgkeys(metadata, indep)
- self._build_pulp_entries(metadata, indep)
- structures.append(indep)
-
- def _build_pulp_entries(self, metadata, independent):
- """ build list of Pulp actions that need to be included in the
- specification by validate_structures() """
- if not has_pulp:
- return
-
- # if there are no Pulp sources for this host, we don't need to
- # worry about registering it
- build_actions = False
- for source in self.get_matching_sources(metadata):
- if isinstance(source, PulpSource):
- build_actions = True
- break
-
- if not build_actions:
- self.logger.debug("No Pulp sources apply to %s, skipping Pulp "
- "registration" % metadata.hostname)
- return
-
- consumerapi = pulp.client.api.consumer.ConsumerAPI()
- try:
- consumer = consumerapi.consumer(metadata.hostname)
- except pulp.client.server.ServerRequestError:
- try:
- reguser = self.config.get("pulp", "reguser")
- regpass = self.config.get("pulp", "regpass")
- reg_cmd = ("pulp-client -u '%s' -p '%s' consumer create "
- "--id='%s'" % (reguser, regpass, metadata.hostname))
- lxml.etree.SubElement(independent, "BoundAction",
- name="pulp-register", timing="pre",
- when="always", status="check",
- command=reg_cmd)
- except ConfigParser.NoOptionError:
- err = sys.exc_info()[1]
- self.logger.error("Required option not found in "
- "Packages/packages.conf: %s. Pulp consumers "
- "will not be registered" % err)
- return
-
- for source in self.get_matching_sources(metadata):
- # each pulp source can only have one arch, so we don't
- # have to check the arch in url_map
- if source.id not in consumer['repoids']:
- bind_cmd = "pulp-client consumer bind --repoid=%s" % source.id
- lxml.etree.SubElement(independent, "BoundAction",
- name="pulp-bind-%s" % source.id,
- timing="pre", when="always",
- status="check", command=bind_cmd)
-
- def _build_packages(self, metadata, independent, structures):
- """ build list of packages that need to be included in the
- specification by validate_structures() """
- if self.disableResolver:
- # Config requests no resolver
- return
-
- initial = set([pkg.get('name')
- for struct in structures
- for pkg in struct.findall('Package') + \
- struct.findall('BoundPackage')])
- packages, unknown, ptype = self.complete(metadata, initial,
- debug=self.debug_flag)
- if unknown:
- self.logger.info("Got unknown entries")
- self.logger.info(list(unknown))
- newpkgs = list(packages.difference(initial))
- newpkgs.sort()
- for pkg in newpkgs:
- lxml.etree.SubElement(independent, 'BoundPackage', name=pkg,
- type=ptype, version='auto', origin='Packages')
-
- def _build_gpgkeys(self, metadata, independent):
- """ build list of gpg keys to be added to the specification by
- validate_structures() """
- needkeys = set()
- for source in self.get_matching_sources(metadata):
- for key in source.gpgkeys:
- needkeys.add(key)
-
- if len(needkeys):
- keypkg = lxml.etree.Element('BoundPackage', name="gpg-pubkey",
- type=self.get_ptype(metadata),
- origin='Packages')
-
- for key in needkeys:
- # figure out the path of the key on the client
- try:
- keydir = self.config.get("global", "gpg_keypath")
- except ConfigParser.NoOptionError:
- keydir = "/etc/pki/rpm-gpg"
- except ConfigParser.NoSectionError:
- keydir = "/etc/pki/rpm-gpg"
- remotekey = os.path.join(keydir, os.path.basename(key))
- localkey = os.path.join(self.keypath, os.path.basename(key))
- kdata = open(localkey).read()
-
- # copy the key to the client
- keypath = lxml.etree.Element("BoundPath", name=remotekey,
- encoding='ascii',
- owner='root', group='root',
- type='file', perms='0644',
- important='true')
- keypath.text = kdata
- independent.append(keypath)
-
- if has_yum:
- # add the key to the specification to ensure it
- # gets installed
- try:
- kinfo = yum.misc.getgpgkeyinfo(kdata)
- version = yum.misc.keyIdToRPMVer(kinfo['keyid'])
- release = yum.misc.keyIdToRPMVer(kinfo['timestamp'])
-
- lxml.etree.SubElement(keypkg, 'Instance',
- version=version,
- release=release,
- simplefile=remotekey)
- except ValueError:
- err = sys.exc_info()[1]
- self.logger.error("Could not read GPG key %s: %s" %
- (localkey, err))
- else:
- self.logger.info("Yum libraries not found; GPG keys will "
- "not be handled automatically")
- independent.append(keypkg)
-
- def Refresh(self):
- '''Packages.Refresh() => True|False\nReload configuration
- specification and download sources\n'''
- self._load_config(force_update=True)
- return True
-
- def Reload(self):
- '''Packages.Refresh() => True|False\nReload configuration
- specification and sources\n'''
- self._load_config()
- return True
-
- def _load_config(self, force_update=False):
- '''
- Load the configuration data and setup sources
-
- Keyword args:
- force_update Force downloading repo data
- '''
- self._load_sources(force_update)
- self._load_gpg_keys(force_update)
-
- def _load_sources(self, force_update):
- """ Load sources from the config """
- self.virt_pkgs = dict()
- self.sentinels = set()
-
- cachefiles = []
- for source in self.sources:
- cachefiles.append(source.cachefile)
- if not self.disableMetaData:
- source.setup_data(force_update)
- self.sentinels.update(source.basegroups)
-
- for cfile in glob.glob(os.path.join(self.cachepath, "cache-*")):
- if cfile not in cachefiles:
- os.unlink(cfile)
-
- def _load_gpg_keys(self, force_update):
- """ Load gpg keys from the config """
- keyfiles = []
- for source in self.sources:
- for key in source.gpgkeys:
- localfile = os.path.join(self.keypath, os.path.basename(key))
- if localfile not in keyfiles:
- keyfiles.append(localfile)
- if force_update or not os.path.exists(localfile):
- logger.debug("Downloading and parsing %s" % key)
- response = urlopen(key)
- open(localfile, 'w').write(response.read())
-
- for kfile in glob.glob(os.path.join(self.keypath, "*")):
- if kfile not in keyfiles:
- os.unlink(kfile)
-
- def get_additional_data(self, meta):
- sdata = []
- [sdata.extend(copy.deepcopy(src.url_map))
- for src in self.get_matching_sources(meta)]
- return dict(sources=sdata)
diff --git a/src/lib/Server/Plugins/Packages/Apt.py b/src/lib/Server/Plugins/Packages/Apt.py
new file mode 100644
index 000000000..5c80200a4
--- /dev/null
+++ b/src/lib/Server/Plugins/Packages/Apt.py
@@ -0,0 +1,142 @@
+import re
+import gzip
+import logging
+from Bcfg2.Server.Plugins.Packages.Collection import Collection
+from Bcfg2.Server.Plugins.Packages.Source import Source
+from Bcfg2.Bcfg2Py3k import cPickle, file
+
+logger = logging.getLogger("Packages")
+
+class AptCollection(Collection):
+ def get_group(self, group):
+ self.logger.warning("Package groups are not supported by APT")
+ return []
+
+class AptSource(Source):
+ basegroups = ['apt', 'debian', 'ubuntu', 'nexenta']
+ ptype = 'deb'
+
+ def __init__(self, basepath, xsource, config):
+ Source.__init__(self, basepath, xsource, config)
+ self.pkgnames = set()
+
+ self.url_map = [{'rawurl': self.rawurl, 'url': self.url,
+ 'version': self.version,
+ 'components': self.components, 'arches': self.arches}]
+
+ def save_state(self):
+ cache = file(self.cachefile, 'wb')
+ cPickle.dump((self.pkgnames, self.deps, self.provides),
+ cache, 2)
+ cache.close()
+
+ def load_state(self):
+ data = file(self.cachefile)
+ self.pkgnames, self.deps, self.provides = cPickle.load(data)
+
+ def filter_unknown(self, unknown):
+ filtered = set([u for u in unknown if u.startswith('choice')])
+ unknown.difference_update(filtered)
+
+ def get_urls(self):
+ if not self.rawurl:
+ rv = []
+ for part in self.components:
+ for arch in self.arches:
+ rv.append("%sdists/%s/%s/binary-%s/Packages.gz" %
+ (self.url, self.version, part, arch))
+ return rv
+ else:
+ return ["%sPackages.gz" % self.rawurl]
+ urls = property(get_urls)
+
+ def read_files(self):
+ bdeps = dict()
+ bprov = dict()
+ if self.recommended:
+ depfnames = ['Depends', 'Pre-Depends', 'Recommends']
+ else:
+ depfnames = ['Depends', 'Pre-Depends']
+ for fname in self.files:
+ if not self.rawurl:
+ barch = [x
+ for x in fname.split('@')
+ if x.startswith('binary-')][0][7:]
+ else:
+ # RawURL entries assume that they only have one <Arch></Arch>
+ # element and that it is the architecture of the source.
+ barch = self.arches[0]
+ if barch not in bdeps:
+ bdeps[barch] = dict()
+ bprov[barch] = dict()
+ try:
+ reader = gzip.GzipFile(fname)
+ except:
+ print("Failed to read file %s" % fname)
+ raise
+ for line in reader.readlines():
+ words = str(line.strip()).split(':', 1)
+ if words[0] == 'Package':
+ pkgname = words[1].strip().rstrip()
+ self.pkgnames.add(pkgname)
+ bdeps[barch][pkgname] = []
+ elif words[0] in depfnames:
+ vindex = 0
+ for dep in words[1].split(','):
+ if '|' in dep:
+ cdeps = [re.sub('\s+', '',
+ re.sub('\(.*\)', '', cdep))
+ for cdep in dep.split('|')]
+ dyn_dname = "choice-%s-%s-%s" % (pkgname,
+ barch,
+ vindex)
+ vindex += 1
+ bdeps[barch][pkgname].append(dyn_dname)
+ bprov[barch][dyn_dname] = set(cdeps)
+ else:
+ raw_dep = re.sub('\(.*\)', '', dep)
+ raw_dep = raw_dep.rstrip().strip()
+ bdeps[barch][pkgname].append(raw_dep)
+ elif words[0] == 'Provides':
+ for pkg in words[1].split(','):
+ dname = pkg.rstrip().strip()
+ if dname not in bprov[barch]:
+ bprov[barch][dname] = set()
+ bprov[barch][dname].add(pkgname)
+
+ self.deps['global'] = dict()
+ self.provides['global'] = dict()
+ for barch in bdeps:
+ self.deps[barch] = dict()
+ self.provides[barch] = dict()
+ for pkgname in self.pkgnames:
+ pset = set()
+ for barch in bdeps:
+ if pkgname not in bdeps[barch]:
+ bdeps[barch][pkgname] = []
+ pset.add(tuple(bdeps[barch][pkgname]))
+ if len(pset) == 1:
+ self.deps['global'][pkgname] = pset.pop()
+ else:
+ for barch in bdeps:
+ self.deps[barch][pkgname] = bdeps[barch][pkgname]
+ provided = set()
+ for bprovided in list(bprov.values()):
+ provided.update(set(bprovided))
+ for prov in provided:
+ prset = set()
+ for barch in bprov:
+ if prov not in bprov[barch]:
+ continue
+ prset.add(tuple(bprov[barch].get(prov, ())))
+ if len(prset) == 1:
+ self.provides['global'][prov] = prset.pop()
+ else:
+ for barch in bprov:
+ self.provides[barch][prov] = bprov[barch].get(prov, ())
+ self.save_state()
+
+ def is_package(self, _, pkg):
+ return (pkg in self.pkgnames and
+ pkg not in self.blacklist and
+ (len(self.whitelist) == 0 or pkg in self.whitelist))
diff --git a/src/lib/Server/Plugins/Packages/Collection.py b/src/lib/Server/Plugins/Packages/Collection.py
new file mode 100644
index 000000000..aed85fe77
--- /dev/null
+++ b/src/lib/Server/Plugins/Packages/Collection.py
@@ -0,0 +1,336 @@
+import copy
+import logging
+
+try:
+ from hashlib import md5
+except ImportError:
+ import md5
+
+logger = logging.getLogger("Packages")
+
+collections = dict()
+
+class Collection(object):
+ def __init__(self, metadata, sources, basepath):
+ """ don't call this directly; use the Factory method """
+ self.metadata = metadata
+ self.sources = sources
+ self.logger = logging.getLogger("Packages")
+ self.basepath = basepath
+ self.virt_pkgs = dict()
+
+ try:
+ self.config = sources[0].config
+ self.cachepath = sources[0].basepath
+ self.ptype = sources[0].ptype
+ except IndexError:
+ self.config = None
+ self.cachepath = None
+ self.ptype = "unknown"
+
+ self.cachefile = None
+
+ @property
+ def cachekey(self):
+ return md5(self.get_config()).hexdigest()
+
+ def get_config(self):
+ self.logger.error("Cannot generate config for host with multiple "
+ "source types (%s)" % self.metadata.hostname)
+ return ""
+
+ def get_relevant_groups(self):
+ groups = []
+ for source in self.sources:
+ groups.extend(source.get_relevant_groups(self.metadata))
+ return sorted(list(set(groups)))
+
+ @property
+ def basegroups(self):
+ groups = set()
+ for source in self.sources:
+ groups.update(source.basegroups)
+ return list(groups)
+
+ @property
+ def cachefiles(self):
+ cachefiles = set([self.cachefile])
+ for source in self.sources:
+ cachefiles.add(source.cachefile)
+ return list(cachefiles)
+
+ def get_group(self, group):
+ for source in self.sources:
+ pkgs = source.get_group(self.metadata, group)
+ if pkgs:
+ return pkgs
+ self.logger.warning("'%s' is not a valid group" % group)
+ return []
+
+ def is_package(self, package):
+ for source in self.sources:
+ if source.is_package(self.metadata, package):
+ return True
+ return False
+
+ def is_virtual_package(self, package):
+ for source in self.sources:
+ if source.is_virtual_package(self.metadata, package):
+ return True
+ return False
+
+ def get_deps(self, package):
+ for source in self.sources:
+ if source.is_package(self.metadata, package):
+ return source.get_deps(self.metadata, package)
+ return []
+
+ def get_provides(self, package):
+ for source in self.sources:
+ providers = source.get_provides(self.metadata, package)
+ if providers:
+ return providers
+ return []
+
+ def get_vpkgs(self):
+ """ get virtual packages """
+ vpkgs = dict()
+ for source in self.sources:
+ s_vpkgs = source.get_vpkgs(self.metadata)
+ for name, prov_set in list(s_vpkgs.items()):
+ if name not in vpkgs:
+ vpkgs[name] = set(prov_set)
+ else:
+ vpkgs[name].update(prov_set)
+ return vpkgs
+
+ def filter_unknown(self, unknown):
+ for source in self.sources:
+ source.filter_unknown(unknown)
+
+ def magic_groups_match(self):
+ for source in self.sources:
+ if source.magic_groups_match(self.metadata):
+ return True
+
+ def build_extra_structures(self, independent):
+ pass
+
+ def get_additional_data(self):
+ sdata = []
+ for source in self.sources:
+ sdata.extend(copy.deepcopy(source.url_map))
+ return sdata
+
+ def setup_data(self, force_update=False):
+ """ do any collection-level data setup tasks """
+ for source in self.sources:
+ source.setup_data(force_update)
+
+ def complete(self, packagelist):
+ '''Build the transitive closure of all package dependencies
+
+ Arguments:
+ packageslist - set of package names
+ returns => (set(packages), set(unsatisfied requirements))
+ '''
+
+ # setup vpkg cache
+ pgrps = tuple(self.get_relevant_groups())
+ if pgrps not in self.virt_pkgs:
+ self.virt_pkgs[pgrps] = self.get_vpkgs()
+ vpkg_cache = self.virt_pkgs[pgrps]
+
+ # unclassified is set of unsatisfied requirements (may be pkg
+ # for vpkg)
+ unclassified = set(packagelist)
+ vpkgs = set()
+ both = set()
+ pkgs = set(packagelist)
+
+ packages = set()
+ examined = set()
+ unknown = set()
+
+ final_pass = False
+ really_done = False
+ # do while unclassified or vpkgs or both or pkgs
+ while unclassified or pkgs or both or final_pass:
+ if really_done:
+ break
+ if len(unclassified) + len(pkgs) + len(both) == 0:
+ # one more pass then exit
+ really_done = True
+
+ while unclassified:
+ current = unclassified.pop()
+ examined.add(current)
+ is_pkg = False
+ if self.is_package(current):
+ is_pkg = True
+
+ is_vpkg = current in vpkg_cache
+
+ if is_pkg and is_vpkg:
+ both.add(current)
+ elif is_pkg and not is_vpkg:
+ pkgs.add(current)
+ elif is_vpkg and not is_pkg:
+ vpkgs.add(current)
+ elif not is_vpkg and not is_pkg:
+ unknown.add(current)
+
+ while pkgs:
+ # direct packages; current can be added, and all deps
+ # should be resolved
+ current = pkgs.pop()
+ self.logger.debug("Packages: handling package requirement %s" %
+ current)
+ packages.add(current)
+ deps = self.get_deps(current)
+ newdeps = set(deps).difference(examined)
+ if newdeps:
+ self.logger.debug("Packages: Package %s added "
+ "requirements %s" % (current, newdeps))
+ unclassified.update(newdeps)
+
+ satisfied_vpkgs = set()
+ for current in vpkgs:
+ # virtual dependencies, satisfied if one of N in the
+ # config, or can be forced if only one provider
+ if len(vpkg_cache[current]) == 1:
+ self.logger.debug("Packages: requirement %s satisfied by "
+ "%s" % (current,
+ vpkg_cache[current]))
+ unclassified.update(vpkg_cache[current].difference(examined))
+ satisfied_vpkgs.add(current)
+ else:
+ satisfiers = [item for item in vpkg_cache[current]
+ if item in packages]
+ self.logger.debug("Packages: requirement %s satisfied by "
+ "%s" % (current, satisfiers))
+ satisfied_vpkgs.add(current)
+ vpkgs.difference_update(satisfied_vpkgs)
+
+ satisfied_both = set()
+ for current in both:
+ # packages that are both have virtual providers as
+ # well as a package with that name. allow use of virt
+ # through explicit specification, then fall back to
+ # forcing current on last pass
+ satisfiers = [item for item in vpkg_cache[current]
+ if item in packages]
+ if satisfiers:
+ self.logger.debug("Packages: requirement %s satisfied by "
+ "%s" % (current, satisfiers))
+ satisfied_both.add(current)
+ elif current in packagelist or final_pass:
+ pkgs.add(current)
+ satisfied_both.add(current)
+ both.difference_update(satisfied_both)
+
+ if len(unclassified) + len(pkgs) == 0:
+ final_pass = True
+ else:
+ final_pass = False
+
+ self.filter_unknown(unknown)
+
+ return packages, unknown
+
+ def __len__(self):
+ return len(self.sources)
+
+ def __getitem__(self, item):
+ return self.sources[item]
+
+ def __setitem__(self, item, value):
+ self.sources[item] = value
+
+ def __delitem__(self, item):
+ del self.sources[item]
+
+ def append(self, item):
+ self.sources.append(item)
+
+ def count(self):
+ return self.sources.count()
+
+ def index(self, item):
+ return self.sources.index(item)
+
+ def extend(self, items):
+ self.sources.extend(items)
+
+ def insert(self, index, item):
+ self.sources.insert(index, item)
+
+ def pop(self, index=None):
+ self.sources.pop(index)
+
+ def remove(self, item):
+ self.sources.remove(item)
+
+ def reverse(self):
+ self.sources.reverse()
+
+ def sort(self, cmp=None, key=None, reverse=False):
+ self.sources.sort(cmp, key, reverse)
+
+def clear_cache():
+ global collections
+ collections = dict()
+
+def factory(metadata, sources, basepath):
+ global collections
+
+ if not sources.loaded:
+ # if sources.xml has not received a FAM event yet, defer;
+ # instantiate a dummy Collection object, but do not cache it
+ # in collections
+ return Collection(metadata, [], basepath)
+
+ sclasses = set()
+ relevant = list()
+
+ for source in sources:
+ if source.applies(metadata):
+ relevant.append(source)
+ sclasses.update([source.__class__])
+
+ # collections is a cache dict of Collection objects that is keyed
+ # off of the set of source urls that apply to each Collection
+ ckeydata = set()
+ for source in relevant:
+ ckeydata.update(source.urls)
+ ckey = tuple(sorted(list(ckeydata)))
+ if ckey not in collections:
+ if len(sclasses) > 1:
+ logger.warning("Multiple source types found for %s: %s" %
+ ",".join([s.__name__ for s in sclasses]))
+ cclass = Collection
+ elif len(sclasses) == 0:
+ logger.warning("No sources found for %s" % metadata.hostname)
+ cclass = Collection
+ else:
+ stype = sclasses.pop().__name__.replace("Source", "")
+ try:
+ module = \
+ getattr(__import__("Bcfg2.Server.Plugins.Packages.%s" %
+ stype.title()).Server.Plugins.Packages,
+ stype.title())
+ cclass = getattr(module, "%sCollection" % stype.title())
+ except ImportError:
+ logger.error("Unknown source type %s" % stype)
+ except AttributeError:
+ logger.warning("No collection class found for %s sources" %
+ stype)
+
+ logger.debug("Using %s for Collection of sources for %s" %
+ (cclass.__name__, metadata.hostname))
+
+ collection = cclass(metadata, relevant, basepath)
+ # reverse so that file order determines precedence
+ collection.reverse()
+ collections[ckey] = collection
+ return collections[ckey]
diff --git a/src/lib/Server/Plugins/Packages/Pac.py b/src/lib/Server/Plugins/Packages/Pac.py
new file mode 100644
index 000000000..8b75c1e1d
--- /dev/null
+++ b/src/lib/Server/Plugins/Packages/Pac.py
@@ -0,0 +1,122 @@
+import gzip
+import tarfile
+import logging
+from Bcfg2.Bcfg2Py3k import cPickle, file
+from Bcfg2.Server.Plugins.Packages.Collection import Collection
+from Bcfg2.Server.Plugins.Packages.Source import Source
+
+logger = logging.getLogger("Packages")
+
+class PacCollection(Collection):
+ def get_group(self, group):
+ self.logger.warning("Package groups are not supported by APT")
+ return []
+
+class PacSource(Source):
+ basegroups = ['arch', 'parabola']
+ ptype = 'pacman'
+
+ def __init__(self, basepath, xsource, config):
+ Source.__init__(self, basepath, xsource, config)
+ self.pkgnames = set()
+
+ self.url_map = [{'rawurl': self.rawurl, 'url': self.url,
+ 'version': self.version,
+ 'components': self.components, 'arches': self.arches}]
+
+ def save_state(self):
+ cache = file(self.cachefile, 'wb')
+ cPickle.dump((self.pkgnames, self.deps, self.provides),
+ cache, 2)
+ cache.close()
+
+ def load_state(self):
+ data = file(self.cachefile)
+ self.pkgnames, self.deps, self.provides = cPickle.load(data)
+
+ def filter_unknown(self, unknown):
+ filtered = set([u for u in unknown if u.startswith('choice')])
+ unknown.difference_update(filtered)
+
+ def get_urls(self):
+ if not self.rawurl:
+ rv = []
+ for part in self.components:
+ for arch in self.arches:
+ rv.append("%s%s/os/%s/%s.db.tar.gz" %
+ (self.url, part, arch, part))
+ return rv
+ else:
+ raise Exception("PacSource : RAWUrl not supported (yet)")
+ urls = property(get_urls)
+
+ def read_files(self):
+ bdeps = dict()
+ bprov = dict()
+
+ if self.recommended:
+ depfnames = ['Depends', 'Pre-Depends', 'Recommends']
+ else:
+ depfnames = ['Depends', 'Pre-Depends']
+
+ for fname in self.files:
+ if not self.rawurl:
+ barch = [x for x in fname.split('@') if x in self.arches][0]
+ else:
+ # RawURL entries assume that they only have one <Arch></Arch>
+ # element and that it is the architecture of the source.
+ barch = self.arches[0]
+
+ if barch not in bdeps:
+ bdeps[barch] = dict()
+ bprov[barch] = dict()
+ try:
+ print("try to read : " + fname)
+ tar = tarfile.open(fname, "r")
+ reader = gzip.GzipFile(fname)
+ except:
+ print("Failed to read file %s" % fname)
+ raise
+
+ for tarinfo in tar:
+ if tarinfo.isdir():
+ self.pkgnames.add(tarinfo.name.rsplit("-", 2)[0])
+ print("added : " + tarinfo.name.rsplit("-", 2)[0])
+ tar.close()
+
+ self.deps['global'] = dict()
+ self.provides['global'] = dict()
+ for barch in bdeps:
+ self.deps[barch] = dict()
+ self.provides[barch] = dict()
+ for pkgname in self.pkgnames:
+ pset = set()
+ for barch in bdeps:
+ if pkgname not in bdeps[barch]:
+ bdeps[barch][pkgname] = []
+ pset.add(tuple(bdeps[barch][pkgname]))
+ if len(pset) == 1:
+ self.deps['global'][pkgname] = pset.pop()
+ else:
+ for barch in bdeps:
+ self.deps[barch][pkgname] = bdeps[barch][pkgname]
+ provided = set()
+ for bprovided in list(bprov.values()):
+ provided.update(set(bprovided))
+ for prov in provided:
+ prset = set()
+ for barch in bprov:
+ if prov not in bprov[barch]:
+ continue
+ prset.add(tuple(bprov[barch].get(prov, ())))
+ if len(prset) == 1:
+ self.provides['global'][prov] = prset.pop()
+ else:
+ for barch in bprov:
+ self.provides[barch][prov] = bprov[barch].get(prov, ())
+ self.save_state()
+
+ def is_package(self, _, pkg):
+ return (pkg in self.pkgnames and
+ pkg not in self.blacklist and
+ (len(self.whitelist) == 0 or pkg in self.whitelist))
diff --git a/src/lib/Server/Plugins/Packages/PackagesConfig.py b/src/lib/Server/Plugins/Packages/PackagesConfig.py
new file mode 100644
index 000000000..1bb250007
--- /dev/null
+++ b/src/lib/Server/Plugins/Packages/PackagesConfig.py
@@ -0,0 +1,28 @@
+import os
+import logging
+from Bcfg2.Bcfg2Py3k import ConfigParser
+from Bcfg2.Server.Plugins.Packages import *
+
+logger = logging.getLogger('Packages')
+
+class PackagesConfig(Bcfg2.Server.Plugin.FileBacked,
+ ConfigParser.SafeConfigParser):
+ def __init__(self, filename, fam, packages):
+ Bcfg2.Server.Plugin.FileBacked.__init__(self, filename)
+ ConfigParser.SafeConfigParser.__init__(self)
+
+ self.fam = fam
+ # packages.conf isn't strictly necessary, so only set a
+ # monitor if it exists. if it gets added, that will require a
+ # server restart
+ if os.path.exists(self.name):
+ self.fam.AddMonitor(self.name, self)
+
+ self.pkg_obj = packages
+
+ def Index(self):
+ """ Build local data structures """
+ for section in self.sections():
+ self.remove_section(section)
+ self.read(self.name)
+ self.pkg_obj.Reload()
diff --git a/src/lib/Server/Plugins/Packages/PackagesSources.py b/src/lib/Server/Plugins/Packages/PackagesSources.py
new file mode 100644
index 000000000..5f82deb1f
--- /dev/null
+++ b/src/lib/Server/Plugins/Packages/PackagesSources.py
@@ -0,0 +1,66 @@
+import os
+import sys
+import lxml.etree
+import logging
+import Bcfg2.Server.Plugin
+
+logger = logging.getLogger("Packages")
+
+
+class PackagesSources(Bcfg2.Server.Plugin.SingleXMLFileBacked,
+ Bcfg2.Server.Plugin.StructFile):
+ __identifier__ = None
+
+ def __init__(self, filename, cachepath, fam, packages, config):
+ try:
+ Bcfg2.Server.Plugin.SingleXMLFileBacked.__init__(self,
+ filename,
+ fam)
+ except OSError:
+ err = sys.exc_info()[1]
+ msg = "Packages: Failed to read configuration file: %s" % err
+ if not os.path.exists(self.name):
+ msg += " Have you created it?"
+ logger.error(msg)
+ raise Bcfg2.Server.Plugin.PluginInitError(msg)
+ Bcfg2.Server.Plugin.StructFile.__init__(self, filename)
+ self.cachepath = cachepath
+ self.config = config
+ if not os.path.exists(self.cachepath):
+ # create cache directory if needed
+ os.makedirs(self.cachepath)
+ self.pkg_obj = packages
+ self.loaded = False
+
+ def Index(self):
+ Bcfg2.Server.Plugin.SingleXMLFileBacked.Index(self)
+ self.entries = []
+ for xsource in self.xdata.findall('.//Source'):
+ source = self.source_from_xml(xsource)
+ if source is not None:
+ self.entries.append(source)
+
+ self.pkg_obj.Reload()
+ self.loaded = True
+
+ def source_from_xml(self, xsource):
+ """ create a *Source object from its XML representation in
+ sources.xml """
+ stype = xsource.get("type")
+ if stype is None:
+ logger.error("No type specified for source, skipping")
+ return None
+
+ try:
+ module = getattr(__import__("Bcfg2.Server.Plugins.Packages.%s" %
+ stype.title()).Server.Plugins.Packages,
+ stype.title())
+ cls = getattr(module, "%sSource" % stype.title())
+ except (ImportError, AttributeError):
+ logger.error("Unknown source type %s" % stype)
+ return None
+
+ return cls(self.cachepath, xsource, self.config)
+
+ def __getitem__(self, key):
+ return self.entries[key]
diff --git a/src/lib/Server/Plugins/Packages/Source.py b/src/lib/Server/Plugins/Packages/Source.py
new file mode 100644
index 000000000..255f3ea7a
--- /dev/null
+++ b/src/lib/Server/Plugins/Packages/Source.py
@@ -0,0 +1,262 @@
+import os
+import re
+import sys
+import base64
+import logging
+from Bcfg2.Bcfg2Py3k import HTTPError, HTTPBasicAuthHandler, \
+ HTTPPasswordMgrWithDefaultRealm, install_opener, build_opener, \
+ urlopen, file, cPickle
+
+try:
+ from hashlib import md5
+except ImportError:
+ import md5
+
+logger = logging.getLogger('Packages')
+
def fetch_url(url):
    """ Return the raw body of *url*.

    URLs of the form scheme://user:password@host/path have the
    credentials stripped out and installed in an HTTP basic-auth
    opener before the request is made.

    :raises ValueError: the URL contains '@' but does not parse as a
                        credentialed URL
    urllib errors (e.g. HTTPError) propagate to the caller.
    """
    if '@' in url:
        # raw string: '\w' in a non-raw literal is an invalid escape
        mobj = re.match(r'(\w+://)([^:]+):([^@]+)@(.*)$', url)
        if not mobj:
            raise ValueError("Unsupported URL format %s" % url)
        user = mobj.group(2)
        passwd = mobj.group(3)
        url = mobj.group(1) + mobj.group(4)
        auth = HTTPBasicAuthHandler(HTTPPasswordMgrWithDefaultRealm())
        auth.add_password(None, url, user, passwd)
        # NOTE: install_opener mutates global urllib state; subsequent
        # urlopen() calls anywhere in the process use this opener
        install_opener(build_opener(auth))
    return urlopen(url).read()
+
+
class Source(object):
    """ Base class for a single package repository, built from one
    <Source> element in sources.xml.  Subclasses (e.g. YumSource)
    implement the format-specific metadata download and parsing; this
    class provides the common XML parsing, caching, and
    client-applicability logic. """

    # pulls a repository name out of the last path component of a URL
    reponame_re = re.compile(r'.*/(?:RPMS\.)?([^/]+)')
    # 'magic' groups that mark a client as using this source type;
    # overridden by subclasses
    basegroups = []

    def __init__(self, basepath, xsource, config):
        """
        :param basepath: directory where cached metadata is stored
        :param xsource: the lxml <Source> element
        :param config: ConfigParser object for packages.conf
        """
        self.basepath = basepath
        self.xsource = xsource
        self.config = config

        try:
            self.version = xsource.find('Version').text
        except AttributeError:
            # no <Version> child element present
            pass

        # list-valued child elements
        for key, tag in [('components', 'Component'), ('arches', 'Arch'),
                         ('blacklist', 'Blacklist'),
                         ('whitelist', 'Whitelist')]:
            self.__dict__[key] = [item.text for item in xsource.findall(tag)]

        self.gpgkeys = [el.text for el in xsource.findall("GPGKey")]

        self.recommended = xsource.get('recommended', 'false').lower() == 'true'

        # normalize URLs so they always end with a slash
        self.rawurl = xsource.get('rawurl', '')
        if self.rawurl and not self.rawurl.endswith("/"):
            self.rawurl += "/"
        self.url = xsource.get('url', '')
        if self.url and not self.url.endswith("/"):
            self.url += "/"
        # NOTE(review): this unconditionally overwrites any value read
        # from the <Version> child element above -- confirm intended
        self.version = xsource.get('version', '')

        # build the set of conditions to see if this source applies to
        # a given set of metadata
        self.conditions = []
        self.groups = [] # provided for some limited backwards compat
        for el in xsource.iterancestors():
            if el.tag == "Group":
                if el.get("negate", "false").lower() == "true":
                    self.conditions.append(lambda m, el=el:
                                           el.get("name") not in m.groups)
                else:
                    self.groups.append(el.get("name"))
                    self.conditions.append(lambda m, el=el:
                                           el.get("name") in m.groups)
            elif el.tag == "Client":
                if el.get("negate", "false").lower() == "true":
                    self.conditions.append(lambda m, el=el:
                                           el.get("name") != m.hostname)
                else:
                    self.conditions.append(lambda m, el=el:
                                           el.get("name") == m.hostname)

        # arch-keyed dependency and provider maps; populated by
        # subclasses in read_files()/load_state()
        self.deps = dict()
        self.provides = dict()

        self.cachefile = os.path.join(self.basepath,
                                      "cache-%s" % self.cachekey)
        self.url_map = []

    @property
    def cachekey(self):
        """ Unique key for this source's cache file, derived from the
        attributes that determine its contents. """
        return md5(cPickle.dumps([self.version, self.components, self.url,
                                  self.rawurl, self.arches])).hexdigest()

    def get_relevant_groups(self, metadata):
        """ Return the sorted, de-duplicated list of the client's
        groups that are relevant to this source (magic base groups,
        ancestor groups from sources.xml, or arches). """
        return sorted(list(set([g for g in metadata.groups
                                if (g in self.basegroups or
                                    g in self.groups or
                                    g in self.arches)])))

    def load_state(self):
        # overridden by subclasses to load the pickled cache file
        pass

    def setup_data(self, force_update=False):
        """ Load cached repo metadata, falling back to re-reading the
        downloaded files, falling back to downloading them.  With
        force_update, always re-download and re-read. """
        should_read = True
        should_download = False
        if os.path.exists(self.cachefile):
            try:
                self.load_state()
                should_read = False
            except:
                # NOTE(review): bare except -- any load failure (even
                # KeyboardInterrupt) falls back to a file read
                logger.error("Cachefile %s load failed; "
                             "falling back to file read" % self.cachefile)
        if should_read:
            try:
                self.read_files()
            except:
                logger.error("Packages: File read failed; "
                             "falling back to file download")
                should_download = True

        if should_download or force_update:
            try:
                self.update()
                self.read_files()
            except:
                logger.error("Failed to update source", exc_info=1)

    def get_repo_name(self, url_map):
        # try to find a sensible name for a repo
        match = self.reponame_re.search(url_map['url'])
        if url_map['component']:
            return url_map['component']
        elif match:
            return match.group(1)
        else:
            # couldn't figure out the name from the URL or URL map
            # (which probably means its a screwy URL), so we just
            # generate a random one
            # NOTE(review): raises IndexError if self.groups is empty
            name = base64.b64encode(os.urandom(16))[:-2]
            return "%s-%s" % (self.groups[0], name)

    def __str__(self):
        if self.rawurl:
            return "%s at %s" % (self.__class__.__name__, self.rawurl)
        elif self.url:
            return "%s at %s" % (self.__class__.__name__, self.url)
        else:
            return self.__class__.__name__

    def get_urls(self):
        # overridden by subclasses to return all metadata URLs
        return []
    urls = property(get_urls)

    def get_files(self):
        # local cache filenames corresponding to the metadata URLs
        return [self.escape_url(url) for url in self.urls]
    files = property(get_files)

    def get_vpkgs(self, metadata):
        """ Return a dict of virtual package name -> set of providing
        packages, merged across the arches relevant to the client. """
        agroups = ['global'] + [a for a in self.arches
                                if a in metadata.groups]
        vdict = dict()
        for agrp in agroups:
            for key, value in list(self.provides[agrp].items()):
                if key not in vdict:
                    vdict[key] = set(value)
                else:
                    vdict[key].update(value)
        return vdict

    def is_virtual_package(self, metadata, package):
        """ called to determine if a package is a virtual package.
        this is only invoked if the package is not listed in the dict
        returned by get_vpkgs """
        return False

    def escape_url(self, url):
        # flatten a URL into a single cache filename component
        return os.path.join(self.basepath, url.replace('/', '@'))

    def file_init(self):
        # hook for subclasses
        pass

    def read_files(self):
        # overridden by subclasses to parse the downloaded metadata
        pass

    def filter_unknown(self, unknown):
        # overridden by subclasses to weed out false "unknown" entries
        pass

    def update(self):
        """ Download this source's metadata files into the local
        cache, skipping (with an error logged) URLs that are malformed
        or fail to fetch. """
        for url in self.urls:
            logger.info("Packages: Updating %s" % url)
            fname = self.escape_url(url)
            try:
                data = fetch_url(url)
            except ValueError:
                logger.error("Packages: Bad url string %s" % url)
                continue
            except HTTPError:
                err = sys.exc_info()[1]
                logger.error("Packages: Failed to fetch url %s. code=%s" %
                             (url, err.code))
                continue
            file(fname, 'w').write(data)

    def applies(self, metadata):
        """ Return True if this source applies to the given client
        metadata. """
        # check base groups
        if not self.magic_groups_match(metadata):
            return False

        # check Group/Client tags from sources.xml
        for condition in self.conditions:
            if not condition(metadata):
                return False

        return True

    def get_arches(self, metadata):
        # keys of the arch-indexed maps relevant to this client
        return ['global'] + [a for a in self.arches if a in metadata.groups]

    def get_deps(self, metadata, pkgname):
        """ Return the dependency list for pkgname, searching the
        client's arches in order; empty list if unknown. """
        for arch in self.get_arches(metadata):
            if pkgname in self.deps[arch]:
                return self.deps[arch][pkgname]
        return []

    def get_provides(self, metadata, required):
        """ Return the packages that provide the given symbol for
        this client; empty list if none. """
        for arch in self.get_arches(metadata):
            if required in self.provides[arch]:
                return self.provides[arch][required]
        return []

    def is_package(self, metadata, _):
        # overridden by subclasses
        return False

    def get_package(self, metadata, package):
        # hook for subclasses to translate package names
        return package

    def get_group(self, metadata, package):
        # hook for subclasses that support package groups
        return []

    def magic_groups_match(self, metadata):
        """ check to see if this source applies to the given host
        metadata by checking 'magic' (base) groups only, or if magic
        groups are off """
        # we always check that arch matches
        found_arch = False
        for arch in self.arches:
            if arch in metadata.groups:
                found_arch = True
                break
        if not found_arch:
            return False

        if (self.config.has_section("global") and
            self.config.has_option("global", "magic_groups") and
            self.config.getboolean("global", "magic_groups") == False):
            # magic group checking disabled in packages.conf
            return True
        else:
            for group in self.basegroups:
                if group in metadata.groups:
                    return True
        return False
diff --git a/src/lib/Server/Plugins/Packages/Yum.py b/src/lib/Server/Plugins/Packages/Yum.py
new file mode 100644
index 000000000..fa0dc527e
--- /dev/null
+++ b/src/lib/Server/Plugins/Packages/Yum.py
@@ -0,0 +1,950 @@
+import os
+import sys
+import time
+import copy
+import glob
+import socket
+import random
+import logging
+import threading
+import lxml.etree
+from UserDict import DictMixin
+import Bcfg2.Server.Plugin
+from Bcfg2.Bcfg2Py3k import StringIO, cPickle, HTTPError, ConfigParser, file
+from Bcfg2.Server.Plugins.Packages.Collection import Collection
+from Bcfg2.Server.Plugins.Packages.Source import Source, fetch_url
+
+logger = logging.getLogger("Packages")
+
+try:
+ from pulp.client.consumer.config import ConsumerConfig
+ from pulp.client.api.repository import RepositoryAPI
+ from pulp.client.api.consumer import ConsumerAPI
+ from pulp.client.api import server
+ has_pulp = True
+except ImportError:
+ has_pulp = False
+
+try:
+ import yum
+ has_yum = True
+except ImportError:
+ has_yum = False
+ logger.info("No yum libraries found; forcing use of internal dependency "
+ "resolver")
+
+XP = '{http://linux.duke.edu/metadata/common}'
+RP = '{http://linux.duke.edu/metadata/rpm}'
+RPO = '{http://linux.duke.edu/metadata/repo}'
+FL = '{http://linux.duke.edu/metadata/filelists}'
+
+PULPSERVER = None
+PULPCONFIG = None
+
def _setup_pulp(config):
    """ Initialize the module-global Pulp server connection from the
    [pulp] section of packages.conf.  Idempotent: the connection is
    only created on the first call; later calls return the existing
    PULPSERVER.

    :param config: ConfigParser object for packages.conf
    :raises PluginInitError: Pulp libraries missing, or [pulp]
                             section/options absent
    """
    global PULPSERVER, PULPCONFIG
    if not has_pulp:
        logger.error("Cannot create Pulp collection: Pulp libraries not "
                     "found")
        raise Bcfg2.Server.Plugin.PluginInitError

    if PULPSERVER is None:
        try:
            username = config.get("pulp", "username")
            password = config.get("pulp", "password")
        except ConfigParser.NoSectionError:
            logger.error("No [pulp] section found in Packages/packages.conf")
            raise Bcfg2.Server.Plugin.PluginInitError
        except ConfigParser.NoOptionError:
            err = sys.exc_info()[1]
            logger.error("Required option not found in "
                         "Packages/packages.conf: %s" % err)
            raise Bcfg2.Server.Plugin.PluginInitError

        # server host/port/scheme/path come from the Pulp consumer
        # config, not from packages.conf
        PULPCONFIG = ConsumerConfig()
        serveropts = PULPCONFIG.server

        PULPSERVER = server.PulpServer(serveropts['host'],
                                       int(serveropts['port']),
                                       serveropts['scheme'],
                                       serveropts['path'])
        PULPSERVER.set_basic_auth_credentials(username, password)
        server.set_active_server(PULPSERVER)
    return PULPSERVER
+
+
class CacheItem(object):
    """ A single cache entry: a value plus an optional absolute
    expiration time.

    :param value: the cached value
    :param expiration: lifetime in seconds, or None/0 to never expire
    """

    def __init__(self, value, expiration=None):
        self.value = value
        # bug fix: always assign the attribute.  The original only set
        # self.expiration when an expiration was given, so expired()
        # raised AttributeError for non-expiring items.
        if expiration:
            self.expiration = time.time() + expiration
        else:
            self.expiration = None

    def expired(self):
        """ Return True if this entry has an expiration time that has
        passed; non-expiring entries are never expired. """
        if self.expiration:
            return time.time() > self.expiration
        else:
            return False
+
+
class Cache(DictMixin):
    """ A dict-like cache whose entries expire after a configurable
    lifetime.  Expired entries are purged lazily as they are accessed,
    plus (optionally) in periodic full sweeps. """

    def __init__(self, expiration=None, tidy=None):
        """ params:
        - expiration: How many seconds a cache entry stays alive for.
          Specify None for no expiration.
        - tidy: How frequently to tidy the cache (remove all expired
          entries).  Without this, entries are only expired as they
          are accessed.  Cache will be tidied once per every <tidy>
          accesses to cache data; a sensible value might be, e.g.,
          10000.  Specify 0 to fully tidy the cache every access; this
          makes the cache much slower, but also smaller in memory.
          Specify None to never tidy the cache; this makes the cache
          faster, but potentially much larger in memory, especially if
          cache items are accessed infrequently."""
        self.cache = dict()
        self.expiration = expiration
        self.tidy = tidy
        self.access_count = 0

    def __getitem__(self, key):
        self._expire(key)
        if key in self.cache:
            return self.cache[key].value
        else:
            raise KeyError(key)

    def __setitem__(self, key, value):
        self.cache[key] = CacheItem(value, self.expiration)

    def __delitem__(self, key):
        del self.cache[key]

    def __contains__(self, key):
        # bug fix: was self.expire(key), a method that does not exist
        # (AttributeError on every membership test)
        self._expire(key)
        return key in self.cache

    def keys(self):
        return self.cache.keys()

    def __iter__(self):
        for k in self.cache.keys():
            try:
                yield k
            except KeyError:
                # entry expired/removed during iteration
                pass

    def iteritems(self):
        for k in self:
            try:
                yield (k, self[k])
            except KeyError:
                pass

    def _expire(self, *args):
        """ Remove expired entries.  With key arguments, only those
        keys are checked (and the periodic full tidy is triggered when
        due); with no arguments, the whole cache is swept. """
        if args:
            self.access_count += 1
            # bug fix: guard against tidy=None ("never tidy"); the
            # original compared an int to None, which tidied every
            # access on py2 and raises TypeError on py3
            if self.tidy is not None and self.access_count >= self.tidy:
                self.access_count = 0
                candidates = self.cache.items()
            else:
                # bug fix: skip keys not present in the cache; the
                # original raised KeyError from __contains__ when the
                # tested key was absent
                candidates = [(k, self.cache[k]) for k in args
                              if k in self.cache]
        else:
            candidates = self.cache.items()

        expire = []
        for key, item in candidates:
            if item.expired():
                expire.append(key)
        for key in expire:
            del self.cache[key]

    def clear(self):
        """ Drop every entry. """
        self.cache = dict()
+
+
class YumCollection(Collection):
    """ Collection subclass for Yum/RPM repositories.  Generates yum
    configs, distributes GPG keys, registers Pulp consumers, and --
    when use_yum_libraries is enabled in packages.conf -- resolves
    dependencies with the real yum libraries instead of Bcfg2's
    internal resolver. """

    def __init__(self, metadata, sources, basepath):
        Collection.__init__(self, metadata, sources, basepath)
        self.keypath = os.path.join(self.basepath, "keys")

        if len(sources):
            # all sources share one packages.conf parser
            config = sources[0].config
            self.use_yum = has_yum
            try:
                self.use_yum &= config.getboolean("yum", "use_yum_libraries")
            except (ConfigParser.NoOptionError, ConfigParser.NoSectionError):
                self.use_yum = False
        else:
            self.use_yum = False

        if self.use_yum:
            self._yb = None
            # NOTE(review): self.cachepath, self.cachekey and
            # self.config are presumably provided by the Collection
            # base class -- confirm
            self.cachefile = os.path.join(self.cachepath,
                                          "cache-%s" % self.cachekey)
            if not os.path.exists(self.cachefile):
                os.mkdir(self.cachefile)

            self.configdir = os.path.join(self.basepath, "yum")
            if not os.path.exists(self.configdir):
                os.mkdir(self.configdir)
            self.cfgfile = os.path.join(self.configdir,
                                        "%s-yum.conf" % self.cachekey)
            if self.config.has_option("yum", "metadata_expire"):
                cache_expire = self.config.getint("yum", "metadata_expire")
            else:
                # default: 6 hours
                cache_expire = 21600

            self.pkgs_cache = Cache(expiration=cache_expire)
            self.deps_cache = Cache(expiration=cache_expire)
            self.vpkgs_cache = Cache(expiration=cache_expire)
            self.group_cache = Cache(expiration=cache_expire)
            self.pkgset_cache = Cache(expiration=cache_expire)

        if has_pulp:
            _setup_pulp(self.config)

    @property
    def yumbase(self):
        """ if we try to access a Yum SQLitePackageSack object in a
        different thread from the one it was created in, we get a
        nasty error.  but I can't find a way to detect when a new
        thread is started (which happens for every new client
        connection, I think), so this property creates a new YumBase
        object if the old YumBase object was created in a different
        thread than the current one.  (I definitely don't want to
        create a new YumBase object every time it's used, because that
        involves writing a temp file, at least for now.) """
        if not self.use_yum:
            self._yb = None
            self._yb_thread = None
        elif (self._yb is None or
              self._yb_thread != threading.current_thread().ident):
            self._yb = yum.YumBase()
            self._yb_thread = threading.current_thread().ident

            if not os.path.exists(self.cfgfile):
                # todo: detect yum version.  Supposedly very new
                # versions of yum have better support for
                # reconfiguring on the fly using the RepoStorage API
                yumconf = self.get_config(raw=True)
                yumconf.add_section("main")

                mainopts = dict(cachedir=self.cachefile,
                                keepcache="0",
                                sslverify="0",
                                reposdir="/dev/null")
                try:
                    for opt in self.config.options("yum"):
                        if opt != "use_yum_libraries":
                            mainopts[opt] = self.config.get("yum", opt)
                except ConfigParser.NoSectionError:
                    pass

                for opt, val in list(mainopts.items()):
                    yumconf.set("main", opt, val)

                yumconf.write(open(self.cfgfile, 'w'))

            # it'd be nice if we could change this to be more verbose
            # if -v was given, but Collection objects don't get setup.
            # It'd also be nice if we could tell yum to log to syslog,
            # but so would a unicorn.
            self._yb.preconf.debuglevel = 1
            self._yb.preconf.fn = self.cfgfile
        return self._yb

    def get_config(self, raw=False):
        """ Build the yum repo configuration for this client.  With
        raw=True return the ConfigParser object; otherwise return the
        rendered config file as a string. """
        config = ConfigParser.SafeConfigParser()
        for source in self.sources:
            # get_urls() loads url_map as a side-effect
            source.get_urls()
            for url_map in source.url_map:
                if url_map['arch'] in self.metadata.groups:
                    reponame = source.get_repo_name(url_map)
                    config.add_section(reponame)
                    config.set(reponame, "name", reponame)
                    config.set(reponame, "baseurl", url_map['url'])
                    config.set(reponame, "enabled", "1")
                    if len(source.gpgkeys):
                        config.set(reponame, "gpgcheck", "1")
                        config.set(reponame, "gpgkey",
                                   " ".join(source.gpgkeys))
                    else:
                        config.set(reponame, "gpgcheck", "0")

                    if len(source.blacklist):
                        config.set(reponame, "exclude",
                                   " ".join(source.blacklist))
                    if len(source.whitelist):
                        config.set(reponame, "includepkgs",
                                   " ".join(source.whitelist))

        if raw:
            return config
        else:
            # configparser only writes to file, so we have to use a
            # StringIO object to get the data out as a string
            buf = StringIO()
            config.write(buf)
            return "# This config was generated automatically by the Bcfg2 " \
                   "Packages plugin\n\n" + buf.getvalue()

    def build_extra_structures(self, independent):
        """ build list of gpg keys to be added to the specification by
        validate_structures() """
        needkeys = set()
        for source in self.sources:
            for key in source.gpgkeys:
                needkeys.add(key)

        if len(needkeys):
            keypkg = lxml.etree.Element('BoundPackage', name="gpg-pubkey",
                                        type=self.ptype, origin='Packages')

            for key in needkeys:
                # figure out the path of the key on the client
                try:
                    keydir = self.config.get("global", "gpg_keypath")
                except (ConfigParser.NoOptionError,
                        ConfigParser.NoSectionError):
                    keydir = "/etc/pki/rpm-gpg"
                remotekey = os.path.join(keydir, os.path.basename(key))
                localkey = os.path.join(self.keypath, os.path.basename(key))
                kdata = open(localkey).read()

                # copy the key to the client
                keypath = lxml.etree.Element("BoundPath", name=remotekey,
                                             encoding='ascii',
                                             owner='root', group='root',
                                             type='file', perms='0644',
                                             important='true')
                keypath.text = kdata

                # hook to add version/release info if possible
                self._add_gpg_instances(keypkg, kdata, localkey, remotekey)
                independent.append(keypath)
            independent.append(keypkg)

        # see if there are any pulp sources to handle
        has_pulp_sources = False
        for source in self.sources:
            if source.pulp_id:
                has_pulp_sources = True
                break

        if has_pulp_sources:
            consumerapi = ConsumerAPI()
            consumer = self._get_pulp_consumer(consumerapi=consumerapi)
            if consumer is None:
                consumer = consumerapi.create(self.metadata.hostname,
                                              self.metadata.hostname)
                lxml.etree.SubElement(independent, "BoundAction",
                                      name="pulp-update", timing="pre",
                                      when="always", status="check",
                                      command="pulp-consumer consumer update")

            for source in self.sources:
                # each pulp source can only have one arch, so we don't
                # have to check the arch in url_map
                if (source.pulp_id and
                    source.pulp_id not in consumer['repoids']):
                    consumerapi.bind(self.metadata.hostname, source.pulp_id)

            crt = lxml.etree.SubElement(independent, "BoundPath",
                                        name="/etc/pki/consumer/cert.pem",
                                        type="file", owner="root",
                                        group="root", perms="0644")
            crt.text = consumerapi.certificate(self.metadata.hostname)

    def _get_pulp_consumer(self, consumerapi=None):
        """ Look up this client's Pulp consumer record; return None if
        it does not exist or the server cannot be reached. """
        if consumerapi is None:
            consumerapi = ConsumerAPI()
        consumer = None
        try:
            consumer = consumerapi.consumer(self.metadata.hostname)
        except server.ServerRequestError:
            # consumer does not exist
            pass
        except socket.error:
            err = sys.exc_info()[1]
            logger.error("Could not contact Pulp server: %s" % err)
        except:
            err = sys.exc_info()[1]
            logger.error("Unknown error querying Pulp server: %s" % err)
        return consumer

    def _add_gpg_instances(self, keyentry, keydata, localkey, remotekey):
        """ add gpg keys to the specification to ensure they get
        installed """
        if self.use_yum:
            try:
                kinfo = yum.misc.getgpgkeyinfo(keydata)
                version = yum.misc.keyIdToRPMVer(kinfo['keyid'])
                release = yum.misc.keyIdToRPMVer(kinfo['timestamp'])

                lxml.etree.SubElement(keyentry, 'Instance',
                                      version=version,
                                      release=release,
                                      simplefile=remotekey)
            except ValueError:
                err = sys.exc_info()[1]
                self.logger.error("Could not read GPG key %s: %s" %
                                  (localkey, err))

    def is_package(self, package):
        """ Return whether *package* names a real (non-virtual)
        package, using the yum sack when available. """
        if not self.use_yum:
            return Collection.is_package(self, package)

        if isinstance(package, tuple):
            if package[1] is None and package[2] == (None, None, None):
                # unversioned requirement tuple; treat as plain name
                package = package[0]
            else:
                return None

        try:
            return self.pkgs_cache[package]
        except KeyError:
            pass

        self.pkgs_cache[package] = bool(self.get_package_object(package,
                                                                silent=True))
        return self.pkgs_cache[package]

    def is_virtual_package(self, package):
        """ Return whether *package* is provided by some real
        package. """
        if self.use_yum:
            try:
                return bool(self.vpkgs_cache[package])
            except KeyError:
                return bool(self.get_provides(package, silent=True))
        else:
            return Collection.is_virtual_package(self, package)

    def get_package_object(self, package, silent=False):
        """ package objects cannot be cached since they are sqlite
        objects, so they can't be reused between threads. """
        try:
            matches = self.yumbase.pkgSack.returnNewestByName(name=package)
        except yum.Errors.PackageSackError:
            if not silent:
                self.logger.warning("Packages: Package '%s' not found" %
                                    self.get_package_name(package))
            matches = []
        except yum.Errors.RepoError:
            err = sys.exc_info()[1]
            self.logger.error("Packages: Temporary failure loading metadata "
                              "for '%s': %s" %
                              (self.get_package_name(package), err))
            matches = []

        pkgs = self._filter_arch(matches)
        if pkgs:
            return pkgs[0]
        else:
            return None

    def get_deps(self, package):
        """ Return the set of requirements of *package* (minus
        self-provided ones), cached. """
        if not self.use_yum:
            return Collection.get_deps(self, package)

        try:
            return self.deps_cache[package]
        except KeyError:
            pass

        pkg = self.get_package_object(package)
        deps = []
        if pkg:
            deps = set(pkg.requires)
            # filter out things the package itself provides
            deps.difference_update([dep for dep in deps
                                    if pkg.checkPrco('provides', dep)])
        else:
            self.logger.error("Packages: No package available: %s" %
                              self.get_package_name(package))
        self.deps_cache[package] = deps
        return self.deps_cache[package]

    def get_provides(self, required, all=False, silent=False):
        """ Return the (best) packages providing *required*, cached.
        With all=True, skip best-provider filtering. """
        if not self.use_yum:
            # bug fix: the fallback referenced an undefined name
            # 'package'; the parameter is 'required'
            return Collection.get_provides(self, required)

        if not isinstance(required, tuple):
            # normalize a bare name into an unversioned prco tuple
            required = (required, None, (None, None, None))

        try:
            return self.vpkgs_cache[required]
        except KeyError:
            pass

        try:
            prov = \
                self.yumbase.whatProvides(*required).returnNewestByNameArch()
        except yum.Errors.NoMoreMirrorsRepoError:
            err = sys.exc_info()[1]
            self.logger.error("Packages: Temporary failure loading metadata "
                              "for '%s': %s" %
                              (self.get_package_name(required),
                               err))
            self.vpkgs_cache[required] = None
            return []

        if prov and not all:
            prov = self._filter_provides(required, prov)
        elif not prov and not silent:
            self.logger.error("Packages: No package provides %s" %
                              self.get_package_name(required))
        self.vpkgs_cache[required] = prov
        return self.vpkgs_cache[required]

    def get_group(self, group):
        """ Return the package list for a yum package group (with or
        without a leading '@'), cached. """
        if not self.use_yum:
            self.logger.warning("Package groups are not supported by Bcfg2's "
                                "internal Yum dependency generator")
            return []

        if group.startswith("@"):
            group = group[1:]

        try:
            return self.group_cache[group]
        except KeyError:
            pass

        try:
            if self.yumbase.comps.has_group(group):
                pkgs = self.yumbase.comps.return_group(group).packages
            else:
                self.logger.warning("Packages: '%s' is not a valid group" %
                                    group)
                pkgs = []
        except yum.Errors.GroupsError:
            err = sys.exc_info()[1]
            self.logger.warning("Packages: %s" % err)
            pkgs = []

        self.group_cache[group] = pkgs
        return self.group_cache[group]

    def _filter_provides(self, package, providers):
        providers = [pkg for pkg in self._filter_arch(providers)]
        if len(providers) > 1:
            # go through each provider and make sure it's the newest
            # package of its name available.  If we have multiple
            # providers, avoid installing old packages.
            #
            # For instance: on Fedora 14,
            # perl-Sub-WrapPackages-2.0-2.fc14 erroneously provided
            # perl(lib), which should not have been provided;
            # perl(lib) is provided by the "perl" package.  The bogus
            # provide was removed in perl-Sub-WrapPackages-2.0-4.fc14,
            # but if we just queried to resolve the "perl(lib)"
            # dependency, we'd get both packages.  By performing this
            # check, we learn that there's a newer
            # perl-Sub-WrapPackages available, so it can't be the best
            # provider of perl(lib).
            rv = []
            for pkg in providers:
                if self.get_package_object(pkg.name) == pkg:
                    rv.append(pkg)
        else:
            rv = providers
        return [p.name for p in rv]

    def _filter_arch(self, packages):
        groups = set(list(self.get_relevant_groups()) + ["noarch"])
        matching = [pkg for pkg in packages if pkg.arch in groups]
        if matching:
            return matching
        else:
            # no packages match architecture; we'll assume that the
            # user knows what s/he is doing and this is a multiarch
            # box.
            return packages

    def get_package_name(self, package):
        """ get the name of a package or virtual package from the
        internal representation used by this Collection class """
        if self.use_yum and isinstance(package, tuple):
            return yum.misc.prco_tuple_to_string(package)
        else:
            return str(package)

    def complete(self, packagelist):
        """ Resolve *packagelist* into the full (packages, unknown)
        closure, caching by the sorted package list. """
        if not self.use_yum:
            return Collection.complete(self, packagelist)

        cachekey = cPickle.dumps(sorted(packagelist))
        try:
            return self.pkgset_cache[cachekey]
        except KeyError:
            pass

        packages = set()
        pkgs = set(packagelist)
        requires = set()
        satisfied = set()
        unknown = set()
        final_pass = False

        while requires or pkgs:
            # infinite loop protection
            start_reqs = len(requires)

            while pkgs:
                package = pkgs.pop()
                if package in packages:
                    continue

                if not self.is_package(package):
                    # try this package out as a requirement
                    requires.add((package, None, (None, None, None)))
                    continue

                packages.add(package)
                reqs = set(self.get_deps(package)).difference(satisfied)
                if reqs:
                    requires.update(reqs)

            reqs_satisfied = set()
            for req in requires:
                if req in satisfied:
                    reqs_satisfied.add(req)
                    continue

                if req[1] is None and self.is_package(req[0]):
                    if req[0] not in packages:
                        pkgs.add(req[0])
                    reqs_satisfied.add(req)
                    continue

                self.logger.debug("Packages: Handling requirement '%s'" %
                                  self.get_package_name(req))
                providers = list(set(self.get_provides(req)))
                if len(providers) > 1:
                    # hopefully one of the providing packages is already
                    # included
                    best = [p for p in providers if p in packages]
                    if best:
                        providers = best
                    else:
                        # pick a provider whose name matches the requirement
                        best = [p for p in providers if p == req[0]]
                        if len(best) == 1:
                            providers = best
                        elif not final_pass:
                            # found no "best" package, so defer
                            providers = None
                        # else: found no "best" package, but it's the
                        # final pass, so include them all

                if providers:
                    self.logger.debug("Packages: Requirement '%s' satisfied "
                                      "by %s" %
                                      (self.get_package_name(req),
                                       ",".join([self.get_package_name(p)
                                                 for p in providers])))
                    newpkgs = set(providers).difference(packages)
                    if newpkgs:
                        for package in newpkgs:
                            if self.is_package(package):
                                pkgs.add(package)
                            else:
                                unknown.add(package)
                    reqs_satisfied.add(req)
                elif providers is not None:
                    # nothing provided this requirement at all
                    unknown.add(req)
                    reqs_satisfied.add(req)
                # else, defer
            requires.difference_update(reqs_satisfied)

            # infinite loop protection
            if len(requires) == start_reqs and len(pkgs) == 0:
                final_pass = True

            if final_pass and requires:
                unknown.update(requires)
                requires = set()

        self.filter_unknown(unknown)
        unknown = [self.get_package_name(p) for p in unknown]

        self.pkgset_cache[cachekey] = (packages, unknown)

        return packages, unknown

    def setup_data(self, force_update=False):
        """ Reset yum state and caches; with force_update also expire
        yum's own metadata caches. """
        if not self.use_yum:
            return Collection.setup_data(self, force_update)

        # the rendered yum config is regenerated by the yumbase
        # property on next use
        for cfile in glob.glob(os.path.join(self.configdir, "*-yum.conf")):
            os.unlink(cfile)
        self._yb = None

        self.pkgs_cache.clear()
        self.deps_cache.clear()
        self.vpkgs_cache.clear()
        self.group_cache.clear()
        self.pkgset_cache.clear()

        if force_update:
            for mdtype in ["Headers", "Packages", "Sqlite", "Metadata",
                           "ExpireCache"]:
                # for reasons that are entirely obvious, all of the
                # yum API clean* methods return a tuple of 0 (zero,
                # always zero) and a list containing a single message
                # about how many files were deleted.  so useful.
                # thanks, yum.
                self.logger.info("Packages: %s" %
                                 getattr(self.yumbase,
                                         "clean%s" % mdtype)()[1][0])
+
+
class YumSource(Source):
    """ Source subclass for Yum repositories: downloads and parses
    repomd.xml / primary.xml / filelists.xml metadata (unless the real
    yum libraries are in use), and optionally maps a source to a Pulp
    repository. """

    basegroups = ['yum', 'redhat', 'centos', 'fedora']
    ptype = 'yum'

    def __init__(self, basepath, xsource, config):
        Source.__init__(self, basepath, xsource, config)
        self.pulp_id = None
        if has_pulp and xsource.get("pulp_id"):
            self.pulp_id = xsource.get("pulp_id")

            _setup_pulp(self.config)
            repoapi = RepositoryAPI()
            try:
                self.repo = repoapi.repository(self.pulp_id)
                self.gpgkeys = ["%s/%s" % (PULPCONFIG.cds['keyurl'], key)
                                for key in repoapi.listkeys(self.pulp_id)]
            except server.ServerRequestError:
                # NOTE(review): err[0]/err[1] indexing assumes the
                # Pulp exception is subscriptable -- confirm
                err = sys.exc_info()[1]
                if err[0] == 401:
                    msg = "Error authenticating to Pulp: %s" % err[1]
                elif err[0] == 404:
                    msg = "Pulp repo id %s not found: %s" % (self.pulp_id,
                                                             err[1])
                else:
                    msg = "Error %d fetching pulp repo %s: %s" % (err[0],
                                                                  self.pulp_id,
                                                                  err[1])
                logger.error(msg)
                raise Bcfg2.Server.Plugin.PluginInitError
            except socket.error:
                err = sys.exc_info()[1]
                logger.error("Could not contact Pulp server: %s" % err)
                raise Bcfg2.Server.Plugin.PluginInitError
            except:
                err = sys.exc_info()[1]
                logger.error("Unknown error querying Pulp server: %s" % err)
                raise Bcfg2.Server.Plugin.PluginInitError
            # pulp repos are always accessed via their CDS URL
            self.rawurl = "%s/%s" % (PULPCONFIG.cds['baseurl'],
                                     self.repo['relative_path'])
            self.arches = [self.repo['arch']]

        if not self.rawurl:
            self.baseurl = self.url + "%(version)s/%(component)s/%(arch)s/"
        else:
            self.baseurl = self.rawurl
        self.packages = dict()
        self.deps = dict([('global', dict())])
        self.provides = dict([('global', dict())])
        self.filemap = dict([(x, dict())
                             for x in ['global'] + self.arches])
        self.needed_paths = set()
        self.file_to_arch = dict()

        self.use_yum = has_yum
        try:
            self.use_yum &= config.getboolean("yum", "use_yum_libraries")
        except (ConfigParser.NoOptionError, ConfigParser.NoSectionError):
            self.use_yum = False

    def save_state(self):
        """ Pickle the parsed metadata to the cache file (internal
        resolver only; yum keeps its own caches). """
        if not self.use_yum:
            cache = file(self.cachefile, 'wb')
            cPickle.dump((self.packages, self.deps, self.provides,
                          self.filemap, self.url_map), cache, 2)
            cache.close()

    def load_state(self):
        """ Restore the parsed metadata from the cache file. """
        if not self.use_yum:
            data = file(self.cachefile)
            (self.packages, self.deps, self.provides,
             self.filemap, self.url_map) = cPickle.load(data)

    def get_urls(self):
        """ Return all metadata URLs for this source; also rebuilds
        self.url_map as a side effect. """
        surls = list()
        self.url_map = []
        for arch in self.arches:
            if self.url:
                usettings = [{'version':self.version, 'component':comp,
                              'arch':arch}
                             for comp in self.components]
            else: # rawurl given
                usettings = [{'version':self.version, 'component':None,
                              'arch':arch}]

            for setting in usettings:
                setting['url'] = self.baseurl % setting
                self.url_map.append(copy.deepcopy(setting))
            surls.append((arch, [setting['url'] for setting in usettings]))
        urls = []
        for (sarch, surl_list) in surls:
            for surl in surl_list:
                urls.extend(self._get_urls_from_repodata(surl, sarch))
        return urls
    urls = property(get_urls)

    def _get_urls_from_repodata(self, url, arch):
        """ Fetch repomd.xml under *url* and return the URLs of the
        primary and filelists metadata; yum does this itself, so with
        yum libraries just return the repo URL. """
        if self.use_yum:
            return [url]

        rmdurl = '%srepodata/repomd.xml' % url
        try:
            repomd = fetch_url(rmdurl)
            xdata = lxml.etree.XML(repomd)
        except ValueError:
            logger.error("Packages: Bad url string %s" % rmdurl)
            return []
        except HTTPError:
            err = sys.exc_info()[1]
            logger.error("Packages: Failed to fetch url %s. code=%s" %
                         (rmdurl, err.code))
            return []
        except lxml.etree.XMLSyntaxError:
            err = sys.exc_info()[1]
            logger.error("Packages: Failed to process metadata at %s: %s" %
                         (rmdurl, err))
            return []

        urls = []
        for elt in xdata.findall(RPO + 'data'):
            if elt.get('type') in ['filelists', 'primary']:
                floc = elt.find(RPO + 'location')
                fullurl = url + floc.get('href')
                urls.append(fullurl)
                self.file_to_arch[self.escape_url(fullurl)] = arch
        return urls

    def read_files(self):
        # we have to read primary.xml first, and filelists.xml afterwards;
        primaries = list()
        filelists = list()
        for fname in self.files:
            if fname.endswith('primary.xml.gz'):
                primaries.append(fname)
            elif fname.endswith('filelists.xml.gz'):
                filelists.append(fname)

        for fname in primaries:
            farch = self.file_to_arch[fname]
            fdata = lxml.etree.parse(fname).getroot()
            self.parse_primary(fdata, farch)
        for fname in filelists:
            farch = self.file_to_arch[fname]
            fdata = lxml.etree.parse(fname).getroot()
            self.parse_filelist(fdata, farch)

        # merge data: the 'global' package set is the intersection of
        # all per-arch sets; per-arch sets keep only their remainder
        sdata = list(self.packages.values())
        try:
            self.packages['global'] = copy.deepcopy(sdata.pop())
        except IndexError:
            logger.error("No packages in repo")
        while sdata:
            self.packages['global'] = \
                self.packages['global'].intersection(sdata.pop())

        for key in self.packages:
            if key == 'global':
                continue
            self.packages[key] = \
                self.packages[key].difference(self.packages['global'])
        self.save_state()

    def parse_filelist(self, data, arch):
        """ Record which packages own the file paths that appeared in
        dependencies (parse_primary must have run first to populate
        needed_paths). """
        if arch not in self.filemap:
            self.filemap[arch] = dict()
        for pkg in data.findall(FL + 'package'):
            for fentry in pkg.findall(FL + 'file'):
                if fentry.text in self.needed_paths:
                    if fentry.text in self.filemap[arch]:
                        self.filemap[arch][fentry.text].add(pkg.get('name'))
                    else:
                        self.filemap[arch][fentry.text] = \
                            set([pkg.get('name')])

    def parse_primary(self, data, arch):
        """ Populate the package/deps/provides maps for *arch* from a
        primary.xml document. """
        if arch not in self.packages:
            self.packages[arch] = set()
        if arch not in self.deps:
            self.deps[arch] = dict()
        if arch not in self.provides:
            self.provides[arch] = dict()
        for pkg in data.getchildren():
            if not pkg.tag.endswith('package'):
                continue
            pkgname = pkg.find(XP + 'name').text
            self.packages[arch].add(pkgname)

            pdata = pkg.find(XP + 'format')
            pre = pdata.find(RP + 'requires')
            self.deps[arch][pkgname] = set()
            for entry in pre.getchildren():
                self.deps[arch][pkgname].add(entry.get('name'))
                if entry.get('name').startswith('/'):
                    # file dependency; resolved later via filelists
                    self.needed_paths.add(entry.get('name'))
            pro = pdata.find(RP + 'provides')
            if pro is not None:
                for entry in pro.getchildren():
                    prov = entry.get('name')
                    if prov not in self.provides[arch]:
                        self.provides[arch][prov] = list()
                    self.provides[arch][prov].append(pkgname)

    def is_package(self, metadata, item):
        """ Return True if *item* is a known, non-blacklisted (and,
        if a whitelist exists, whitelisted) package for the client's
        arch. """
        arch = [a for a in self.arches if a in metadata.groups]
        if not arch:
            return False
        return ((item in self.packages['global'] or
                 item in self.packages[arch[0]]) and
                item not in self.blacklist and
                (len(self.whitelist) == 0 or item in self.whitelist))

    def get_vpkgs(self, metadata):
        """ Virtual packages, including file-path provides from the
        filelists metadata (internal resolver only). """
        if self.use_yum:
            return dict()

        rv = Source.get_vpkgs(self, metadata)
        for arch, fmdata in list(self.filemap.items()):
            if arch not in metadata.groups and arch != 'global':
                continue
            for filename, pkgs in list(fmdata.items()):
                rv[filename] = pkgs
        return rv

    def filter_unknown(self, unknown):
        """ Remove rpmlib() internal dependencies from the unknown
        set; they are satisfied by rpm itself, not by any package. """
        if self.use_yum:
            filtered = set()
            for unk in unknown:
                try:
                    if unk.startswith('rpmlib'):
                        # bug fix: add the entry itself; update() on a
                        # string added its individual characters, so
                        # nothing was ever filtered
                        filtered.add(unk)
                except AttributeError:
                    # prco tuple rather than a plain name
                    try:
                        if unk[0].startswith('rpmlib'):
                            filtered.add(unk)
                    except (IndexError, AttributeError):
                        pass
        else:
            filtered = set([u for u in unknown if u.startswith('rpmlib')])
        unknown.difference_update(filtered)

    def setup_data(self, force_update=False):
        # with yum libraries, yum manages its own metadata
        if not self.use_yum:
            Source.setup_data(self, force_update=force_update)

    def get_repo_name(self, url_map):
        """ Pulp repos are named after their pulp_id; otherwise fall
        back to the generic URL-derived name. """
        if self.pulp_id:
            return self.pulp_id
        else:
            return Source.get_repo_name(self, url_map)
diff --git a/src/lib/Server/Plugins/Packages/__init__.py b/src/lib/Server/Plugins/Packages/__init__.py
new file mode 100644
index 000000000..1132543f1
--- /dev/null
+++ b/src/lib/Server/Plugins/Packages/__init__.py
@@ -0,0 +1,226 @@
+import os
+import sys
+import time
+import copy
+import glob
+import shutil
+import logging
+import lxml.etree
+import Bcfg2.Logger
+import Bcfg2.Server.Plugin
+from Bcfg2.Bcfg2Py3k import ConfigParser, urlopen
+from Bcfg2.Server.Plugins.Packages import Collection
+from Bcfg2.Server.Plugins.Packages.PackagesSources import PackagesSources
+from Bcfg2.Server.Plugins.Packages.PackagesConfig import PackagesConfig
+
+logger = logging.getLogger('Packages')
+
+class Packages(Bcfg2.Server.Plugin.Plugin,
+ Bcfg2.Server.Plugin.StructureValidator,
+ Bcfg2.Server.Plugin.Generator,
+ Bcfg2.Server.Plugin.Connector):
+ name = 'Packages'
+ conflicts = ['Pkgmgr']
+ experimental = True
+ __rmi__ = Bcfg2.Server.Plugin.Plugin.__rmi__ + ['Refresh', 'Reload']
+
+ def __init__(self, core, datastore):
+ Bcfg2.Server.Plugin.Plugin.__init__(self, core, datastore)
+ Bcfg2.Server.Plugin.StructureValidator.__init__(self)
+ Bcfg2.Server.Plugin.Generator.__init__(self)
+ Bcfg2.Server.Plugin.Connector.__init__(self)
+ Bcfg2.Server.Plugin.Probing.__init__(self)
+
+ self.sentinels = set()
+ self.cachepath = os.path.join(self.data, 'cache')
+ self.keypath = os.path.join(self.data, 'keys')
+ if not os.path.exists(self.keypath):
+ # create key directory if needed
+ os.makedirs(self.keypath)
+
+ # set up config files
+ self.config = PackagesConfig(os.path.join(self.data, "packages.conf"),
+ core.fam, self)
+ self.sources = PackagesSources(os.path.join(self.data, "sources.xml"),
+ self.cachepath, core.fam, self,
+ self.config)
+
+ @property
+ def disableResolver(self):
+ try:
+ return self.config.get("global", "resolver").lower() == "disabled"
+ except (ConfigParser.NoSectionError, ConfigParser.NoOptionError):
+ return False
+
+ @property
+ def disableMetaData(self):
+ try:
+ return self.config.get("global", "metadata").lower() == "disabled"
+ except (ConfigParser.NoSectionError, ConfigParser.NoOptionError):
+ return False
+
+ def create_config(self, entry, metadata):
+ """ create yum/apt config for the specified host """
+ attrib = {'encoding': 'ascii',
+ 'owner': 'root',
+ 'group': 'root',
+ 'type': 'file',
+ 'perms': '0644'}
+
+ collection = Collection.factory(metadata, self.sources, self.data)
+ entry.text = collection.get_config()
+ for (key, value) in list(attrib.items()):
+ entry.attrib.__setitem__(key, value)
+
+ def HandleEntry(self, entry, metadata):
+ if entry.tag == 'Package':
+ collection = Collection.factory(metadata, self.sources, self.data)
+ entry.set('version', 'auto')
+ entry.set('type', collection.ptype)
+ elif entry.tag == 'Path':
+ if (self.config.has_section("global") and
+ ((self.config.has_option("global", "yum_config") and
+ entry.get("name") == self.config.get("global",
+ "yum_config")) or
+ (self.config.has_option("global", "apt_config") and
+ entry.get("name") == self.config.get("global",
+ "apt_config")))):
+ self.create_config(entry, metadata)
+
+ def HandlesEntry(self, entry, metadata):
+ if entry.tag == 'Package':
+ collection = Collection.factory(metadata, self.sources, self.data)
+ if collection.magic_groups_match():
+ return True
+ elif entry.tag == 'Path':
+ # managed entries for yum/apt configs
+ if ((self.config.has_option("global", "yum_config") and
+ entry.get("name") == self.config.get("global",
+ "yum_config")) or
+ (self.config.has_option("global", "apt_config") and
+ entry.get("name") == self.config.get("global", "apt_config"))):
+ return True
+ return False
+
+ def validate_structures(self, metadata, structures):
+ '''Ensure client configurations include all needed prerequisites
+
+ Arguments:
+ metadata - client metadata instance
+ structures - a list of structure-stage entry combinations
+ '''
+ collection = Collection.factory(metadata, self.sources, self.data)
+ indep = lxml.etree.Element('Independent')
+ self._build_packages(metadata, indep, structures,
+ collection=collection)
+ collection.build_extra_structures(indep)
+ structures.append(indep)
+
+ def _build_packages(self, metadata, independent, structures,
+ collection=None):
+ """ build list of packages that need to be included in the
+ specification by validate_structures() """
+ if self.disableResolver:
+ # Config requests no resolver
+ return
+
+ if collection is None:
+ collection = Collection.factory(metadata, self.sources, self.data)
+ initial = set()
+ to_remove = []
+ for struct in structures:
+ for pkg in struct.xpath('//Package | //BoundPackage'):
+ if pkg.get("name"):
+ initial.add(pkg.get("name"))
+ elif pkg.get("group"):
+ initial.update(collection.get_group(pkg.get("group")))
+ to_remove.append(pkg)
+ else:
+ self.logger.error("Malformed Package: %s" %
+ lxml.etree.tostring(pkg))
+ for el in to_remove:
+ el.getparent().remove(el)
+
+ packages, unknown = collection.complete(initial)
+ if unknown:
+ self.logger.info("Got %d unknown entries" % len(unknown))
+ self.logger.info(list(unknown))
+ newpkgs = list(packages.difference(initial))
+ self.logger.debug("%d initial, %d complete, %d new" %
+ (len(initial), len(packages), len(newpkgs)))
+ newpkgs.sort()
+ for pkg in newpkgs:
+ lxml.etree.SubElement(independent, 'BoundPackage', name=pkg,
+ version='auto', type=collection.ptype,
+ origin='Packages')
+
+ def Refresh(self):
+ '''Packages.Refresh() => True|False\nReload configuration
+ specification and download sources\n'''
+ self._load_config(force_update=True)
+ return True
+
+ def Reload(self):
+ '''Packages.Reload() => True|False\nReload configuration
+ specification and sources\n'''
+ self._load_config()
+ return True
+
+ def _load_config(self, force_update=False):
+ '''
+ Load the configuration data and setup sources
+
+ Keyword args:
+ force_update Force downloading repo data
+ '''
+ self._load_sources(force_update)
+ self._load_gpg_keys(force_update)
+
+ def _load_sources(self, force_update):
+ """ Load sources from the config """
+ self.sentinels = set()
+ cachefiles = []
+
+ for collection in list(Collection.collections.values()):
+ cachefiles.extend(collection.cachefiles)
+ if not self.disableMetaData:
+ collection.setup_data(force_update)
+ self.sentinels.update(collection.basegroups)
+
+ Collection.clear_cache()
+
+ for cfile in glob.glob(os.path.join(self.cachepath, "cache-*")):
+ if cfile not in cachefiles:
+ try:
+ if os.path.isdir(cfile):
+ shutil.rmtree(cfile)
+ else:
+ os.unlink(cfile)
+ except OSError:
+ err = sys.exc_info()[1]
+ logger.error("Packages: Could not remove cache file %s: %s"
+ % (cfile, err))
+
+ def _load_gpg_keys(self, force_update):
+ """ Load gpg keys from the config """
+ keyfiles = []
+ keys = []
+ for source in self.sources:
+ for key in source.gpgkeys:
+ localfile = os.path.join(self.keypath, os.path.basename(key))
+ if localfile not in keyfiles:
+ keyfiles.append(localfile)
+ if ((force_update and key not in keys) or
+ not os.path.exists(localfile)):
+ self.logger.info("Downloading and parsing %s" % key)
+ response = urlopen(key)
+ open(localfile, 'w').write(response.read())
+ keys.append(key)
+
+ for kfile in glob.glob(os.path.join(self.keypath, "*")):
+ if kfile not in keyfiles:
+ os.unlink(kfile)
+
+ def get_additional_data(self, metadata):
+ collection = Collection.factory(metadata, self.sources, self.data)
+ return dict(sources=collection.get_additional_data())
diff --git a/src/lib/Server/Plugins/Pkgmgr.py b/src/lib/Server/Plugins/Pkgmgr.py
index b96e7ea7d..bf674d0d0 100644
--- a/src/lib/Server/Plugins/Pkgmgr.py
+++ b/src/lib/Server/Plugins/Pkgmgr.py
@@ -4,6 +4,7 @@ __revision__ = '$Revision$'
import logging
import re
import Bcfg2.Server.Plugin
+import lxml
logger = logging.getLogger('Bcfg2.Plugins.Pkgmgr')
@@ -45,9 +46,9 @@ class PNode(Bcfg2.Server.Plugin.INode):
'encap': re.compile('^(?P<name>[\w-]+)-(?P<version>[\w\d\.+-]+).encap.*$')}
ignore = ['Package']
- def Match(self, metadata, data):
+ def Match(self, metadata, data, entry=lxml.etree.Element("None")):
"""Return a dictionary of package mappings."""
- if self.predicate(metadata):
+ if self.predicate(metadata, entry):
for key in self.contents:
try:
data[key].update(self.contents[key])
diff --git a/src/lib/Server/Plugins/SGenshi.py b/src/lib/Server/Plugins/SGenshi.py
index efd981956..3745834a8 100644
--- a/src/lib/Server/Plugins/SGenshi.py
+++ b/src/lib/Server/Plugins/SGenshi.py
@@ -5,6 +5,7 @@ import genshi.input
import genshi.template
import lxml.etree
import logging
+import copy
import sys
import Bcfg2.Server.Plugin
@@ -13,28 +14,45 @@ import Bcfg2.Server.Plugins.TGenshi
logger = logging.getLogger('Bcfg2.Plugins.SGenshi')
-class SGenshiTemplateFile(Bcfg2.Server.Plugins.TGenshi.TemplateFile):
+class SGenshiTemplateFile(Bcfg2.Server.Plugins.TGenshi.TemplateFile,
+ Bcfg2.Server.Plugin.StructFile):
+ def __init__(self, name, specific, encoding):
+ Bcfg2.Server.Plugins.TGenshi.TemplateFile.__init__(self, name,
+ specific, encoding)
+ Bcfg2.Server.Plugin.StructFile.__init__(self, name)
def get_xml_value(self, metadata):
if not hasattr(self, 'template'):
logger.error("No parsed template information for %s" % (self.name))
raise Bcfg2.Server.Plugin.PluginExecutionError
try:
- stream = self.template.generate(metadata=metadata,).filter( \
+ stream = self.template.generate(metadata=metadata).filter( \
Bcfg2.Server.Plugins.TGenshi.removecomment)
- data = stream.render('xml', strip_whitespace=False)
- return lxml.etree.XML(data)
+ data = lxml.etree.XML(stream.render('xml', strip_whitespace=False))
+ bundlename = self.name.split('/')[-1][:-4]
+ bundle = lxml.etree.Element('Bundle', name=bundlename)
+ for item in self.Match(metadata, data):
+ bundle.append(copy.deepcopy(item))
+ return bundle
except LookupError:
lerror = sys.exc_info()[1]
logger.error('Genshi lookup error: %s' % lerror)
except genshi.template.TemplateError:
terror = sys.exc_info()[1]
logger.error('Genshi template error: %s' % terror)
+ raise
except genshi.input.ParseError:
perror = sys.exc_info()[1]
logger.error('Genshi parse error: %s' % perror)
raise
+ def Match(self, metadata, xdata):
+ """Return matching fragments of parsed template."""
+ rv = []
+ for child in xdata.getchildren():
+ rv.extend(self._match(child, metadata))
+ logger.debug("File %s got %d match(es)" % (self.name, len(rv)))
+ return rv
class SGenshiEntrySet(Bcfg2.Server.Plugin.EntrySet):
diff --git a/src/lib/Server/Reports/settings.py b/src/lib/Server/Reports/settings.py
index 869f09f1f..128658ff1 100644
--- a/src/lib/Server/Reports/settings.py
+++ b/src/lib/Server/Reports/settings.py
@@ -1,10 +1,14 @@
import django
+import sys
# Compatibility import
from Bcfg2.Bcfg2Py3k import ConfigParser
# Django settings for bcfg2 reports project.
c = ConfigParser.ConfigParser()
-c.read(['/etc/bcfg2.conf', '/etc/bcfg2-web.conf'])
+if len(c.read(['/etc/bcfg2.conf', '/etc/bcfg2-web.conf'])) == 0:
+ print("Please check that bcfg2.conf or bcfg2-web.conf exists "
+ "and is readable by your web server.")
+ sys.exit(1)
try:
dset = c.get('statistics', 'web_debug')
@@ -23,8 +27,12 @@ ADMINS = (
)
MANAGERS = ADMINS
-
-db_engine = c.get('statistics', 'database_engine')
+try:
+ db_engine = c.get('statistics', 'database_engine')
+except ConfigParser.NoSectionError:
+ e = sys.exc_info()[1]
+ print("Failed to determine database engine: %s" % e)
+ sys.exit(1)
db_name = ''
if c.has_option('statistics', 'database_name'):
db_name = c.get('statistics', 'database_name')
diff --git a/src/sbin/bcfg2-admin b/src/sbin/bcfg2-admin
index 0056a97aa..09117a3f4 100755
--- a/src/sbin/bcfg2-admin
+++ b/src/sbin/bcfg2-admin
@@ -64,7 +64,7 @@ def main():
if setup['args'][0] in get_modes():
modname = setup['args'][0].capitalize()
- if len(setup['args']) == 1 or setup['args'][1] == 'help':
+ if len(setup['args']) > 1 and setup['args'][1] == 'help':
print(mode_import(modname).__longhelp__)
raise SystemExit(0)
try: