oslo.rootwrap-5.13.0/0000775000175100017510000000000013224676617014472 5ustar zuulzuul00000000000000oslo.rootwrap-5.13.0/test-requirements.txt0000666000175100017510000000132613224676415020733 0ustar zuulzuul00000000000000# The order of packages is significant, because pip processes them in the order # of appearance. Changing the order has an impact on the overall integration # process, which may cause wedges in the gate later. hacking!=0.13.0,<0.14,>=0.12.0 # Apache-2.0 fixtures>=3.0.0 # Apache-2.0/BSD testrepository>=0.0.18 # Apache-2.0/BSD testtools>=2.2.0 # MIT # this is required for the docs build jobs sphinx>=1.6.2 # BSD openstackdocstheme>=1.17.0 # Apache-2.0 oslotest>=1.10.0 # Apache-2.0 # mocking framework mock>=2.0.0 # BSD # rootwrap daemon's client should be verified to run in eventlet eventlet!=0.18.3,!=0.20.1,<0.21.0,>=0.18.2 # MIT reno>=2.5.0 # Apache-2.0 # Bandit security code scanner bandit>=1.1.0 # Apache-2.0 oslo.rootwrap-5.13.0/CONTRIBUTING.rst0000666000175100017510000000104013224676415017124 0ustar zuulzuul00000000000000If you would like to contribute to the development of OpenStack, you must follow the steps in this page: http://docs.openstack.org/infra/manual/developers.html Once those steps have been completed, changes to OpenStack should be submitted for review via the Gerrit tool, following the workflow documented at: http://docs.openstack.org/infra/manual/developers.html#development-workflow Pull requests submitted through GitHub will be ignored. Bugs should be filed on Launchpad, not GitHub: https://bugs.launchpad.net/oslo.rootwrap oslo.rootwrap-5.13.0/benchmark/0000775000175100017510000000000013224676617016424 5ustar zuulzuul00000000000000oslo.rootwrap-5.13.0/benchmark/benchmark.py0000666000175100017510000000631213224676415020730 0ustar zuulzuul00000000000000# Copyright (c) 2014 Mirantis Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from __future__ import print_function import atexit import math import os import subprocess import sys import timeit from oslo_rootwrap import client config_path = "rootwrap.conf" num_iterations = 100 def run_plain(cmd): obj = subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE) out, err = obj.communicate() return obj.returncode, out, err def run_sudo(cmd): return run_plain(["sudo"] + cmd) def run_rootwrap(cmd): return run_plain([ "sudo", sys.executable, "-c", "from oslo_rootwrap import cmd; cmd.main()", config_path] + cmd) run_daemon = client.Client([ "sudo", sys.executable, "-c", "from oslo_rootwrap import cmd; cmd.daemon()", config_path]).execute def run_one(runner, cmd): def __inner(): code, out, err = runner(cmd) assert err == "", "Stderr not empty:\n" + err assert code == 0, "Command failed" return __inner runners = [ ("{0}", run_plain), ("sudo {0}", run_sudo), ("sudo rootwrap conf {0}", run_rootwrap), ("daemon.run('{0}')", run_daemon), ] def get_time_string(sec): if sec > 0.9: return "{0:7.3f}s ".format(sec) elif sec > 0.0009: return "{0:7.3f}ms".format(sec * 1000.0) else: return "{0:7.3f}us".format(sec * 1000000.0) def run_bench(cmd, runners): strcmd = ' '.join(cmd) max_name_len = max(len(name) for name, _ in runners) + len(strcmd) - 3 print("Running '{0}':".format(strcmd)) print("{0:^{1}} :".format("method", max_name_len), "".join(map("{0:^10}".format, ["min", "avg", "max", "dev"]))) for name, runner in runners: results = timeit.repeat(run_one(runner, cmd), repeat=num_iterations, number=1) avg = sum(results) / num_iterations min_ = min(results) max_ = max(results) dev = math.sqrt(sum((r - avg) ** 2 for r in results) / num_iterations) print("{0:>{1}} :".format(name.format(strcmd), max_name_len), " ".join(map(get_time_string, [min_, avg, max_, dev]))) def main(): os.chdir(os.path.dirname(__file__)) code, _, _ = run_sudo(["-vn"]) if code: print("We need you to authorize with sudo to run this benchmark") run_sudo(["-v"]) run_bench(["ip", "a"], runners) run_sudo(["ip", "netns", "add", "bench_ns"]) atexit.register(run_sudo, ["ip", "netns", "delete", "bench_ns"]) run_bench('ip netns exec bench_ns ip a'.split(), runners[1:]) if __name__ == "__main__": main() oslo.rootwrap-5.13.0/benchmark/filters.d/0000775000175100017510000000000013224676617020316 5ustar zuulzuul00000000000000oslo.rootwrap-5.13.0/benchmark/filters.d/ip.filters0000666000175100017510000000010713224676415022314 0ustar zuulzuul00000000000000[Filters] ip: IpFilter, ip, root ip_exec: IpNetnsExecFilter, ip, root oslo.rootwrap-5.13.0/benchmark/rootwrap.conf0000666000175100017510000000013213224676415021142 0ustar zuulzuul00000000000000[DEFAULT] filters_path=filters.d exec_dirs=/sbin,/usr/sbin,/bin,/usr/bin use_syslog=False oslo.rootwrap-5.13.0/ChangeLog0000664000175100017510000001766013224676616016255 0ustar zuulzuul00000000000000CHANGES ======= 5.13.0 ------ * Updated from global requirements * Follow the new PTI for document build * Add bandit to pep8 job 5.12.1 ------ 5.12.0 ------ * Remove -U from pip install * Avoid tox\_install.sh for constraints support * Ignore syslog settings if /dev/log is not present * Remove setting of version/release from releasenotes * Updated from global requirements 5.11.0 ------ * Protect rootwrap daemon socket against multiple threads * Cleanup test-requirements * Updated from global requirements 5.10.0 ------ * Updated from global requirements * Fix test\_daemon\_no\_cleanup\_for\_uninitialized\_server * Update reno for stable/pike * Updated from global 
requirements 5.9.0 ----- * Update URLs in documents according to document migration 5.8.0 ----- * rearrange existing documentation to fit the new standard layout * Switch from oslosphinx to openstackdocstheme * Updated from global requirements * Remove pbr warnerrors in favor of sphinx check * Updated from global requirements * Updated from global requirements * Updated from global requirements 5.7.0 ----- * Trivial: Remove testscenarios from test-requirements.txt 5.6.0 ----- 5.5.0 ----- * Updated from global requirements * [Fix gate]Update test requirement * Allow rootwrap-daemon to timeout and exit * Don't open subdirectories rootwrap filter directories * Avoid importing Linux specific modules on Windows * Always check cmd which does not exist * Updated from global requirements * Remove support for py34 * pbr.version.VersionInfo needs package name (oslo.xyz and not oslo\_xyz) * [daemon] Close inherited filedescriptors after forking * Update reno for stable/ocata 5.4.0 ----- * Relax default strict option under python3.x for configparser * Add Constraints support * Show team and repo badges on README 5.3.0 ----- * Updated from global requirements * Updated from global requirements * [TrivialFix] Replace 'assertFalse(a in b)' with 'assertNotIn(a, b)' * Fix running unknown commands in daemon mode * Enable release notes translation 5.2.0 ----- * Update homepage with developer documentation page * Enhance \_program() and \_program\_path() 5.1.0 ----- * Fix parameters of assertEqual are misplaced * Remove discover from test-requirements 5.0.0 ----- * always allow privsep-helper as a command * Add Python 3.5 classifier and venv * Add reno for release notes management 4.4.0 ----- * Updated from global requirements 4.3.0 ----- * Updated from global requirements 4.2.0 ----- * Updated from global requirements 4.1.0 ----- * Updated from global requirements 4.0.0 ----- * Updated from global requirements * Remove unused use-syslog-rfc-format option 3.2.0 ----- * Updated from global requirements * Updated from global requirements * Removes MANIFEST.in as it is not needed explicitely by PBR 3.1.0 ----- * Drop python 2.6 support 3.0.1 ----- * Updated from global requirements * Remove python 2.6 classifier * Remove python 2.6 and cleanup tox.ini * Python 3: encode or decode i/o data of Popen.communicate() 2.5.0 ----- * Fix Python 3 support for eventlet monkey-patching * Fix Python 3 issues in tests 2.4.0 ----- * No need for Oslo Incubator Sync * move usage instructions into main docs * docs - Set pbr 'warnerrors' option for doc build * Add shields.io version/downloads links/badges into README.rst * add pbr-generated release history to the documentation * Fix some spelling typo in manual * Updated from global requirements * Python 3: Don't use BaseException.message attribute 2.3.0 ----- * Handle renamed executables with KillFilter * Updated from global requirements 2.2.0 ----- * Updated from global requirements * Updated from global requirements * Updated from global requirements * Updated from global requirements * Updated from global requirements * Updated from global requirements * Remove test-requirements-py3.txt * Add tox target to find missing requirements 2.1.0 ----- * daemon: avoid raising UnboundLocalError to callers * Updated from global requirements * Updated from global requirements * Log that rootwrap was spawned after check 2.0.0 ----- * Remove oslo namespace package 1.8.0 ----- * Remove run\_cross\_tests.sh * Updated from global requirements * Remove mentions of root "tests" package from 
test\_funcional\_\* * Generate a oslo-rootwrap console script 1.7.0 ----- * Uncap library requirements for liberty * Speed up non-daemon rootwrap command line invocation * Correct RST syntax errors in README.rst * Update to latest hacking * Avoid calling sudo just to change users * Updated from global requirements 1.6.0 ----- * Remove env changing support in daemon mode * Updated from global requirements * Updated from global requirements * Add bug link to README 1.5.0 ----- * Add cross-testing script * Updated from global requirements * Move files out of the namespace package * Activate pep8 check that \_ is imported * Workflow documentation is now in infra-manual 1.4.0 ----- * Updated from global requirements * Updated from global requirements * Correct filters examples in README.rst * Updated from global requirements * Fix exit of subprocess in case it was terminated by signal * Updated from global requirements * Support building wheels (PEP-427) * Updated from global requirements 1.3.0 ----- * Clean up title on main doc page * Initial cut of documentation for oslo.rootwrap * Add a short doc to README on how to use daemon mode * Fix the bug tracker URL in CONTRIBUTING.rst * warn against sorting requirements * Updated from global requirements 1.3.0.0a2 --------- * Add daemon mode to benchmark * Add an option to run rootwrap as a daemon * Refactor common parts from cmd to wrapper * Add basic benchmark * Remove sys.path modification * Move test requirement coverage into tox.ini * Enabled hacking check H305 * Continue on failure of leaf filters of chaining filters 1.3.0.0a1 --------- * Let tests pass on distros where "ip" is in /bin * Bump hacking to 0.9.x series * Avoid usage of mutables as default args * Simplify the flow in RegExpFilter * Add ChainingRegExpFilter for prefix utilities * Fix Python 3 support, add functional test * Fix import grouping * Remove unused variable 'command' * Run py33 test env before others 1.2.0 ----- * Avoid matching ip -s netns exec in IpFilter * Don't use system pip things in tox * Add Python 3 trove classifiers * To honor RFC5424 add use\_syslog\_rfc\_format config option * Trivial changes from oslo-incubator 1.1.0 ----- * Discontinue usage of oslo-rootwrap * Add missing oslo/\_\_init\_\_.py * Fix spelling errors in comments 1.0.0 ----- * Use oslo-rootwrap in config directory names * Ship with etc/oslo.rootwrap instead of etc/oslo * Add a complete README.rst * Add .gitreview for oslo.rootwrap * Add standalone project packaging support files * Make Rootwrap python3-compatible * Make tests not depend on openstack.common stuff * Move files to new locations for oslo-config * Skip hidden files while traversion rootwrap filters * Fix os.getlogin() problem with no tty * Send rootwrap exit error message to stderr * rootwrap: improve Python 3 compatibility * Replace using tests.utils part2 * Fixes files with wrong bitmode * Remove DnsmasqFilter and DeprecatedDnsmasqFilter * Handle empty arglists in Filters * Handle empty PATH environment variable * Add IpFilter, IPNetnsExecFilter and EnvFilter * Handle relative path arguments in Killfilter * Enable hacking H404 test * Enable hacking H402 test * Update KillFilter to stop at '\0' for readlink() function * Stylistic improvements from quantum-rootwrap * Use print\_function \_\_future\_\_ import * Revert common logging use in rootwrap * Improve Python 3.x compatibility * Replaces standard logging with common logging * Move bin/ scripts to entrypoints * Add PathFilter to rootwrap * update OpenStack, LLC to OpenStack 
Foundation * Fix Copyright Headers - Rename LLC to Foundation * Replaced direct usage of stubout with BaseTestCase * Use testtools as test base class * Remove unused etc/openstack-common.conf.test * Fix pep8 E125 errors * Move rootwrap code to openstack.common oslo.rootwrap-5.13.0/requirements.txt0000666000175100017510000000034513224676415017756 0ustar zuulzuul00000000000000# The order of packages is significant, because pip processes them in the order # of appearance. Changing the order has an impact on the overall integration # process, which may cause wedges in the gate later. six>=1.10.0 # MIT oslo.rootwrap-5.13.0/LICENSE0000666000175100017510000002665213224676415015510 0ustar zuulzuul00000000000000 Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. 
For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. 
You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. --- License for python-keystoneclient versions prior to 2.1 --- All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. 
Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of this project nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. oslo.rootwrap-5.13.0/AUTHORS0000664000175100017510000000436713224676616015553 0ustar zuulzuul00000000000000Akihiro Motoki Andreas Jaeger Angus Lees Bogdan Dobrelya Cedric Brandily ChangBo Guo(gcb) Christian Berendt Claudiu Belu Corey Bryant Cyril Roelandt Davanum Srinivas Davanum Srinivas Dina Belova Dirk Mueller Doug Hellmann Doug Hellmann Flavio Percoco IWAMOTO Toshihiro Ihar Hrachyshka Jakub Libosvar James Carey Jeremy Stanley Julien Danjou Kirill Bespalov Mark McClain Mark McLoughlin Maru Newby Monty Taylor OpenStack Release Bot Pádraig Brady Ralf Haferkamp Roman Podolyaka Ronald Bradford Sean Dague Sean McGinnis Sergey Kraynev Sergey Lukjanov Stanislav Kudriashev Stephen Ma Steve Martinelli Swapnil Kulkarni (coolsvap) Thierry Carrez Thomas Bechtold Tomoki Sekiyama Tony Breeds Tony Xu Victor Stinner Vu Cong Tuan XianChaobo Yatin Kumbhare Yufang Zhang Yuriy Taraday Zhao Lei Zhongyue Luo Zuul fumihiko kakuma howardlee melissaml ricolin sonu.kumar yan.haifeng oslo.rootwrap-5.13.0/PKG-INFO0000664000175100017510000000372513224676617015576 0ustar zuulzuul00000000000000Metadata-Version: 1.1 Name: oslo.rootwrap Version: 5.13.0 Summary: Oslo Rootwrap Home-page: https://docs.openstack.org/oslo.rootwrap/latest/ Author: OpenStack Author-email: openstack-dev@lists.openstack.org License: UNKNOWN Description-Content-Type: UNKNOWN Description: ======================== Team and repository tags ======================== .. image:: http://governance.openstack.org/badges/oslo.rootwrap.svg :target: http://governance.openstack.org/reference/tags/index.html .. Change things from this point on =============================================== oslo.rootwrap -- Escalated Permission Control =============================================== .. image:: https://img.shields.io/pypi/v/oslo.rootwrap.svg :target: https://pypi.python.org/pypi/oslo.rootwrap/ :alt: Latest Version .. image:: https://img.shields.io/pypi/dm/oslo.rootwrap.svg :target: https://pypi.python.org/pypi/oslo.rootwrap/ :alt: Downloads oslo.rootwrap allows fine-grained filtering of shell commands to run as `root` from OpenStack services. 
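For example, a service can route privileged commands through a long-running rootwrap daemon via ``oslo_rootwrap.client``. The following is a minimal sketch modelled on ``benchmark/benchmark.py``; the configuration path shown is an illustrative assumption, and the command must be permitted by a filter definition such as the ``IpFilter`` entry in ``benchmark/filters.d/ip.filters``::

    from oslo_rootwrap import client

    # The Client is given the command used to launch the root-owned daemon;
    # execute() then sends commands to it for filtering and execution.
    run = client.Client([
        "sudo", "oslo-rootwrap-daemon",
        "/etc/oslo-rootwrap/rootwrap.conf"]).execute

    # Runs the command if a filter matches; returns exit code and captured output.
    returncode, out, err = run(["ip", "a"])
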
* License: Apache License, Version 2.0 * Documentation: https://docs.openstack.org/oslo.rootwrap/latest/ * Source: https://git.openstack.org/cgit/openstack/oslo.rootwrap * Bugs: https://bugs.launchpad.net/oslo.rootwrap Platform: UNKNOWN Classifier: Development Status :: 4 - Beta Classifier: Environment :: OpenStack Classifier: Intended Audience :: Developers Classifier: Intended Audience :: Information Technology Classifier: License :: OSI Approved :: Apache Software License Classifier: Operating System :: OS Independent Classifier: Programming Language :: Python Classifier: Programming Language :: Python :: 2.7 Classifier: Programming Language :: Python :: 3 Classifier: Programming Language :: Python :: 3.5 oslo.rootwrap-5.13.0/etc/0000775000175100017510000000000013224676617015245 5ustar zuulzuul00000000000000oslo.rootwrap-5.13.0/etc/rootwrap.conf.sample0000666000175100017510000000176513224676415021260 0ustar zuulzuul00000000000000# Configuration for rootwrap # This file should be owned by (and only-writeable by) the root user [DEFAULT] # List of directories to load filter definitions from (separated by ','). # These directories MUST all be only writeable by root ! filters_path=/etc/oslo-rootwrap/filters.d,/usr/share/oslo-rootwrap # List of directories to search executables in, in case filters do not # explicitly specify a full path (separated by ',') # If not specified, defaults to system PATH environment variable. # These directories MUST all be only writeable by root ! exec_dirs=/sbin,/usr/sbin,/bin,/usr/bin # Enable logging to syslog # Default value is False use_syslog=False # Which syslog facility to use. # Valid values include auth, authpriv, syslog, user0, user1... # Default value is 'syslog' syslog_log_facility=syslog # Which messages to log. 
# INFO means log all usage # ERROR means only log unsuccessful attempts syslog_log_level=ERROR # Rootwrap daemon exits after this seconds of inactivity daemon_timeout=600 oslo.rootwrap-5.13.0/tox.ini0000666000175100017510000000341213224676427016006 0ustar zuulzuul00000000000000[tox] minversion = 2.0 envlist = py35,py27,pep8 [testenv] install_command = pip install {opts} {packages} deps = -c{env:UPPER_CONSTRAINTS_FILE:https://git.openstack.org/cgit/openstack/requirements/plain/upper-constraints.txt} -r{toxinidir}/test-requirements.txt -r{toxinidir}/requirements.txt # Functional tests with Eventlet involve monkeypatching, so force them to be # run in a separate process whitelist_externals = env commands = python setup.py testr --slowest --testr-args='(?!tests.test_functional_eventlet)tests {posargs}' env TEST_EVENTLET=1 python setup.py testr --slowest --testr-args='tests.test_functional_eventlet' [testenv:pep8] deps = -r{toxinidir}/test-requirements.txt commands = flake8 # Run security linter bandit -r oslo_rootwrap tests -n5 --skip B404 [testenv:cover] deps = {[testenv]deps} coverage commands = python setup.py testr --coverage [testenv:venv] commands = {posargs} [testenv:docs] deps = -r{toxinidir}/doc/requirements.txt commands = sphinx-build -b html doc/source doc/build/html [flake8] show-source = True exclude = .tox,dist,doc,*.egg,build [testenv:benchmark] commands = python benchmark/benchmark.py [testenv:pip-missing-reqs] # do not install test-requirements as that will pollute the virtualenv for # determining missing packages # this also means that pip-missing-reqs must be installed separately, outside # of the requirements.txt files deps = pip_missing_reqs commands = pip-missing-reqs -d --ignore-module=oslo_rootwrap* --ignore-module=pkg_resources --ignore-file=oslo_rootwrap/test.py --ignore-file=oslo_rootwrap/tests/* oslo_rootwrap [testenv:releasenotes] deps = -r{toxinidir}/doc/requirements.txt commands = sphinx-build -a -E -W -d releasenotes/build/doctrees -b html releasenotes/source releasenotes/build/html oslo.rootwrap-5.13.0/oslo.rootwrap.egg-info/0000775000175100017510000000000013224676617021014 5ustar zuulzuul00000000000000oslo.rootwrap-5.13.0/oslo.rootwrap.egg-info/requires.txt0000664000175100017510000000001413224676616023406 0ustar zuulzuul00000000000000six>=1.10.0 oslo.rootwrap-5.13.0/oslo.rootwrap.egg-info/entry_points.txt0000664000175100017510000000015213224676616024307 0ustar zuulzuul00000000000000[console_scripts] oslo-rootwrap = oslo_rootwrap.cmd:main oslo-rootwrap-daemon = oslo_rootwrap.cmd:daemon oslo.rootwrap-5.13.0/oslo.rootwrap.egg-info/SOURCES.txt0000664000175100017510000000257113224676617022705 0ustar zuulzuul00000000000000.testr.conf AUTHORS CONTRIBUTING.rst ChangeLog LICENSE README.rst requirements.txt setup.cfg setup.py test-requirements.txt tox.ini benchmark/benchmark.py benchmark/rootwrap.conf benchmark/filters.d/ip.filters doc/requirements.txt doc/source/conf.py doc/source/index.rst doc/source/contributor/index.rst doc/source/install/index.rst doc/source/user/history.rst doc/source/user/index.rst doc/source/user/usage.rst etc/rootwrap.conf.sample oslo.rootwrap.egg-info/PKG-INFO oslo.rootwrap.egg-info/SOURCES.txt oslo.rootwrap.egg-info/dependency_links.txt oslo.rootwrap.egg-info/entry_points.txt oslo.rootwrap.egg-info/not-zip-safe oslo.rootwrap.egg-info/pbr.json oslo.rootwrap.egg-info/requires.txt oslo.rootwrap.egg-info/top_level.txt oslo_rootwrap/__init__.py oslo_rootwrap/client.py oslo_rootwrap/cmd.py oslo_rootwrap/daemon.py oslo_rootwrap/filters.py 
oslo_rootwrap/jsonrpc.py oslo_rootwrap/version.py oslo_rootwrap/wrapper.py oslo_rootwrap/tests/__init__.py oslo_rootwrap/tests/run_daemon.py oslo_rootwrap/tests/test_functional.py oslo_rootwrap/tests/test_functional_eventlet.py oslo_rootwrap/tests/test_rootwrap.py releasenotes/notes/add_reno-3b4ae0789e9c45b4.yaml releasenotes/source/conf.py releasenotes/source/index.rst releasenotes/source/ocata.rst releasenotes/source/pike.rst releasenotes/source/unreleased.rst releasenotes/source/_static/.placeholder releasenotes/source/_templates/.placeholderoslo.rootwrap-5.13.0/oslo.rootwrap.egg-info/pbr.json0000664000175100017510000000005613224676616022472 0ustar zuulzuul00000000000000{"git_version": "68a6492", "is_release": true}oslo.rootwrap-5.13.0/oslo.rootwrap.egg-info/dependency_links.txt0000664000175100017510000000000113224676616025061 0ustar zuulzuul00000000000000 oslo.rootwrap-5.13.0/oslo.rootwrap.egg-info/not-zip-safe0000664000175100017510000000000113224676560023237 0ustar zuulzuul00000000000000 oslo.rootwrap-5.13.0/oslo.rootwrap.egg-info/PKG-INFO0000664000175100017510000000372513224676616022117 0ustar zuulzuul00000000000000Metadata-Version: 1.1 Name: oslo.rootwrap Version: 5.13.0 Summary: Oslo Rootwrap Home-page: https://docs.openstack.org/oslo.rootwrap/latest/ Author: OpenStack Author-email: openstack-dev@lists.openstack.org License: UNKNOWN Description-Content-Type: UNKNOWN Description: ======================== Team and repository tags ======================== .. image:: http://governance.openstack.org/badges/oslo.rootwrap.svg :target: http://governance.openstack.org/reference/tags/index.html .. Change things from this point on =============================================== oslo.rootwrap -- Escalated Permission Control =============================================== .. image:: https://img.shields.io/pypi/v/oslo.rootwrap.svg :target: https://pypi.python.org/pypi/oslo.rootwrap/ :alt: Latest Version .. image:: https://img.shields.io/pypi/dm/oslo.rootwrap.svg :target: https://pypi.python.org/pypi/oslo.rootwrap/ :alt: Downloads oslo.rootwrap allows fine-grained filtering of shell commands to run as `root` from OpenStack services. * License: Apache License, Version 2.0 * Documentation: https://docs.openstack.org/oslo.rootwrap/latest/ * Source: https://git.openstack.org/cgit/openstack/oslo.rootwrap * Bugs: https://bugs.launchpad.net/oslo.rootwrap Platform: UNKNOWN Classifier: Development Status :: 4 - Beta Classifier: Environment :: OpenStack Classifier: Intended Audience :: Developers Classifier: Intended Audience :: Information Technology Classifier: License :: OSI Approved :: Apache Software License Classifier: Operating System :: OS Independent Classifier: Programming Language :: Python Classifier: Programming Language :: Python :: 2.7 Classifier: Programming Language :: Python :: 3 Classifier: Programming Language :: Python :: 3.5 oslo.rootwrap-5.13.0/oslo.rootwrap.egg-info/top_level.txt0000664000175100017510000000001613224676616023542 0ustar zuulzuul00000000000000oslo_rootwrap oslo.rootwrap-5.13.0/releasenotes/0000775000175100017510000000000013224676617017163 5ustar zuulzuul00000000000000oslo.rootwrap-5.13.0/releasenotes/source/0000775000175100017510000000000013224676617020463 5ustar zuulzuul00000000000000oslo.rootwrap-5.13.0/releasenotes/source/conf.py0000666000175100017510000002161413224676415021764 0ustar zuulzuul00000000000000# -*- coding: utf-8 -*- # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. # This file is execfile()d with the current directory set to its # containing dir. # # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. # sys.path.insert(0, os.path.abspath('.')) # -- General configuration ------------------------------------------------ # If your documentation needs a minimal Sphinx version, state it here. # needs_sphinx = '1.0' # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. extensions = [ 'openstackdocstheme', 'reno.sphinxext', ] # openstackdocstheme options repository_name = 'openstack/oslo.rootwrap' bug_project = 'oslo.rootwrap' bug_tag = '' # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] # The suffix of source filenames. source_suffix = '.rst' # The encoding of source files. # source_encoding = 'utf-8-sig' # The master toctree document. master_doc = 'index' # General information about the project. project = u'oslo.rootwrap Release Notes' copyright = u'2016, oslo.rootwrap Developers' # Release notes do not need a version in the title, they span # multiple versions. # The full version, including alpha/beta/rc tags. release = '' # The short X.Y version. version = '' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. # language = None # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: # today = '' # Else, today_fmt is used as the format for a strftime call. # today_fmt = '%B %d, %Y' # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. exclude_patterns = [] # The reST default role (used for this markup: `text`) to use for all # documents. # default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. # add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). # add_module_names = True # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. # show_authors = False # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' # A list of ignored prefixes for module index sorting. # modindex_common_prefix = [] # If true, keep warnings as "system message" paragraphs in the built documents. # keep_warnings = False # -- Options for HTML output ---------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. 
html_theme = 'openstackdocs' # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. # html_theme_options = {} # Add any paths that contain custom themes here, relative to this directory. # html_theme_path = [] # The name for this set of Sphinx documents. If None, it defaults to # " v documentation". # html_title = None # A shorter title for the navigation bar. Default is the same as html_title. # html_short_title = None # The name of an image file (relative to this directory) to place at the top # of the sidebar. # html_logo = None # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. # html_favicon = None # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ['_static'] # Add any extra paths that contain custom files (such as robots.txt or # .htaccess) here, relative to this directory. These files are copied # directly to the root of the documentation. # html_extra_path = [] # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. html_last_updated_fmt = '%Y-%m-%d %H:%M' # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. # html_use_smartypants = True # Custom sidebar templates, maps document names to template names. # html_sidebars = {} # Additional templates that should be rendered to pages, maps page names to # template names. # html_additional_pages = {} # If false, no module index is generated. # html_domain_indices = True # If false, no index is generated. # html_use_index = True # If true, the index is split into individual pages for each letter. # html_split_index = False # If true, links to the reST sources are added to the pages. # html_show_sourcelink = True # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. # html_show_sphinx = True # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. # html_show_copyright = True # If true, an OpenSearch description file will be output, and all pages will # contain a tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. # html_use_opensearch = '' # This is the file name suffix for HTML files (e.g. ".xhtml"). # html_file_suffix = None # Output file base name for HTML help builder. htmlhelp_basename = 'oslo.rootwrapReleaseNotesDoc' # -- Options for LaTeX output --------------------------------------------- latex_elements = { # The paper size ('letterpaper' or 'a4paper'). # 'papersize': 'letterpaper', # The font size ('10pt', '11pt' or '12pt'). # 'pointsize': '10pt', # Additional stuff for the LaTeX preamble. # 'preamble': '', } # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, # author, documentclass [howto, manual, or own class]). latex_documents = [ ('index', 'oslo.rootwrapReleaseNotes.tex', u'oslo.rootwrap Release Notes Documentation', u'oslo.rootwrap Developers', 'manual'), ] # The name of an image file (relative to this directory) to place at the top of # the title page. 
# latex_logo = None # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. # latex_use_parts = False # If true, show page references after internal links. # latex_show_pagerefs = False # If true, show URL addresses after external links. # latex_show_urls = False # Documents to append as an appendix to all manuals. # latex_appendices = [] # If false, no module index is generated. # latex_domain_indices = True # -- Options for manual page output --------------------------------------- # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). man_pages = [ ('index', 'oslo.rootwrapReleaseNotes', u'oslo.rootwrap Release Notes Documentation', [u'oslo.rootwrap Developers'], 1) ] # If true, show URL addresses after external links. # man_show_urls = False # -- Options for Texinfo output ------------------------------------------- # Grouping the document tree into Texinfo files. List of tuples # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [ ('index', 'oslo.rootwrapReleaseNotes', u'oslo.rootwrap Release Notes Documentation', u'oslo.rootwrap Developers', 'oslo.rootwrapReleaseNotes', 'Allows fine-grained filtering of shell commands to run as root from' ' OpenStack services.', 'Miscellaneous'), ] # Documents to append as an appendix to all manuals. # texinfo_appendices = [] # If false, no module index is generated. # texinfo_domain_indices = True # How to display URL addresses: 'footnote', 'no', or 'inline'. # texinfo_show_urls = 'footnote' # If true, do not generate a @detailmenu in the "Top" node's menu. # texinfo_no_detailmenu = False # -- Options for Internationalization output ------------------------------ locale_dirs = ['locale/'] oslo.rootwrap-5.13.0/releasenotes/source/_static/0000775000175100017510000000000013224676617022111 5ustar zuulzuul00000000000000oslo.rootwrap-5.13.0/releasenotes/source/_static/.placeholder0000666000175100017510000000000013224676415024360 0ustar zuulzuul00000000000000oslo.rootwrap-5.13.0/releasenotes/source/unreleased.rst0000666000175100017510000000014413224676415023341 0ustar zuulzuul00000000000000========================== Unreleased Release Notes ========================== .. release-notes:: oslo.rootwrap-5.13.0/releasenotes/source/index.rst0000666000175100017510000000023413224676415022321 0ustar zuulzuul00000000000000============================= oslo.rootwrap Release Notes ============================= .. toctree:: :maxdepth: 1 unreleased pike ocata oslo.rootwrap-5.13.0/releasenotes/source/ocata.rst0000666000175100017510000000023013224676415022275 0ustar zuulzuul00000000000000=================================== Ocata Series Release Notes =================================== .. release-notes:: :branch: origin/stable/ocata oslo.rootwrap-5.13.0/releasenotes/source/_templates/0000775000175100017510000000000013224676617022620 5ustar zuulzuul00000000000000oslo.rootwrap-5.13.0/releasenotes/source/_templates/.placeholder0000666000175100017510000000000013224676415025067 0ustar zuulzuul00000000000000oslo.rootwrap-5.13.0/releasenotes/source/pike.rst0000666000175100017510000000021713224676415022143 0ustar zuulzuul00000000000000=================================== Pike Series Release Notes =================================== .. 
release-notes:: :branch: stable/pike oslo.rootwrap-5.13.0/releasenotes/notes/0000775000175100017510000000000013224676617020313 5ustar zuulzuul00000000000000oslo.rootwrap-5.13.0/releasenotes/notes/add_reno-3b4ae0789e9c45b4.yaml0000666000175100017510000000007113224676415025172 0ustar zuulzuul00000000000000--- other: - Switch to reno for managing release notes.oslo.rootwrap-5.13.0/oslo_rootwrap/0000775000175100017510000000000013224676617017403 5ustar zuulzuul00000000000000oslo.rootwrap-5.13.0/oslo_rootwrap/jsonrpc.py0000666000175100017510000001421413224676415021433 0ustar zuulzuul00000000000000# Copyright (c) 2014 Mirantis Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import base64 import errno import json from multiprocessing import connection from multiprocessing import managers import socket import struct import weakref from oslo_rootwrap import wrapper class RpcJSONEncoder(json.JSONEncoder): def default(self, o): # We need to pass bytes unchanged as they are expected in arguments for # and are result of Popen.communicate() if isinstance(o, bytes): return {"__bytes__": base64.b64encode(o).decode('ascii')} # Handle two exception types relevant to command execution if isinstance(o, wrapper.NoFilterMatched): return {"__exception__": "NoFilterMatched"} elif isinstance(o, wrapper.FilterMatchNotExecutable): return {"__exception__": "FilterMatchNotExecutable", "match": o.match} # Other errors will fail to pass JSON encoding and will be visible on # client side else: return super(RpcJSONEncoder, self).default(o) # Parse whatever RpcJSONEncoder supplied us with def rpc_object_hook(obj): if "__exception__" in obj: type_name = obj.pop("__exception__") if type_name not in ("NoFilterMatched", "FilterMatchNotExecutable"): return obj exc_type = getattr(wrapper, type_name) return exc_type(**obj) elif "__bytes__" in obj: return base64.b64decode(obj["__bytes__"].encode('ascii')) else: return obj class JsonListener(object): def __init__(self, address, backlog=1): self.address = address self._socket = socket.socket(socket.AF_UNIX) try: self._socket.setblocking(True) self._socket.bind(address) self._socket.listen(backlog) except socket.error: self._socket.close() raise self.closed = False self._accepted = weakref.WeakSet() def accept(self): while True: try: s, _ = self._socket.accept() except socket.error as e: if e.errno in (errno.EINVAL, errno.EBADF): raise EOFError elif e.errno != errno.EINTR: raise else: break s.setblocking(True) conn = JsonConnection(s) self._accepted.add(conn) return conn def close(self): if not self.closed: self._socket.shutdown(socket.SHUT_RDWR) self._socket.close() self.closed = True def get_accepted(self): return self._accepted if hasattr(managers.Server, 'accepter'): # In Python 3 accepter() thread has infinite loop. We break it with # EOFError, so we should silence this error here. 
def silent_accepter(self): try: old_accepter(self) except EOFError: pass old_accepter = managers.Server.accepter managers.Server.accepter = silent_accepter class JsonConnection(object): def __init__(self, sock): sock.setblocking(True) self._socket = sock def send_bytes(self, s): self._socket.sendall(struct.pack('!Q', len(s))) self._socket.sendall(s) def recv_bytes(self, maxsize=None): l = struct.unpack('!Q', self.recvall(8))[0] if maxsize is not None and l > maxsize: raise RuntimeError("Too big message received") s = self.recvall(l) return s def send(self, obj): s = self.dumps(obj) self.send_bytes(s) def recv(self): s = self.recv_bytes() return self.loads(s) def close(self): self._socket.close() def half_close(self): self._socket.shutdown(socket.SHUT_RD) # We have to use slow version of recvall with eventlet because of a bug in # GreenSocket.recv_into: # https://bitbucket.org/eventlet/eventlet/pull-request/41 def _recvall_slow(self, size): remaining = size res = [] while remaining: piece = self._socket.recv(remaining) if not piece: raise EOFError res.append(piece) remaining -= len(piece) return b''.join(res) def recvall(self, size): buf = bytearray(size) mem = memoryview(buf) got = 0 while got < size: piece_size = self._socket.recv_into(mem[got:]) if not piece_size: raise EOFError got += piece_size # bytearray is mostly compatible with bytes and we could avoid copying # data here, but hmac doesn't like it in Python 3.3 (not in 2.7 or 3.4) return bytes(buf) @staticmethod def dumps(obj): return json.dumps(obj, cls=RpcJSONEncoder).encode('utf-8') @staticmethod def loads(s): res = json.loads(s.decode('utf-8'), object_hook=rpc_object_hook) try: kind = res[0] except (IndexError, TypeError): pass else: # In Python 2 json returns unicode while multiprocessing needs str if (kind in ("#TRACEBACK", "#UNSERIALIZABLE") and not isinstance(res[1], str)): res[1] = res[1].encode('utf-8', 'replace') return res class JsonClient(JsonConnection): def __init__(self, address, authkey=None): sock = socket.socket(socket.AF_UNIX) sock.setblocking(True) sock.connect(address) super(JsonClient, self).__init__(sock) if authkey is not None: connection.answer_challenge(self, authkey) connection.deliver_challenge(self, authkey) oslo.rootwrap-5.13.0/oslo_rootwrap/tests/0000775000175100017510000000000013224676617020545 5ustar zuulzuul00000000000000oslo.rootwrap-5.13.0/oslo_rootwrap/tests/test_rootwrap.py0000666000175100017510000006541513224676415024044 0ustar zuulzuul00000000000000# Copyright 2011 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import logging import logging.handlers import os import tempfile import uuid import fixtures import mock from six import moves import testtools from oslo_rootwrap import cmd from oslo_rootwrap import daemon from oslo_rootwrap import filters from oslo_rootwrap import subprocess from oslo_rootwrap import wrapper class RootwrapLoaderTestCase(testtools.TestCase): def test_privsep_in_loader(self): privsep = ["privsep-helper", "--context", "foo"] filterlist = wrapper.load_filters([]) # mock out get_exec because with mock.patch.object(filters.CommandFilter, 'get_exec') as ge: ge.return_value = "/fake/privsep-helper" filtermatch = wrapper.match_filter(filterlist, privsep) self.assertIsNotNone(filtermatch) self.assertEqual(["/fake/privsep-helper", "--context", "foo"], filtermatch.get_command(privsep)) def test_strict_switched_off_in_configparser(self): temp_dir = self.useFixture(fixtures.TempDir()).path os.mkdir(os.path.join(temp_dir, 'nested')) temp_file = os.path.join(temp_dir, 'test.conf') f = open(temp_file, 'w') f.write("""[Filters] privsep: PathFilter, privsep-helper, root privsep: PathFilter, privsep-helper, root """) f.close() filterlist = wrapper.load_filters([temp_dir]) self.assertIsNotNone(filterlist) class RootwrapTestCase(testtools.TestCase): if os.path.exists('/sbin/ip'): _ip = '/sbin/ip' else: _ip = '/bin/ip' def setUp(self): super(RootwrapTestCase, self).setUp() self.filters = [ filters.RegExpFilter("/bin/ls", "root", 'ls', '/[a-z]+'), filters.CommandFilter("/usr/bin/foo_bar_not_exist", "root"), filters.RegExpFilter("/bin/cat", "root", 'cat', '/[a-z]+'), filters.CommandFilter("/nonexistent/cat", "root"), filters.CommandFilter("/bin/cat", "root") # Keep this one last ] def test_CommandFilter(self): f = filters.CommandFilter("sleep", 'root', '10') self.assertFalse(f.match(["sleep2"])) # verify that any arguments are accepted self.assertTrue(f.match(["sleep"])) self.assertTrue(f.match(["sleep", "anything"])) self.assertTrue(f.match(["sleep", "10"])) f = filters.CommandFilter("sleep", 'root') self.assertTrue(f.match(["sleep", "10"])) def test_empty_commandfilter(self): f = filters.CommandFilter("sleep", "root") self.assertFalse(f.match([])) self.assertFalse(f.match(None)) def test_empty_regexpfilter(self): f = filters.RegExpFilter("sleep", "root", "sleep") self.assertFalse(f.match([])) self.assertFalse(f.match(None)) def test_empty_invalid_regexpfilter(self): f = filters.RegExpFilter("sleep", "root") self.assertFalse(f.match(["anything"])) self.assertFalse(f.match([])) def test_RegExpFilter_match(self): usercmd = ["ls", "/root"] filtermatch = wrapper.match_filter(self.filters, usercmd) self.assertFalse(filtermatch is None) self.assertEqual(["/bin/ls", "/root"], filtermatch.get_command(usercmd)) def test_RegExpFilter_reject(self): usercmd = ["ls", "root"] self.assertRaises(wrapper.NoFilterMatched, wrapper.match_filter, self.filters, usercmd) def test_missing_command(self): valid_but_missing = ["foo_bar_not_exist"] invalid = ["foo_bar_not_exist_and_not_matched"] self.assertRaises(wrapper.FilterMatchNotExecutable, wrapper.match_filter, self.filters, valid_but_missing) self.assertRaises(wrapper.NoFilterMatched, wrapper.match_filter, self.filters, invalid) def _test_EnvFilter_as_DnsMasq(self, config_file_arg): usercmd = ['env', config_file_arg + '=A', 'NETWORK_ID=foobar', 'dnsmasq', 'foo'] f = filters.EnvFilter("env", "root", config_file_arg + '=A', 'NETWORK_ID=', "/usr/bin/dnsmasq") self.assertTrue(f.match(usercmd)) self.assertEqual(['/usr/bin/dnsmasq', 'foo'], f.get_command(usercmd)) env = 
f.get_environment(usercmd) self.assertEqual('A', env.get(config_file_arg)) self.assertEqual('foobar', env.get('NETWORK_ID')) def test_EnvFilter(self): envset = ['A=/some/thing', 'B=somethingelse'] envcmd = ['env'] + envset realcmd = ['sleep', '10'] usercmd = envcmd + realcmd f = filters.EnvFilter("env", "root", "A=", "B=ignored", "sleep") # accept with leading env self.assertTrue(f.match(envcmd + ["sleep"])) # accept without leading env self.assertTrue(f.match(envset + ["sleep"])) # any other command does not match self.assertFalse(f.match(envcmd + ["sleep2"])) self.assertFalse(f.match(envset + ["sleep2"])) # accept any trailing arguments self.assertTrue(f.match(usercmd)) # require given environment variables to match self.assertFalse(f.match([envcmd, 'C=ELSE'])) self.assertFalse(f.match(['env', 'C=xx'])) self.assertFalse(f.match(['env', 'A=xx'])) # require env command to be given # (otherwise CommandFilters should match self.assertFalse(f.match(realcmd)) # require command to match self.assertFalse(f.match(envcmd)) self.assertFalse(f.match(envcmd[1:])) # ensure that the env command is stripped when executing self.assertEqual(realcmd, f.exec_args(usercmd)) env = f.get_environment(usercmd) # check that environment variables are set self.assertEqual('/some/thing', env.get('A')) self.assertEqual('somethingelse', env.get('B')) self.assertNotIn('sleep', env.keys()) def test_EnvFilter_without_leading_env(self): envset = ['A=/some/thing', 'B=somethingelse'] envcmd = ['env'] + envset realcmd = ['sleep', '10'] f = filters.EnvFilter("sleep", "root", "A=", "B=ignored") # accept without leading env self.assertTrue(f.match(envset + ["sleep"])) self.assertEqual(realcmd, f.get_command(envcmd + realcmd)) self.assertEqual(realcmd, f.get_command(envset + realcmd)) env = f.get_environment(envset + realcmd) # check that environment variables are set self.assertEqual('/some/thing', env.get('A')) self.assertEqual('somethingelse', env.get('B')) self.assertNotIn('sleep', env.keys()) def test_KillFilter(self): if not os.path.exists("/proc/%d" % os.getpid()): self.skipTest("Test requires /proc filesystem (procfs)") p = subprocess.Popen(["cat"], stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.STDOUT) try: f = filters.KillFilter("root", "/bin/cat", "-9", "-HUP") f2 = filters.KillFilter("root", "/usr/bin/cat", "-9", "-HUP") usercmd = ['kill', '-ALRM', p.pid] # Incorrect signal should fail self.assertFalse(f.match(usercmd) or f2.match(usercmd)) usercmd = ['kill', p.pid] # Providing no signal should fail self.assertFalse(f.match(usercmd) or f2.match(usercmd)) # Providing matching signal should be allowed usercmd = ['kill', '-9', p.pid] self.assertTrue(f.match(usercmd) or f2.match(usercmd)) f = filters.KillFilter("root", "/bin/cat") f2 = filters.KillFilter("root", "/usr/bin/cat") usercmd = ['kill', os.getpid()] # Our own PID does not match /bin/sleep, so it should fail self.assertFalse(f.match(usercmd) or f2.match(usercmd)) usercmd = ['kill', 999999] # Nonexistent PID should fail self.assertFalse(f.match(usercmd) or f2.match(usercmd)) usercmd = ['kill', p.pid] # Providing no signal should work self.assertTrue(f.match(usercmd) or f2.match(usercmd)) # verify that relative paths are matched against $PATH f = filters.KillFilter("root", "cat") # Our own PID does not match so it should fail usercmd = ['kill', os.getpid()] self.assertFalse(f.match(usercmd)) # Filter should find cat in /bin or /usr/bin usercmd = ['kill', p.pid] self.assertTrue(f.match(usercmd)) # Filter shouldn't be able to find binary in $PATH, 
so fail with fixtures.EnvironmentVariable("PATH", "/foo:/bar"): self.assertFalse(f.match(usercmd)) # ensure that unset $PATH is not causing an exception with fixtures.EnvironmentVariable("PATH"): self.assertFalse(f.match(usercmd)) finally: # Terminate the "cat" process and wait for it to finish p.terminate() p.wait() def test_KillFilter_no_raise(self): """Makes sure ValueError from bug 926412 is gone.""" f = filters.KillFilter("root", "") # Providing anything other than kill should be False usercmd = ['notkill', 999999] self.assertFalse(f.match(usercmd)) # Providing something that is not a pid should be False usercmd = ['kill', 'notapid'] self.assertFalse(f.match(usercmd)) # no arguments should also be fine self.assertFalse(f.match([])) self.assertFalse(f.match(None)) def test_KillFilter_deleted_exe(self): """Makes sure deleted exe's are killed correctly.""" command = "/bin/commandddddd" f = filters.KillFilter("root", command) usercmd = ['kill', 1234] # Providing no signal should work with mock.patch('os.readlink') as readlink: readlink.return_value = command + ' (deleted)' with mock.patch('os.path.isfile') as exists: def fake_exists(path): return path == command exists.side_effect = fake_exists self.assertTrue(f.match(usercmd)) @mock.patch('os.readlink') @mock.patch('os.path.isfile') def test_KillFilter_upgraded_exe(self, mock_isfile, mock_readlink): """Makes sure upgraded exe's are killed correctly.""" f = filters.KillFilter("root", "/bin/commandddddd") command = "/bin/commandddddd" usercmd = ['kill', 1234] def fake_exists(path): return path == command mock_readlink.return_value = command + '\0\05190bfb2 (deleted)' mock_isfile.side_effect = fake_exists self.assertTrue(f.match(usercmd)) @mock.patch('os.readlink') @mock.patch('os.path.isfile') @mock.patch('os.path.exists') @mock.patch('os.access') def test_KillFilter_renamed_exe(self, mock_access, mock_exists, mock_isfile, mock_readlink): """Makes sure renamed exe's are killed correctly.""" command = "/bin/commandddddd" f = filters.KillFilter("root", command) usercmd = ['kill', 1234] def fake_os_func(path, *args): return path == command mock_readlink.return_value = command + ';90bfb2 (deleted)' m = mock.mock_open(read_data=command) with mock.patch("six.moves.builtins.open", m, create=True): mock_isfile.side_effect = fake_os_func mock_exists.side_effect = fake_os_func mock_access.side_effect = fake_os_func self.assertTrue(f.match(usercmd)) def test_ReadFileFilter(self): goodfn = '/good/file.name' f = filters.ReadFileFilter(goodfn) usercmd = ['cat', '/bad/file'] self.assertFalse(f.match(['cat', '/bad/file'])) usercmd = ['cat', goodfn] self.assertEqual(['/bin/cat', goodfn], f.get_command(usercmd)) self.assertTrue(f.match(usercmd)) def test_IpFilter_non_netns(self): f = filters.IpFilter(self._ip, 'root') self.assertTrue(f.match(['ip', 'link', 'list'])) self.assertTrue(f.match(['ip', '-s', 'link', 'list'])) self.assertTrue(f.match(['ip', '-s', '-v', 'netns', 'add'])) self.assertTrue(f.match(['ip', 'link', 'set', 'interface', 'netns', 'somens'])) def test_IpFilter_netns(self): f = filters.IpFilter(self._ip, 'root') self.assertFalse(f.match(['ip', 'netns', 'exec', 'foo'])) self.assertFalse(f.match(['ip', 'netns', 'exec'])) self.assertFalse(f.match(['ip', '-s', 'netns', 'exec'])) self.assertFalse(f.match(['ip', '-l', '42', 'netns', 'exec'])) def _test_IpFilter_netns_helper(self, action): f = filters.IpFilter(self._ip, 'root') self.assertTrue(f.match(['ip', 'link', action])) def test_IpFilter_netns_add(self): self._test_IpFilter_netns_helper('add') 
def test_IpFilter_netns_delete(self): self._test_IpFilter_netns_helper('delete') def test_IpFilter_netns_list(self): self._test_IpFilter_netns_helper('list') def test_IpNetnsExecFilter_match(self): f = filters.IpNetnsExecFilter(self._ip, 'root') self.assertTrue( f.match(['ip', 'netns', 'exec', 'foo', 'ip', 'link', 'list'])) def test_IpNetnsExecFilter_nomatch(self): f = filters.IpNetnsExecFilter(self._ip, 'root') self.assertFalse(f.match(['ip', 'link', 'list'])) # verify that at least a NS is given self.assertFalse(f.match(['ip', 'netns', 'exec'])) def test_IpNetnsExecFilter_nomatch_nonroot(self): f = filters.IpNetnsExecFilter(self._ip, 'user') self.assertFalse( f.match(['ip', 'netns', 'exec', 'foo', 'ip', 'link', 'list'])) def test_match_filter_recurses_exec_command_filter_matches(self): filter_list = [filters.IpNetnsExecFilter(self._ip, 'root'), filters.IpFilter(self._ip, 'root')] args = ['ip', 'netns', 'exec', 'foo', 'ip', 'link', 'list'] self.assertIsNotNone(wrapper.match_filter(filter_list, args)) def test_match_filter_recurses_exec_command_matches_user(self): filter_list = [filters.IpNetnsExecFilter(self._ip, 'root'), filters.IpFilter(self._ip, 'user')] args = ['ip', 'netns', 'exec', 'foo', 'ip', 'link', 'list'] # Currently ip netns exec requires root, so verify that # no non-root filter is matched, as that would escalate privileges self.assertRaises(wrapper.NoFilterMatched, wrapper.match_filter, filter_list, args) def test_match_filter_recurses_exec_command_filter_does_not_match(self): filter_list = [filters.IpNetnsExecFilter(self._ip, 'root'), filters.IpFilter(self._ip, 'root')] args = ['ip', 'netns', 'exec', 'foo', 'ip', 'netns', 'exec', 'bar', 'ip', 'link', 'list'] self.assertRaises(wrapper.NoFilterMatched, wrapper.match_filter, filter_list, args) def test_ChainingRegExpFilter_match(self): filter_list = [filters.ChainingRegExpFilter('nice', 'root', 'nice', '-?\d+'), filters.CommandFilter('cat', 'root')] args = ['nice', '5', 'cat', '/a'] dirs = ['/bin', '/usr/bin'] self.assertIsNotNone(wrapper.match_filter(filter_list, args, dirs)) def test_ChainingRegExpFilter_not_match(self): filter_list = [filters.ChainingRegExpFilter('nice', 'root', 'nice', '-?\d+'), filters.CommandFilter('cat', 'root')] args_invalid = (['nice', '5', 'ls', '/a'], ['nice', '--5', 'cat', '/a'], ['nice2', '5', 'cat', '/a'], ['nice', 'cat', '/a'], ['nice', '5']) dirs = ['/bin', '/usr/bin'] for args in args_invalid: self.assertRaises(wrapper.NoFilterMatched, wrapper.match_filter, filter_list, args, dirs) def test_ChainingRegExpFilter_multiple(self): filter_list = [filters.ChainingRegExpFilter('ionice', 'root', 'ionice', '-c[0-3]'), filters.ChainingRegExpFilter('ionice', 'root', 'ionice', '-c[0-3]', '-n[0-7]'), filters.CommandFilter('cat', 'root')] # both filters match to ['ionice', '-c2'], but only the second accepts args = ['ionice', '-c2', '-n7', 'cat', '/a'] dirs = ['/bin', '/usr/bin'] self.assertIsNotNone(wrapper.match_filter(filter_list, args, dirs)) def test_ReadFileFilter_empty_args(self): goodfn = '/good/file.name' f = filters.ReadFileFilter(goodfn) self.assertFalse(f.match([])) self.assertFalse(f.match(None)) def test_exec_dirs_search(self): # This test supposes you have /bin/cat or /usr/bin/cat locally f = filters.CommandFilter("cat", "root") usercmd = ['cat', '/f'] self.assertTrue(f.match(usercmd)) self.assertTrue(f.get_command(usercmd, exec_dirs=['/bin', '/usr/bin']) in (['/bin/cat', '/f'], ['/usr/bin/cat', '/f'])) def test_skips(self): # Check that all filters are skipped and that the last matches 
usercmd = ["cat", "/"] filtermatch = wrapper.match_filter(self.filters, usercmd) self.assertTrue(filtermatch is self.filters[-1]) def test_RootwrapConfig(self): raw = moves.configparser.RawConfigParser() # Empty config should raise configparser.Error self.assertRaises(moves.configparser.Error, wrapper.RootwrapConfig, raw) # Check default values raw.set('DEFAULT', 'filters_path', '/a,/b') config = wrapper.RootwrapConfig(raw) self.assertEqual(['/a', '/b'], config.filters_path) self.assertEqual(os.environ["PATH"].split(':'), config.exec_dirs) with fixtures.EnvironmentVariable("PATH"): c = wrapper.RootwrapConfig(raw) self.assertEqual([], c.exec_dirs) self.assertFalse(config.use_syslog) self.assertEqual(logging.handlers.SysLogHandler.LOG_SYSLOG, config.syslog_log_facility) self.assertEqual(logging.ERROR, config.syslog_log_level) # Check general values raw.set('DEFAULT', 'exec_dirs', '/a,/x') config = wrapper.RootwrapConfig(raw) self.assertEqual(['/a', '/x'], config.exec_dirs) raw.set('DEFAULT', 'use_syslog', 'oui') self.assertRaises(ValueError, wrapper.RootwrapConfig, raw) raw.set('DEFAULT', 'use_syslog', 'true') config = wrapper.RootwrapConfig(raw) self.assertTrue(config.use_syslog) raw.set('DEFAULT', 'syslog_log_facility', 'moo') self.assertRaises(ValueError, wrapper.RootwrapConfig, raw) raw.set('DEFAULT', 'syslog_log_facility', 'local0') config = wrapper.RootwrapConfig(raw) self.assertEqual(logging.handlers.SysLogHandler.LOG_LOCAL0, config.syslog_log_facility) raw.set('DEFAULT', 'syslog_log_facility', 'LOG_AUTH') config = wrapper.RootwrapConfig(raw) self.assertEqual(logging.handlers.SysLogHandler.LOG_AUTH, config.syslog_log_facility) raw.set('DEFAULT', 'syslog_log_level', 'bar') self.assertRaises(ValueError, wrapper.RootwrapConfig, raw) raw.set('DEFAULT', 'syslog_log_level', 'INFO') config = wrapper.RootwrapConfig(raw) self.assertEqual(logging.INFO, config.syslog_log_level) def test_getlogin(self): with mock.patch('os.getlogin') as os_getlogin: os_getlogin.return_value = 'foo' self.assertEqual('foo', wrapper._getlogin()) def test_getlogin_bad(self): with mock.patch('os.getenv') as os_getenv: with mock.patch('os.getlogin') as os_getlogin: os_getenv.side_effect = [None, None, 'bar'] os_getlogin.side_effect = OSError( '[Errno 22] Invalid argument') self.assertEqual('bar', wrapper._getlogin()) os_getlogin.assert_called_once_with() self.assertEqual(3, os_getenv.call_count) class PathFilterTestCase(testtools.TestCase): def setUp(self): super(PathFilterTestCase, self).setUp() self.tmp_root_dir = tempfile.mkdtemp() tmpdir = fixtures.TempDir(self.tmp_root_dir) self.useFixture(tmpdir) self.f = filters.PathFilter('/bin/chown', 'root', 'nova', tmpdir.path) gen_name = lambda: str(uuid.uuid4()) self.SIMPLE_FILE_WITHIN_DIR = os.path.join(tmpdir.path, 'some') self.SIMPLE_FILE_OUTSIDE_DIR = os.path.join(self.tmp_root_dir, 'some') self.TRAVERSAL_WITHIN_DIR = os.path.join(tmpdir.path, 'a', '..', 'some') self.TRAVERSAL_OUTSIDE_DIR = os.path.join(tmpdir.path, '..', 'some') self.TRAVERSAL_SYMLINK_WITHIN_DIR = os.path.join(tmpdir.path, gen_name()) os.symlink(os.path.join(tmpdir.path, 'a', '..', 'a'), self.TRAVERSAL_SYMLINK_WITHIN_DIR) self.TRAVERSAL_SYMLINK_OUTSIDE_DIR = os.path.join(tmpdir.path, gen_name()) os.symlink(os.path.join(tmpdir.path, 'a', '..', '..', '..', 'etc'), self.TRAVERSAL_SYMLINK_OUTSIDE_DIR) self.SYMLINK_WITHIN_DIR = os.path.join(tmpdir.path, gen_name()) os.symlink(os.path.join(tmpdir.path, 'a'), self.SYMLINK_WITHIN_DIR) self.SYMLINK_OUTSIDE_DIR = os.path.join(tmpdir.path, gen_name()) 
os.symlink(os.path.join(self.tmp_root_dir, 'some_file'), self.SYMLINK_OUTSIDE_DIR) def test_empty_args(self): self.assertFalse(self.f.match([])) self.assertFalse(self.f.match(None)) def test_argument_pass_constraint(self): f = filters.PathFilter('/bin/chown', 'root', 'pass', 'pass') args = ['chown', 'something', self.SIMPLE_FILE_OUTSIDE_DIR] self.assertTrue(f.match(args)) def test_argument_equality_constraint(self): temp_file_path = os.path.join(self.tmp_root_dir, 'spam/eggs') f = filters.PathFilter('/bin/chown', 'root', 'nova', temp_file_path) args = ['chown', 'nova', temp_file_path] self.assertTrue(f.match(args)) args = ['chown', 'quantum', temp_file_path] self.assertFalse(f.match(args)) def test_wrong_arguments_number(self): args = ['chown', '-c', 'nova', self.SIMPLE_FILE_WITHIN_DIR] self.assertFalse(self.f.match(args)) def test_wrong_exec_command(self): args = ['wrong_exec', self.SIMPLE_FILE_WITHIN_DIR] self.assertFalse(self.f.match(args)) def test_match(self): args = ['chown', 'nova', self.SIMPLE_FILE_WITHIN_DIR] self.assertTrue(self.f.match(args)) def test_match_traversal(self): args = ['chown', 'nova', self.TRAVERSAL_WITHIN_DIR] self.assertTrue(self.f.match(args)) def test_match_symlink(self): args = ['chown', 'nova', self.SYMLINK_WITHIN_DIR] self.assertTrue(self.f.match(args)) def test_match_traversal_symlink(self): args = ['chown', 'nova', self.TRAVERSAL_SYMLINK_WITHIN_DIR] self.assertTrue(self.f.match(args)) def test_reject(self): args = ['chown', 'nova', self.SIMPLE_FILE_OUTSIDE_DIR] self.assertFalse(self.f.match(args)) def test_reject_traversal(self): args = ['chown', 'nova', self.TRAVERSAL_OUTSIDE_DIR] self.assertFalse(self.f.match(args)) def test_reject_symlink(self): args = ['chown', 'nova', self.SYMLINK_OUTSIDE_DIR] self.assertFalse(self.f.match(args)) def test_reject_traversal_symlink(self): args = ['chown', 'nova', self.TRAVERSAL_SYMLINK_OUTSIDE_DIR] self.assertFalse(self.f.match(args)) def test_get_command(self): args = ['chown', 'nova', self.SIMPLE_FILE_WITHIN_DIR] expected = ['/bin/chown', 'nova', self.SIMPLE_FILE_WITHIN_DIR] self.assertEqual(expected, self.f.get_command(args)) def test_get_command_traversal(self): args = ['chown', 'nova', self.TRAVERSAL_WITHIN_DIR] expected = ['/bin/chown', 'nova', os.path.realpath(self.TRAVERSAL_WITHIN_DIR)] self.assertEqual(expected, self.f.get_command(args)) def test_get_command_symlink(self): args = ['chown', 'nova', self.SYMLINK_WITHIN_DIR] expected = ['/bin/chown', 'nova', os.path.realpath(self.SYMLINK_WITHIN_DIR)] self.assertEqual(expected, self.f.get_command(args)) def test_get_command_traversal_symlink(self): args = ['chown', 'nova', self.TRAVERSAL_SYMLINK_WITHIN_DIR] expected = ['/bin/chown', 'nova', os.path.realpath(self.TRAVERSAL_SYMLINK_WITHIN_DIR)] self.assertEqual(expected, self.f.get_command(args)) class RunOneCommandTestCase(testtools.TestCase): def _test_returncode_helper(self, returncode, expected): with mock.patch.object(wrapper, 'start_subprocess') as mock_start: with mock.patch('sys.exit') as mock_exit: mock_start.return_value.wait.return_value = returncode cmd.run_one_command(None, mock.Mock(), None, None) mock_exit.assert_called_once_with(expected) def test_positive_returncode(self): self._test_returncode_helper(1, 1) def test_negative_returncode(self): self._test_returncode_helper(-1, 129) class DaemonCleanupException(Exception): pass class DaemonCleanupTestCase(testtools.TestCase): @mock.patch('os.chmod') @mock.patch('shutil.rmtree') @mock.patch('tempfile.mkdtemp') 
@mock.patch('multiprocessing.managers.BaseManager.get_server', side_effect=DaemonCleanupException) def test_daemon_no_cleanup_for_uninitialized_server(self, gs, mkd, *args): mkd.return_value = '/just_dir/123' self.assertRaises(DaemonCleanupException, daemon.daemon_start, config=None, filters=None) oslo.rootwrap-5.13.0/oslo_rootwrap/tests/test_functional_eventlet.py0000666000175100017510000000423013224676415026223 0ustar zuulzuul00000000000000# Copyright (c) 2014 Mirantis Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import os if os.environ.get('TEST_EVENTLET', False): import eventlet eventlet.monkey_patch() from oslo_rootwrap.tests import test_functional class RootwrapDaemonTest(test_functional.RootwrapDaemonTest): def assert_unpatched(self): # This test case is specifically for eventlet testing pass def _thread_worker(self, seconds, msg): code, out, err = self.execute( ['sh', '-c', 'sleep %d; echo %s' % (seconds, msg)]) # Ignore trailing newline self.assertEqual(msg, out.rstrip()) def _thread_worker_timeout(self, seconds, msg, timeout): with eventlet.Timeout(timeout): try: self._thread_worker(seconds, msg) except eventlet.Timeout: pass def test_eventlet_threads(self): """Check eventlet compatibility. The multiprocessing module is not eventlet friendly and must be protected against eventlet thread switching and its timeout exceptions. """ th = [] # 10 was not enough for some reason. for i in range(15): th.append( eventlet.spawn(self._thread_worker, i % 3, 'abc%d' % i)) for i in [5, 17, 20, 25]: th.append( eventlet.spawn(self._thread_worker_timeout, 2, 'timeout%d' % i, i)) for thread in th: thread.wait() oslo.rootwrap-5.13.0/oslo_rootwrap/tests/__init__.py0000666000175100017510000000000013224676415022642 0ustar zuulzuul00000000000000oslo.rootwrap-5.13.0/oslo_rootwrap/tests/test_functional.py0000666000175100017510000002425213224676415024323 0ustar zuulzuul00000000000000# Copyright (c) 2014 Mirantis Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import contextlib import io import logging import os import pwd import shutil import signal import sys import threading import time try: import eventlet except ImportError: eventlet = None import fixtures import mock import six import testtools from testtools import content from oslo_rootwrap import client from oslo_rootwrap import cmd from oslo_rootwrap import subprocess from oslo_rootwrap.tests import run_daemon class _FunctionalBase(object): def setUp(self): super(_FunctionalBase, self).setUp() tmpdir = self.useFixture(fixtures.TempDir()).path self.config_file = os.path.join(tmpdir, 'rootwrap.conf') self.later_cmd = os.path.join(tmpdir, 'later_install_cmd') filters_dir = os.path.join(tmpdir, 'filters.d') filters_file = os.path.join(tmpdir, 'filters.d', 'test.filters') os.mkdir(filters_dir) with open(self.config_file, 'w') as f: f.write("""[DEFAULT] filters_path=%s daemon_timeout=10 exec_dirs=/bin""" % (filters_dir,)) with open(filters_file, 'w') as f: f.write("""[Filters] echo: CommandFilter, /bin/echo, root cat: CommandFilter, /bin/cat, root sh: CommandFilter, /bin/sh, root id: CommandFilter, /usr/bin/id, nobody unknown_cmd: CommandFilter, /unknown/unknown_cmd, root later_install_cmd: CommandFilter, %s, root """ % self.later_cmd) def _test_run_once(self, expect_byte=True): code, out, err = self.execute(['echo', 'teststr']) self.assertEqual(0, code) if expect_byte: expect_out = b'teststr\n' expect_err = b'' else: expect_out = 'teststr\n' expect_err = '' self.assertEqual(expect_out, out) self.assertEqual(expect_err, err) def _test_run_with_stdin(self, expect_byte=True): code, out, err = self.execute(['cat'], stdin=b'teststr') self.assertEqual(0, code) if expect_byte: expect_out = b'teststr' expect_err = b'' else: expect_out = 'teststr' expect_err = '' self.assertEqual(expect_out, out) self.assertEqual(expect_err, err) def test_run_command_not_found(self): code, out, err = self.execute(['unknown_cmd']) self.assertEqual(cmd.RC_NOEXECFOUND, code) def test_run_unauthorized_command(self): code, out, err = self.execute(['unauthorized_cmd']) self.assertEqual(cmd.RC_UNAUTHORIZED, code) def test_run_as(self): if os.getuid() != 0: self.skip('Test requires root (for setuid)') # Should run as 'nobody' code, out, err = self.execute(['id', '-u']) self.assertEqual('%s\n' % pwd.getpwnam('nobody').pw_uid, out) # Should run as 'root' code, out, err = self.execute(['sh', '-c', 'id -u']) self.assertEqual('0\n', out) class RootwrapTest(_FunctionalBase, testtools.TestCase): def setUp(self): super(RootwrapTest, self).setUp() self.cmd = [ sys.executable, '-c', 'from oslo_rootwrap import cmd; cmd.main()', self.config_file] def execute(self, cmd, stdin=None): proc = subprocess.Popen( self.cmd + cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, ) out, err = proc.communicate(stdin) self.addDetail('stdout', content.text_content(out.decode('utf-8', 'replace'))) self.addDetail('stderr', content.text_content(err.decode('utf-8', 'replace'))) return proc.returncode, out, err def test_run_once(self): self._test_run_once(expect_byte=True) def test_run_with_stdin(self): self._test_run_with_stdin(expect_byte=True) class RootwrapDaemonTest(_FunctionalBase, testtools.TestCase): def assert_unpatched(self): # We need to verify that these tests are run without eventlet patching if eventlet and eventlet.patcher.is_monkey_patched('socket'): self.fail("Standard library should not be patched by eventlet" " for this test") def setUp(self): self.assert_unpatched() super(RootwrapDaemonTest, self).setUp() # 
Collect daemon logs daemon_log = io.BytesIO() p = mock.patch('oslo_rootwrap.subprocess.Popen', run_daemon.forwarding_popen(daemon_log)) p.start() self.addCleanup(p.stop) # Collect client logs client_log = six.StringIO() handler = logging.StreamHandler(client_log) log_format = run_daemon.log_format.replace('+', ' ') handler.setFormatter(logging.Formatter(log_format)) logger = logging.getLogger('oslo_rootwrap') logger.addHandler(handler) logger.setLevel(logging.DEBUG) self.addCleanup(logger.removeHandler, handler) # Add all logs as details @self.addCleanup def add_logs(): self.addDetail('daemon_log', content.Content( content.UTF8_TEXT, lambda: [daemon_log.getvalue()])) self.addDetail('client_log', content.Content( content.UTF8_TEXT, lambda: [client_log.getvalue().encode('utf-8')])) # Create client self.client = client.Client([ sys.executable, run_daemon.__file__, self.config_file]) # _finalize is set during Client.execute() @self.addCleanup def finalize_client(): if self.client._initialized: self.client._finalize() self.execute = self.client.execute def test_run_once(self): self._test_run_once(expect_byte=False) def test_run_with_stdin(self): self._test_run_with_stdin(expect_byte=False) def test_run_with_later_install_cmd(self): code, out, err = self.execute(['later_install_cmd']) self.assertEqual(cmd.RC_NOEXECFOUND, code) # Install cmd and try again shutil.copy('/bin/echo', self.later_cmd) code, out, err = self.execute(['later_install_cmd']) # Expect successfully run the cmd self.assertEqual(0, code) def test_daemon_ressurection(self): # Let the client start a daemon self.execute(['cat']) # Make daemon go away os.kill(self.client._process.pid, signal.SIGTERM) # Expect client to successfully restart daemon and run simple request self.test_run_once() def test_daemon_timeout(self): # Let the client start a daemon self.execute(['echo']) # Make daemon timeout with mock.patch.object(self.client, '_restart') as restart: time.sleep(15) self.execute(['echo']) restart.assert_called_once() def _exec_thread(self, fifo_path): try: # Run a shell script that signals calling process through FIFO and # then hangs around for 1 sec self._thread_res = self.execute([ 'sh', '-c', 'echo > "%s"; sleep 1; echo OK' % fifo_path]) except Exception as e: self._thread_res = e def test_graceful_death(self): # Create a fifo in a temporary dir tmpdir = self.useFixture(fixtures.TempDir()).path fifo_path = os.path.join(tmpdir, 'fifo') os.mkfifo(fifo_path) # Start daemon self.execute(['cat']) # Begin executing shell script t = threading.Thread(target=self._exec_thread, args=(fifo_path,)) t.start() # Wait for shell script to actually start with open(fifo_path) as f: f.readline() # Gracefully kill daemon process os.kill(self.client._process.pid, signal.SIGTERM) # Expect daemon to wait for our request to finish t.join() if isinstance(self._thread_res, Exception): raise self._thread_res # Python 3 will even provide nice traceback code, out, err = self._thread_res self.assertEqual(0, code) self.assertEqual('OK\n', out) self.assertEqual('', err) @contextlib.contextmanager def _test_daemon_cleanup(self): # Start a daemon self.execute(['cat']) socket_path = self.client._manager._address # Stop it one way or another yield process = self.client._process stop = threading.Event() # Start background thread that would kill process in 1 second if it # doesn't die by then def sleep_kill(): stop.wait(1) if not stop.is_set(): os.kill(process.pid, signal.SIGKILL) threading.Thread(target=sleep_kill).start() # Wait for process to finish one way 
or another self.client._process.wait() # Notify background thread that process is dead (no need to kill it) stop.set() # Fail if the process got killed by the background thread self.assertNotEqual(-signal.SIGKILL, process.returncode, "Server haven't stopped in one second") # Verify that socket is deleted self.assertFalse(os.path.exists(socket_path), "Server didn't remove its temporary directory") def test_daemon_cleanup_client(self): # Run _test_daemon_cleanup stopping daemon as Client instance would # normally do with self._test_daemon_cleanup(): self.client._finalize() def test_daemon_cleanup_signal(self): # Run _test_daemon_cleanup stopping daemon with SIGTERM signal with self._test_daemon_cleanup(): os.kill(self.client._process.pid, signal.SIGTERM) oslo.rootwrap-5.13.0/oslo_rootwrap/tests/run_daemon.py0000666000175100017510000000305213224676415023244 0ustar zuulzuul00000000000000# Copyright (c) 2014 Mirantis Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import logging import sys import threading from oslo_rootwrap import cmd from oslo_rootwrap import subprocess def forward_stream(fr, to): while True: line = fr.readline() if not line: break to.write(line) def forwarding_popen(f, old_popen=subprocess.Popen): def popen(*args, **kwargs): p = old_popen(*args, **kwargs) t = threading.Thread(target=forward_stream, args=(p.stderr, f)) t.daemon = True t.start() return p return popen class nonclosing(object): def __init__(self, f): self._f = f def __getattr__(self, name): return getattr(self._f, name) def close(self): pass log_format = ("%(asctime)s | [%(process)5s]+%(levelname)5s | " "%(message)s") if __name__ == '__main__': logging.basicConfig(level=logging.DEBUG, format=log_format) sys.stderr = nonclosing(sys.stderr) cmd.daemon() oslo.rootwrap-5.13.0/oslo_rootwrap/cmd.py0000666000175100017510000001014213224676415020514 0ustar zuulzuul00000000000000# Copyright (c) 2011 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Root wrapper for OpenStack services Filters which commands a service is allowed to run as another user. To use this with oslo, you should set the following in oslo.conf: rootwrap_config=/etc/oslo/rootwrap.conf You also need to let the oslo user run oslo-rootwrap as root in sudoers: oslo ALL = (root) NOPASSWD: /usr/bin/oslo-rootwrap /etc/oslo/rootwrap.conf * Service packaging should deploy .filters files only on nodes where they are needed, to avoid allowing more than is necessary. 
""" from __future__ import print_function import logging import sys from six import moves from oslo_rootwrap import wrapper RC_UNAUTHORIZED = 99 RC_NOCOMMAND = 98 RC_BADCONFIG = 97 RC_NOEXECFOUND = 96 SIGNAL_BASE = 128 def _exit_error(execname, message, errorcode, log=True): print("%s: %s" % (execname, message), file=sys.stderr) if log: logging.error(message) sys.exit(errorcode) def daemon(): return main(run_daemon=True) def main(run_daemon=False): # Split arguments, require at least a command execname = sys.argv.pop(0) if run_daemon: if len(sys.argv) != 1: _exit_error(execname, "Extra arguments to daemon", RC_NOCOMMAND, log=False) else: if len(sys.argv) < 2: _exit_error(execname, "No command specified", RC_NOCOMMAND, log=False) configfile = sys.argv.pop(0) # Load configuration try: rawconfig = moves.configparser.RawConfigParser() rawconfig.read(configfile) config = wrapper.RootwrapConfig(rawconfig) except ValueError as exc: msg = "Incorrect value in %s: %s" % (configfile, exc.args[0]) _exit_error(execname, msg, RC_BADCONFIG, log=False) except moves.configparser.Error: _exit_error(execname, "Incorrect configuration file: %s" % configfile, RC_BADCONFIG, log=False) if config.use_syslog: wrapper.setup_syslog(execname, config.syslog_log_facility, config.syslog_log_level) filters = wrapper.load_filters(config.filters_path) if run_daemon: # NOTE(dims): When not running as daemon, this import # slows us down just a bit. So moving it here so we have # it only when we need it. from oslo_rootwrap import daemon as daemon_mod daemon_mod.daemon_start(config, filters) else: run_one_command(execname, config, filters, sys.argv) def run_one_command(execname, config, filters, userargs): # Execute command if it matches any of the loaded filters try: obj = wrapper.start_subprocess( filters, userargs, exec_dirs=config.exec_dirs, log=config.use_syslog, stdin=sys.stdin, stdout=sys.stdout, stderr=sys.stderr) returncode = obj.wait() # Fix returncode of Popen if returncode < 0: returncode = SIGNAL_BASE - returncode sys.exit(returncode) except wrapper.FilterMatchNotExecutable as exc: msg = ("Executable not found: %s (filter match = %s)" % (exc.match.exec_path, exc.match.name)) _exit_error(execname, msg, RC_NOEXECFOUND, log=config.use_syslog) except wrapper.NoFilterMatched: msg = ("Unauthorized command: %s (no filter matched)" % ' '.join(userargs)) _exit_error(execname, msg, RC_UNAUTHORIZED, log=config.use_syslog) oslo.rootwrap-5.13.0/oslo_rootwrap/daemon.py0000666000175100017510000001630313224676415021221 0ustar zuulzuul00000000000000# Copyright (c) 2014 Mirantis Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from __future__ import print_function import functools import logging from multiprocessing import managers import os import shutil import signal import six import stat import sys import tempfile import threading import time from oslo_rootwrap import cmd from oslo_rootwrap import jsonrpc from oslo_rootwrap import subprocess from oslo_rootwrap import wrapper LOG = logging.getLogger(__name__) # Since multiprocessing supports only pickle and xmlrpclib for serialization of # RPC requests and responses, we declare another 'jsonrpc' serializer managers.listener_client['jsonrpc'] = jsonrpc.JsonListener, jsonrpc.JsonClient class RootwrapClass(object): def __init__(self, config, filters): self.config = config self.filters = filters self.reset_timer() self.prepare_timer(config) def run_one_command(self, userargs, stdin=None): self.reset_timer() try: obj = wrapper.start_subprocess( self.filters, userargs, exec_dirs=self.config.exec_dirs, log=self.config.use_syslog, close_fds=True, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE) except wrapper.FilterMatchNotExecutable: LOG.warning("Executable not found for: %s", ' '.join(userargs)) return cmd.RC_NOEXECFOUND, "", "" except wrapper.NoFilterMatched: LOG.warning("Unauthorized command: %s (no filter matched)", ' '.join(userargs)) return cmd.RC_UNAUTHORIZED, "", "" if six.PY3 and stdin is not None: stdin = os.fsencode(stdin) out, err = obj.communicate(stdin) if six.PY3: out = os.fsdecode(out) err = os.fsdecode(err) return obj.returncode, out, err @classmethod def reset_timer(cls): cls.last_called = time.time() @classmethod def cancel_timer(cls): try: cls.timeout.cancel() except RuntimeError: pass @classmethod def prepare_timer(cls, config=None): if config is not None: cls.daemon_timeout = config.daemon_timeout # Wait a bit longer to avoid rounding errors timeout = max( cls.last_called + cls.daemon_timeout - time.time(), 0) + 1 if getattr(cls, 'timeout', None): # Another timer is already initialized return cls.timeout = threading.Timer(timeout, cls.handle_timeout) cls.timeout.start() @classmethod def handle_timeout(cls): if cls.last_called < time.time() - cls.daemon_timeout: cls.shutdown() cls.prepare_timer() @staticmethod def shutdown(): # Suicide to force break of the main thread os.kill(os.getpid(), signal.SIGINT) def get_manager_class(config=None, filters=None): class RootwrapManager(managers.BaseManager): def __init__(self, address=None, authkey=None): # Force jsonrpc because neither pickle nor xmlrpclib is secure super(RootwrapManager, self).__init__(address, authkey, serializer='jsonrpc') if config is not None: partial_class = functools.partial(RootwrapClass, config, filters) RootwrapManager.register('rootwrap', partial_class) else: RootwrapManager.register('rootwrap') return RootwrapManager def daemon_start(config, filters): temp_dir = tempfile.mkdtemp(prefix='rootwrap-') LOG.debug("Created temporary directory %s", temp_dir) try: # allow everybody to find the socket rwxr_xr_x = (stat.S_IRWXU | stat.S_IRGRP | stat.S_IXGRP | stat.S_IROTH | stat.S_IXOTH) os.chmod(temp_dir, rwxr_xr_x) socket_path = os.path.join(temp_dir, "rootwrap.sock") LOG.debug("Will listen on socket %s", socket_path) manager_cls = get_manager_class(config, filters) manager = manager_cls(address=socket_path) server = manager.get_server() try: # allow everybody to connect to the socket rw_rw_rw_ = (stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IWGRP | stat.S_IROTH | stat.S_IWOTH) os.chmod(socket_path, rw_rw_rw_) try: # In Python 3 we have to use buffer to 
push in bytes directly stdout = sys.stdout.buffer except AttributeError: stdout = sys.stdout stdout.write(socket_path.encode('utf-8')) stdout.write(b'\n') stdout.write(bytes(server.authkey)) sys.stdin.close() sys.stdout.close() sys.stderr.close() # Gracefully shutdown on INT or TERM signals stop = functools.partial(daemon_stop, server) signal.signal(signal.SIGTERM, stop) signal.signal(signal.SIGINT, stop) LOG.info("Starting rootwrap daemon main loop") server.serve_forever() finally: conn = server.listener # This will break accept() loop with EOFError if it was not in the # main thread (as in Python 3.x) conn.close() # Closing all currently connected client sockets for reading to # break worker threads blocked on recv() for cl_conn in conn.get_accepted(): try: cl_conn.half_close() except Exception: # Most likely the socket have already been closed LOG.debug("Failed to close connection") RootwrapClass.cancel_timer() LOG.info("Waiting for all client threads to finish.") for thread in threading.enumerate(): if thread.daemon: LOG.debug("Joining thread %s", thread) thread.join() finally: LOG.debug("Removing temporary directory %s", temp_dir) shutil.rmtree(temp_dir) def daemon_stop(server, signal, frame): LOG.info("Got signal %s. Shutting down server", signal) # Signals are caught in the main thread which means this handler will run # in the middle of serve_forever() loop. It will catch this exception and # properly return. Since all threads created by server_forever are # daemonic, we need to join them afterwards. In Python 3 we can just hit # stop_event instead. try: server.stop_event.set() except AttributeError: raise KeyboardInterrupt oslo.rootwrap-5.13.0/oslo_rootwrap/__init__.py0000666000175100017510000000202113224676415021505 0ustar zuulzuul00000000000000# Copyright (c) 2015 Red Hat. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import os try: import eventlet.patcher except ImportError: _patched_socket = False else: # In tests patching happens later, so we'll rely on environment variable _patched_socket = (eventlet.patcher.is_monkey_patched('socket') or os.environ.get('TEST_EVENTLET', False)) if not _patched_socket: import subprocess else: from eventlet.green import subprocess # noqa oslo.rootwrap-5.13.0/oslo_rootwrap/client.py0000666000175100017510000001470613224676415021241 0ustar zuulzuul00000000000000# Copyright (c) 2014 Mirantis Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import logging from multiprocessing import managers from multiprocessing import util as mp_util import threading import weakref import oslo_rootwrap from oslo_rootwrap import daemon from oslo_rootwrap import jsonrpc from oslo_rootwrap import subprocess if oslo_rootwrap._patched_socket: # We have to use slow version of recvall with eventlet because of a bug in # GreenSocket.recv_into: # https://bitbucket.org/eventlet/eventlet/pull-request/41 # This check happens here instead of jsonrpc to avoid importing eventlet # from daemon code that is run with root privileges. jsonrpc.JsonConnection.recvall = jsonrpc.JsonConnection._recvall_slow try: finalize = weakref.finalize except AttributeError: def finalize(obj, func, *args, **kwargs): return mp_util.Finalize(obj, func, args=args, kwargs=kwargs, exitpriority=0) ClientManager = daemon.get_manager_class() LOG = logging.getLogger(__name__) class Client(object): def __init__(self, rootwrap_daemon_cmd): self._start_command = rootwrap_daemon_cmd self._initialized = False self._need_restart = False self._mutex = threading.Lock() self._manager = None self._proxy = None self._process = None self._finalize = None # This is for eventlet compatibility. multiprocessing stores # daemon connection in ForkAwareLocal, so this won't be # needed with the threading module. self._exec_sem = threading.Lock() def _initialize(self): if self._process is not None and self._process.poll() is not None: LOG.warning("Leaving behind already spawned process with pid %d, " "root should kill it if it's still there (I can't)", self._process.pid) process_obj = subprocess.Popen(self._start_command, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=True) LOG.debug("Popen for %s command has been instantiated", self._start_command) self._process = process_obj socket_path = process_obj.stdout.readline()[:-1] # For Python 3 we need to convert bytes to str here if not isinstance(socket_path, str): socket_path = socket_path.decode('utf-8') authkey = process_obj.stdout.read(32) if process_obj.poll() is not None: stderr = process_obj.stderr.read() # NOTE(yorik-sar): don't expose stdout here raise Exception("Failed to spawn rootwrap process.\nstderr:\n%s" % (stderr,)) LOG.info("Spawned new rootwrap daemon process with pid=%d", process_obj.pid) self._manager = ClientManager(socket_path, authkey) self._manager.connect() self._proxy = self._manager.rootwrap() self._finalize = finalize(self, self._shutdown, self._process, self._manager) self._initialized = True @staticmethod def _shutdown(process, manager, JsonClient=jsonrpc.JsonClient): # Storing JsonClient in arguments because globals are set to None # before executing atexit routines in Python 2.x if process.poll() is None: LOG.info('Stopping rootwrap daemon process with pid=%s', process.pid) try: manager.rootwrap().shutdown() except (EOFError, IOError): pass # assume it is dead already # We might want to wait for process to exit or kill it, but we # can't provide sane timeout on 2.x and we most likely don't have # permisions to do so # Invalidate manager's state so that proxy won't try to do decref manager._state.value = managers.State.SHUTDOWN def _ensure_initialized(self): with self._mutex: if not self._initialized: self._initialize() def _restart(self, proxy): with self._mutex: if not self._initialized: raise AssertionError("Client should be initialized.") # Verify if someone has already restarted this. 
if self._proxy is proxy: self._finalize() self._manager = None self._proxy = None self._initialized = False self._initialize() self._need_restart = False return self._proxy def _run_one_command(self, proxy, cmd, stdin): """Wrap proxy.run_one_command, setting _need_restart on an exception. Usually it should be enough to drain stale data on socket rather than to restart, but we cannot do draining easily. """ try: _need_restart = True res = proxy.run_one_command(cmd, stdin) _need_restart = False return res finally: if _need_restart: self._need_restart = True def execute(self, cmd, stdin=None): with self._exec_sem: self._ensure_initialized() proxy = self._proxy retry = False if self._need_restart: proxy = self._restart(proxy) try: res = self._run_one_command(proxy, cmd, stdin) except (EOFError, IOError): retry = True # res can be None if we received final None sent by dying # server thread instead of response to our # request. Process is most likely to be dead at this # point. if retry or res is None: proxy = self._restart(proxy) res = self._run_one_command(proxy, cmd, stdin) return res oslo.rootwrap-5.13.0/oslo_rootwrap/wrapper.py0000666000175100017510000002061113224676415021433 0ustar zuulzuul00000000000000# Copyright (c) 2011 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import logging import logging.handlers import os import signal import sys if sys.platform != 'win32': import pwd import six from six import moves from oslo_rootwrap import filters from oslo_rootwrap import subprocess class NoFilterMatched(Exception): """This exception is raised when no filter matched.""" pass class FilterMatchNotExecutable(Exception): """Raised when a filter matched but no executable was found.""" def __init__(self, match=None, **kwargs): self.match = match class RootwrapConfig(object): def __init__(self, config): # filters_path self.filters_path = config.get("DEFAULT", "filters_path").split(",") # exec_dirs if config.has_option("DEFAULT", "exec_dirs"): self.exec_dirs = config.get("DEFAULT", "exec_dirs").split(",") else: self.exec_dirs = [] # Use system PATH if exec_dirs is not specified if "PATH" in os.environ: self.exec_dirs = os.environ['PATH'].split(':') # syslog_log_facility if config.has_option("DEFAULT", "syslog_log_facility"): v = config.get("DEFAULT", "syslog_log_facility") facility_names = logging.handlers.SysLogHandler.facility_names self.syslog_log_facility = getattr(logging.handlers.SysLogHandler, v, None) if self.syslog_log_facility is None and v in facility_names: self.syslog_log_facility = facility_names.get(v) if self.syslog_log_facility is None: raise ValueError('Unexpected syslog_log_facility: %s' % v) else: default_facility = logging.handlers.SysLogHandler.LOG_SYSLOG self.syslog_log_facility = default_facility # syslog_log_level if config.has_option("DEFAULT", "syslog_log_level"): v = config.get("DEFAULT", "syslog_log_level") level = v.upper() if (hasattr(logging, '_nameToLevel') and level in logging._nameToLevel): # Workaround a regression of Python 3.4.0 bug fixed in 3.4.2: # http://bugs.python.org/issue22386 self.syslog_log_level = logging._nameToLevel[level] else: self.syslog_log_level = logging.getLevelName(level) if (self.syslog_log_level == "Level %s" % level): raise ValueError('Unexpected syslog_log_level: %r' % v) else: self.syslog_log_level = logging.ERROR # use_syslog if config.has_option("DEFAULT", "use_syslog"): self.use_syslog = config.getboolean("DEFAULT", "use_syslog") else: self.use_syslog = False # daemon_timeout if config.has_option("DEFAULT", "daemon_timeout"): self.daemon_timeout = int(config.get("DEFAULT", "daemon_timeout")) else: self.daemon_timeout = 600 def setup_syslog(execname, facility, level): try: handler = logging.handlers.SysLogHandler(address='/dev/log', facility=facility) except IOError: logging.warning("Unable to setup syslog, maybe /dev/log socket needs " "to be restarted. 
Ignoring syslog configuration " "options.") return rootwrap_logger = logging.getLogger() rootwrap_logger.setLevel(level) handler.setFormatter(logging.Formatter( os.path.basename(execname) + ': %(message)s')) rootwrap_logger.addHandler(handler) def build_filter(class_name, *args): """Returns a filter object of class class_name.""" if not hasattr(filters, class_name): logging.warning("Skipping unknown filter class (%s) specified " "in filter definitions" % class_name) return None filterclass = getattr(filters, class_name) return filterclass(*args) def load_filters(filters_path): """Load filters from a list of directories.""" filterlist = [] for filterdir in filters_path: if not os.path.isdir(filterdir): continue for filterfile in filter(lambda f: not f.startswith('.'), os.listdir(filterdir)): filterfilepath = os.path.join(filterdir, filterfile) if not os.path.isfile(filterfilepath): continue kwargs = {"strict": False} if six.PY3 else {} filterconfig = moves.configparser.RawConfigParser(**kwargs) filterconfig.read(filterfilepath) for (name, value) in filterconfig.items("Filters"): filterdefinition = [s.strip() for s in value.split(',')] newfilter = build_filter(*filterdefinition) if newfilter is None: continue newfilter.name = name filterlist.append(newfilter) # And always include privsep-helper privsep = build_filter("CommandFilter", "privsep-helper", "root") privsep.name = "privsep-helper" filterlist.append(privsep) return filterlist def match_filter(filter_list, userargs, exec_dirs=None): """Checks user command and arguments through command filters. Returns the first matching filter. Raises NoFilterMatched if no filter matched. Raises FilterMatchNotExecutable if no executable was found for the best filter match. """ first_not_executable_filter = None exec_dirs = exec_dirs or [] for f in filter_list: if f.match(userargs): if isinstance(f, filters.ChainingFilter): # This command calls exec verify that remaining args # matches another filter. def non_chain_filter(fltr): return (fltr.run_as == f.run_as and not isinstance(fltr, filters.ChainingFilter)) leaf_filters = [fltr for fltr in filter_list if non_chain_filter(fltr)] args = f.exec_args(userargs) if not args: continue try: match_filter(leaf_filters, args, exec_dirs=exec_dirs) except (NoFilterMatched, FilterMatchNotExecutable): continue # Try other filters if executable is absent if not f.get_exec(exec_dirs=exec_dirs): if not first_not_executable_filter: first_not_executable_filter = f continue # Otherwise return matching filter for execution return f if first_not_executable_filter: # A filter matched, but no executable was found for it raise FilterMatchNotExecutable(match=first_not_executable_filter) # No filter matched raise NoFilterMatched() def _getlogin(): try: return os.getlogin() except OSError: return (os.getenv('USER') or os.getenv('USERNAME') or os.getenv('LOGNAME')) def start_subprocess(filter_list, userargs, exec_dirs=[], log=False, **kwargs): filtermatch = match_filter(filter_list, userargs, exec_dirs) command = filtermatch.get_command(userargs, exec_dirs) if log: logging.info("(%s > %s) Executing %s (filter match = %s)" % ( _getlogin(), pwd.getpwuid(os.getuid())[0], command, filtermatch.name)) def preexec(): # Python installs a SIGPIPE handler by default. This is # usually not what non-Python subprocesses expect. 
signal.signal(signal.SIGPIPE, signal.SIG_DFL) filtermatch.preexec() obj = subprocess.Popen(command, preexec_fn=preexec, env=filtermatch.get_environment(userargs), **kwargs) return obj oslo.rootwrap-5.13.0/oslo_rootwrap/version.py0000666000175100017510000000126513224676415021444 0ustar zuulzuul00000000000000# Copyright 2016 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import pbr.version version_info = pbr.version.VersionInfo('oslo.rootwrap') oslo.rootwrap-5.13.0/oslo_rootwrap/filters.py0000666000175100017510000003321513224676415021427 0ustar zuulzuul00000000000000# Copyright (c) 2011 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import os import re import shutil import sys if sys.platform != 'win32': # NOTE(claudiub): pwd is a Linux-specific library, and currently there is # no Windows support for oslo.rootwrap. 
import pwd def _getuid(user): """Return uid for user.""" return pwd.getpwnam(user).pw_uid class CommandFilter(object): """Command filter only checking that the 1st argument matches exec_path.""" def __init__(self, exec_path, run_as, *args): self.name = '' self.exec_path = exec_path self.run_as = run_as self.args = args self.real_exec = None def get_exec(self, exec_dirs=None): """Returns existing executable, or empty string if none found.""" exec_dirs = exec_dirs or [] if self.real_exec is not None: return self.real_exec if os.path.isabs(self.exec_path): if os.access(self.exec_path, os.X_OK): self.real_exec = self.exec_path else: for binary_path in exec_dirs: expanded_path = os.path.join(binary_path, self.exec_path) if os.access(expanded_path, os.X_OK): self.real_exec = expanded_path break return self.real_exec def match(self, userargs): """Only check that the first argument (command) matches exec_path.""" return userargs and os.path.basename(self.exec_path) == userargs[0] def preexec(self): """Setuid in subprocess right before command is invoked.""" if self.run_as != 'root': os.setuid(_getuid(self.run_as)) def get_command(self, userargs, exec_dirs=None): """Returns command to execute.""" exec_dirs = exec_dirs or [] to_exec = self.get_exec(exec_dirs=exec_dirs) or self.exec_path return [to_exec] + userargs[1:] def get_environment(self, userargs): """Returns specific environment to set, None if none.""" return None class RegExpFilter(CommandFilter): """Command filter doing regexp matching for every argument.""" def match(self, userargs): # Early skip if command or number of args don't match if (not userargs or len(self.args) != len(userargs)): # DENY: argument numbers don't match return False # Compare each arg (anchoring pattern explicitly at end of string) for (pattern, arg) in zip(self.args, userargs): try: if not re.match(pattern + '$', arg): # DENY: Some arguments did not match return False except re.error: # DENY: Badly-formed filter return False # ALLOW: All arguments matched return True class PathFilter(CommandFilter): """Command filter checking that path arguments are within given dirs One can specify the following constraints for command arguments: 1) pass - pass an argument as is to the resulting command 2) some_str - check if an argument is equal to the given string 3) abs path - check if a path argument is within the given base dir A typical rootwrapper filter entry looks like this: # cmdname: filter name, raw command, user, arg_i_constraint [, ...] 
chown: PathFilter, /bin/chown, root, nova, /var/lib/images """ def match(self, userargs): if not userargs or len(userargs) < 2: return False arguments = userargs[1:] equal_args_num = len(self.args) == len(arguments) exec_is_valid = super(PathFilter, self).match(userargs) args_equal_or_pass = all( arg == 'pass' or arg == value for arg, value in zip(self.args, arguments) if not os.path.isabs(arg) # arguments not specifying abs paths ) paths_are_within_base_dirs = all( os.path.commonprefix([arg, os.path.realpath(value)]) == arg for arg, value in zip(self.args, arguments) if os.path.isabs(arg) # arguments specifying abs paths ) return (equal_args_num and exec_is_valid and args_equal_or_pass and paths_are_within_base_dirs) def get_command(self, userargs, exec_dirs=None): exec_dirs = exec_dirs or [] command, arguments = userargs[0], userargs[1:] # convert path values to canonical ones; copy other args as is args = [os.path.realpath(value) if os.path.isabs(arg) else value for arg, value in zip(self.args, arguments)] return super(PathFilter, self).get_command([command] + args, exec_dirs) class KillFilter(CommandFilter): """Specific filter for the kill calls. 1st argument is the user to run /bin/kill under 2nd argument is the location of the affected executable if the argument is not absolute, it is checked against $PATH Subsequent arguments list the accepted signals (if any) This filter relies on /proc to accurately determine affected executable, so it will only work on procfs-capable systems (not OSX). """ def __init__(self, *args): super(KillFilter, self).__init__("/bin/kill", *args) @staticmethod def _program_path(command): """Try to determine the full path for command. Return command if the full path cannot be found. """ # shutil.which() was added to Python 3.3 if hasattr(shutil, 'which'): return shutil.which(command) if os.path.isabs(command): return command path = os.environ.get('PATH', os.defpath).split(os.pathsep) for dir in path: program = os.path.join(dir, command) if os.path.isfile(program): return program return command def _program(self, pid): """Determine the program associated with pid""" try: command = os.readlink("/proc/%d/exe" % int(pid)) except (ValueError, EnvironmentError): # Incorrect PID return None # NOTE(yufang521247): /proc/PID/exe may have '\0' on the # end (ex: if an executable is updated or deleted), because python # doesn't stop at '\0' when read the target path. command = command.partition('\0')[0] # NOTE(dprince): /proc/PID/exe may have ' (deleted)' on # the end if an executable is updated or deleted if command.endswith(" (deleted)"): command = command[:-len(" (deleted)")] if os.path.isfile(command): return command # /proc/PID/exe may have been renamed with # a ';......' or '.#prelink#......' suffix etc. # So defer to /proc/PID/cmdline in that case. try: with open("/proc/%d/cmdline" % int(pid)) as pfile: cmdline = pfile.read().partition('\0')[0] cmdline = self._program_path(cmdline) if os.path.isfile(cmdline): command = cmdline # Note we don't return None if cmdline doesn't exist # as that will allow killing a process where the exe # has been removed from the system rather than updated. 
return command except EnvironmentError: return None def match(self, userargs): if not userargs or userargs[0] != "kill": return False args = list(userargs) if len(args) == 3: # A specific signal is requested signal = args.pop(1) if signal not in self.args[1:]: # Requested signal not in accepted list return False else: if len(args) != 2: # Incorrect number of arguments return False if len(self.args) > 1: # No signal requested, but filter requires specific signal return False command = self._program(args[1]) if not command: return False kill_command = self.args[0] if os.path.isabs(kill_command): return kill_command == command return (os.path.isabs(command) and kill_command == os.path.basename(command) and os.path.dirname(command) in os.environ.get('PATH', '' ).split(':')) class ReadFileFilter(CommandFilter): """Specific filter for the utils.read_file_as_root call.""" def __init__(self, file_path, *args): self.file_path = file_path super(ReadFileFilter, self).__init__("/bin/cat", "root", *args) def match(self, userargs): return (userargs == ['cat', self.file_path]) class IpFilter(CommandFilter): """Specific filter for the ip utility to that does not match exec.""" def match(self, userargs): if userargs[0] == 'ip': # Avoid the 'netns exec' command here for a, b in zip(userargs[1:], userargs[2:]): if a == 'netns': return (b != 'exec') else: return True class EnvFilter(CommandFilter): """Specific filter for the env utility. Behaves like CommandFilter, except that it handles leading env A=B.. strings appropriately. """ def _extract_env(self, arglist): """Extract all leading NAME=VALUE arguments from arglist.""" envs = set() for arg in arglist: if '=' not in arg: break envs.add(arg.partition('=')[0]) return envs def __init__(self, exec_path, run_as, *args): super(EnvFilter, self).__init__(exec_path, run_as, *args) env_list = self._extract_env(self.args) # Set exec_path to X when args are in the form of # env A=a B=b C=c X Y Z if "env" in exec_path and len(env_list) < len(self.args): self.exec_path = self.args[len(env_list)] def match(self, userargs): # ignore leading 'env' if userargs[0] == 'env': userargs.pop(0) # require one additional argument after configured ones if len(userargs) < len(self.args): return False # extract all env args user_envs = self._extract_env(userargs) filter_envs = self._extract_env(self.args) user_command = userargs[len(user_envs):len(user_envs) + 1] # match first non-env argument with CommandFilter return (super(EnvFilter, self).match(user_command) and len(filter_envs) and user_envs == filter_envs) def exec_args(self, userargs): args = userargs[:] # ignore leading 'env' if args[0] == 'env': args.pop(0) # Throw away leading NAME=VALUE arguments while args and '=' in args[0]: args.pop(0) return args def get_command(self, userargs, exec_dirs=[]): to_exec = self.get_exec(exec_dirs=exec_dirs) or self.exec_path return [to_exec] + self.exec_args(userargs)[1:] def get_environment(self, userargs): env = os.environ.copy() # ignore leading 'env' if userargs[0] == 'env': userargs.pop(0) # Handle leading NAME=VALUE pairs for a in userargs: env_name, equals, env_value = a.partition('=') if not equals: break if env_name and env_value: env[env_name] = env_value return env class ChainingFilter(CommandFilter): def exec_args(self, userargs): return [] class IpNetnsExecFilter(ChainingFilter): """Specific filter for the ip utility to that does match exec.""" def match(self, userargs): # Network namespaces currently require root # require argument if self.run_as != "root" or len(userargs) < 
4: return False return (userargs[:3] == ['ip', 'netns', 'exec']) def exec_args(self, userargs): args = userargs[4:] if args: args[0] = os.path.basename(args[0]) return args class ChainingRegExpFilter(ChainingFilter): """Command filter doing regexp matching for prefix commands. Remaining arguments are filtered again. This means that the command specified as the arguments must be also allowed to execute directly. """ def match(self, userargs): # Early skip if number of args is smaller than the filter if (not userargs or len(self.args) > len(userargs)): return False # Compare each arg (anchoring pattern explicitly at end of string) for (pattern, arg) in zip(self.args, userargs): try: if not re.match(pattern + '$', arg): # DENY: Some arguments did not match return False except re.error: # DENY: Badly-formed filter return False # ALLOW: All arguments matched return True def exec_args(self, userargs): args = userargs[len(self.args):] if args: args[0] = os.path.basename(args[0]) return args oslo.rootwrap-5.13.0/.testr.conf0000666000175100017510000000032213224676415016553 0ustar zuulzuul00000000000000[DEFAULT] test_command=OS_STDOUT_CAPTURE=1 OS_STDERR_CAPTURE=1 OS_TEST_TIMEOUT=60 ${PYTHON:-python} -m subunit.run discover -t ./ . $LISTOPT $IDOPTION test_id_option=--load-list $IDFILE test_list_option=--list oslo.rootwrap-5.13.0/README.rst0000666000175100017510000000175113224676415016163 0ustar zuulzuul00000000000000======================== Team and repository tags ======================== .. image:: http://governance.openstack.org/badges/oslo.rootwrap.svg :target: http://governance.openstack.org/reference/tags/index.html .. Change things from this point on =============================================== oslo.rootwrap -- Escalated Permission Control =============================================== .. image:: https://img.shields.io/pypi/v/oslo.rootwrap.svg :target: https://pypi.python.org/pypi/oslo.rootwrap/ :alt: Latest Version .. image:: https://img.shields.io/pypi/dm/oslo.rootwrap.svg :target: https://pypi.python.org/pypi/oslo.rootwrap/ :alt: Downloads oslo.rootwrap allows fine-grained filtering of shell commands to run as `root` from OpenStack services. * License: Apache License, Version 2.0 * Documentation: https://docs.openstack.org/oslo.rootwrap/latest/ * Source: https://git.openstack.org/cgit/openstack/oslo.rootwrap * Bugs: https://bugs.launchpad.net/oslo.rootwrap oslo.rootwrap-5.13.0/doc/0000775000175100017510000000000013224676617015237 5ustar zuulzuul00000000000000oslo.rootwrap-5.13.0/doc/requirements.txt0000666000175100017510000000060213224676427020522 0ustar zuulzuul00000000000000# The order of packages is significant, because pip processes them in the order # of appearance. Changing the order has an impact on the overall integration # process, which may cause wedges in the gate later. # These are needed for docs generation openstackdocstheme>=1.17.0 # Apache-2.0 sphinx>=1.6.2 # BSD reno>=2.5.0 # Apache-2.0 mock>=2.0.0 # BSD fixtures>=3.0.0 # Apache-2.0/BSD oslo.rootwrap-5.13.0/doc/source/0000775000175100017510000000000013224676617016537 5ustar zuulzuul00000000000000oslo.rootwrap-5.13.0/doc/source/user/0000775000175100017510000000000013224676617017515 5ustar zuulzuul00000000000000oslo.rootwrap-5.13.0/doc/source/user/usage.rst0000666000175100017510000002775713224676415021373 0ustar zuulzuul00000000000000===== Usage ===== Rootwrap should be used as a separate Python process calling the ``oslo_rootwrap.cmd:main`` function. 
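In practice, the consuming project exposes this entry point as a console script of its own; a minimal ``setup.cfg`` sketch could look like this (the ``nova-rootwrap`` names are illustrative and not shipped by this library)::

    [entry_points]
    console_scripts =
        nova-rootwrap = oslo_rootwrap.cmd:main
        nova-rootwrap-daemon = oslo_rootwrap.cmd:daemon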
You can set up a specific console_script calling into ``oslo_rootwrap.cmd:main``, called for example ``nova-rootwrap``. To keep things simple, this document will consider that your console_script is called ``/usr/bin/nova-rootwrap``. The rootwrap command line should be called under `sudo`. It's first parameter is the configuration file to use, and the remainder of the parameters are the command line to execute: :: sudo nova-rootwrap ROOTWRAP_CONFIG COMMAND_LINE How rootwrap works ================== OpenStack services generally run under a specific, unprivileged user. However, sometimes they need to run a command as ``root``. Instead of just calling ``sudo make me a sandwich`` and have a blanket ``sudoers`` permission to always escalate rights from their unprivileged users to ``root``, those services can call ``sudo nova-rootwrap /etc/nova/rootwrap.conf make me a sandwich``. A sudoers entry lets the unprivileged user run ``nova-rootwrap`` as ``root``. ``nova-rootwrap`` looks for filter definition directories in its configuration file, and loads command filters from them. Then it checks if the command requested by the OpenStack service matches one of those filters, in which case it executes the command (as ``root``). If no filter matches, it denies the request. This allows for complex filtering of allowed commands, as well as shipping filter definitions together with the OpenStack code that needs them. Security model ============== The escalation path is fully controlled by the ``root`` user. A ``sudoers`` entry (owned by ``root``) allows the unprivileged user to run (as ``root``) a specific rootwrap executable, and only with a specific configuration file (which should be owned by ``root``) as its first parameter. ``nova-rootwrap`` imports the Python modules it needs from a cleaned (and system-default) ``PYTHONPATH``. The configuration file points to root-owned filter definition directories, which contain root-owned filters definition files. This chain ensures that the unprivileged user itself is never in control of the configuration or modules used by the ``nova-rootwrap`` executable. Installation ============ All nodes wishing to run ``nova-rootwrap`` should contain a ``sudoers`` entry that lets the unprivileged user run ``nova-rootwrap`` as ``root``, pointing to the root-owned ``rootwrap.conf`` configuration file and allowing any parameter after that. For example, Nova nodes should have this line in their ``sudoers`` file, to allow the ``nova`` user to call ``sudo nova-rootwrap``:: nova ALL = (root) NOPASSWD: /usr/bin/nova-rootwrap /etc/nova/rootwrap.conf * Then the node also should ship the filter definitions corresponding to its usage of ``nova-rootwrap``. You should not install any other filters file on that node, otherwise you would allow extra unneeded commands to be run as ``root``. The filter file(s) corresponding to the node must be installed in one of the filters_path directories. For example, on Nova compute nodes, you should only have ``compute.filters`` installed. The file should be owned and writeable only by the ``root`` user. Rootwrap configuration ====================== The ``rootwrap.conf`` file is used to influence how ``nova-rootwrap`` works. Since it's in the trusted security path, it needs to be owned and writeable only by the ``root`` user. Its location is specified in the ``sudoers`` entry, and must be provided on ``nova-rootwrap`` command line as its first argument. 
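For reference, a minimal ``rootwrap.conf`` could look like the following sketch (paths are examples only; each parameter is described below)::

    [DEFAULT]
    filters_path=/etc/nova/rootwrap.d,/usr/share/nova/rootwrap
    exec_dirs=/sbin,/usr/sbin,/bin,/usr/bin
    use_syslog=False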
``rootwrap.conf`` uses an *INI* file format with the following sections and parameters: [DEFAULT] section ----------------- filters_path Comma-separated list of directories containing filter definition files. All directories listed must be owned and only writeable by ``root``. This is the only mandatory parameter. Example: ``filters_path=/etc/nova/rootwrap.d,/usr/share/nova/rootwrap`` exec_dirs Comma-separated list of directories to search executables in, in case filters do not explicitly specify a full path. If not specified, defaults to the system ``PATH`` environment variable. All directories listed must be owned and only writeable by ``root``. Example: ``exec_dirs=/sbin,/usr/sbin,/bin,/usr/bin`` use_syslog Enable logging to syslog. Default value is False. Example: ``use_syslog=True`` syslog_log_facility Which syslog facility to use for syslog logging. Valid values include ``auth``, ``authpriv``, ``syslog``, ``user0``, ``user1``... Default value is ``syslog``. Example: ``syslog_log_facility=syslog`` syslog_log_level Which messages to log. ``INFO`` means log all usage, ``ERROR`` means only log unsuccessful attempts. Example: ``syslog_log_level=ERROR`` .filters files ============== Filters definition files contain lists of filters that ``nova-rootwrap`` will use to allow or deny a specific command. They are generally suffixed by ``.filters``. Since they are in the trusted security path, they need to be owned and writeable only by the ``root`` user. Their location is specified in the ``rootwrap.conf`` file. It uses an *INI* file format with a ``[Filters]`` section and several lines, each with a unique parameter name (different for each filter you define): [Filters] section ----------------- filter_name (different for each filter) Comma-separated list containing first the Filter class to use, followed by that Filter arguments (which vary depending on the Filter class selected). Example: ``kpartx: CommandFilter, /sbin/kpartx, root`` Available filter classes ======================== CommandFilter ------------- Basic filter that only checks the executable called. Parameters are: 1. Executable allowed 2. User to run the command under Example: allow to run kpartx as the root user, with any parameters:: kpartx: CommandFilter, kpartx, root RegExpFilter ------------ Generic filter that checks the executable called, then uses a list of regular expressions to check all subsequent arguments. Parameters are: 1. Executable allowed 2. User to run the command under 3. (and following) Regular expressions to use to match first (and subsequent) command arguments Example: allow to run ``/usr/sbin/tunctl``, but only with three parameters with the first two being -b and -t:: tunctl: RegExpFilter, /usr/sbin/tunctl, root, tunctl, -b, -t, .* PathFilter ---------- Generic filter that lets you check that paths provided as parameters fall under a given directory. Parameters are: 1. Executable allowed 2. User to run the command under 3. (and following) Command arguments. There are three types of command arguments: ``pass`` will accept any parameter value, a string will only accept the corresponding string as a parameter, except if the string starts with '/' in which case it will accept any path that resolves under the corresponding directory. Example: allow to chown to the 'nova' user any file under /var/lib/images:: chown: PathFilter, /bin/chown, root, nova, /var/lib/images EnvFilter --------- Filter allowing extra environment variables to be set by the calling code. Parameters are: 1. ``env`` 2. 
User to run the command under 3. (and following) name of the environment variables that can be set, suffixed by ``=`` 4. Executable allowed Example: allow to run ``CONFIG_FILE=foo NETWORK_ID=bar dnsmasq ...`` as root:: dnsmasq: EnvFilter, env, root, CONFIG_FILE=, NETWORK_ID=, dnsmasq ReadFileFilter -------------- Specific filter that lets you read files as ``root`` using ``cat``. Parameters are: 1. Path to the file that you want to read as the ``root`` user. Example: allow to run ``cat /etc/iscsi/initiatorname.iscsi`` as ``root``:: read_initiator: ReadFileFilter, /etc/iscsi/initiatorname.iscsi KillFilter ---------- Kill-specific filter that checks the affected process and the signal sent before allowing the command. Parameters are: 1. User to run ``kill`` under 2. Only affect processes running that executable 3. (and following) Signals you're allowed to send Example: allow to send ``-9`` or ``-HUP`` signals to ``/usr/sbin/dnsmasq`` processes:: kill_dnsmasq: KillFilter, root, /usr/sbin/dnsmasq, -9, -HUP IpFilter -------- ip-specific filter that allows to run any ``ip`` command, except for ``ip netns`` (in which case it only allows the list, add and delete subcommands). Parameters are: 1. ``ip`` 2. User to run ``ip`` under Example: allow to run any ``ip`` command except ``ip netns exec`` and ``ip netns monitor``:: ip: IpFilter, ip, root IpNetnsExecFilter ----------------- ip-specific filter that allows to run any otherwise-allowed command under ``ip netns exec``. The command specified to ``ip netns exec`` must match another filter for this filter to accept it. Parameters are: 1. ``ip`` 2. User to run ``ip`` under Example: allow to run ``ip netns exec <command>`` as long as ``<command>`` matches another filter:: ip: IpNetnsExecFilter, ip, root ChainingRegExpFilter -------------------- Filter that allows to run the prefix command, if the beginning of its arguments matches a list of regular expressions, and if the remaining arguments form any otherwise-allowed command. Parameters are: 1. Executable allowed 2. User to run the command under 3. (and following) Regular expressions to use to match first (and subsequent) command arguments. This filter regards the length of the regular expressions list as the number of arguments to be checked, and remaining parts are checked by other filters. Example: allow to run ``/usr/bin/nice``, but only with the first two parameters being -n and an integer, and followed by any command allowed by the other filters:: nice: ChainingRegExpFilter, /usr/bin/nice, root, nice, -n, -?\d+ Note: this filter can't be used to impose that the subcommand is always run under the prefix command. In particular, it can't enforce that a particular command is only run under "nice", since the subcommand can explicitly be called directly. Calling rootwrap from OpenStack services ======================================== Standalone mode (``sudo`` way) ------------------------------ The ``oslo.processutils`` library ships with a convenience ``execute()`` function that can be used to call shell commands as ``root``, if you call it with the following parameters:: run_as_root=True root_helper='sudo nova-rootwrap /etc/nova/rootwrap.conf' NB: Some services ship with a ``utils.execute()`` convenience function that automatically sets ``root_helper`` based on the value of a ``rootwrap_config`` parameter, so only ``run_as_root=True`` needs to be set.
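For example, such a call could look like the following sketch (the ``processutils`` module is assumed here to come from ``oslo.concurrency``; ``kpartx`` and the ``nova-rootwrap`` paths are the example names used elsewhere in this document)::

    from oslo_concurrency import processutils

    # Ask rootwrap to run "kpartx -a /dev/loop0" as root.  With
    # run_as_root=True, execute() prepends the root_helper command
    # line before running the command.
    stdout, stderr = processutils.execute(
        'kpartx', '-a', '/dev/loop0',
        run_as_root=True,
        root_helper='sudo nova-rootwrap /etc/nova/rootwrap.conf')

With a matching ``kpartx`` filter installed (see the CommandFilter example above), rootwrap allows the call; a command that matches no filter is denied.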
If you want to call as ``root`` a previously-unauthorized command, you will also need to modify the filters (generally shipped in the source tree under ``etc/rootwrap.d`` so that the command you want to run as ``root`` will actually be allowed by ``nova-rootwrap``. Daemon mode ----------- Since 1.3.0 version ``oslo.rootwrap`` supports "daemon mode". In this mode rootwrap would start, read config file and wait for commands to be run with root privileges. All communications with the daemon should go through ``Client`` class that resides in ``oslo_rootwrap.client`` module. Its constructor expects one argument - a list that can be passed to ``Popen`` to create rootwrap daemon process. For ``root_helper`` above it will be ``["sudo", "nova-rootwrap-daemon", "/etc/neutron/rootwrap.conf"]``, for example. Note that it uses a separate script that points to ``oslo_rootwrap.cmd:daemon`` endpoint (instead of ``:main``). The class provides one method ``execute`` with following arguments: * ``userargs`` - list of command line arguments that are to be used to run the command; * ``stdin`` - string to be passed to standard input of child process. The method returns 3-tuple containing: * return code of child process; * string containing everything captured from its stdout stream; * string containing everything captured from its stderr stream. The class lazily creates an instance of the daemon, connects to it and passes arguments. This daemon can die or be killed, ``Client`` will respawn it and/or reconnect to it as necessary. oslo.rootwrap-5.13.0/doc/source/user/history.rst0000666000175100017510000000004013224676415021740 0ustar zuulzuul00000000000000.. include:: ../../../ChangeLog oslo.rootwrap-5.13.0/doc/source/user/index.rst0000666000175100017510000000015713224676415021357 0ustar zuulzuul00000000000000=================== Using oslo.rootwrap =================== .. toctree:: :maxdepth: 2 usage history oslo.rootwrap-5.13.0/doc/source/conf.py0000777000175100017510000000514213224676415020041 0ustar zuulzuul00000000000000# -*- coding: utf-8 -*- # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import os import sys sys.path.insert(0, os.path.abspath('../..')) # -- General configuration ---------------------------------------------------- # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones. extensions = [ 'sphinx.ext.autodoc', #'sphinx.ext.intersphinx', 'openstackdocstheme', ] # openstackdocstheme options repository_name = 'openstack/oslo.rootwrap' bug_project = 'oslo.rootwrap' bug_tag = '' # autodoc generation is a bit aggressive and a nuisance when doing heavy # text edit cycles. # execute "export SPHINX_DEBUG=1" in your terminal to disable # The suffix of source filenames. source_suffix = '.rst' # The master toctree document. master_doc = 'index' # General information about the project. project = u'oslo.rootwrap' copyright = u'2014, OpenStack Foundation' # If true, '()' will be appended to :func: etc. cross-reference text. 
add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). add_module_names = True # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' # -- Options for HTML output -------------------------------------------------- # The theme to use for HTML and HTML Help pages. Major themes that come with # Sphinx are currently 'default' and 'sphinxdoc'. # html_theme_path = ["."] # html_theme = '_theme' # html_static_path = ['static'] html_theme = 'openstackdocs' html_last_updated_fmt = '%Y-%m-%d %H:%M' # Output file base name for HTML help builder. htmlhelp_basename = '%sdoc' % project # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, author, documentclass # [howto/manual]). latex_documents = [ ('index', '%s.tex' % project, u'%s Documentation' % project, u'OpenStack Foundation', 'manual'), ] # Example configuration for intersphinx: refer to the Python standard library. #intersphinx_mapping = {'http://docs.python.org/': None} oslo.rootwrap-5.13.0/doc/source/install/0000775000175100017510000000000013224676617020205 5ustar zuulzuul00000000000000oslo.rootwrap-5.13.0/doc/source/install/index.rst0000666000175100017510000000031713224676415022045 0ustar zuulzuul00000000000000============ Installation ============ At the command line:: $ pip install oslo.rootwrap Or, if you have virtualenvwrapper installed:: $ mkvirtualenv oslo.rootwrap $ pip install oslo.rootwraposlo.rootwrap-5.13.0/doc/source/index.rst0000666000175100017510000000064013224676415020376 0ustar zuulzuul00000000000000=============================================== oslo.rootwrap -- Escalated Permission Control =============================================== oslo.rootwrap allows fine-grained filtering of shell commands to run as `root` from OpenStack services. .. toctree:: :maxdepth: 2 install/index user/index contributor/index .. rubric:: Indices and tables * :ref:`genindex` * :ref:`modindex` * :ref:`search` oslo.rootwrap-5.13.0/doc/source/contributor/0000775000175100017510000000000013224676617021111 5ustar zuulzuul00000000000000oslo.rootwrap-5.13.0/doc/source/contributor/index.rst0000666000175100017510000000012113224676415022742 0ustar zuulzuul00000000000000============= Contributing ============= .. 
include:: ../../../CONTRIBUTING.rst oslo.rootwrap-5.13.0/setup.cfg0000666000175100017510000000170613224676617016321 0ustar zuulzuul00000000000000[metadata] name = oslo.rootwrap author = OpenStack author-email = openstack-dev@lists.openstack.org summary = Oslo Rootwrap description-file = README.rst home-page = https://docs.openstack.org/oslo.rootwrap/latest/ classifier = Development Status :: 4 - Beta Environment :: OpenStack Intended Audience :: Developers Intended Audience :: Information Technology License :: OSI Approved :: Apache Software License Operating System :: OS Independent Programming Language :: Python Programming Language :: Python :: 2.7 Programming Language :: Python :: 3 Programming Language :: Python :: 3.5 [files] packages = oslo_rootwrap [entry_points] console_scripts = oslo-rootwrap = oslo_rootwrap.cmd:main oslo-rootwrap-daemon = oslo_rootwrap.cmd:daemon [build_sphinx] source-dir = doc/source build-dir = doc/build all_files = 1 warning-is-error = 1 [upload_sphinx] upload-dir = doc/build/html [wheel] universal = 1 [egg_info] tag_build = tag_date = 0 oslo.rootwrap-5.13.0/setup.py0000666000175100017510000000200613224676415016200 0ustar zuulzuul00000000000000# Copyright (c) 2013 Hewlett-Packard Development Company, L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. # THIS FILE IS MANAGED BY THE GLOBAL REQUIREMENTS REPO - DO NOT EDIT import setuptools # In python < 2.7.4, a lazy loading of package `pbr` will break # setuptools if some other modules registered functions in `atexit`. # solution from: http://bugs.python.org/issue15881#msg170215 try: import multiprocessing # noqa except ImportError: pass setuptools.setup( setup_requires=['pbr>=2.0.0'], pbr=True)