pax_global_header00006660000000000000000000000064131740103670014514gustar00rootroot0000000000000052 comment=ddf8d562a9df0c23e6d206dea67fbbc5e2a488f9 glare-0.5.0/000077500000000000000000000000001317401036700126105ustar00rootroot00000000000000glare-0.5.0/.gitignore000066400000000000000000000021601317401036700145770ustar00rootroot00000000000000# Byte-compiled / optimized / DLL files __pycache__/ *.py[cod] *$py.class # C extensions *.so # Distribution / packaging .Python env/ build/ develop-eggs/ dist/ downloads/ eggs/ lib/ lib64/ parts/ sdist/ var/ .installed.cfg *.egg* # PyInstaller # Usually these files are written by a python script from a template # before PyInstaller builds the exe, so as to inject date/other infos into it. *.manifest *.spec # Installer logs pip-log.txt pip-delete-this-directory.txt # Unit test / coverage reports htmlcov/ .testrepository/ .tox/ .coverage .coverage.* .cache nosetests.xml coverage.xml *,cover .hypothesis/ ChangeLog # Translations *.mo *.pot # Django stuff: *.log local_settings.py # Flask stuff: instance/ .webassets-cache # Scrapy stuff: .scrapy # Sphinx documentation docs/_build/ # PyBuilder target/ # IPython Notebook .ipynb_checkpoints # pyenv .python-version # celery beat schedule file celerybeat-schedule # dotenv .env # virtualenv venv/ ENV/ # Spyder project settings .spyderproject # Rope project settings .ropeproject # IDE files .idea # Files created by doc build AUTHORS ChangeLog doc/source/api glare-0.5.0/.gitreview000066400000000000000000000001121317401036700146100ustar00rootroot00000000000000[gerrit] host=review.openstack.org port=29418 project=openstack/glare.git glare-0.5.0/.testr.conf000066400000000000000000000005151317401036700146770ustar00rootroot00000000000000[DEFAULT] test_command=OS_STDOUT_CAPTURE=${OS_STDOUT_CAPTURE:-1} \ OS_STDERR_CAPTURE=${OS_STDERR_CAPTURE:-1} \ OS_TEST_TIMEOUT=${OS_TEST_TIMEOUT:-160} \ ${PYTHON:-python} -m subunit.run discover -t ./ ./glare/tests $LISTOPT $IDOPTION test_id_option=--load-list $IDFILE test_list_option=--list glare-0.5.0/LICENSE000066400000000000000000000261351317401036700136240ustar00rootroot00000000000000 Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. END OF TERMS AND CONDITIONS APPENDIX: How to apply the Apache License to your work. To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "{}" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a file or class name and description of purpose be included on the same "printed page" as the copyright notice for easier identification within third-party archives. Copyright {yyyy} {name of copyright owner} Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. glare-0.5.0/README.rst000066400000000000000000000012641317401036700143020ustar00rootroot00000000000000Glare ===== Glare (from GLare Artifact REpository) is a service that provides access to a unified catalog of structured meta-information as well as related binary data (these structures are also called 'artifacts'). * Get Started: https://github.com/openstack/glare/blob/master/doc/source/quickstart.rst * Documentation: https://github.com/openstack/glare/blob/master/doc * Source: https://git.openstack.org/cgit/openstack/glare * Bugs: https://bugs.launchpad.net/glare * Blueprints:** https://blueprints.launchpad.net/glare * REST Client:** https://git.openstack.org/cgit/openstack/python-glareclient License ------- Apache License Version 2.0 http://www.apache.org/licenses/LICENSE-2.0 glare-0.5.0/babel.cfg000066400000000000000000000000201317401036700143260ustar00rootroot00000000000000[python: **.py] glare-0.5.0/bandit.yaml000066400000000000000000000255331317401036700147450ustar00rootroot00000000000000# optional: after how many files to update progress #show_progress_every: 100 # optional: plugins directory name #plugins_dir: 'plugins' # optional: plugins discovery name pattern plugin_name_pattern: '*.py' # optional: terminal escape sequences to display colors #output_colors: # DEFAULT: '\033[0m' # HEADER: '\033[95m' # LOW: '\033[94m' # MEDIUM: '\033[93m' # HIGH: '\033[91m' # optional: log format string #log_format: "[%(module)s]\t%(levelname)s\t%(message)s" # globs of files which should be analyzed include: - '*.py' - '*.pyw' # a list of strings, which if found in the path will cause files to be excluded # for example /tests/ - to remove all all files in tests directory exclude_dirs: - '/tests/' profiles: gate: include: - any_other_function_with_shell_equals_true - assert_used - blacklist_calls - blacklist_import_func # One of the blacklisted imports is the subprocess module. Keystone # has to import the subprocess module in a single module for # eventlet support so in most cases bandit won't be able to detect # that subprocess is even being imported. Also, Bandit's # recommendation is just to check that the use is safe without any # documentation on what safe or unsafe usage is. So this test is # skipped. # - blacklist_imports - exec_used - execute_with_run_as_root_equals_true # - hardcoded_bind_all_interfaces # TODO: enable this test # Not working because wordlist/default-passwords file not bundled, # see https://bugs.launchpad.net/bandit/+bug/1451575 : # - hardcoded_password # Not used because it's prone to false positives: # - hardcoded_sql_expressions # - hardcoded_tmp_directory # TODO: enable this test - jinja2_autoescape_false - linux_commands_wildcard_injection - paramiko_calls - password_config_option_not_marked_secret - request_with_no_cert_validation - set_bad_file_permissions - subprocess_popen_with_shell_equals_true # - subprocess_without_shell_equals_true # TODO: enable this test - start_process_with_a_shell # - start_process_with_no_shell # TODO: enable this test - start_process_with_partial_path - ssl_with_bad_defaults - ssl_with_bad_version - ssl_with_no_version # - try_except_pass # TODO: enable this test - use_of_mako_templates blacklist_calls: bad_name_sets: # - pickle: # qualnames: [pickle.loads, pickle.load, pickle.Unpickler, # cPickle.loads, cPickle.load, cPickle.Unpickler] # message: "Pickle library appears to be in use, possible security issue." # TODO: enable this test - marshal: qualnames: [marshal.load, marshal.loads] message: "Deserialization with the marshal module is possibly dangerous." # - md5: # qualnames: [hashlib.md5, Crypto.Hash.MD2.new, Crypto.Hash.MD4.new, Crypto.Hash.MD5.new, cryptography.hazmat.primitives.hashes.MD5] # message: "Use of insecure MD2, MD4, or MD5 hash function." # TODO: enable this test - mktemp_q: qualnames: [tempfile.mktemp] message: "Use of insecure and deprecated function (mktemp)." - eval: qualnames: [eval] message: "Use of possibly insecure function - consider using safer ast.literal_eval." - mark_safe: names: [mark_safe] message: "Use of mark_safe() may expose cross-site scripting vulnerabilities and should be reviewed." - httpsconnection: qualnames: [httplib.HTTPSConnection] message: "Use of HTTPSConnection does not provide security, see https://wiki.openstack.org/wiki/OSSN/OSSN-0033" - yaml_load: qualnames: [yaml.load] message: "Use of unsafe yaml load. Allows instantiation of arbitrary objects. Consider yaml.safe_load()." - urllib_urlopen: qualnames: [urllib.urlopen, urllib.urlretrieve, urllib.URLopener, urllib.FancyURLopener, urllib2.urlopen, urllib2.Request] message: "Audit url open for permitted schemes. Allowing use of file:/ or custom schemes is often unexpected." - random: qualnames: [random.random, random.randrange, random.randint, random.choice, random.uniform, random.triangular] message: "Standard pseudo-random generators are not suitable for security/cryptographic purposes." level: "LOW" # Most of this is based off of Christian Heimes' work on defusedxml: # https://pypi.python.org/pypi/defusedxml/#defusedxml-sax # TODO(jaegerandi): Enable once defusedxml is in global requirements. #- xml_bad_cElementTree: # qualnames: [xml.etree.cElementTree.parse, # xml.etree.cElementTree.iterparse, # xml.etree.cElementTree.fromstring, # xml.etree.cElementTree.XMLParser] # message: "Using {func} to parse untrusted XML data is known to be vulnerable to XML attacks. Replace {func} with it's defusedxml equivalent function." #- xml_bad_ElementTree: # qualnames: [xml.etree.ElementTree.parse, # xml.etree.ElementTree.iterparse, # xml.etree.ElementTree.fromstring, # xml.etree.ElementTree.XMLParser] # message: "Using {func} to parse untrusted XML data is known to be vulnerable to XML attacks. Replace {func} with it's defusedxml equivalent function." - xml_bad_expatreader: qualnames: [xml.sax.expatreader.create_parser] message: "Using {func} to parse untrusted XML data is known to be vulnerable to XML attacks. Replace {func} with it's defusedxml equivalent function." - xml_bad_expatbuilder: qualnames: [xml.dom.expatbuilder.parse, xml.dom.expatbuilder.parseString] message: "Using {func} to parse untrusted XML data is known to be vulnerable to XML attacks. Replace {func} with it's defusedxml equivalent function." - xml_bad_sax: qualnames: [xml.sax.parse, xml.sax.parseString, xml.sax.make_parser] message: "Using {func} to parse untrusted XML data is known to be vulnerable to XML attacks. Replace {func} with it's defusedxml equivalent function." - xml_bad_minidom: qualnames: [xml.dom.minidom.parse, xml.dom.minidom.parseString] message: "Using {func} to parse untrusted XML data is known to be vulnerable to XML attacks. Replace {func} with it's defusedxml equivalent function." - xml_bad_pulldom: qualnames: [xml.dom.pulldom.parse, xml.dom.pulldom.parseString] message: "Using {func} to parse untrusted XML data is known to be vulnerable to XML attacks. Replace {func} with it's defusedxml equivalent function." - xml_bad_etree: qualnames: [lxml.etree.parse, lxml.etree.fromstring, lxml.etree.RestrictedElement, lxml.etree.GlobalParserTLS, lxml.etree.getDefaultParser, lxml.etree.check_docinfo] message: "Using {func} to parse untrusted XML data is known to be vulnerable to XML attacks. Replace {func} with it's defusedxml equivalent function." shell_injection: # Start a process using the subprocess module, or one of its wrappers. subprocess: [subprocess.Popen, subprocess.call, subprocess.check_call, subprocess.check_output, utils.execute, utils.execute_with_timeout] # Start a process with a function vulnerable to shell injection. shell: [os.system, os.popen, os.popen2, os.popen3, os.popen4, popen2.popen2, popen2.popen3, popen2.popen4, popen2.Popen3, popen2.Popen4, commands.getoutput, commands.getstatusoutput] # Start a process with a function that is not vulnerable to shell injection. no_shell: [os.execl, os.execle, os.execlp, os.execlpe, os.execv,os.execve, os.execvp, os.execvpe, os.spawnl, os.spawnle, os.spawnlp, os.spawnlpe, os.spawnv, os.spawnve, os.spawnvp, os.spawnvpe, os.startfile] blacklist_imports: bad_import_sets: - telnet: imports: [telnetlib] level: HIGH message: "Telnet is considered insecure. Use SSH or some other encrypted protocol." - info_libs: imports: [pickle, cPickle, subprocess, Crypto] level: LOW message: "Consider possible security implications associated with {module} module." # Most of this is based off of Christian Heimes' work on defusedxml: # https://pypi.python.org/pypi/defusedxml/#defusedxml-sax - xml_libs: imports: [xml.etree.cElementTree, xml.etree.ElementTree, xml.sax.expatreader, xml.sax, xml.dom.expatbuilder, xml.dom.minidom, xml.dom.pulldom, lxml.etree, lxml] message: "Using {module} to parse untrusted XML data is known to be vulnerable to XML attacks. Replace {module} with the equivalent defusedxml package." level: LOW - xml_libs_high: imports: [xmlrpclib] message: "Using {module} to parse untrusted XML data is known to be vulnerable to XML attacks. Use defused.xmlrpc.monkey_patch() function to monkey-patch xmlrpclib and mitigate XML vulnerabilities." level: HIGH hardcoded_tmp_directory: tmp_dirs: ['/tmp', '/var/tmp', '/dev/shm'] hardcoded_password: # Support for full path, relative path and special "%(site_data_dir)s" # substitution (/usr/{local}/share) word_list: "%(site_data_dir)s/wordlist/default-passwords" ssl_with_bad_version: bad_protocol_versions: - 'PROTOCOL_SSLv2' - 'SSLv2_METHOD' - 'SSLv23_METHOD' - 'PROTOCOL_SSLv3' # strict option - 'PROTOCOL_TLSv1' # strict option - 'SSLv3_METHOD' # strict option - 'TLSv1_METHOD' # strict option password_config_option_not_marked_secret: function_names: - oslo.config.cfg.StrOpt - oslo_config.cfg.StrOpt execute_with_run_as_root_equals_true: function_names: - ceilometer.utils.execute - cinder.utils.execute - neutron.agent.linux.utils.execute - nova.utils.execute - nova.utils.trycmd try_except_pass: check_typed_exception: True glare-0.5.0/devstack/000077500000000000000000000000001317401036700144145ustar00rootroot00000000000000glare-0.5.0/devstack/README.rst000066400000000000000000000010331317401036700161000ustar00rootroot00000000000000==================== Enabling in Devstack ==================== 1. Download DevStack:: git clone https://github.com/openstack-dev/devstack.git cd devstack 2. Add this repo as an external repository:: > cat local.conf [[local|localrc]] enable_plugin glare https://github.com/openstack/glare .. note:: To enable installation of glare client from git repo instead of pypi execute a shell command: .. code-block:: bash export LIBS_FROM_GIT+=python-glareclient 3. run ``stack.sh`` glare-0.5.0/devstack/plugin.sh000066400000000000000000000153561317401036700162600ustar00rootroot00000000000000#!/usr/bin/env bash # Plugin file for Glare services # ------------------------------- # Dependencies: # ``functions`` file # ``DEST``, ``DATA_DIR``, ``STACK_USER`` must be defined # Save trace setting XTRACE=$(set +o | grep xtrace) set -o xtrace echo_summary "glare's plugin.sh was called..." # create_glare_accounts() - Set up common required glare accounts # # Tenant User Roles # ------------------------------ # service glare admin function create_glare_accounts() { create_service_user "glare" # required for swift access if is_service_enabled s-proxy; then create_service_user "glare-swift" "ResellerAdmin" fi get_or_create_service "glare" "artifact" "Artifact repository" get_or_create_endpoint "artifact" \ "$REGION_NAME" \ "$GLARE_SERVICE_PROTOCOL://$GLARE_SERVICE_HOST:$GLARE_SERVICE_PORT" \ "$GLARE_SERVICE_PROTOCOL://$GLARE_SERVICE_HOST:$GLARE_SERVICE_PORT" \ "$GLARE_SERVICE_PROTOCOL://$GLARE_SERVICE_HOST:$GLARE_SERVICE_PORT" } function mkdir_chown_stack { if [[ ! -d "$1" ]]; then sudo mkdir -p "$1" fi sudo chown $STACK_USER "$1" } function configure_glare { # create and clean up auth cache dir mkdir_chown_stack "$GLARE_AUTH_CACHE_DIR" rm -f "$GLARE_AUTH_CACHE_DIR"/* mkdir_chown_stack "$GLARE_CONF_DIR" # Generate Glare configuration file and configure common parameters. oslo-config-generator --config-file $GLARE_DIR/etc/oslo-config-generator/glare.conf --output-file $GLARE_CONF_FILE # Glare Configuration #------------------------- iniset $GLARE_CONF_FILE DEFAULT debug $GLARE_DEBUG # Specify additional modules with external artifact types if [ -n "$GLARE_CUSTOM_MODULES" ]; then iniset $GLARE_CONF_FILE DEFAULT custom_artifact_types_modules $GLARE_CUSTOM_MODULES fi # Specify a list of enabled artifact types if [ -n "$GLARE_ENABLED_TYPES" ]; then iniset $GLARE_CONF_FILE DEFAULT enabled_artifact_types $GLARE_ENABLED_TYPES fi oslopolicy-sample-generator --namespace=glare --output-file=$GLARE_POLICY_FILE sed -i 's/^#"//' $GLARE_POLICY_FILE cp -p $GLARE_DIR/etc/glare-paste.ini $GLARE_CONF_DIR iniset $GLARE_CONF_FILE paste_deploy flavor $GLARE_FLAVOR # Setup keystone_authtoken section configure_auth_token_middleware $GLARE_CONF_FILE glare $GLARE_AUTH_CACHE_DIR # Setup RabbitMQ credentials iniset $GLARE_CONF_FILE oslo_messaging_rabbit rabbit_userid $RABBIT_USERID iniset $GLARE_CONF_FILE oslo_messaging_rabbit rabbit_password $RABBIT_PASSWORD # Enable notifications support iniset $GLARE_CONF_FILE oslo_messaging_notifications driver messaging # Configure the database. iniset $GLARE_CONF_FILE database connection `database_connection_url glare` iniset $GLARE_CONF_FILE database max_overflow -1 iniset $GLARE_CONF_FILE database max_pool_size 1000 # Path of policy.yaml file. iniset $GLARE_CONF_FILE oslo_policy policy_file $GLARE_POLICY_FILE if [ "$LOG_COLOR" == "True" ] && [ "$SYSLOG" == "False" ]; then setup_colorized_logging $GLARE_CONF_FILE DEFAULT tenant user fi if [ "$GLARE_RPC_IMPLEMENTATION" ]; then iniset $GLARE_CONF_FILE DEFAULT rpc_implementation $GLARE_RPC_IMPLEMENTATION fi # Configuring storage iniset $GLARE_CONF_FILE glance_store filesystem_store_datadir $GLARE_ARTIFACTS_DIR # Store the artifacts in swift if enabled. if is_service_enabled s-proxy; then GLARE_SWIFT_STORE_CONF=$GLARE_CONF_DIR/glare-swift-store.conf cp -p $GLARE_DIR/etc/glare-swift.conf.sample $GLARE_CONF_DIR iniset $GLARE_CONF_FILE glance_store default_store swift iniset $GLARE_CONF_FILE glance_store swift_store_create_container_on_put True iniset $GLARE_CONF_FILE glance_store swift_store_config_file $GLARE_SWIFT_STORE_CONF iniset $GLARE_CONF_FILE glance_store default_swift_reference ref1 iniset $GLARE_CONF_FILE glance_store stores "file, http, swift" iniset $GLARE_SWIFT_STORE_CONF ref1 user $SERVICE_PROJECT_NAME:glare-swift iniset $GLARE_SWIFT_STORE_CONF ref1 key $SERVICE_PASSWORD iniset $GLARE_SWIFT_STORE_CONF ref1 auth_address $KEYSTONE_SERVICE_URI/v3 iniset $GLARE_SWIFT_STORE_CONF ref1 user_domain_name $SERVICE_DOMAIN_NAME iniset $GLARE_SWIFT_STORE_CONF ref1 project_domain_name $SERVICE_DOMAIN_NAME iniset $GLARE_SWIFT_STORE_CONF ref1 auth_version 3 # commenting is not strictly necessary but it's confusing to have bad values in conf inicomment $GLARE_CONF_FILE glance_store swift_store_user inicomment $GLARE_CONF_FILE glance_store swift_store_key inicomment $GLARE_CONF_FILE glance_store swift_store_auth_address fi } # init_glare - Initialize the database function init_glare { # Delete existing artifacts rm -rf $GLARE_ARTIFACTS_DIR mkdir -p $GLARE_ARTIFACTS_DIR # (re)create Glare database recreate_database glare utf8 # Migrate glare database $GLARE_BIN_DIR/glare-db-manage --config-file $GLARE_CONF_FILE upgrade } # install_glare - Collect source and prepare function install_glare { setup_develop $GLARE_DIR } function install_glare_pythonclient { if use_library_from_git "python-glareclient"; then git_clone $GLARE_PYTHONCLIENT_REPO $GLARE_PYTHONCLIENT_DIR $GLARE_PYTHONCLIENT_BRANCH setup_develop $GLARE_PYTHONCLIENT_DIR else # nothing actually "requires" glareclient, so force installation from pypi pip_install_gr python-glareclient fi } # start_glare - Start running processes, including screen function start_glare { run_process glare "$GLARE_BIN_DIR/glare-api --config-file $GLARE_CONF_DIR/glare.conf" } # stop_glare - Stop running processes function stop_glare { # Kill the Glare screen windows for serv in glare-api; do stop_process $serv done } function cleanup_glare { sudo rm -rf $GLARE_ARTIFACTS_DIR $GLARE_AUTH_CACHE_DIR } if is_service_enabled glare; then if [[ "$1" == "stack" && "$2" == "install" ]]; then echo_summary "Installing glare" install_glare install_glare_pythonclient elif [[ "$1" == "stack" && "$2" == "post-config" ]]; then echo_summary "Configuring glare" create_glare_accounts configure_glare elif [[ "$1" == "stack" && "$2" == "extra" ]]; then echo_summary "Initializing glare" init_glare echo_summary "Starting Glare process" start_glare fi if [[ "$1" == "unstack" ]]; then echo_summary "Shutting down glare" stop_glare fi if [[ "$1" == "clean" ]]; then echo_summary "Cleaning glare" cleanup_glare fi fi # Restore xtrace $XTRACE # Local variables: # mode: shell-script # End: glare-0.5.0/devstack/settings000066400000000000000000000026441317401036700162050ustar00rootroot00000000000000# Devstack settings enable_service glare # Set up default directories GLARE_PYTHONCLIENT_REPO=${GLARE_PYTHONCLIENT_REPO:-${GIT_BASE}/openstack/python-glareclient.git} GLARE_PYTHONCLIENT_BRANCH=${GLARE_PYTHONCLIENT_BRANCH:-master} GLARE_PYTHONCLIENT_DIR=$DEST/python-glareclient GLARE_DIR=$DEST/glare GLARE_REPO=${GLARE_REPO:-${GIT_BASE}/openstack/glare.git} GLARE_BRANCH=${GLARE_BRANCH:-master} # Glare virtual environment if [[ ${USE_VENV} = True ]]; then PROJECT_VENV["glare"]=${GLARE_DIR}.venv GLARE_BIN_DIR=${PROJECT_VENV["glare"]}/bin else GLARE_BIN_DIR=$(get_python_exec_prefix) fi GLARE_ARTIFACTS_DIR=${GLARE_ARTIFACTS_DIR:=$DATA_DIR/glare/artifacts} GLARE_AUTH_CACHE_DIR=${GLARE_AUTH_CACHE_DIR:-/var/cache/glare} GLARE_CONF_DIR=${GLARE_CONF_DIR:-/etc/glare} GLARE_CONF_FILE=$GLARE_CONF_DIR/glare.conf GLARE_PASTE_INI=$GLARE_CONF_DIR/glare-paste.ini GLARE_POLICY_FILE=$GLARE_CONF_DIR/policy.yaml GLARE_SWIFT_STORE_CONF=$GLARE_CONF_DIR/glare-swift-store.conf if is_ssl_enabled_service "glare" || is_service_enabled tls-proxy; then GLARE_SERVICE_PROTOCOL="https" fi # Glare connection info. Note the port must be specified. GLARE_SERVICE_PORT=${GLARE_SERVICE_PORT:-9494} GLARE_SERVICE_HOST=${GLARE_SERVICE_HOST:-$SERVICE_HOST} GLARE_SERVICE_PROTOCOL=${GLARE_SERVICE_PROTOCOL:-$SERVICE_PROTOCOL} GLARE_DEBUG=${GLARE_DEBUG:-True} GLARE_ADMIN_USER=${GLARE_ADMIN_USER:-glare} GLARE_FLAVOR=${GLARE_FLAVOR:-keystone} glare-0.5.0/doc/000077500000000000000000000000001317401036700133555ustar00rootroot00000000000000glare-0.5.0/doc/source/000077500000000000000000000000001317401036700146555ustar00rootroot00000000000000glare-0.5.0/doc/source/_static/000077500000000000000000000000001317401036700163035ustar00rootroot00000000000000glare-0.5.0/doc/source/_static/.placeholder000066400000000000000000000000001317401036700205540ustar00rootroot00000000000000glare-0.5.0/doc/source/_templates/000077500000000000000000000000001317401036700170125ustar00rootroot00000000000000glare-0.5.0/doc/source/_templates/sidebarlinks.html000066400000000000000000000004551317401036700223560ustar00rootroot00000000000000

Useful Links

{% if READTHEDOCS %} {% endif %} glare-0.5.0/doc/source/_theme/000077500000000000000000000000001317401036700161165ustar00rootroot00000000000000glare-0.5.0/doc/source/_theme/layout.css000066400000000000000000000002061317401036700201430ustar00rootroot00000000000000{% extends "basic/layout.html" %} {% set css_files = css_files + ['_static/tweaks.css'] %} {% block relbar1 %}{% endblock relbar1 %} glare-0.5.0/doc/source/_theme/theme.conf000066400000000000000000000001101317401036700200570ustar00rootroot00000000000000[theme] inherit = nature stylesheet = nature.css pygments_style = tango glare-0.5.0/doc/source/architecture.rst000066400000000000000000000112511317401036700200710ustar00rootroot00000000000000.. Copyright 2017 - Nokia Networks All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ================== Basic architecture ================== OpenStack Glare has a client-server architecture that provides a Unified REST API, which then transfers control to the appropriate artifact type. The API consists of * *Router*, that converts WSGI requests into appropriate Glare API methods; * *Deserializer*, that parses parameters from user input and performs initial validation checks; * *Controller*, which is responsible for interactions with Glare Engine; * *Serializer*, that prepares information for responses (inserts status code, content-type, response content length, and so on). But before the requests reach the API they have to pass trough the set of middlewares, and each performs some actions over Request or Response objects. For example, *Auth* middleware checks that authentication token provided in request header is valid by sending auth requests to Identity service, obtains user information and injects it in Request object as a context objects; *Fault middleware*, on the other hand, is responsible for converting inner Glare exceptions to appropriate http error codes. Almost all business logic is provided by Glare *Engine*. It is responsible for *Policy* checking, when operator may define what operations users may execute, based on their contexts; for sending broadcast *Notifications* about performed actions; then it is *Access Control*, when Engine checks if user has rights to modify desired artifact; and finally – *Locking*, that is used to prevent race conditions during artifact updates, when the artifact is locked until the modification operation is finished. All the file (Blob data) operations are performed using *glance_store* library, which is responsible for interaction with external storage back ends and (or) local filesystem(s). The glance_store library provides a uniform interface to access the backend stores. Also there is an adapter layer *Store Manager* between Engine and glance_store that is responsible for converting glance_store exceptions and adding some additional logic, like sha256 calculation. All database operations are organized with artifact types. Each type installed in the system must implement Glare Artifact Type Interface (GATI) and use appropriate data types to describe its attributes. Glare uses several data types from a declarative framework *oslo.versionedobjects*: Integer, Float, String, Boolean, which complemented with the following home-grown data types: * Version — specifies the version of the artifact in ‘SemVer’ format and implements comparison operations. * Dependency — sets a reference to another artifact. At the request of the ‘dependency’ field, Glare will get the dependent artifact meta-information. * Blob — specifies a binary object. When a user assigns a value to this field, data will be automatically redirected to one of the connected storages. * List and Dict — define complex data structures such as Lists and Dictionaries of primitive types respectively. *Base artifact type* is an abstract class that has a reference implementation of GATI. It contains only common fields, like "id", "name", "version", "created_at”, "owner", and so on. Each artifact type is inherited from the Base and adds some additional fields. For example, for Image artifact type there were added "container_format" and "disk_format" string fields, for Heat Template it was "nested_templates" Blob Dictionary. *Validators* are objects that can be attached to a filed to perform additional checks. For example, if validator MinLen(1) is attached to a string field it checks that the string value is non empty. Validator ForbiddenChars("/", ",") validates that there shouldn't be slashes and commas in the string. Glare uses a central *Database* that is shared amongst all the components in the system and is sql-based by default. Other types of database backends are somewhat supported and used by operators but are not extensively tested upstream. .. figure:: ./images/glare-architecture.png :figwidth: 100% :align: center .. centered:: Image 1. OpenStack Glare Architecture glare-0.5.0/doc/source/conf.py000066400000000000000000000204221317401036700161540ustar00rootroot00000000000000# Copyright (c) 2010 OpenStack Foundation. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. # # Glare documentation build configuration file, created by # sphinx-quickstart on Tue May 18 13:50:15 2010. # # This file is execfile()'d with the current directory set to its containing # dir. # # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. import os import subprocess import sys import warnings # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. sys.path = [ os.path.abspath('../..'), os.path.abspath('../../bin') ] + sys.path # -- General configuration --------------------------------------------------- # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones. extensions = ['sphinx.ext.coverage', 'sphinx.ext.ifconfig', 'sphinx.ext.graphviz', 'oslosphinx', 'stevedore.sphinxext', 'oslo_config.sphinxext', 'sphinx.ext.autodoc', 'sphinx.ext.viewcode', 'oslo_config.sphinxconfiggen', ] config_generator_config_file = [ ('../../etc/oslo-config-generator/glare.conf', '_static/glare'), ] # Add any paths that contain templates here, relative to this directory. # templates_path = [] # The suffix of source filenames. source_suffix = '.rst' # The encoding of source files. #source_encoding = 'utf-8' # The master toctree document. master_doc = 'index' # General information about the project. project = u'Glare' copyright = u'2016-present, OpenStack Foundation.' # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # # The short X.Y version. from glare.version import version_info as glare_version # The full version, including alpha/beta/rc tags. release = glare_version.version_string_with_vcs() # The short X.Y version. version = glare_version.canonical_version_string() # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. #language = None # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: #today = '' # Else, today_fmt is used as the format for a strftime call. #today_fmt = '%B %d, %Y' # List of documents that shouldn't be included in the build. #unused_docs = [] # List of directories, relative to source directory, that shouldn't be searched # for source files. #exclude_trees = ['api'] exclude_patterns = [ # The man directory includes some snippet files that are included # in other documents during the build but that should not be # included in the toctree themselves, so tell Sphinx to ignore # them when scanning for input files. 'man/footer.rst', 'man/general_options.rst', 'man/openstack_options.rst', ] # The reST default role (for this markup: `text`) to use for all documents. #default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. #add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). add_module_names = True # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. show_authors = True # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' # A list of ignored prefixes for module index sorting. modindex_common_prefix = ['glare.'] # -- Options for man page output -------------------------------------------- # Grouping the document tree for man pages. # List of tuples 'sourcefile', 'target', u'title', u'Authors name', 'manual' man_pages = [] # -- Options for HTML output ------------------------------------------------- # The theme to use for HTML and HTML Help pages. Major themes that come with # Sphinx are currently 'default' and 'sphinxdoc'. # html_theme_path = ["."] # html_theme = '_theme' # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. #html_theme_options = {} # Add any paths that contain custom themes here, relative to this directory. #html_theme_path = ['_theme'] # The name for this set of Sphinx documents. If None, it defaults to # " v documentation". html_title = 'Glare' # A shorter title for the navigation bar. Default is the same as html_title. #html_short_title = None # The name of an image file (relative to this directory) to place at the top # of the sidebar. #html_logo = None # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. #html_favicon = None # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ['_static'] # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. #html_last_updated_fmt = '%b %d, %Y' git_cmd = ["git", "log", "--pretty=format:'%ad, commit %h'", "--date=local", "-n1"] try: html_last_updated_fmt = subprocess.check_output(git_cmd).decode('utf-8') except Exception: warnings.warn('Cannot get last updated time from git repository. ' 'Not setting "html_last_updated_fmt".') # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. #html_use_smartypants = True # Custom sidebar templates, maps document names to template names. #html_sidebars = {} # Additional templates that should be rendered to pages, maps page names to # template names. #html_additional_pages = {} # If false, no module index is generated. html_use_modindex = True # If false, no index is generated. html_use_index = True # If true, the index is split into individual pages for each letter. #html_split_index = False # If true, links to the reST sources are added to the pages. #html_show_sourcelink = True # If true, an OpenSearch description file will be output, and all pages will # contain a tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. #html_use_opensearch = '' # If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml"). #html_file_suffix = '' # Output file base name for HTML help builder. htmlhelp_basename = 'glareedoc' # -- Options for LaTeX output ------------------------------------------------ # The paper size ('letter' or 'a4'). #latex_paper_size = 'letter' # The font size ('10pt', '11pt' or '12pt'). #latex_font_size = '10pt' # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, author, # documentclass [howto/manual]). #latex_documents = [ # ('index', 'Glare.tex', u'Glare Documentation', # u'Glare Team', 'manual'), #] # The name of an image file (relative to this directory) to place at the top of # the title page. #latex_logo = None # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. #latex_use_parts = False # Additional stuff for the LaTeX preamble. #latex_preamble = '' # Documents to append as an appendix to all manuals. #latex_appendices = [] # If false, no module index is generated. #latex_use_modindex = True glare-0.5.0/doc/source/developer/000077500000000000000000000000001317401036700166425ustar00rootroot00000000000000glare-0.5.0/doc/source/developer/creating_custom_artifact_type.rst000066400000000000000000000125231317401036700255030ustar00rootroot00000000000000How to create new Artifact Type =============================== Basics ------ Each artifact type must realize **Glare Artifact Type Interface** (GATI) and be inherited from ``glare.objects.base.BaseArtifact`` class. GATI obliges to specify only one class method – ``get_type_name`` that returns a string with unique artifact type name. Other methods and fields are optional. .. note:: Conventionally it is recommended to give names in the plural, in lowercase, with words separated by underscores. Example of code for minimal artifact type: .. code-block:: python from glare.objects import base class HelloWorld(base.BaseArtifact): @classmethod def get_type_name(cls): return "hello_worlds" Custom artifact fields ---------------------- Users can add type specific fields to their artifact type to extend its logic and functionality. Follow the requirements of oslo.versionedobjects library all new fields must be placed in class dictionary attribute called ``fields``: .. code-block:: python from glare.objects import base class HelloWorld(base.BaseArtifact): ... fields = {...} There is a large number of possible field options. Let’s look at the most popular ones. Fields of primitive types ^^^^^^^^^^^^^^^^^^^^^^^^^ Users are allowed to create additional fields of 5 primitive types: * IntegerField * FloatField * FlexibleBooleanField * StringField * Link First four are taken from oslo.versionedobjects directly, Link is a glare-specific field which stores links in specific format to other artifacts in the system. .. note:: It’s recommended to use FlexibleBoolean field instead of just Boolean, because it has more sophisticated coercing. For instance, it accepts string parameters like “true”, “yes”, “1” and so on, and successfully coerces it to boolean value True. Users can create their own fields with method ``init`` from Attribute class. This method’s first parameter must be an appropriate field class, other parameters are optional and will be discussed later. In next example we will create 5 new custom fields, one for each primitive type: .. code-block:: python from oslo_versionedobjects import fields from glare.objects import base from glare.objects.meta import wrappers from glare.objects.meta import fields as glare_fields Field = wrappers.Field.init class HelloWorld(base.BaseArtifact): @classmethod def get_type_name(cls): return "hello_worlds" fields = { 'my_int': Field(fields.IntegerField), 'my_float': Field(fields.FloatField), 'my_bool': Field(fields.FlexibleBooleanField), 'my_string': Field(fields.StringField), 'my_link': Field(glare_fields.Link) } Compound types ^^^^^^^^^^^^^^ There are two collections, that may contain fields of primitive types: *List* and *Dict*. Fields of compound types are created with method ``init`` of classes ListAttribute and DictAttribute respectively. Unlike Attribute class’ ``init``, this method takes field type class as a first parameter, but not just field class. So, *IntegerField* must be changed to *Integer*, *FloatField* to *Float*, and so on. Finally for collection of links user should use *LinkType*. Let’s add several new compound fields to *HelloWorld* class. .. code-block:: python from oslo_versionedobjects import fields from glare.objects import base from glare.objects.meta import wrappers from glare.objects.meta import fields as glare_fields Field = wrappers.Field.init Dict = wrappers.DictField.init List = wrappers.ListField.init class HelloWorld(base.BaseArtifact): @classmethod def get_type_name(cls): return "hello_worlds" fields = { ... 'my_list_of_str': List(fields.String), 'my_dict_of_int': Dict(fields.Integer), 'my_list_of_float': List(fields.Float), 'my_dict_of_bools': Dict(fields.FlexibleBoolean), 'my_list_of_links': List(glare_fields.LinkType) } Other parameters, like collection max size, possible item values, and so on, also can be specified with additional parameters to ``init`` method. They will be discussed later. Blob and Folder types ^^^^^^^^^^^^^^^^^^^^^ The most interesting fields in glare framework are *Blob* and *Folder* (or *BlobDict*). These fields allow users to work binary data, which is stored in a standalone cloud storage, like Swift or Ceph. The difference between Blob and Folder is that Blob sets unique endpoint and may contain only one binary object, on the other hand Folder may contain lots of binaries with names specified by user. Example of Blob and Folder fields: .. code-block:: python from oslo_versionedobjects import fields from glare.objects import base from glare.objects.meta import wrappers from glare.objects.meta import fields as glare_fields Field = wrappers.Field.init Dict = wrappers.DictField.init List = wrappers.ListField.init Blob = wrappers.BlobField.init Folder = wrappers.FolderField.init class HelloWorld(base.BaseArtifact): @classmethod def get_type_name(cls): return "hello_worlds" fields = { ... 'my_blob': Blob(), 'my_folder': Folder(), } glare-0.5.0/doc/source/developer/devstack.rst000066400000000000000000000000741317401036700212010ustar00rootroot00000000000000Glare Devstack Installation =========================== TBDglare-0.5.0/doc/source/developer/index.rst000066400000000000000000000002331317401036700205010ustar00rootroot00000000000000Developer's Reference ===================== .. toctree:: :maxdepth: 3 webapi/index creating_custom_artifact_type devstack troubleshooting glare-0.5.0/doc/source/developer/troubleshooting.rst000066400000000000000000000001011317401036700226130ustar00rootroot00000000000000Troubleshooting And Debugging ============================= TBD glare-0.5.0/doc/source/developer/webapi/000077500000000000000000000000001317401036700201115ustar00rootroot00000000000000glare-0.5.0/doc/source/developer/webapi/index.rst000066400000000000000000000001231317401036700217460ustar00rootroot00000000000000REST API Specification ====================== .. toctree:: :maxdepth: 2 v1 glare-0.5.0/doc/source/developer/webapi/v1.rst000066400000000000000000001166261317401036700212050ustar00rootroot00000000000000V1 API ====== This API describes the different ways of interacting with Glare service via HTTP protocol using Representational State Transfer concept (ReST). **Glossary** * *Glare* (from GLare Artifact REpository) - a service that provides access to a unified catalog of immutable objects with structured meta-information as well as related binary data (these structures are also called *'artifacts'*). Glare controls artifact consistency and guaranties that binary data and fields won't change during artifact lifetime. .. note:: Artifact type developer can declare fields whose values may be changed, but he has to do it explicitly, because by default all fields are considered as immutable. * *Artifact* - in terms of Glare, an Artifact is a structured immutable object with some fields, related binary data, and metadata. * *Artifact Version* - field of an artifact that defines its version in SemVer format. * *Artifact type* - defines artifact structure of both its binary data and fields. Examples of OpenStack artifact types that will be supported in Glare are: Heat templates, Murano Packages, Nova Images, Tacker VNFs and so on. All artifact types are inherited from abstract Base type and extended with new fields. Base type is inherited from Base class from oslo_versionedobjects library (oslo_vo). * *Artifact status* - specifies the state of the artifact and the possible actions that can be done with it. List of possible artifact statuses: * *drafted* - Artifact is created but not activated, so it can be changed by Artifact owner or Administrator. * *active* - Artifact is activated and marked as ready for usage. Only mutable fields can be changed since that. * *deactivated* - Artifact is not available to other users except administrators. Used when Cloud Admin need to check the artifact. * *deleted* - Artifact's deleted. .. list-table:: **Artifact status transition table** :header-rows: 1 * - Artifacts Status - drafted - active - deactivated - deleted * - **drafted** - X - activate Artifact - N/A - delete Artifact * - **active** - N/A - X - deactivate Artifact - delete Artifact * - **deactivated** - N/A - reactivate Artifact - X - delete Artifact * - **deleted** - N/A - N/A - N/A - X * *Artifact Field* - field of an artifact that defines some information about the artifact. Artifact fields always have name, type, value and several additional parameters, described below. Glare uses several primitive types from oslo.versionedobjects directly: * *String*; * *Integer*; * *Float*; * *Boolean*; And also Glare expands this list with custom types: * *Blob*; * *Link*; * Structured generic types *Dict* or *List*. Each field has additional properties: * **required_on_activate** - boolean value indicating if the field value should be specified for the artifact before activation. (Default: True) * **mutable** - boolean value indicating if the field value may be changed after the artifact is activated. (Default: False) * **system** - boolean value indicating if the field value cannot be edited by User. (Default: False) * **sortable** - boolean value indicating if there is a possibility to sort by this field's values. (Default: False) .. note:: Only the fields of 4 primitive types may be sortable: integer, string, float and boolean. * **nullable** - boolean value indicating if field's value can be empty (Default: True). * **default** - a default value for the field may be specified by the Artifact Type. (Default: None) * **validators** - a list of objects. When a user sets a value to the field with additional validators, Glare applies them before setting the value and raises `ValueError` if at least one of the validator requirements is not satisfied. * **filter_ops** - a list of available filter operators for the field. There are seven available operators: 'eq', 'neq', 'lt', 'lte', 'gt', 'gte', 'in'. * *Artifact Link* - field type that defines soft dependency of the Artifact from another Artifact. It is an url that allows user to obtain some Artifact data. For external links the format is the following: *http(s):///* For internal links its value contains only . Example of : ``/artifacts//`` * *Artifact Blob* - field type that defines binary data for Artifact. User can download Artifact blob from Glare. Each blob field has a flag *external*, that indicates if the field was created during file upload (False) or by direct user request (True). In other words, “external” means that blob field url is just a reference to some external file and Glare does not manage the blob operations in that case. Json schema that defines blob format: .. code-block:: javascript { "type": "object", "properties": { "url": {"type": ["string", "null"], "format": "uri", "maxLength": 2048}, "size": {"type": ["number", "null"]}, "md5": {"type": ["string", "null"]}, "sha1": {"type": ["string", "null"]}, "sha256": {"type": ["string", "null"]}, "external": {"type": "boolean"}, "id": {"type": "string"}, "status": {"type": "string", "enum": ["saving", "active"]}, "content_type": {"type": ["string", "null"]}, }, "required": ["url", "size", "md5", "sha1", "sha256", "external", "status", "id", "content_type"] } Artifact blob fields may have the following statuses: * *saving* - Artifact blob record created in table, blob upload started. * *active* - blob upload successfully finished. .. list-table:: **Blob status transition table** :header-rows: 1 * - Blob Status - saving - active * - **saving** - X - finish blob upload * - **active** - N/A - X * *Artifact Dict and List* - compound generic field types that implement Dict or List interfaces respectively, and contain values of some primitive type, defined by `element_type` attribute. * *Artifact visibility* - defines who may have an access to Artifact. Initially there are 2 options: * `private` artifact is accessible by its owner and admin only. When artifact is 'drafted' its visibility is always `private`. * `public`, when all users have an access to the artifact by default. It's allowed to change visibility only when artifact has `active` status. * *Artifact immutability* - when artifact is *drafted* all its fields are editable, but when it becomes *active* it is "immutable" and cannot be modified (except for those fields explicitly declared as `mutable`). * *Base type json-schema*: .. code-block:: javascript { "name": "Base artifact type", "properties": { "activated_at": { "description": "Datetime when artifact has became active.", "filter_ops": ["eq", "neq", "in", "gt", "gte", "lt", "lte" ], "format": "date-time", "glareType": "DateTime", "readOnly": true, "required_on_activate": false, "sortable": true, "type": ["string", "null" ] }, "created_at": { "description": "Datetime when artifact has been created.", "filter_ops": ["eq", "neq", "in", "gt", "gte", "lt", "lte" ], "format": "date-time", "glareType": "DateTime", "readOnly": true, "sortable": true, "type": "string" }, "description": { "default": "", "description": "Artifact description.", "filter_ops": ["eq", "neq", "in" ], "glareType": "String", "maxLength": 4096, "mutable": true, "required_on_activate": false, "type": ["string", "null" ] }, "id": { "description": "Artifact UUID.", "filter_ops": ["eq", "neq", "in" ], "glareType": "String", "maxLength": 255, "pattern": "^([0-9a-fA-F]){8}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}-([0-9a-fA-F]){12}$", "readOnly": true, "sortable": true, "type": "string" }, "metadata": { "additionalProperties": { "type": "string" }, "default": {}, "description": "Key-value dict with useful information about an artifact.", "filter_ops": ["eq", "neq" ], "glareType": "StringDict", "maxProperties": 255, "required_on_activate": false, "type": ["object", "null" ] }, "name": { "description": "Artifact Name.", "filter_ops": ["eq", "neq", "in" ], "glareType": "String", "maxLength": 255, "required_on_activate": false, "sortable": true, "type": "string" }, "owner": { "description": "ID of user/tenant who uploaded artifact.", "filter_ops": ["eq", "neq", "in" ], "glareType": "String", "maxLength": 255, "readOnly": true, "required_on_activate": false, "sortable": true, "type": "string" }, "status": { "default": "drafted", "description": "Artifact status.", "enum": ["drafted", "active", "deactivated", "deleted" ], "filter_ops": ["eq", "neq", "in" ], "glareType": "String", "sortable": true, "type": "string" }, "tags": { "default": [], "description": "List of tags added to Artifact.", "filter_ops": ["eq", "neq", "in" ], "glareType": "StringList", "items": { "type": "string" }, "maxItems": 255, "mutable": true, "required_on_activate": false, "type": ["array", "null" ] }, "updated_at": { "description": "Datetime when artifact has been updated last time.", "filter_ops": ["eq", "neq", "in", "gt", "gte", "lt", "lte" ], "format": "date-time", "glareType": "DateTime", "readOnly": true, "sortable": true, "type": "string" }, "version": { "default": "0.0.0", "description": "Artifact version(semver).", "filter_ops": ["eq", "neq", "in", "gt", "gte", "lt", "lte" ], "glareType": "String", "pattern": "/^([0-9]+)\\.([0-9]+)\\.([0-9]+)(?:-([0-9A-Za-z-]+(?:\\.[0-9A-Za-z-]+)*))?(?:\\+[0-9A-Za-z-]+)?$/", "required_on_activate": false, "sortable": true, "type": "string" }, "visibility": { "default": "private", "description": "Artifact visibility that defines if artifact can be available to other users.", "filter_ops": ["eq"], "glareType": "String", "maxLength": 255, "sortable": true, "type": "string" } }, "required": ["name"], "type": "object" } Basics ------ Glare API complies with OpenStack API-WG guidelines: * `Filtering, sorting and pagination `_ * `Errors `_ For updating artifact field values, Glare API uses `json-patch `_. Glare supports microversions to define what API version it should use: `API-WG microversion guidelines `_. For description of artifact type `json-schema `_ is used. Media types ^^^^^^^^^^^ Currently this API relies on JSON to represent states of REST resources. Error states ^^^^^^^^^^^^ The common HTTP Response Status Codes (https://github.com/for-GET/know-your-http-well/blob/master/status-codes.md) are used. Application root [/] ^^^^^^^^^^^^^^^^^^^^ Application Root provides links to all possible API versions for Glare. URLs for other resources described below are relative to Application Root. API schemas root [/schemas/] ^^^^^^^^^^^^^^^^^^^^^^^^^^^^ All the API urls are relative to schemas of artifact types. * **List of enabled artifact type schemas** * **GET /schemas** - JSON-schemas list of all enabled artifact types * HTTP Responses: * 200 * Response schema: JSON dictionary with elements : * **Get artifact type schema** * **GET /schemas/{artifact_type}** - get JSON-schema of artifact type `artifact_type` * HTTP Responses: * 200 if `artifact_type` is enabled * 404 if no artifact type is defined to handle the specified value of `artifact_type` * Response schema: JSON-schema for requested type API artifacts root [/artifacts/] ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ All the API urls are relative to artifacts. All the APIs which are specific to the particular artifact type are placed to `/artifacts/{artifact_type}`, where `artifact_type` is a constant defined by the artifact type definition (i.e. by the related oslo_vo class). For example, for artifacts of type "images" the API endpoints will start with `/artifacts/images`. The `artifact_type` constant should unambiguously identify the artifact type, so the values of this constants should be unique among all the enabled artifact types. * **List artifacts** * **GET /artifacts/{artifact_type}** - list artifacts of given type Returns the list of artifacts having the specified type and scoped by the current tenant. If the user is ``administrator``, it returns the artifacts owned by all the tenants. * **GET /artifacts/all** - list artifacts regardless of their type Returns the list of artifacts of all types for given tenant. Only common fields will be shown in the output. All type-specific fields are skipped. * URL parameters: * `artifact_type` identifier of the artifact type, should be equal to a valid constant defined in one of the enabled oslo_vo classes. * Query parameters: Query may contain parameters intended for filtering and soring by most of the common and type-specific artifact fields. The set of parameters and their values should be compliant to the schema defined by the artifact type and its version. **Filtering**: * Filter keys may be any common or type-specific fields of primitive type, like 'String', 'Float', 'Integer' and 'Boolean'. Also it is possible to filter artifacts by Dict keys and Dict or List values. Direct comparison requires a field name to be specified as query parameter and the filtering value as its value, e.g. `?name=some_name` Parameter names and values are case sensitive. * Artifact API supports filtering operations in the format `?name=:some_name`, where `op` is one of the following: 1. **eq**: equal; 2. **neq**: not equal; 3. **gt**: greater than; 4. **gte**: greater or equal than; 5. **lt**: lesser than; 6. **lte**: lesser or equal than; 7. **in**: in a list of. Operator `eq` is default and may be omitted, i.e. filter `=eq:` is equal to `=` * Set comparison requires a field name to be specified as query parameter. The parameter may be repeated several times, e.g. the query `?name=qwerty&version=gt:1.0&version=lt:5.0` will filter the artifacts having name `qwerty` and versions from 1.0 to 5.0 excluding. * If it's required to filter the artifacts by any of the values, **in** operator should be used. List of comma-separated values should be provided for this operator. Query `?name=in:abc,def,ghi` will return all artifacts with names `abc`, `def` and `ghi`. * Filtering by Dict values is performed in format `.=[:]`. This filter returns only those artifacts, that have the key `key_name` in their Dict `dict_name` and the `value` of the key satisfies the right part of the filter. It is allowed to filter values for Dict of primitive types only. * Dicts can be filtered by their keys in format `dict_name`=[:]. Only `eq`, `neq`, `in` can be used as filtering operators. For `eq`, it returns all artifacts, that have key `value` in their Dict field `dict_name`; for `neq`, it returns all artifacts that don't have that key in Dict `dict_name`; for `in`, it returns artifacts with any of the keys in comma-separated list `value`. * Filtering by List values may be performed in the same manner as by Dict keys. **Sorting** In order to retrieve data in any sort order and direction, artifacts REST API accepts multiple sort keys and directions. Artifacts API will align with the `API Working group sorting guidelines `_ and support the following parameter on the request: * sort: Comma-separated list of sort keys. Each key is optionally appended with <:dir>, where 'dir' is the direction for the corresponding sort key (supported values are 'asc' for ascending and 'desc' for descending) Sort keys may be any generic and type-specific metadata fields of primitive type, like 'string', 'numeric', 'int' and 'bool'. But sorting by type-specific fields is allowed only when artifact version is provided. Default value for sort direction is 'desc'. Default value for sort key is 'created_at'. **Pagination** `limit` and `marker` query parameters may be used to paginate through the artifacts collection in the same way as it is done in the current version of Glance "List Images" API. Maximum `limit` number is 1000. It is done for security reasons to protect the system from intruders to prevent them from sending requests that can pull the entire database at a time. * HTTP Responses: * 200 if `artifact_type` is enabled * 400 if query has incorrect filter or sort parameters * 404 if no artifact type is defined to handle the specified value of `artifact_type` * Response schema: .. code-block:: javascript { "": [], "first": "/artifacts/", "schema": "/schemas/", "next": "" } * **Create a new artifact** * **POST /artifacts/{artifact_type}** * Creates a new artifact record in database. The status of artifact is set to `drafted`. Request body may contain initial metadata of the artifact. It's mandatory to define at least artifact `name` and `version` in the request body. * URL parameters: * `artifact_type` identifier of the artifact type. It should be equal to a valid constant defined in one of the enabled oslo_vo classes. * HTTP Responses: * 201 if everything went fine. * 409 if an artifact of this type with the same name and version already exists for tenant. * 400 if incorrect initial values were provided in request body. * 404 if no Artifact Type is defined to handle the specified value of `artifact_type`. * Request content-type: `application/json` * Response content-type: `application/json` * Response schema: JSON with definition of created artifact * **Get an artifact info** * **GET /artifacts/{artifact_type}/{id}** * Returns an artifact record with all the common and type-specific fields * URL parameters: * `artifact_type` identifier of the artifact type. It should be equal to a valid constant defined in one of the enabled oslo_vo classes. * `id` identifier of the artifact. * HTTP Responses: * 200 if everything went fine. * 404 if no artifact with the given ID was found. * 404 if the type of the found artifact differs from the type specified by `artifact_type` parameter. * Response content-type: `application/json`. * Response body: JSON with artifact definition. * **GET /artifacts/all/{id}** * Returns an artifact record with common fields only, regardless of its type. * URL parameters: * `id` identifier of the artifact * HTTP Responses: * 200 if everything went fine * 404 if no artifact with the given ID was found * Response content-type: `application/json` * Response schema: JSON with artifact definition * **Update an Artifact** * **PATCH /artifacts/{artifact_type}/{id}** * Updates artifact's fields using json-patch notation. If the artifact has a status other than `drafted` then only mutable fields may be updated. * URL parameters: * `artifact_type` identifier of the artifact type, should be equal to a valid constant defined in one of the enabled oslo_vo classes. * `id` identifier of the artifact. * HTTP Responses: * 200 if everything went fine. * 404 if no artifact with the given ID was found. * 404 if the type of the found artifact differs from type specified by `artifact_type` parameter. * 403 if the PATCH attempts to modify the immutable field while the artifact's state is other than `drafted`. * 400 if incorrect initial values were provided in request body. * 409 if artifact with updated name and version already exists for the tenant. * Request content-type: `application/json-patch+json` * Response content-type: `application/json` * Response body: JSON definition of updated artifact * **Delete an Artifact** * **DELETE /artifacts/{artifact_type}/{id}** * Deletes an artifact db record and all its binary data from store. * URL parameters: * `artifact_type` identifier of the artifact type. It should be equal to a valid constant defined in one of the enabled oslo_vo classes. * `id` identifier of the artifact * HTTP Responses: * 204 if everything went fine. * 404 if no artifact with the given ID was found. * 404 if the type of the found artifact differs from type specified by `artifact_type` parameter. * **Upload a blob** * **PUT /artifacts/{artifact_type}/{id}/{blob_name}[/{key_name}]** * Uploads binary data to a blob field. * URL parameters: * `artifact_type` identifier of the artifact type, should be equal to a valid constant defined in one of the enabled oslo_vo classes. * `id` identifier of the artifact. * `blob_name` name of blob field. * optional: `key_name` name of a key if user uploads data in blob dictionary. * HTTP Responses: * 200 if everything went fine. * 404 if no artifact with the given ID was found. * 404 if the type of the found artifact differs from type specified by. `artifact_type` parameter. * 400 if `blob_name` field doesn't exist in `artifact_type` or it's not a blob field. * 409 if blob is already uploaded and has status `active`. * 409 if blob has status `saving`. * 413 if blob size exceeds the limit specified by artifact type. * Request content-type: any, except `application/vnd+openstack.glare-custom-location+json`. * Response content-type: `application/json`. * Response body: JSON definition of the artifact. * **Download a blob** * **GET /artifacts/{artifact_type}/{id}/{blob_name}[/{key_name}]** * Downloads binary data from a blob field. * URL parameters: * `artifact_type` identifier of the artifact type, should be equal to a valid constant defined in one of the enabled oslo_vo classes. * `id` identifier of the artifact. * `blob_name` name of blob field. * optional: `key_name` name of a key if user downloads data from blob dictionary. * HTTP Responses: * 200 if everything went fine. * 301 if blob has `external` location. * 404 if no artifact with the given ID was found. * 404 if the type of the found artifact differs from type specified by `artifact_type` parameter. * 400 if `blob_name` field doesn't exist in `artifact_type` or it's not a blob field. * 403 if artifact has status `deactivated`. * Response content-type: specified by `content-type` field from the blob description. * Response body: binary data of the blob. * **Add location to a blob** * **PUT /artifacts/{artifact_type}/{id}/{blob_name}[/{key_name}]** * Adds external location to a blob field instead of upload data. * URL parameters: * `artifact_type` identifier of the artifact type, should be equal to a valid constant defined in one of the enabled oslo_vo classes. * `id` identifier of the artifact. * `blob_name` name of blob field. * optional: `key_name` name of a key if user inserts location in blob dictionary. * HTTP Responses: * 200 if everything went fine. * 404 if no artifact with the given ID was found. * 404 if the type of the found artifact differs from type specified by `artifact_type` parameter. * 400 if `blob_name` field doesn't exist in `artifact_type` or it's not a blob field. * 409 if blob is already uploaded and has status `active`. * 409 if blob has status `saving`. * Request content-type: `application/vnd+openstack.glare-custom-location+json`. * Response content-type: `application/json`. * Response body: JSON definition of the artifact. .. note:: Json-schema for `application/vnd+openstack.glare-external-location+json` and `application/vnd+openstack.glare-internal-location+json`: .. code-block:: javascript { "type": "object", "properties": { "url": {"type": ["string", "null"], "format": "uri", "max_length": 255} }, "required": ["url"] } A detailed example ^^^^^^^^^^^^^^^^^^ For this example, we have an artifact type 'example_type' with fields: * id: StringField * name: StringField * visibility: StringField * status: StringField * blob_file: BlobField * metadata: DictOfStringsField * version: VersionField .. note:: For output simplicity this artifact type doesn't contain all required fields from Base artifact type. 1. Create artifact Request: * Method: POST * URL: http://host:port/artifacts/example_type * Body: .. code-block:: javascript { "name": "new_art", "version": "1.0" } Response: 201 Created .. code-block:: javascript { "status": "drafted", "name": "new_art", "id": "art_id1", "version": "1.0.0", "blob_file": null, "metadata": {}, "visibility": "private" } 2. Get artifact Request: * Method: GET * URL: http://host:port/artifacts/example_type/art_id1 Response: 200 OK .. code-block:: javascript { "status": "drafted", "name": "new_art", "id": "art_id1", "version": "1.0.0", "blob_file": null, "metadata": {}, "visibility": "private" } 3. List artifacts Request: * Method: GET * URL: http://host:port/artifacts/example_type Response: 200 OK .. code-block:: javascript { "example_type": [{ "status": "drafted", "name": "new_art", "id": "art_id1", "version": "1.0.0", "blob_file": null, "metadata": {}, "visibility": "private" }, { "status": "drafted", "name": "old_art", "id": "art_id2", "version": "0.0.0", "blob_file": null, "metadata": {}, "visibility": "private" }, { "status": "drafted", "name": "old_art", "id": "art_id3", "version": "1.0.0", "blob_file": null, "metadata": {}, "visibility": "private" }], "first": "/artifacts/example_type", "schema": "/schemas/example_type" } Request: * Method: GET * URL: http://host:port/artifacts/example_type?name=eq:old_art Response: 200 OK .. code-block:: javascript { "example_type": [{ "status": "drafted", "name": "old_art", "id": "art_id2", "version": "0.0.0", "blob_file": null, "metadata": {}, "visibility": "private" }, { "status": "drafted", "name": "old_art", "id": "art_id3", "version": "1.0.0", "blob_file": null, "metadata": {}, "visibility": "private" }], "first": "/artifacts/example_type?name=eq%3Aold_art", "schema": "/schemas/example_type" } 4. Update artifact Request: * Method: PATCH * URL: http://host:port/artifacts/example_type/art_id1 * Body: .. code-block:: javascript [{ "op": "replace", "path": "/name", "value": "another_artifact" }, { "op": "add", "path": "/metadata/item", "value": "qwerty" }] Response: 200 OK .. code-block:: javascript { "status": "drafted", "name": "another_artifact", "id": "art_id1", "version": "1.0.0", "blob_file": null, "metadata": { "item": "qwerty" }, "visibility": "private" } 5. Upload blob Request: * Method: PUT * URL: http://host:port/artifacts/example_type/art_id1/blob_file * Body: ``some binary data`` Response: 200 OK .. code-block:: javascript { "status": "drafted", "name": "another_artifact", "id": "art_id1", "version": "1.0.0", "metadata": { "item": "qwerty" }, "blob_file": { "status": "active", "checksum": "8452e47f27b9618152a2b172357a547d", "external": false, "size": 594, "content_type": "application/octet-stream", "md5": "35d83e8eedfbdb87ff97d1f2761f8ebf", "sha1": "942854360eeec1335537702399c5aed940401602", "sha256": "d8a7834fc6652f316322d80196f6dcf294417030e37c15412e4deb7a67a367dd", "url": "/artifacts//example_type/art_id1/blob_file" }, "visibility": "private" } 6. Download blob Request: * Method: GET * URL: http://host:port/artifacts/example_type/art_id1/blob_file Response: 200 OK Body: ``blob binary data`` 7. Activate artifact Request: * Method: PATCH * URL: http://host:port/artifacts/example_type/art_id1 * Body: .. code-block:: javascript [{ "op": "replace", "path": "/status", "value": "active" }] Response: 200 OK .. code-block:: javascript { "status": "active", "name": "another_artifact", "id": "art_id1", "version": "1.0.0", "metadata": { "item": "qwerty" }, "blob_file": { "status": "active", "checksum": "8452e47f27b9618152a2b172357a547d", "external": false, "size": 594, "content_type": "application/octet-stream", "md5": "35d83e8eedfbdb87ff97d1f2761f8ebf", "sha1": "942854360eeec1335537702399c5aed940401602", "sha256": "d8a7834fc6652f316322d80196f6dcf294417030e37c15412e4deb7a67a367dd", "url": "/artifacts//example_type/art_id1/blob_file" }, "visibility": "private" } 8. Deactivate artifact Request: * Method: PATCH * URL: http://host:port/artifacts/example_type/art_id1 * Body: .. code-block:: javascript [{ "op": "replace", "path": "/status", "value": "deactivated" }] Response: 200 OK .. code-block:: javascript { "status": "deactivated", "name": "another_artifact", "id": "art_id1", "version": "1.0.0", "metadata": { "item": "qwerty" }, "blob_file": { "status": "active", "checksum": "8452e47f27b9618152a2b172357a547d", "external": false, "size": 594, "content_type": "application/octet-stream", "md5": "35d83e8eedfbdb87ff97d1f2761f8ebf", "sha1": "942854360eeec1335537702399c5aed940401602", "sha256": "d8a7834fc6652f316322d80196f6dcf294417030e37c15412e4deb7a67a367dd", "url": "/artifacts//example_type/art_id1/blob_file" }, "visibility": "private" } 9. Reactivate artifact Request: * Method: PATCH * URL: http://host:port/artifacts/example_type/art_id1 * Body: .. code-block:: javascript [{ "op": "replace", "path": "/status", "value": "active" }] Response: 200 OK .. code-block:: javascript { "status": "active", "name": "another_artifact", "id": "art_id1", "version": "1.0.0", "metadata": { "item": "qwerty" }, "blob_file": { "status": "active", "checksum": "8452e47f27b9618152a2b172357a547d", "external": false, "size": 594, "content_type": "application/octet-stream", "md5": "35d83e8eedfbdb87ff97d1f2761f8ebf", "sha1": "942854360eeec1335537702399c5aed940401602", "sha256": "d8a7834fc6652f316322d80196f6dcf294417030e37c15412e4deb7a67a367dd", "url": "/artifacts//example_type/art_id1/blob_file" }, "visibility": "private" } 10. Publish artifact Request: * Method: PATCH * URL: http://host:port/artifacts/example_type/art_id1 * Body: .. code-block:: javascript [{ "op": "replace", "path": "/visibility", "value": "public" }] Response: 200 OK .. code-block:: javascript { "status": "active", "name": "another_artifact", "id": "art_id1", "version": "1.0.0", "metadata": { "item": "qwerty" }, "blob_file": { "status": "active", "checksum": "8452e47f27b9618152a2b172357a547d", "external": false, "size": 594, "content_type": "application/octet-stream", "md5": "35d83e8eedfbdb87ff97d1f2761f8ebf", "sha1": "942854360eeec1335537702399c5aed940401602", "sha256": "d8a7834fc6652f316322d80196f6dcf294417030e37c15412e4deb7a67a367dd", "url": "/artifacts//example_type/art_id1/blob_file" }, "visibility": "public" } 11. Delete artifact Request: * Method: DELETE * URL: http://host:port/artifacts/example_type/art_id1 Response: 204 No Content References ========== #. `Filtering and sorting API-WG guideline `_ #. `Errors API-WG guideline `_ #. `json-patch description `_ #. `json-schema description `_ glare-0.5.0/doc/source/guides/000077500000000000000000000000001317401036700161355ustar00rootroot00000000000000glare-0.5.0/doc/source/guides/configuration_guide.rst000066400000000000000000000000711317401036700227110ustar00rootroot00000000000000Glare Configuration Guide ========================= TBD glare-0.5.0/doc/source/guides/dashboard_guide.rst000066400000000000000000000001131317401036700217660ustar00rootroot00000000000000Glare Dashboard Installation Guide ================================== TBD glare-0.5.0/doc/source/guides/glareclient_guide.rst000066400000000000000000000060011317401036700223320ustar00rootroot00000000000000Glare Client Installation Guide =============================== To install ``python-glareclient``, it is required to have ``pip`` (in most cases). Make sure that ``pip`` is installed. Then type:: $ pip install python-glareclient Or, if it is needed to install ``python-glareclient`` from master branch, type:: $ pip install git+https://github.com/openstack/python-glareclient.git After ``python-glareclient`` is installed you will see command ``glare`` in your environment. Glare client also provides a plugin ``openstack artifact`` to OpenStack client. If glare client is supposed to be used with OpenStack cloud then additionally ``python-openstackclient`` has to be installed:: $ pip install python-openstackclient Configure authentication against Keystone ----------------------------------------- If Keystone is used for authentication in Glare, then the interraction has to be organized with openstackclient plugin ``openstack artifact`` and the environment should have auth variables:: $ export OS_AUTH_URL=http://:5000/v3 $ export OS_TENANT_NAME=tenant $ export OS_USERNAME=admin $ export OS_PASSWORD=secret $ export OS_GLARE_URL=http://:9494 (optional, by default URL=http://localhost:9494/) And in the case when you are authenticating against keystone over https:: $ export OS_CACERT= .. note:: In client, we can use both Keystone auth versions - v2.0 and v3. But server supports only v3. You can see the list of available commands by typing:: $ openstack artifact --help To make sure Glare client works, type:: $ openstack artifact type-list Configure authentication against Keycloak ----------------------------------------- Glare also supports authentication against Keycloak server via OpenID Connect protocol. In this case ``glare`` command must be used. In order to use it on the client side the environment should look as follows:: $ export KEYCLOAK_AUTH_URL=https://:/auth $ export KEYCLOAK_REALM_NAME=my_keycloak_realm $ export KEYCLOAK_USERNAME=admin $ export KEYCLOAK_PASSWORD=secret $ export OPENID_CLIENT_ID=my_keycloak_client $ export OS_GLARE_URL=http://:9494 (optional, by default URL=http://localhost:9494) .. note:: If KEYCLOAK_AUTH_URL is set then authentication against KeyCloak will be used You can see the list of available commands by typing:: $ glare --help To make sure Glare client works, type:: $ glare type-list Send tokens directly without authentication ------------------------------------------- Glare has a possibility to send tokens directly. In order to use it on the client side the environment should look as follows:: $ export OS_GLARE_URL=http://:9494 (optional, by default URL=http://localhost:9494) $ export AUTH_TOKEN=secret_token .. note:: It's more convenient to specify token as a command parameter in format ``--auth-token``, for example, ``glare --auth-token secret_token type-list`` glare-0.5.0/doc/source/guides/hooks_guide.rst000066400000000000000000000000731317401036700211670ustar00rootroot00000000000000Custom Actions Hooks Guide ========================== TBD glare-0.5.0/doc/source/guides/installation_guide.rst000066400000000000000000000000671317401036700225500ustar00rootroot00000000000000Glare Installation Guide ======================== TBD glare-0.5.0/doc/source/guides/upgrade_guide.rst000066400000000000000000000001201317401036700214640ustar00rootroot00000000000000Glare Upgrade Guide =================== Database Upgrade ---------------- TBD glare-0.5.0/doc/source/images/000077500000000000000000000000001317401036700161225ustar00rootroot00000000000000glare-0.5.0/doc/source/images/glare-architecture.png000066400000000000000000001204211317401036700224020ustar00rootroot00000000000000PNG  IHDR$DFIDATx PU׽ EoѪŪFq5JP˕xMZUZIDb,QUh P ADF5h bib26f2yg0^{Կm(.ll [l [ [l8###CKQQON:餓Nz`K}?x7n+}|ZG/-k׮MJJCZZY__hd <5O)u͛7oܸf^UUUYYs7Hl9ֺu ֭رc iiisqi [ls5eʔ>}={VvÆ blׯ.555ǎb!ߐ-@_[nE˗oݺeS>ҒÇRWWwIY)++g3I>S{gj*Ǫd]A; /^(+U *r.R}'8WW l9C ILLt\m&V/)2~W.\('N6lبQDb>9*;W^'[/A!}嗥z:{ѣG%Ν+9}t)YN7߼9:d :͛7aq1ZDbbb֬Ycx&6gff KBNNNxxz3f0FFF_}QMtK.ׯ_cc^!3cs]_#$c#<8zMG-2!!$3bl>aaa)ݺu+--y>7o?~d҇ݷ냮\rȐ!xa=~[)$44Z4{xRNKK4:"%%%֎RSS3vX3fe#ye)G0C\C*++o644H~$׭ŋ%u}Z  0|D{Ѧl 2$55I|g+[ׯ/mmm<@JJB*L8Q8} oD Li9u0f `ժUƸ%Dz ?dKXn]έGRBl@DIζ̦lڜ-@ nܸ1vZZZonSDD>z=//ODͱlm޼9,,XիbBr3>}]M-33SFt\̓ʛ3f̠A-dkÆ d۬ե9g -pׯ/]TBׯ7e;Jiii=2k,qDz%lڴIvRxllMJIIC_:d w H+W:-aݺuRvze\S3gğ}d c8d ȖGIOO 1>,%۶mSdcǂM.]7-N -Cī,XtR#155g 3dWRR`aT6Y˖Y\\\ZZj32VTT/O}'IJssshhceܓ._`E!bB/^_?᲋Ȋ(4f̘}z7Y gϞ5Zr5k%9w7oJN|#cBv+W&4i$q{%b>KعsnT6Y"FexUii'EVābcc&)O>/T[[[YY`}(t2Z ql1 jhh8z==ٺx|}GF`E!/_c0CDmǎGб˗K%%%يEDDJ!l%&&Gpp~Irh/ق@Ź]d hrl 'N1Ŧl]~]>2Jҍ&%S~/zl}[n566y7 2DlI|g˖G,..-ϝ;ق8[l 1[zRDDĮ]V[[5J7m%Zn6lŋűvxq))= wAo)[=\)88d Ȗd{/_ޯ_Z1220*, ټy_\\իW ZZZ-2_惮ğ@}<ֲep۷oonn˔!g -d \7@:-[(2rz!j%F`uM6I6$+bcc z7$EKyqyiA(--2k,@N7B(9]|e -oFW2NޑMHѝcǎir֭˗/w0Éuc}d l [`d 0Օ [@ [NA-@0f -GF?.d }d _@hd q` [FxPlYr%*d 0Օ [@ [NA-@0f -GFd |ly [ln8[l0f -`Fd ld Jt-e -  [}A q0LOS/q<|BlM" [dōŃ-d \S-d  [} [~su%вłl8qØ-d ق~![lD@XUߞvK ]?4q-de*)Ӻn9Khj9Q/kˠؔ[՘aaӱC̝$*)Y?DSTL-94h+ #Mr\+X>~RgKڻW2xLӑMfc4ٔ-nD,}EZc=z)K%9 -10Q=4dIfi|Re0s Z+AJ3iiU5i;U-N\].Ѳ%$%R"ˊմG-c_Բٓ-񞡑Z`۲%yĥ#L4?  IL7eX)WYݹi)9 Zv,dd g ->]f"4Lh'+Fs*[zCֲ%ƣwD6OC2IXvĴǨiex-[JFCو\U=tyN[9ԧ&z$aX]NɖHlf![b?{ie't9DsdG1#uaV,GR+w_Ǧl9(8[|Gd+1SϚD0|Px8>FfXh#^d|9@^N3LGhc> ӈ1!ˠ;jxעrbRm˖㒉l-[l`=<^աDPa_T;q u]G9U2q-` [,6"+^łlxlqX-\].l [NA-@0f B q0-_-lylg -d hdśE-@ c-@`6"lAC-@8g d Jt-z6<(  [}jd q0-@g -d hd q` [-d -+0d 0Օ [@ [NA-@0f -GFd |ly [ln8[l0f -`Fd ld Jt-e -  [}#f#?ɖ?8[l![ dM,8ud -@X([d 1[lQxPlqj6"ld d pDZX-  [}l![؏8d q<| `dZiחzmlCg Zrl:]%Nk_νt i+TqGG72R6-R.-Eܑ8[cdkv^yr>r8I5 Be}\d1[l![l[lDѠyᆰ'xƪ_nozk=#"=L͝2Rm+qE봒%.[/Rk+_Tij{^lJVTur76l2fGYj%Hb<%#[]8[lp-ѠMK5.U1>lJ7i]xνCPq[՘aae-![3=>Q=iHf^c~U{eM" #MrP]mGY|9ϲ7bdd +8WW lA`l!V4칿lݷQd,mej|@szv\})CK7v\`V(Yd%DFњf{eKJ_W2ixϏٲFyN=>2fp#f#+E-@:7@-ll-[lxScbaFD-`xl![0Օ [ -[w [g ->NU𠀀|![lD@W  [ [@-@-e -&d 1[lc-@pj6" [qasu%вSg ->d l_8[ltolc:@WWWpqd 1[b֯_?k,T/^el [zzE#8{Ŵbcc6q"))I%}vzTTTNNsu%;rJHHȹsq>999Rf#x;w 8Pǽ{ <|lgc Saa!e =z4`8s zl [. ##BRRR,q{ׯsqJt-RPP VZZӒ8d kČ7뻣:S⢢ܹ FUo6Ʀ;v0Lpo\d Z-@1[ [l g -d ܈su%NA-@-pf# [l @-eܹs5;;;22RrEEE}YjjN:;H/+**qd 1[ܸqcҤIIIIA \p̪Uy }A{Xv [uwg -sС>J-:QQQ\].չ-j@ _g -OS=:Fիٳg-җ-[?~mm)""b-@>=zT>[lJOO =p;wĈu?^ftl&f1lgw}k27z%~[o]-Z-@---<Ϟ=+ÇV[[̴4d`Xm۶fY4i… VIIzc-dوl D,X`.^7n0nݺ-O~YGoݺldP/E lֳ>[RRf ;+@hd :U>nD=ȖA-@EU/z]]Ř-d l]L>ߚ,_Y"``Q̟?ԨQK.fu\pd -d -p#BlUd b6" [l lvd+qD٢e -@)cd F B_3@U.-[ [Fl0f d {/u+Wzd 8[l@ f+@hd -p lŘ-@l-@B-d .5zky-[\vZnZ^^%.-[tÇ=ȑ#lM0aX@-d 1[t\uTdɒݻ *q3l [Fq…pu/!!!MMM\@lviii5|. [su%-[Z,dСC\@ lŘ-2'O6Lkȑ\@lDd \IQQ![[lႀcl%fTk׮_ .pA1 @NrJ= q-l![4 !!!\ /B-f#Bg"g -C؄{W\].EV[nb\-d \qbłl![-`6"w!``Q= [ e B-p#Bl![0f -@lG  [nB f+@hB-@)c-W.NWӴeΗs0OGb-dev['^S*{I'G+kl%fTA%[L;_P S7}]6-Z2Rsi\m8GuSsj9wk)R)բ'Uݮj/W&wuA"[0f وVʖFAZ/Vk#g K0ͨ-~\lвl![l![B-d  [Fd B#g -C݀l![l3Օ [l![ [d1[ lcو [܅8[]kFl![*pd -d Bg `g6` [l1<ʕ+W֬Y>k֬*?:k׮9;#G:t7A-@u#e˖Ϝ9¥KFmӴ )#[>+@h 6ںuСC'Lm?|ADDfݻOwC-@5kɻwݻL^555q#[-`6"w L!!!lc l-$??_eAϞ=6#ʵaÆϛ7f8[]kFBvmӴtgΜIpʕ7'''WUUqA\lвl'O>|ȑ'O7o^VVֆ ^CUWW755Ѭl2`Q.~g `̖eKfclyy6nb\-nِ-8[l?d fCJt-Z   [8ffC1[lDdB:q-׌B\lвl![7!-d B [FUt27'l9{SyB#g -߯U4Zz~jZ_ueoNd+d?p >l]d+0Zfi%;KYZK:rҡz9@oOA}UnN^ުw}lAG  [8fKd`^ˏhç6|eIU=__Wi?SGZxZ,/_>)V}U-NTڦ6E=繷TSK=GհGuۛl~柵qSٻe}uc![-`6"ْeHgfTRWwZۃ(6 BUU]r~3Lsjoo|E[߼La݈vԗ1:r,dˏdSju&6?hܥlW݈6龜8[l_3ߗxUz1Xc#RQIeCFYF@kpk[r0f˸:x,ll![.'j]*ߴHE%gxJuӲ< S*63l" lV+Z2౐-($Z#6UlWdై0f rlI#OnhPj0NӦl5jD;-,etXȖ~xY4~6e}eS,1[llDV]3L:%y&3LmH6eKRR dV䱐 #ْO𯯷\&>h{eᄇ-B%g -Vh-`wʹ~B75S#h޽Tqi*bܩj揵}ޢ͜v=>ѮlEd]_RptVȖʖBiM0Eǎ0A;Fy_Y˖#B%8WW lFVGtQm()c䴹H6}k noOcZ|]rS![AV0ꀛ-`6"El![l6 ``Q![7]-Z-d fs#Bl![7cdو͆lG  [nBِ`ƹ]d-?fC)c l-?d f q-7f wAhl$.\HMM>|Ν;>NiiiJJ̙3CBBEd„ W\Bul![Bɓ'M6k֬J/믋ilٳ|\"l![ [~<19tSRR>˵kz!  [qr; ȷn:x5k:g۷~ᰰ0ô,V\].E˖p…Çq5|> 99y„ FY0cd+8! [oyw9tW\t)++KkÆ .)ls}-l![lDd_ٽ{T b-[ds߿HH81ȅF [ ՃrJnnntttvvަ7=zĉ'/%r! @MΝ;>x`! dɒۻ੩\%B-d 1[ȖywFMD.nذ!""BtAS"K]c-dوݻ'LBMSTT4tЙ3gVWWs5-p qr; m}reee1%TUUMk\ qMk֬>{C-@s̛7oȑ@.-%1iuu#|0f lG0a¬Yy]xwFT 60>;̝;wZ[[ׯ_)7ꫯz~Z ]I_h\^(? Ճ>|ܸq37$$ߔzt>637nܘ4iRRRR}}g6yQ[d hBe?0[zɓ'2-6l]VL  [Nh0f TeeeEDDX)$$$]ۥKac,=tG}ygFEE![l1&6l,Gog۶mÇOOOŇvZȓ\~8[lg u{n1ɓ'Bzi/OQYY)矐lI"iblEK>8tЖ-[gΜ9k,_n(NMM>|8ab?O, :;vϷ5gu8 d1[^oM7-?%11ږf̘/\"CMMͱcǜ-j4l`bp26l![zYGР'oٔ8p֭[:d قc9fuD/_]eժUW\٭[)SHJdd… -e d |B:ݍhSn޼)_#ŋ-`cp,7V^^cb6" [E˖eKX~\~ɾEEEmmm.6Gg -lll1f@h̘1֑RRR-ŲKXXܹs-[l![V~nݺ]~HzlڴIq2(**lLH$ [d B?G?O711W^ϟ3l޼9""޻qFKK=٪߾}{ss^f#"[U=+VBRQQ1b&aaaWի &Q({UWW)yD$sttÇ-u$[ {Tlկ=νt TqvÝxd+!VxbyyyN"lDe V{=TqZkdڳIe-tNC-l![FQEW7uڴT-x\e<>.kO߭V=|QْuhSƎh`^QֲiA˼ad+g+lg GdټL1"gqiEʐQ"MomG69@i9DM޽4C3-[<B'8[!^>b+SEi|QMh!Ɔ"[ق-~| [Fd 7VG橝/0@I6$قqā ר)uu-x\e<>.![/R_YR]oU{6/^-d,Y FOkҳɲ#[ ROԎ~퓑vi;< #1eZގв_~޼yTlAYxqAAkg#;Ps*1mEKWU:ZnDɖ8M]RFS #U˦N}U(22Ӱiz, E퓑1ô8c4DQ49?N+q-`V 6eʔ-[477SU=ӊmmmum-q?4hs-`wzSjb۲If&eX`6x;B/0Q=Z{NFvm$ʎߜ2־e#V@|SrqI>}:餓N-]vo4 ު%s4)[X"E͝Hɹ6ЌEԧĴ.l;m$EN89rGl\)8QVâ_<'ږw7\/ߞV"LmK-Ǻ$[N|G!;qn.-llyz(\UA[ZsZD?鉒(dbmhT4Gl;m$Faq@l&:QZqZGބ`%k(ʖp&ѴOKtl1f lQrU/E `Ҹv/^wdOCvoF-dx [GՕeo֞T.`Z-.2_&Q#[-d bA@e+e8[l [tIrqD@BŘ-`6" [Rڽ{ĉMaÆ[644 [ < -pw˖Uݕ&LHNNE-e pوofxxŬYΝ;l![-d 9=zٲeaaau5]& [m/_/YdуNII5j={8[l [\reǎVrL8q͚5Gv횤#_YY>&Jt-d ˝;wN87p4-.rݴMv9g -d gyo߾Yo۷\V\))K.ia0ق̙3Xɲ~u L6m۶m(AW[[[###^/**RSS-_~eEEClAṔֶ=ZV?\v B-OpƍI&%%%ɪUyfd K.N>]o?|50fs]VL _w҂ Nn߾}񌌌!wYzt܈el:t裏>BVƨ(d xΜ9{>}zaaSqh"la*..]zΝ![J!V/^lsˠ]|y=z3gDGG`Y4M8qذaFmiiIHH ;vl^8`wOrKJo|d e+ȐK:cƌmmm[-(>kҤI111[m۶333E.^~ lo":7b`X\"^:f#"[F 6o<~nݺɧ٧O<%$y$Lvy#F0VPP`"%"[SX/@o]rN A#[uӌ33}7eŋɼYR ỷ˗/ֱc0#44tǎx۷oٳGoĊ:ڝ-lвƧx %%Z^*oidS@֯__~/F"޽{}^\\˳bE-cM=D݈zy2rhSug ވu+?lglEߵkWKKRiލyf{ƍWUxxcdSSS<xYȖV\\ cg <ЈUXX7b >ވuR`l-/UȋZ=JHHaMnZhѣi3ɖi&kV0ȖZV%&ق@+O \x#o޼YQQa8cǎaN.or/@#[lA -z#}f#"[0fe0Bh2~ړ?yZ![lBSNWyi N" [l![Vp*RڷoߩSl`̖e˩Oǹ]-l![V`s9sĴ_lg <} `~ l<&bŊ`(Dࣳє e d+ y7n߻w襤`Pjշ_~hJHG:?} `~ 3 0|#Gqe|G~R$IY[mZz]~}ԉԻޓlв EԩSwܱ'N79s&??Μ9z߸q)xk68=L1oNb`qqqQQQH򌌌<3lmmŢ:lle/Q[uZ+ׂUS2Sbq&gsj'Zl5,J=>QKz=]WVjѓnWqj/W<)MQ-@[q 盁狋ΝۻwoիW&޲%l%Lz}US롮Uh7/SG U˱V}X$t)AtM;M B Se/i@ [vimmP n߾]SS=z蘘Q̷~X~:fKT^$%R`wSs,)6ūݓIV7L;_ڷ "e1[Ȗa![)))N:>>>77X.555ɥlD]ʐ-YMFͥ-)$4DE7-.[%@u$3gXݹsG 5}eK.rHH̙3nz9֍JhIjcl9i6lI!C#mŘ-`6"hv59}tôFOII۷ollի)ݵk&L`tIO6-77ݲ%Tl}sJ=QW˦lcG;oOk Z݅W#[A v={UXX׍XR/c%%%b:WwUUUY-'OOɓ}U.]F l--}!jH޽Hd -`Vg4e{wv"'\^^*'/|rz@U7nX߫Gw%`6" []@C?P3ccck~nݺu͚5)))3g9rf͚5mDd - [•+WBBBΝ;XR'''w˗/?q)wũS?k׮|1gyFcbbE>ѣGgeeu>}Gqǘ-lE扝?>///66W^R C]D$Uj뚚={%HIIEJ.wTTԸq$%55UVرc_Mkĉ:وA+[N}Օl9& dOovZZC  F,gEȑ#ŹW9gΜ8EV\^)Q./]: lݹsk!he8[luRxF7 O'66V,̙3ȓׯ_+(e.RbWXbZr%źĽfv_ .7Ȧfc08[|>F)s'_YRj3Pr4>VP#[ȖFR"˿Yes5d+8eq(=ߜmlcx\CΈ=_nr N&Gs =D-@ l٦wڕב1RԵˣ&ht+/blODN8qAѣ7._\)>>~QQQ!!!}+V R>򸃮C-@-ll}U=O|TAA>L*99YIoիL:5))I+;;;??_\>s̕+W9jd`0UlPzxZjgJ?ZuJUFʦZʅC&[׮]+**ڰa??&cټ3NO|Tff>Lj>R [ g+oOA}UnN^ުw";'ՀjKG֬Yӿj%Suд"7a6" [@зl5QFP05wxʤ_bcjM_q)[BUKxZ3Jk'[n:hHw_UUrhި&GsD9_4%EZ,\ڢ-N"[҇fee-HAػ/Rݫ#w lD@1[0SZeс{ϽC?:ZZi) ST!ɦkZ^ҲK ԟ~55Qުr2nv CW?|}/9UHr2O{D=告\v9:1l]p!77w%eKa󂛷ܫ#w l`?wdqwbэ(J$[Uoz2[]lG?)/wXؔQAiu$%wZ.˨hnF/^ܴOsGpӴ󴴴ׯFiAؼֲeunAhB-@<Բe,".bJtK<ej-QaQDt'Z֣G꾵ԲzCԺl5hÎd^wkiuleojjZfMttE7`c?#ՑB8[ll]=zOk#{Dkč`-o2G=5QlʖquْEm=2ӻj5ٲFtQ4DX;w6m[(VelunA-`01d? #a7 SQ)7ZԔ2qA}jXN-+M!ZT^!e|1ڨ$Og'ާ^=.Ea㦱eN̅ep%KȺ?ْjуly+[[-d -F|JڔCcq]js:Nu2=-4]91g^ti˖-K.8[^,.xͻCjz꼼<™"[l-[+`{sV1o6]N\OMM͞={rssWXaC=-_}CBBN*f&~VPPk.K.aA#[łlӹyʕ'N߿?///33355U exh<[ 'K&& ll ]7n'VUUUVVPG._\RRr@Wg w 8[Ȗ~aWNKK3<^{XK:lIѝvuO硡sέ3{Bu|Ncǎ|l![B =0?| _~(lݼysʔ)}9ѣG-ZSpBמxᇙBt Μ9$cl3?}tcƭ ׯ֭{g.eO***仱o߾[n'O4r#C[[/))ihhlI9%w9p^B}}3}H!D/AGV$eJ6N},_f9y/|å`}֧*8y-{ge lrT=8VNNǃ60arq+,<[o˳vgs5dȐ ٺ|{!) 6+11q…^+r>7n1bDDDկ_?)Ёl+7e|r]VZLf'6lԨQ۶m tY}E/^lrbzQ"'44Tv;vl^DjV0D Q>E2Ǎ'%O$~сyk(͛g;Ȗg1V'd͛ܯ- ]D&M$w}eKlHʏ~#{OII"&d[,]ٳF˓d^fqcƌOY8L0tp,[ "1Μ9SPP [_rrv|8}W͗4$''deel¬7lDy;vltmƦ2]'[Whh=Ν;V^^1[D\LĉaԌ;VTO>bQ3f0yz/_k";Uߑ-l/c [ԩS+V7nXWffH޿< N.[rf.[bTbt;U@-d+~V',ƾ}󧠠yr8qBrjGCBBz-oٲe۶mbȑ#+++-f#jY&44ԘgzԺW^"F9hcc=g7EEEF7e>,,??BtCU2Ftp&ֲwLGDD[/gJ-d1[Ȗ`b]RшcmܸQjIGasYN*zZ:Vg 9Z;k'[W$ǔ)St[n-Z[n=zl7F$DGG˿VlKK+''Ǟl*&9rX": ([lnRLAϐ l +x2\W1Q6urpB-d;wNlӧO8pHĥ#+읉5gl}vQ+ԯاyB-d˧:ĴķĺĽ a'GMlٲECBBMqF~IzLkzn=*K/?1eU˔U L=n'>)ךGz%}sr ZyNV~ln2Wjblf/R s< 1[杌ɮE-d8[Ȗ V]\\wsy]Q09QelFG]6cJGd݈Zȳll/W\cС"^ .$MH^np܈PNoMdsfTdhLjjX,U봞c!Uhnk:|=aZM(¤%[#//zD(TO܍l![V` 2#JKK.gO(qK'lV]VcaB\k$̺Z@vJ$7=]w+;9֝X/)5TM"p[7I$ob0ۆHֶN>}g ˴d \Yg B@BNl;}ʘ̫L}]Ƥg̲?zze~8vCXvy-z-d r0/?.-誻vܛg"˩Xressl![Vp#-˖z9-d>\CoHXA-pAR~-Oʖl_GO.gΜ٭[7yæArR_|؃V=[>ӧɳ`444i7b!hyf #E#_g+BNJ<d $BQȳTUUYΝ;-d :6I=P+d \\g+ INN6L+..t $999-y -Ȑ+W"[dqݺucǎ AFbȳ\Osssddaaa d,--AFd<[l![na޼yzꇿ:]+[`"݈6$$:e+HNU+d[2333gQ |ȳCF-AƲ1ciAFd+Hep|ߒg~`GֵcǎYfEGGƊ~[ 2@Ȗd<߷"b4R [#~~B'"E‰' g͚URR #w#"["[Wh>7 oɳ݈N&d 455[.+++""bҤIb][20>lyYNeM~ =Vաo{C8Y}cR[%Fs5ɳ%,Yًe/N^)ȖdgQ^-ջAFk &TTTDl]\jyD]ɑ/JKͧEvVo6eeerԽ{w9-;&ԁڳu={FFF렕-_d˭E|z=z ۾};VD-M?1_kRעe_R/?,6< Hri+Һuƨ;9 _={ٲ}-Zt:Ujma1]H{g,D׵SngT")Ë.ЦLM3&j\;-Mua k ^deqs2G/f:e3从l![ZfMVVT sN[d bD-/zP߽aa~%ę^KeVs3oj?Y{)O>ꈚ޽yǸ*`jnnnHjCCke+.mV2*q)l]Y^Kɹ}@eQC^US)5%EX5C5ZRͱժd]/kɢNWJ\f"9[B-oqҥm۶uɿӿ9s攗1[F]'lez~=9UziHXKjW(vjw[6U^FܴiѣC\⛲lbz왝]UUB(..x+d-[ 2זʖzSTq}Y'd[fF9u6{zy6k fW9[B-_(?{ˋ0aBTTQѥ@-Nب%TyJjE:ir>Y|PVZK|k :u]Rƴ͙fC?xhIԁ[H^TߕUOt Ѣ"MVcscd j$jk;kgbRR`[lE-d z1ΗÇ+u]LL\U"I܍WUb7r.ʴ5Tl<--Mt0"yaD =߿~~~]] ++2f[VVWt{%kk('nu-i(ѭKΘbN1[EFlll0a͎͛7IȖ'&)ßdjAMQMuّCZ~UGypˆٲ-\RDEǗllHD;9{ݟ'>H[o?Z $RU>iV{ӚE7B-d'Kmmm||+W2Ȗ+l%ĩOHyifIZ6]ek g>7y1æMƌ#9|pufΜY]]*KĔ50B)TPCxSc|vdK/:\6ȰDxꑵy+1*P~Nuqۓ/["f  B-٪[nK,),,ܼyy ' <[%[8xYHjj޳%u\OB(cxEz9o>.y(,b䭻줿o-d z455t2k'l[A?g^խ5b =IMT(X_k'l![Ȗ[kSѺ?bIo|d@^9#[3߱`+--Ij3osgdK 7g-ߓŋQ F5v؜Eq)#*ȼCKBlO'5|ē`+[)SruXo燬.!jpPw5Z?vRȖd[P.P2RDz*O^?W61*VRdEc.|V^PGr݈ Enݮ.\XRRR__%Ojjc}> yzRG<9p'|΃lna(lIlނ*sĆ+֫/ [W+և^>5T$._HKފ2O-gK䩦FD*''GJJK^lr0?sDBBBuVPP`ޡ~y" _Ȗ[POnKkEUrCǕlמzԑٳG"p.[cc-uegg'%%]I[;i$YCyy#JgΒ%KRSS>#l![Ȗ}r7lټUf?v̖g;@l![=[-^\]z5l![咩[PRT˳.[?O<)v+v%j%%/-[=ڐ<[Gil Tֈw>g PĮ=jnWbӧO9><[V$*l![N׶P^vig :l]Ϛ5+99W^wqo+SB<[l3ĸ1rٳgγuE(q)1**ܷzѣ=[@̖e (+w#"[VRYY9|psr<Ů~M6M_~r]T#GDGG#[>G! /гly8 5裏"""t+++kKDDM&j*%u(ӧ]nW^Gʕ7̘1cȖ jjj伞<[eˈ|G7lG݈(È---555K.҇ \٢g Zbwܡ[}q/݈V$>>sLeeU222bcc{9}y2f MMM&L0bm>l>""¸ [Ȗ2!k1[VݧMf޼ysZZ<[Ю}B m?k֬˗/*[@:ƕ+W?Ҝ`x: [%[URedKؠ(ہ7l lu Κ5C̟?gϞ%V9ڰ۴Lo}LzN6Iլ 1*cBX>Ç_pC߿ "ONͳR0.8Vr5nOӦht1E{4B->d pjii9>?y3fYyI῭̧V [V̜;w.ŏpBrr)Sr+=zS1gˏg <֭;v,OGߒj6::O?/9ڍݻ/Wvwc-gZKy-ill8qGg7<[Ȗ;rJRRRII BȳlI&q,-&-s3gp,\KB-d˜ . :r7l [999wy-d *++:t(:pM-dWıX^^!?٢g<[}"Y<˖/[l [mQRR2zK._}g ٲə3gx#[#[Ǐ_z5{J-@8U=sk֬IKKiWl:z-Ӧ<d 7gϞmqeә{f*4n̳ [܍/YvŋErz懤uf[Y`]wE\=[>y>ڵO?:`Yr޼yؽ+2cέ1[:v'^| Z:pa?=cd٫W/Cdw[) ֲ;x/B+tXX7gΜ>};vp/}1aD}6nh1F̖d(&&u)iǎ 0,XХK lb8%j0ɋ^z=1[{6[ vR?ɖO<[ޒ a/6lXLLLYYXhDw>cƌ י={7ֲK/uڵD7ӧOYܹszɒ%C8W?ɖȒG %[B-7@-lmٲc1P9uM(Ѽy#<[zȼ *f> hA^\TK.zJѣG%<2__Ihh Ul2G:tPTTLݻw+qc-d ֗_~ymwC"2]YY~sͲ૯ڻw"a]8w5G+/Sa IDAT_xWq:"f qƽOhkrԨQOytk+@-5jhhfAC-@-d;ۑ-O퐕tR' "f -oRWWg ٪JNNpd%EEE)))OZXd < o2o<o.pvvvCChll3gNHHH#_?|A/{-d -߅l߾]N޽oEEE|]@=[&qqqڍDFF655EM:l(((vU]][[[d! [MziVHHH ܜ}v8qDB̜9Ӑɓ'|~~ٳoPg -d 2!!!l_sθ8GyFdWHLL!e?cVCC\"|-4o܍mQ\\,qӟ{9v [=[/4hР;&$$,-d<!vz:M@suLNA"[h1[nP)?6KE|@t|%eB-Td`e<[:g2l![ *EfV-4l![4e#yB|nDElQ6B-dˍy(-F [ d bgɍEJA(D=[4eB-d "f٢lP6-lu`z@ֵkΟ?ﳲu̙-vk&d+`T1vKŇ~HpEasQyȳ9za6%E˸_{mڱʎUZKe;fFo+V;}YVZ5iҤ+WxK#-QKwdKgȖ KB[Ky9QS%fZם9G꽶JXܹs{=n8d1տPEas_RM"ϖdkb*R+k/r.hE<#ȧ7naD1Cہ={eI&M`B\gKVͧٷ~tlVwEh{EMet7nRq.]GRf6nLm:Ӥv+ -l`̖_b3]U&Noz+)ZxmcŪKLS6ekS. ZosT՟,9ٽkhYzٺ|rQQwa<"499ڵk>~7beeRqO>}߾}JGտnc?~_ʲѣGoذAlH^\=,YݩaDo6m}hW322òe$<>FOiѭ5F҈j=R;YTz۪?A"ͭҷaJ{9*QRdggâDۗg՗)\YTmc1Yꀊlef D-OȖ\ꑤ2 M- Z/#Ey ӧלּa(baYըꭱ\HQ[+Zn:˖QZ䣑C_9r$NjgdKN!i$X_~֦ʖ4mzduaFя~Dp_-}<)6bQ^fd|.!ْkOj@WiSEκұyݪY8gwBKidaϯXbРAUChhLxb IKK+//te(-51ݱْ&{O3gܹlu~Ѽ$<;?!Ǵl:"[6JZhuǹOp0~\c,[R>l![T"%mUՐqp2Rw; $E\OBke(7_vĉͫlߗF {+f˼X|$GܢZtxnDٱ'#""|12Jscʊ4JR4VUbv*@l\ȖdN1v[Sn1/y.[ZHujKj" zM4rᶿz2Zzͅ6*\urib>פ.O?t^e+##CԤ$#N[#ZD?8UTʕ+6m3fǎluR/ypp.~҆lT6e9"Tom͑IBCClI})fᡦ+ïlY@lg]%D]mՖ)~U2Ҧ>hPWReS)wS ?{SCEѦޭ>G3n ߑV9CJ1իW.ސh;w>}z+ϖb|$; l5kVdfIy;pzϱU-0ͭsq īD=bmN5jll~#)WȖ;dN1WretٲYɳE-/dupb0N LE_Sz&4 >+[۶mkii G_o m>G6Ǫo>msA=r偄v͵mgٵYɳEF g# x1-Fg#R6x6"l![n̳xUlq7"U E@-&z([4dl![L"[Nu zhP-l![ExѢEA@.l![ JT}}Ν;W^={ & 4YYY͔ d> <[l![4*Q֏~29sfe<[٢#[L {KTYYYddiCe<[l1q7"E>==ܴt钓y榦&E̖8E0n7͝',,c%EW6RNLL֭[LLLVVVaaÇ)A寲S=[1444ۿ[Ϟ=2dV,+뮻v\Joh {OOkUWWC [Bx r-K. 7n\MMp^:~#~_y_kll0aʼn֟VUU5hРEIt [ [@̖xW 0jԨ )o֝wŸ{|k׊巖,YnBq) 7o3g{-&۹}z E-d -d :x>dȐw}WLH/𱱱&?P_Eh<<(gQp̙bi_͢b"d`<[g -@]v%$$ׯ,&""hK.}GO=?%KL0w"^_%+yd˿8qb^VXqU.>1|knu}Wo+WK.^͛:ujuƠ<[lw#:+\|ٺ>}{n Q1blϟկ~%:>;cioWyy976l8w#LѧO;999>~=??ܹmNAQx?,EtΝlг8c_|W^nuYsTTH fn`W`fllE+Vohhy]z577{}n-^لy![EV|`>dȐ7gϞ3XbG1b… G+WZy!^ȖoB-@-dZk׮F!ߺu}Kx^BB-[; a3qDB@*22rܹaaaF˗{_o߾'OT^$''G/ 񊍍%Pg -d HSTT3upn:9 m69gΜ3grt,FülBzA-bbZK,1דO>;ن7|s>`zzg}fsssjj|1#^)))DLEUCӠ Bw|n!>>瞳t [vٳg-6/66c᧘?8Hīb`-mT dl![@8 MS#YVV@S]7o3gܞ={l1[@ϖǡRSSgΜyYz.ED-d d iM0!==/,OV'A-WA-@-d!ӚWPXyU[[ҙydg -d+eIL++++vkU|6"ytk+@0ɓ'ˋ GFFVWWVyyBN233SRR0-FvE-brӧO>|x}}=2a0a„,E~w#z<1-zq,hhh߿aa!B-g \O𴝢 "MzMl![@гAV\SWWCEaa![llu0<,, OuZȳವ}-[`ԔzjvE-\[.䲥Aovfw2yB4=yO?"f -dEp݈ls#[@- f B-d @٢G-@  [lq -gKǹ]d ٢EG-p*Xll![yFd tvsd-d ݈ -Gw:YNXTZΜbcrKӛl![=[~;Z&&rp[݅ZTtg:ԣ|u3lEϖӲvh61=k0SA9^{9uO^fLQ3e1Cfo[RMkhY֬M$ vvoɖks,ֺuh9Tߘ򓛕fϱ^L_G2oXD-d -:bl˖4 q[C3mR/i0LKmie%iAZbYW~ڱSk}{kfzY/e/j|jde%sd}ٲf,7PsYlpi![nSB|lllI۩=7Ya,1^-赥ZV̛GuL}47SDQCwzqWeks92Ng.[v!e0'9v7ClqDB$[F,Л%o׋.TMrdKw Q"OxWuHw qFlcV[kv/sz&_wd=ȖAC-@-?a6z8njtlGl4̟%Pwlm1geΞtPdow_wp=ȖA-bodK>YÐ&Vfc65|5fKCwkjm rowٲc#ڗ-kvJfe;>HK9yJd-d 7F0bC85~͵*G>%EQ?3_'n T^icԻzNPw#Zm1Pvol+@-1QCq9u(-d zE:{~ZPJk5#FG2άّI4ځCl![YGϖ T Tֈ?{x6"l![lmը^TeZV[SrG-<[l![<@[ ϖsm%El![ T:d B-l܍Łb- 8߸6B-d z~dl=[l![VplMV/@:yBh-gKǹ]d p*Xll![ȳEp7" [FFp݈lsl1[@ [EeyB-p qDB V<[l![gϒg-nDP߿\ˡCc-(o֭[hWzĥKvIi= bĚ5k43>o6%%E3̷3rui![lxlFk+@<[lb-8B-d @-lFk+@<[99 x/bbb-eeegΜ'\lڵko@s 1{xhfO 7nv#g>|6mڲe˜2-d [ll [l [ [@ߗ2IENDB`glare-0.5.0/doc/source/images_src/000077500000000000000000000000001317401036700167715ustar00rootroot00000000000000glare-0.5.0/doc/source/images_src/glare-architecture.graphml000066400000000000000000001565341317401036700241350ustar00rootroot00000000000000 Glare Service Folder 6 Middlewares Folder 1 Auth Middleware Fault Middleware API Folder 2 Router Deserializer Controller Serializer Engine Utils Folder 3 PolicyEnforcer Notifier Access Control Locking Store Folder 4 Store Manager glance_store Objects Folder 5 Fields Validators Base Artifact Images Heat Templates DB api oslo.vo Base Cloud storages Folder 7 Swift Ceph Database glare-0.5.0/doc/source/index.rst000066400000000000000000000016061317401036700165210ustar00rootroot00000000000000Welcome to Glare's documentation! ================================= Glare is the OpenStack artifact service. This project aims to provide a mechanism to define tasks and workflows without writing code, manage and execute them in the cloud environment. Overview -------- .. toctree:: :maxdepth: 1 overview quickstart architecture Roadmap main_features User guide ---------- **Installation** .. toctree:: :maxdepth: 1 guides/installation_guide guides/configuration_guide guides/dashboard_guide guides/upgrade_guide guides/glareclient_guide guides/hooks_guide **API** .. toctree:: :maxdepth: 2 developer/webapi/index Developer guide --------------- .. toctree:: :maxdepth: 2 developer/index Indices and tables ================== * :ref:`genindex` * :ref:`modindex` * :ref:`search` glare-0.5.0/doc/source/main_features.rst000066400000000000000000000000421317401036700202250ustar00rootroot00000000000000Glare Features ============== TBDglare-0.5.0/doc/source/overview.rst000066400000000000000000000001771317401036700172620ustar00rootroot00000000000000Glare Overview ============== What is Glare? -------------- TBD Main use cases -------------- TBD Rationale --------- TBDglare-0.5.0/doc/source/quickstart.rst000066400000000000000000000000351317401036700175770ustar00rootroot00000000000000Quick Start =========== TBD glare-0.5.0/etc/000077500000000000000000000000001317401036700133635ustar00rootroot00000000000000glare-0.5.0/etc/glare-paste.ini000066400000000000000000000036271317401036700163000ustar00rootroot00000000000000# Use this pipeline for trusted auth - DEFAULT # Auth token has format user:tenant:roles [pipeline:glare-api] pipeline = cors faultwrapper healthcheck http_proxy_to_wsgi versionnegotiation osprofiler trustedauth glarev1api # Use this pipeline for keystone auth [pipeline:glare-api-keystone] pipeline = cors faultwrapper healthcheck http_proxy_to_wsgi versionnegotiation osprofiler authtoken context glarev1api # Use this pipeline for Keycloak auth [pipeline:glare-api-keycloak] pipeline = cors faultwrapper healthcheck http_proxy_to_wsgi versionnegotiation osprofiler keycloak context glarev1api # Use this pipeline when you want to specify context params manually [pipeline:glare-api-noauth] pipeline = cors faultwrapper healthcheck http_proxy_to_wsgi versionnegotiation osprofiler context glarev1api [app:glarev1api] paste.app_factory = glare.api.v1.router:API.factory [filter:healthcheck] paste.filter_factory = oslo_middleware:Healthcheck.factory backends = disable_by_file disable_by_file_path = /etc/glare/healthcheck_disable [filter:versionnegotiation] paste.filter_factory = glare.api.middleware.version_negotiation:GlareVersionNegotiationFilter.factory [filter:faultwrapper] paste.filter_factory = glare.api.middleware.fault:GlareFaultWrapperFilter.factory [filter:context] paste.filter_factory = glare.api.middleware.context:ContextMiddleware.factory [filter:trustedauth] paste.filter_factory = glare.api.middleware.context:TrustedAuthMiddleware.factory [filter:authtoken] paste.filter_factory = keystonemiddleware.auth_token:filter_factory delay_auth_decision = true [filter:keycloak] paste.filter_factory = glare.api.middleware.keycloak_auth:KeycloakAuthMiddleware.factory [filter:osprofiler] paste.filter_factory = osprofiler.web:WsgiMiddleware.factory [filter:cors] use = egg:oslo.middleware#cors oslo_config_project = glare [filter:http_proxy_to_wsgi] paste.filter_factory = oslo_middleware:HTTPProxyToWSGI.factoryglare-0.5.0/etc/glare-swift.conf.sample000066400000000000000000000011301317401036700177310ustar00rootroot00000000000000# glare-swift.conf.sample # # This file is an example config file when # multiple swift accounts/backing stores are enabled. # # Specify the reference name in [] # For each section, specify the auth_address, user and key. # # WARNING: # * If any of auth_address, user or key is not specified, # the glare's swift store will fail to configure [ref1] user = tenant:user1 key = key1 auth_version = 2 auth_address = http://localhost:5000/v2.0 [ref2] user = project_name:user_name2 key = key2 user_domain_id = default project_domain_id = default auth_version = 3 auth_address = http://localhost:5000/v3 glare-0.5.0/etc/oslo-config-generator/000077500000000000000000000000001317401036700175665ustar00rootroot00000000000000glare-0.5.0/etc/oslo-config-generator/glare.conf000066400000000000000000000005541317401036700215330ustar00rootroot00000000000000[DEFAULT] output_file = etc/glare.conf.sample namespace = glare namespace = glance.store namespace = keystonemiddleware.auth_token namespace = oslo.concurrency namespace = oslo.db namespace = oslo.db.concurrency namespace = oslo.log namespace = oslo.messaging namespace = oslo.middleware.cors namespace = oslo.middleware.http_proxy_to_wsgi namespace = oslo.policy glare-0.5.0/glare/000077500000000000000000000000001317401036700137025ustar00rootroot00000000000000glare-0.5.0/glare/__init__.py000066400000000000000000000015471317401036700160220ustar00rootroot00000000000000# Copyright 2017 - Nokia Networks # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os os.environ['EVENTLET_NO_GREENDNS'] = 'yes' # Import oslo_service first, so that it makes eventlet hub use a monotonic # clock to avoid issues with drifts of system time (see LP 1510234 for details) import oslo_service # noqa import eventlet # noqa glare-0.5.0/glare/api/000077500000000000000000000000001317401036700144535ustar00rootroot00000000000000glare-0.5.0/glare/api/__init__.py000066400000000000000000000000001317401036700165520ustar00rootroot00000000000000glare-0.5.0/glare/api/middleware/000077500000000000000000000000001317401036700165705ustar00rootroot00000000000000glare-0.5.0/glare/api/middleware/__init__.py000066400000000000000000000000001317401036700206670ustar00rootroot00000000000000glare-0.5.0/glare/api/middleware/context.py000066400000000000000000000134111317401036700206260ustar00rootroot00000000000000# Copyright 2011-2016 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg from oslo_context import context from oslo_log import log as logging from oslo_middleware import base as base_middleware from oslo_middleware import request_id from oslo_serialization import jsonutils from glare.common import exception from glare.common import policy from glare.i18n import _ context_opts = [ cfg.BoolOpt('allow_anonymous_access', default=False, help=_('Allow unauthenticated users to access the API with ' 'read-only privileges. This only applies when using ' 'ContextMiddleware.')) ] CONF = cfg.CONF CONF.register_opts(context_opts) LOG = logging.getLogger(__name__) class RequestContext(context.RequestContext): """Stores information about the security context for Glare. Stores how the user accesses the system, as well as additional request information. """ def __init__(self, service_catalog=None, **kwargs): super(RequestContext, self).__init__(**kwargs) self.service_catalog = service_catalog # check if user is admin using policy file if kwargs.get('is_admin') is None: self.is_admin = policy.check_is_admin(self) def to_dict(self): d = super(RequestContext, self).to_dict() d.update({ 'service_catalog': self.service_catalog, }) return d def to_policy_values(self): values = super(RequestContext, self).to_policy_values() values['is_admin'] = self.is_admin values['read_only'] = self.read_only return values class BaseContextMiddleware(base_middleware.ConfigurableMiddleware): @staticmethod def process_response(resp, request=None): try: request_id = resp.request.context.request_id # For python 3 compatibility need to use bytes type prefix = b'req-' if isinstance(request_id, bytes) else 'req-' if not request_id.startswith(prefix): request_id = prefix + request_id resp.headers['x-openstack-request-id'] = request_id except AttributeError: pass return resp class ContextMiddleware(BaseContextMiddleware): @staticmethod def process_request(req): """Convert authentication information into a request context. Generate a RequestContext object from the available authentication headers and store on the 'context' attribute of the req object. :param req: wsgi request object that will be given the context object :raises: webob.exc.HTTPUnauthorized: when value of the X-Identity-Status header is not 'Confirmed' and anonymous access is disallowed """ if req.headers.get('X-Identity-Status') == 'Confirmed': req.context = ContextMiddleware._get_authenticated_context(req) elif CONF.allow_anonymous_access: req.context = RequestContext(read_only=True, is_admin=False) else: raise exception.Unauthorized() @staticmethod def _get_authenticated_context(req): headers = req.headers service_catalog = None if headers.get('X-Service-Catalog') is not None: catalog_header = headers.get('X-Service-Catalog') try: service_catalog = jsonutils.loads(catalog_header) except ValueError: raise exception.GlareException( _('Invalid service catalog json.')) kwargs = { 'service_catalog': service_catalog, 'request_id': req.environ.get(request_id.ENV_REQUEST_ID), } return RequestContext.from_environ(req.environ, **kwargs) class TrustedAuthMiddleware(BaseContextMiddleware): @staticmethod def process_request(req): auth_token = req.headers.get('X-Auth-Token') if not auth_token: msg = _("Auth token must be provided") raise exception.Unauthorized(msg) try: user, tenant, roles = auth_token.strip().split(':', 3) except ValueError: msg = _("Wrong auth token format. It must be 'user:tenant:roles'") raise exception.Unauthorized(msg) if not tenant: msg = _("Tenant must be specified in auth token. " "Format of the token is 'user:tenant:roles'") raise exception.Unauthorized(msg) elif tenant.lower() == 'none': tenant = None req.headers['X-Identity-Status'] = 'Nope' else: req.headers['X-Identity-Status'] = 'Confirmed' req.headers['X-User-Id'] = user req.headers['X-Tenant-Id'] = tenant req.headers['X-Roles'] = roles if req.headers.get('X-Identity-Status') == 'Confirmed': kwargs = {'request_id': req.environ.get(request_id.ENV_REQUEST_ID)} req.context = RequestContext.from_environ(req.environ, **kwargs) elif CONF.allow_anonymous_access: req.context = RequestContext(read_only=True, is_admin=False) else: raise exception.Unauthorized() glare-0.5.0/glare/api/middleware/fault.py000066400000000000000000000105101317401036700202520ustar00rootroot00000000000000# Copyright 2016 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """A middleware that turns exceptions into parsable string. Inspired by Cinder's and Heat't faultwrapper. """ import sys import traceback from oslo_config import cfg from oslo_log import log as logging from oslo_middleware import base as base_middleware from oslo_utils import reflection import six import webob.dec import webob.exc from glare.common import exception from glare.common import wsgi LOG = logging.getLogger(__name__) class Fault(object): def __init__(self, error): self.error = error @webob.dec.wsgify(RequestClass=wsgi.Request) def __call__(self, req): serializer = wsgi.JSONResponseSerializer() resp = webob.Response(request=req) default_webob_exc = webob.exc.HTTPInternalServerError() resp.status_code = self.error.get('code', default_webob_exc.code) serializer.default(resp, self.error) return resp class GlareFaultWrapperFilter(base_middleware.ConfigurableMiddleware): """Replace error body with something the client can parse.""" error_map = { 'BadRequest': webob.exc.HTTPBadRequest, 'Unauthorized': webob.exc.HTTPUnauthorized, 'Forbidden': webob.exc.HTTPForbidden, 'NotFound': webob.exc.HTTPNotFound, 'RequestTimeout': webob.exc.HTTPRequestTimeout, 'Conflict': webob.exc.HTTPConflict, 'Gone': webob.exc.HTTPGone, 'PreconditionFailed': webob.exc.HTTPPreconditionFailed, 'RequestEntityTooLarge': webob.exc.HTTPRequestEntityTooLarge, 'UnsupportedMediaType': webob.exc.HTTPUnsupportedMediaType, 'RequestRangeNotSatisfiable': webob.exc.HTTPRequestRangeNotSatisfiable, 'Locked': webob.exc.HTTPLocked, 'FailedDependency': webob.exc.HTTPFailedDependency, 'NotAcceptable': webob.exc.HTTPNotAcceptable, 'Exception': webob.exc.HTTPInternalServerError, } def _map_exception_to_error(self, class_exception): if class_exception.__name__ not in self.error_map: return self._map_exception_to_error(class_exception.__base__) return self.error_map[class_exception.__name__] def _error(self, ex): traceback_marker = 'Traceback (most recent call last)' webob_exc = None ex_type = reflection.get_class_name(ex, fully_qualified=False) full_message = six.text_type(ex) if traceback_marker in full_message: message, msg_trace = full_message.split(traceback_marker, 1) message = message.rstrip('\n') msg_trace = traceback_marker + msg_trace else: msg_trace = 'None\n' if sys.exc_info() != (None, None, None): msg_trace = traceback.format_exc() message = full_message if isinstance(ex, exception.GlareException): message = six.text_type(ex) if not webob_exc: webob_exc = self._map_exception_to_error(ex.__class__) error = { 'code': webob_exc.code, 'title': webob_exc.title, 'explanation': webob_exc.explanation, 'error': { 'message': message, 'type': ex_type, } } if cfg.CONF.debug: error['error']['traceback'] = msg_trace # add microversion header is this is not acceptable request if isinstance(ex, exception.InvalidGlobalAPIVersion): error['min_version'] = ex.kwargs['min_ver'] error['max_version'] = ex.kwargs['max_ver'] return error @webob.dec.wsgify def __call__(self, req): try: return req.get_response(self.application) except Exception as exc: LOG.exception(exc) return req.get_response(Fault(self._error(exc))) glare-0.5.0/glare/api/middleware/keycloak_auth.py000066400000000000000000000134101317401036700217640ustar00rootroot00000000000000# Copyright 2017 - Nokia Networks # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import jwt import memcache from oslo_config import cfg from oslo_log import log as logging from oslo_middleware import base as base_middleware import pprint import requests from six.moves import urllib import webob.dec from glare.common import exception from glare.common import utils from glare.i18n import _ LOG = logging.getLogger(__name__) keycloak_oidc_opts = [ cfg.StrOpt( 'auth_url', default='http://127.0.0.1:8080/auth', help='Keycloak base url (e.g. https://my.keycloak:8443/auth)' ), cfg.StrOpt( 'user_info_endpoint_url', default='/realms/%s/protocol/openid-connect/userinfo', help='Endpoint against which authorization will be performed' ), cfg.StrOpt( 'certfile', help='Required if identity server requires client certificate' ), cfg.StrOpt( 'keyfile', help='Required if identity server requires client certificate' ), cfg.StrOpt( 'cafile', help='A PEM encoded Certificate Authority to use when verifying ' 'HTTPs connections. Defaults to system CAs.' ), cfg.BoolOpt( 'insecure', default=False, help='If True, SSL/TLS certificate verification is disabled' ), cfg.StrOpt( 'memcached_server', default=None, help='Url of memcached server to use for caching' ), cfg.IntOpt( 'token_cache_time', default=60, min=0, help='In order to prevent excessive effort spent validating ' 'tokens, the middleware caches previously-seen tokens ' 'for a configurable duration (in seconds).' ), ] CONF = cfg.CONF CONF.register_opts(keycloak_oidc_opts, group="keycloak_oidc") class KeycloakAuthMiddleware(base_middleware.Middleware): def __init__(self, app): super(KeycloakAuthMiddleware, self).__init__(application=app) mcserv_url = CONF.keycloak_oidc.memcached_server self.mcclient = memcache.Client(mcserv_url) if mcserv_url else None self.certfile = CONF.keycloak_oidc.certfile self.keyfile = CONF.keycloak_oidc.keyfile self.cafile = CONF.keycloak_oidc.cafile or utils.get_system_ca_file() self.insecure = CONF.keycloak_oidc.insecure self.url_template = CONF.keycloak_oidc.auth_url + \ CONF.keycloak_oidc.user_info_endpoint_url def authenticate(self, access_token, realm_name): info = None if self.mcclient: info = self.mcclient.get(access_token) if info is None and CONF.keycloak_oidc.user_info_endpoint_url: url = self.url_template % realm_name verify = None if urllib.parse.urlparse(url).scheme == "https": verify = False if self.insecure else self.cafile cert = (self.certfile, self.keyfile) \ if self.certfile and self.keyfile else None try: resp = requests.get( url, headers={"Authorization": "Bearer %s" % access_token}, verify=verify, cert=cert ) except requests.ConnectionError: msg = _("Can't connect to keycloak server with address '%s'." ) % CONF.keycloak_oidc.auth_url LOG.error(msg) raise exception.GlareException(message=msg) if resp.status_code == 401: raise exception.Unauthorized(message=resp.text) if resp.status_code == 403: raise exception.Forbidden(message=resp.text) elif resp.status_code >= 400: raise exception.GlareException(message=resp.text) if self.mcclient: self.mcclient.set(access_token, resp.json(), time=CONF.keycloak_oidc.token_cache_time) info = resp.json() LOG.debug("HTTP response from OIDC provider: %s", pprint.pformat(info)) return info @webob.dec.wsgify def __call__(self, request): if 'X-Auth-Token' not in request.headers: msg = _("Auth token must be provided in 'X-Auth-Token' header.") LOG.error(msg) raise exception.Unauthorized() access_token = request.headers.get('X-Auth-Token') try: decoded = jwt.decode(access_token, algorithms=['RS256'], verify=False) except Exception: msg = _("Token can't be decoded because of wrong format.") LOG.error(msg) raise exception.Unauthorized() # Get user realm from parsed token # Format is "iss": "http://:/auth/realms/", __, __, realm_name = decoded['iss'].strip().rpartition('/realms/') # Get roles from from parsed token roles = ','.join(decoded['realm_access']['roles']) \ if 'realm_access' in decoded else '' self.authenticate(access_token, realm_name) request.headers["X-Identity-Status"] = "Confirmed" request.headers["X-Project-Id"] = realm_name request.headers["X-Roles"] = roles return request.get_response(self.application) glare-0.5.0/glare/api/middleware/version_negotiation.py000066400000000000000000000114651317401036700232360ustar00rootroot00000000000000# Copyright 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ A filter middleware that inspects the requested URI for a version string and/or Accept headers and attempts to negotiate an API controller to return. """ import microversion_parse from oslo_log import log as logging from oslo_middleware import base as base_middleware from glare.api.v1 import api_version_request as api_version from glare.api import versions as artifacts_versions from glare.common import exception LOG = logging.getLogger(__name__) class GlareVersionNegotiationFilter(base_middleware.ConfigurableMiddleware): """Middleware that defines API version in request and redirects it to correct Router. """ SERVICE_TYPE = 'artifact' MIME_TYPE = 'application/vnd.openstack.artifacts-' @staticmethod def get_version_from_accept(accept_header): """Try to parse accept header to extract api version. :param accept_header: accept header :return: version string in the request or None if not specified """ accept = str(accept_header) if accept.startswith(GlareVersionNegotiationFilter.MIME_TYPE): LOG.debug("Using media-type versioning") return accept[len(GlareVersionNegotiationFilter.MIME_TYPE):] return None @staticmethod def process_request(req): """Process api request: 1. Define if this is request for available versions or not 2. If it is not version request check extract version 3. Validate available version and add version info to request """ args = {'method': req.method, 'path': req.path, 'accept': req.accept} LOG.debug("Determining version of request: %(method)s %(path)s " "Accept: %(accept)s", args) # determine if this is request for versions if req.path_info in ('/versions', '/'): return artifacts_versions.Controller.index(req) # determine api version from request req_version = GlareVersionNegotiationFilter.get_version_from_accept( req.accept) if req_version is None: # determine api version from microversion header LOG.debug("Determine version from microversion header.") req_version = microversion_parse.get_version( req.headers, service_type=GlareVersionNegotiationFilter.SERVICE_TYPE) # validate microversions header req.api_version_request = \ GlareVersionNegotiationFilter._get_api_version_request( req_version) req_version = req.api_version_request.get_string() LOG.debug("Matched version: %s", req_version) LOG.debug('new path %s', req.path_info) @staticmethod def _get_api_version_request(req_version): """Set API version for request based on the version header string.""" if req_version is None: LOG.debug("No API version in request header. Use default version.") cur_ver = api_version.APIVersionRequest.default_version() elif req_version == 'latest': # 'latest' is a special keyword which is equivalent to # requesting the maximum version of the API supported cur_ver = api_version.APIVersionRequest.max_version() else: cur_ver = api_version.APIVersionRequest(req_version) # Check that the version requested is within the global # minimum/maximum of supported API versions if not cur_ver.matches(cur_ver.min_version(), cur_ver.max_version()): raise exception.InvalidGlobalAPIVersion( req_ver=cur_ver.get_string(), min_ver=cur_ver.min_version().get_string(), max_ver=cur_ver.max_version().get_string()) return cur_ver @staticmethod def process_response(response, request=None): if hasattr(response, 'headers'): if hasattr(request, 'api_version_request'): api_header_name = microversion_parse.STANDARD_HEADER response.headers[api_header_name] = ( GlareVersionNegotiationFilter.SERVICE_TYPE + ' ' + request.api_version_request.get_string()) response.headers.add('Vary', api_header_name) return response glare-0.5.0/glare/api/v1/000077500000000000000000000000001317401036700150015ustar00rootroot00000000000000glare-0.5.0/glare/api/v1/__init__.py000066400000000000000000000000001317401036700171000ustar00rootroot00000000000000glare-0.5.0/glare/api/v1/api_version_request.py000066400000000000000000000105421317401036700214430ustar00rootroot00000000000000# Copyright 2016 Openstack Foundation. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import re from glare.common import exception from glare.i18n import _ REST_API_VERSION_HISTORY = """REST API Version History: * 1.1 Added dynamic quotas API request. Added a possibility to delete blobs with external locations. Added a possibility to define system locations to blobs. * 1.0 - First stable API version that supports microversion. If API version is not specified in the request then API v1.0 is used as default API version. """ class APIVersionRequest(object): """This class represents an API Version Request with convenience methods for manipulation and comparison of version numbers that we need to do to implement microversions. """ _MIN_API_VERSION = "1.0" _MAX_API_VERSION = "1.1" _DEFAULT_API_VERSION = "1.0" def __init__(self, version_string): """Create an API version request object. :param version_string: String representation of APIVersionRequest. Correct format is 'X.Y', where 'X' and 'Y' are int values. """ match = re.match(r"^([1-9]\d*)\.([1-9]\d*|0)$", version_string) if match: self.ver_major = int(match.group(1)) self.ver_minor = int(match.group(2)) else: msg = _("API version string %s is not valid. " "Cannot determine API version.") % version_string raise exception.BadRequest(msg) def __str__(self): """Debug/Logging representation of object.""" return ("API Version Request Major: %s, Minor: %s" % (self.ver_major, self.ver_minor)) def _format_type_error(self, other): return TypeError(_("'%(other)s' should be an instance of '%(cls)s'") % {"other": other, "cls": self.__class__}) def __lt__(self, other): if not isinstance(other, APIVersionRequest): raise self._format_type_error(other) return ((self.ver_major, self.ver_minor) < (other.ver_major, other.ver_minor)) def __eq__(self, other): if not isinstance(other, APIVersionRequest): raise self._format_type_error(other) return ((self.ver_major, self.ver_minor) == (other.ver_major, other.ver_minor)) def __gt__(self, other): if not isinstance(other, APIVersionRequest): raise self._format_type_error(other) return ((self.ver_major, self.ver_minor) > (other.ver_major, other.ver_minor)) def __le__(self, other): return self < other or self == other def __ne__(self, other): return not self.__eq__(other) def __ge__(self, other): return self > other or self == other def matches(self, min_version, max_version): """Returns whether the version object represents a version greater than or equal to the minimum version and less than or equal to the maximum version. :param min_version: Minimum acceptable version. :param max_version: Maximum acceptable version. :returns: boolean """ return min_version <= self <= max_version def get_string(self): """Converts object to string representation which is used to create an APIVersionRequest object results in the same version request. """ return "%s.%s" % (self.ver_major, self.ver_minor) @classmethod def min_version(cls): """Minimal allowed api version.""" return APIVersionRequest(cls._MIN_API_VERSION) @classmethod def max_version(cls): """Maximal allowed api version.""" return APIVersionRequest(cls._MAX_API_VERSION) @classmethod def default_version(cls): """Default api version if no version in request.""" return APIVersionRequest(cls._DEFAULT_API_VERSION) glare-0.5.0/glare/api/v1/api_versioning.py000066400000000000000000000145361317401036700204000ustar00rootroot00000000000000# Copyright (c) 2016 Mirantis, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import functools from glare.api.v1 import api_version_request as api_version from glare.common import exception as exc from glare.i18n import _ class VersionedMethod(object): def __init__(self, name, start_version, end_version, func): """Versioning information for a single method. :param name: Name of the method :param start_version: Minimum acceptable version :param end_version: Maximum acceptable_version :param func: Method to call """ # NOTE(kairat): minimums and maximums are inclusive self.name = name self.start_version = start_version self.end_version = end_version self.func = func def __str__(self): return ("Version Method %s: min: %s, max: %s" % (self.name, self.start_version, self.end_version)) class VersionedResource(object): """Versioned mixin that provides ability to define versioned methods and return appropriate methods based on user request. """ # prefix for all versioned methods in class VER_METHODS_ATTR_PREFIX = 'versioned_methods_' @staticmethod def check_for_versions_intersection(func_list): """Determines whether function list contains version intervals intersections or not. General algorithm: https://en.wikipedia.org/wiki/Intersection_algorithm :param func_list: list of VersionedMethod objects :return: boolean """ pairs = [] counter = 0 for f in func_list: pairs.append((f.start_version, 1, f)) pairs.append((f.end_version, -1, f)) def compare(x): return x[0] pairs.sort(key=compare) for p in pairs: counter += p[1] if counter > 1: return True return False @classmethod def supported_versions(cls, min_ver, max_ver=None): """Decorator for versioning api methods. Add the decorator to any method which takes a request object as the first parameter and belongs to a class which inherits from wsgi.Controller. The implementation inspired by Nova. :param min_ver: string representing minimum version :param max_ver: optional string representing maximum version """ def decorator(f): obj_min_ver = api_version.APIVersionRequest(min_ver) if max_ver: obj_max_ver = api_version.APIVersionRequest(max_ver) else: obj_max_ver = api_version.APIVersionRequest.max_version() # Add to list of versioned methods registered func_name = f.__name__ new_func = VersionedMethod(func_name, obj_min_ver, obj_max_ver, f) versioned_attr = cls.VER_METHODS_ATTR_PREFIX + cls.__name__ func_dict = getattr(cls, versioned_attr, {}) if not func_dict: setattr(cls, versioned_attr, func_dict) func_list = func_dict.get(func_name, []) if not func_list: func_dict[func_name] = func_list func_list.append(new_func) # Ensure the list is sorted by minimum version (reversed) # so later when we work through the list in order we find # the method which has the latest version which supports # the version requested. is_intersect = cls.check_for_versions_intersection( func_list) if is_intersect: raise exc.ApiVersionsIntersect( name=new_func.name, min_ver=new_func.start_version, max_ver=new_func.end_version, ) func_list.sort(key=lambda vf: vf.start_version, reverse=True) return f return decorator def __getattribute__(self, key): def version_select(*args, **kwargs): """Look for the method which matches the name supplied and version constraints and calls it with the supplied arguments. :returns: Returns the result of the method called :raises: VersionNotFoundForAPIMethod if there is no method which matches the name and version constraints """ # versioning is used in 3 classes: request deserializer and # controller have request as first argument # response serializer has response as first argument # we must respect all three cases if hasattr(args[0], 'api_version_request'): ver = args[0].api_version_request elif hasattr(args[0], 'request'): ver = args[0].request.api_version_request else: raise exc.VersionNotFoundForAPIMethod( message=_("Api version not found in the request.")) func_list = self.versioned_methods[key] for func in func_list: if ver.matches(func.start_version, func.end_version): # Update the version_select wrapper function so # other decorator attributes like wsgi.response # are still respected. functools.update_wrapper(version_select, func.func) return func.func(self, *args, **kwargs) # No version match raise exc.VersionNotFoundForAPIMethod(version=ver) class_obj = object.__getattribute__(self, '__class__') prefix = object.__getattribute__(self, 'VER_METHODS_ATTR_PREFIX') attr_name = prefix + object.__getattribute__(class_obj, '__name__') try: if key in object.__getattribute__(self, attr_name): return version_select except AttributeError: # No versioning on this class pass return object.__getattribute__(self, key) glare-0.5.0/glare/api/v1/resource.py000066400000000000000000000566561317401036700172240ustar00rootroot00000000000000# Copyright (c) 2016 Mirantis, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """WSGI Resource definition for Glare. Defines Glare API and serialization/ deserialization of incoming requests.""" import json import jsonpatch import jsonschema from oslo_config import cfg from oslo_log import log as logging from oslo_utils import encodeutils import six from six.moves import http_client import six.moves.urllib.parse as urlparse from glare.api.v1 import api_versioning from glare.common import exception as exc from glare.common import wsgi from glare import engine from glare.i18n import _ LOG = logging.getLogger(__name__) CONF = cfg.CONF list_configs = [ cfg.IntOpt('default_api_limit', default=25, help=_('Default value for the number of items returned by a ' 'request if not specified explicitly in the request')), cfg.IntOpt('max_api_limit', default=1000, help=_('Maximum permissible number of items that could be ' 'returned by a request')), ] CONF.register_opts(list_configs) supported_versions = api_versioning.VersionedResource.supported_versions QUOTA_SCHEMA = { 'type': 'object', 'properties': { 'quota_name': { u'maxLength': 255, u'minLength': 1, u'pattern': u'^[^:]*:?[^:]*$', # can have only 1 or 0 ':' u'type': u'string'}, 'quota_value': {'type': 'integer', u'minimum': -1}, }, 'required': ['quota_name', 'quota_value'] } QUOTA_INPUT_SCHEMA = { "$schema": "http://json-schema.org/draft-04/schema#", "items": { "properties": { "project_id": { u'maxLength': 255, u'minLength': 1, "type": "string" }, "project_quotas": { "items": QUOTA_SCHEMA, "type": "array" } }, "type": "object", "required": ["project_id", "project_quotas"] }, "type": "array" } class RequestDeserializer(api_versioning.VersionedResource, wsgi.JSONRequestDeserializer): """Glare deserializer for incoming webob requests. Deserializer checks and converts incoming request into a bunch of Glare primitives. So other service components don't work with requests at all. Deserializer also performs primary API validation without any knowledge about concrete artifact type structure. """ ALLOWED_LOCATION_TYPES = ('external', 'internal') @staticmethod def _get_content_type(req, expected=None): """Determine content type of the request body.""" if "Content-Type" not in req.headers: msg = _("Content-Type must be specified.") LOG.error(msg) raise exc.BadRequest(msg) content_type = req.content_type if expected is not None and content_type not in expected: msg = (_('Invalid content type: %(ct)s. Expected: %(exp)s') % {'ct': content_type, 'exp': ', '.join(expected)}) raise exc.UnsupportedMediaType(message=msg) return content_type @staticmethod def _get_content_length(req): """Determine content length of the request body.""" if req.content_length is None: return try: content_length = int(req.content_length) if content_length < 0: raise ValueError except ValueError: msg = _("Content-Length must be a non negative integer.") LOG.error(msg) raise exc.BadRequest(msg) return content_length def _get_request_body(self, req): """Get request json body and convert it to python structures.""" return self.from_json(req.body) @supported_versions(min_ver='1.0') def create(self, req): self._get_content_type(req, expected=['application/json']) body = self._get_request_body(req) if not isinstance(body, dict): msg = _("Dictionary expected as body value. Got %s.") % type(body) raise exc.BadRequest(msg) return {'values': body} @supported_versions(min_ver='1.0') def list(self, req): params = req.params.copy() marker = params.pop('marker', None) query_params = {} # step 1 - apply marker to query if exists if marker is not None: query_params['marker'] = marker # step 2 - apply limit (if exists OR setup default limit) limit = params.pop('limit', CONF.default_api_limit) try: limit = int(limit) except ValueError: msg = _("Limit param must be an integer.") raise exc.BadRequest(message=msg) if limit < 0: msg = _("Limit param must be positive.") raise exc.BadRequest(message=msg) query_params['limit'] = min(CONF.max_api_limit, limit) # step 3 - parse sort parameters if 'sort' in params: sort = [] for sort_param in params.pop('sort').strip().split(','): key, _sep, direction = sort_param.partition(':') if direction and direction not in ('asc', 'desc'): raise exc.BadRequest('Sort direction must be one of ' '["asc", "desc"]. Got %s direction' % direction) sort.append((key, direction or 'desc')) query_params['sort'] = sort # step 4 - parse filter parameters filters = [] for fname, fval in params.items(): if fname == 'version' and fval == 'latest': query_params['latest'] = True else: filters.append((fname, fval)) query_params['filters'] = filters return query_params @supported_versions(min_ver='1.0') def update(self, req): self._get_content_type( req, expected=['application/json-patch+json']) body = self._get_request_body(req) patch = jsonpatch.JsonPatch(body) try: # Initially patch object doesn't validate input. It's only checked # when we call get operation on each method tuple(map(patch._get_operation, patch.patch)) except (jsonpatch.InvalidJsonPatch, TypeError, AttributeError, jsonpatch.JsonPointerException): msg = _("Json Patch body is malformed") raise exc.BadRequest(msg) return {'patch': patch} @supported_versions(min_ver='1.0') def upload_blob(self, req): content_type = self._get_content_type(req) content_length = self._get_content_length(req) if content_type == ('application/vnd+openstack.glare-custom-location' '+json'): data = self._get_request_body(req) if 'url' not in data: msg = _("url is required when specifying external location. " "Cannot find 'url' in request body: %s") % str(data) raise exc.BadRequest(msg) location_type = data.get('location_type', 'external') if location_type not in self.ALLOWED_LOCATION_TYPES: msg = (_("Incorrect location type '%(location_type)s'. It " "must be one of the following %(allowed)s") % {'location_type': location_type, 'allowed': ', '.join(self.ALLOWED_LOCATION_TYPES)}) raise exc.BadRequest(msg) if location_type == 'external': url = data.get('url') if not url.startswith('http'): msg = _("Url '%s' doesn't have http(s) scheme") % url raise exc.BadRequest(msg) if 'md5' not in data: msg = _("Incorrect blob metadata. MD5 must be specified " "for external location in artifact blob.") raise exc.BadRequest(msg) else: data = req.body_file if self.is_valid_encoding(req) and self.is_valid_method(req): req.is_body_readable = True return {'data': data, 'content_type': content_type, 'content_length': content_length} @supported_versions(min_ver='1.1') def set_quotas(self, req): self._get_content_type(req, expected=['application/json']) body = self._get_request_body(req) try: jsonschema.validate(body, QUOTA_INPUT_SCHEMA) except jsonschema.exceptions.ValidationError as e: raise exc.BadRequest(e) values = {} for item in body: project_id = item['project_id'] values[project_id] = {} for quota in item['project_quotas']: values[project_id][quota['quota_name']] = quota['quota_value'] return {'values': values} # TODO(mfedosin) add pagination to list of quotas def log_request_progress(f): def log_decorator(self, req, *args, **kwargs): LOG.debug("Request %(request_id)s for %(api_method)s successfully " "deserialized. Pass request parameters to Engine", {'request_id': req.context.request_id, 'api_method': f.__name__}) result = f(self, req, *args, **kwargs) LOG.info( "Request %(request_id)s for artifact %(api_method)s " "successfully executed.", {'request_id': req.context.request_id, 'api_method': f.__name__}) return result return log_decorator class ArtifactsController(api_versioning.VersionedResource): """API controller for Glare Artifacts. Artifact Controller prepares incoming data for Glare Engine and redirects data to the appropriate engine method. Once the response data is returned from the engine Controller passes it next to Response Serializer. """ def __init__(self): self.engine = engine.Engine() @supported_versions(min_ver='1.0') @log_request_progress def list_type_schemas(self, req): """List of detailed descriptions of enabled artifact types. :param req: user request :return: list of json-schemas of all enabled artifact types. """ return self.engine.show_type_schemas(req.context) @supported_versions(min_ver='1.0') @log_request_progress def show_type_schema(self, req, type_name): """Get detailed artifact type description. :param req: user request :param type_name: artifact type name :return: json-schema representation of artifact type """ type_schema = self.engine.show_type_schemas(req.context, type_name) return {type_name: type_schema} @supported_versions(min_ver='1.0') @log_request_progress def create(self, req, type_name, values): """Create artifact record in Glare. :param req: user request :param type_name: artifact type name :param values: dict with artifact fields :return: definition of created artifact """ if req.context.tenant is None or req.context.read_only: msg = _("It's forbidden to anonymous users to create artifacts.") raise exc.Forbidden(msg) if not values.get('name'): msg = _("Name must be specified at creation.") raise exc.BadRequest(msg) for field in ('visibility', 'status'): if field in values: msg = _("%s is not allowed in a request at creation.") % field raise exc.BadRequest(msg) return self.engine.create(req.context, type_name, values) @supported_versions(min_ver='1.0') @log_request_progress def update(self, req, type_name, artifact_id, patch): """Update artifact record in Glare. :param req: User request :param type_name: Artifact type name :param artifact_id: id of artifact to update :param patch: json patch with artifact changes :return: definition of updated artifact """ return self.engine.save(req.context, type_name, artifact_id, patch) @supported_versions(min_ver='1.0') @log_request_progress def delete(self, req, type_name, artifact_id): """Delete artifact from Glare. :param req: User request :param type_name: Artifact type name :param artifact_id: id of artifact to delete """ return self.engine.delete(req.context, type_name, artifact_id) @supported_versions(min_ver='1.0') @log_request_progress def show(self, req, type_name, artifact_id): """Show detailed artifact info. :param req: User request :param type_name: Artifact type name :param artifact_id: id of artifact to show :return: definition of requested artifact """ return self.engine.show(req.context, type_name, artifact_id) @supported_versions(min_ver='1.0') @log_request_progress def list(self, req, type_name, filters=None, marker=None, limit=None, sort=None, latest=False): """List available artifacts. :param req: User request :param type_name: Artifact type name :param filters: filters that need to be applied to artifact :param marker: the artifact that considered as begin of the list so all artifacts before marker (including marker itself) will not be added to artifact list :param limit: maximum number of items in list :param sort: sorting options :param latest: flag that indicates, that only artifacts with highest versions should be returned in output :return: list of requested artifact definitions """ artifacts = self.engine.list(req.context, type_name, filters, marker, limit, sort, latest) result = {'artifacts': artifacts, 'type_name': type_name} if len(artifacts) != 0 and len(artifacts) == limit: result['next_marker'] = artifacts[-1]['id'] return result @staticmethod def _parse_blob_path(blob_path): field_name, _sep, blob_key = blob_path.partition('/') if not blob_key: blob_key = None return field_name, blob_key @supported_versions(min_ver='1.0') @log_request_progress def upload_blob(self, req, type_name, artifact_id, blob_path, data, content_type, content_length=None): """Upload blob into Glare repo. :param req: User request :param type_name: Artifact type name :param artifact_id: id of artifact where to perform upload :param blob_path: path to artifact blob :param data: blob payload :param content_type: data content-type :param content_length: amount of data user wants to upload :return: definition of requested artifact with uploaded blob """ field_name, blob_key = self._parse_blob_path(blob_path) if content_type == ('application/vnd+openstack.glare-custom-location' '+json'): url = data.pop('url') return self.engine.add_blob_location( req.context, type_name, artifact_id, field_name, url, data, blob_key) else: return self.engine.upload_blob( req.context, type_name, artifact_id, field_name, data, content_type, content_length, blob_key) @supported_versions(min_ver='1.0') @log_request_progress def download_blob(self, req, type_name, artifact_id, blob_path): """Download blob data from Artifact. :param req: User request :param type_name: artifact type name :param artifact_id: id of artifact from where to perform download :param blob_path: path to artifact blob :return: requested blob data """ field_name, blob_key = self._parse_blob_path(blob_path) data, meta = self.engine.download_blob( req.context, type_name, artifact_id, field_name, blob_key) result = {'data': data, 'meta': meta} return result @supported_versions(min_ver='1.1') @log_request_progress def delete_external_blob(self, req, type_name, artifact_id, blob_path): """Delete blob with external location from Glare repo. :param req: User request :param type_name: Artifact type name :param artifact_id: id of artifact with the blob to delete :param blob_path: path to artifact blob """ field_name, blob_key = self._parse_blob_path(blob_path) return self.engine.delete_external_blob( req.context, type_name, artifact_id, field_name, blob_key) @supported_versions(min_ver='1.1') @log_request_progress def set_quotas(self, req, values): """Set quota records in Glare. :param req: user request :param values: list with quota values to set """ self.engine.set_quotas(req.context, values) @supported_versions(min_ver='1.1') @log_request_progress def list_all_quotas(self, req): """Get detailed info about all available quotas. :param req: user request :return: definition of requested quotas for the project """ return self.engine.list_all_quotas(req.context) @supported_versions(min_ver='1.1') @log_request_progress def list_project_quotas(self, req, project_id=None): """Get detailed info about project quotas. :param req: user request :param project_id: id of the project for which to show quotas :return: definition of requested quotas for the project """ return self.engine.list_project_quotas(req.context, project_id) class ResponseSerializer(api_versioning.VersionedResource, wsgi.JSONResponseSerializer): """Glare serializer for outgoing responses. Converts data received from the engine to WSGI responses. It also specifies proper response status and content type as declared in the API. """ @staticmethod def _prepare_json_response(response, result, content_type='application/json'): body = json.dumps(result, ensure_ascii=False) response.text = six.text_type(body) response.content_type = content_type + '; charset=UTF-8' def list_type_schemas(self, response, type_schemas): self._prepare_json_response(response, {'schemas': type_schemas}, content_type='application/schema+json') def show_type_schema(self, response, type_schema): self._prepare_json_response(response, {'schemas': type_schema}, content_type='application/schema+json') @supported_versions(min_ver='1.0') def list_schemas(self, response, type_list): self._prepare_json_response(response, {'types': type_list}) @supported_versions(min_ver='1.0') def create(self, response, artifact): self._prepare_json_response(response, artifact) response.status_int = http_client.CREATED @supported_versions(min_ver='1.0') def show(self, response, artifact): self._prepare_json_response(response, artifact) @supported_versions(min_ver='1.0') def update(self, response, artifact): self._prepare_json_response(response, artifact) @supported_versions(min_ver='1.0') def list(self, response, af_list): params = dict(response.request.params) params.pop('marker', None) encode_params = {} for key, value in params.items(): encode_params[key] = encodeutils.safe_encode(value) query = urlparse.urlencode(encode_params) type_name = af_list['type_name'] body = { type_name: af_list['artifacts'], 'first': '/artifacts/%s' % type_name, 'schema': '/schemas/%s' % type_name, } if query: body['first'] = '%s?%s' % (body['first'], query) if 'next_marker' in af_list: params['marker'] = af_list['next_marker'] next_query = urlparse.urlencode(params) body['next'] = '/artifacts/%s?%s' % (type_name, next_query) self._prepare_json_response(response, body) @supported_versions(min_ver='1.0') def delete(self, response, result): response.status_int = http_client.NO_CONTENT @supported_versions(min_ver='1.0') def upload_blob(self, response, artifact): self._prepare_json_response(response, artifact) @staticmethod def _serialize_blob(response, result): data, meta = result['data'], result['meta'] response.app_iter = iter(data) response.headers['Content-Type'] = meta['content_type'] response.headers['Content-MD5'] = meta['md5'] response.headers['X-Openstack-Glare-Content-SHA1'] = meta['sha1'] response.headers['X-Openstack-Glare-Content-SHA256'] = meta['sha256'] response.content_length = str(meta['size']) @staticmethod def _serialize_location(response, result): data, meta = result['data'], result['meta'] response.headers['Content-MD5'] = meta['md5'] response.headers['X-Openstack-Glare-Content-SHA1'] = meta['sha1'] response.headers['X-Openstack-Glare-Content-SHA256'] = meta['sha256'] response.location = data['url'] response.content_type = 'application/json' response.status = http_client.MOVED_PERMANENTLY response.content_length = 0 @supported_versions(min_ver='1.0') def download_blob(self, response, result): external = result['meta']['external'] if external: self._serialize_location(response, result) else: self._serialize_blob(response, result) @supported_versions(min_ver='1.1') def delete_external_blob(self, response, result): self._prepare_json_response(response, result) @staticmethod def _serialize_quota(quotas): res = [] for project_id, project_quotas in quotas.items(): quota_list = [] for quota_name, quota_value in project_quotas.items(): quota_list.append({ 'quota_name': quota_name, 'quota_value': quota_value, }) res.append({ 'project_id': project_id, 'project_quotas': quota_list }) return res @supported_versions(min_ver='1.1') def list_all_quotas(self, response, quotas): quotas['quotas'] = self._serialize_quota(quotas['quotas']) self._prepare_json_response(response, quotas) @supported_versions(min_ver='1.1') def list_project_quotas(self, response, quotas): quotas = self._serialize_quota(quotas) self._prepare_json_response(response, quotas) def create_resource(): """Artifact resource factory method.""" deserializer = RequestDeserializer() serializer = ResponseSerializer() controller = ArtifactsController() return wsgi.Resource(controller, deserializer, serializer) glare-0.5.0/glare/api/v1/router.py000066400000000000000000000135211317401036700166750ustar00rootroot00000000000000# Copyright (c) 2016 Mirantis, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from glare.api.v1 import resource from glare.common import wsgi class API(wsgi.Router): """WSGI router for Glare v1 API requests. API Router redirects incoming requests to appropriate WSGI resource method. """ def __init__(self, mapper): glare_resource = resource.create_resource() reject_method_resource = wsgi.Resource(wsgi.RejectMethodController()) # ---schemas--- mapper.connect('/schemas', controller=glare_resource, action='list_type_schemas', conditions={'method': ['GET']}, body_reject=True) mapper.connect('/schemas', controller=reject_method_resource, action='reject', allowed_methods='GET') mapper.connect('/schemas/{type_name}', controller=glare_resource, action='show_type_schema', conditions={'method': ['GET']}, body_reject=True) mapper.connect('/schemas/{type_name}', controller=reject_method_resource, action='reject', allowed_methods='GET') # ---artifacts--- mapper.connect('/artifacts/{type_name}', controller=glare_resource, action='list', conditions={'method': ['GET']}, body_reject=True) mapper.connect('/artifacts/{type_name}', controller=glare_resource, action='create', conditions={'method': ['POST']}) mapper.connect('/artifacts/{type_name}', controller=reject_method_resource, action='reject', allowed_methods='GET, POST') mapper.connect('/artifacts/{type_name}/{artifact_id}', controller=glare_resource, action='update', conditions={'method': ['PATCH']}) mapper.connect('/artifacts/{type_name}/{artifact_id}', controller=glare_resource, action='show', conditions={'method': ['GET']}, body_reject=True) mapper.connect('/artifacts/{type_name}/{artifact_id}', controller=glare_resource, action='delete', conditions={'method': ['DELETE']}, body_reject=True) mapper.connect('/artifacts/{type_name}/{artifact_id}', controller=reject_method_resource, action='reject', allowed_methods='GET, PATCH, DELETE') # ---blobs--- mapper.connect('/artifacts/{type_name}/{artifact_id}/{blob_path:.*?}', controller=glare_resource, action='download_blob', conditions={'method': ['GET']}, body_reject=True) mapper.connect('/artifacts/{type_name}/{artifact_id}/{blob_path:.*?}', controller=glare_resource, action='upload_blob', conditions={'method': ['PUT']}) mapper.connect('/artifacts/{type_name}/{artifact_id}/{blob_path:.*?}', controller=glare_resource, action='delete_external_blob', conditions={'method': ['DELETE']}) mapper.connect('/artifacts/{type_name}/{artifact_id}/{blob_path:.*?}', controller=reject_method_resource, action='reject', allowed_methods='GET, PUT, DELETE') # ---quotas--- mapper.connect('/quotas', controller=glare_resource, action='set_quotas', conditions={'method': ['PUT']}) mapper.connect('/quotas', controller=glare_resource, action='list_all_quotas', conditions={'method': ['GET']}, body_reject=True) mapper.connect('/quotas', controller=reject_method_resource, action='reject', allowed_methods='PUT, GET') mapper.connect('/project-quotas', controller=glare_resource, action='list_project_quotas', conditions={'method': ['GET']}, body_reject=True) mapper.connect('/project-quotas', controller=reject_method_resource, action='reject', allowed_methods='GET') mapper.connect('/project-quotas/{project_id}', controller=glare_resource, action='list_project_quotas', conditions={'method': ['GET']}, body_reject=True) mapper.connect('/project-quotas/{project_id}', controller=reject_method_resource, action='reject', allowed_methods='GET') super(API, self).__init__(mapper) glare-0.5.0/glare/api/versions.py000066400000000000000000000051451317401036700167020ustar00rootroot00000000000000# Copyright 2012 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg from oslo_serialization import jsonutils from six.moves import http_client import webob.dec from glare.i18n import _ versions_opts = [ cfg.StrOpt('public_endpoint', help=_(""" Public url endpoint to use for Glare versions response. This is the public url endpoint that will appear in the Glare "versions" response. If no value is specified, the endpoint that is displayed in the version's response is that of the host running the API service. Change the endpoint to represent the proxy URL if the API service is running behind a proxy. If the service is running behind a load balancer, add the load balancer's URL for this value. Services which consume this: * glare Possible values: * None * Proxy URL * Load balancer URL Related options: * None """)), ] CONF = cfg.CONF CONF.register_opts(versions_opts) _LINKS = [{ "rel": "describedby", "type": "text/html", "href": "http://docs.openstack.org/", }] class Controller(object): """A controller that reports which API versions are there.""" @staticmethod def index(req): """Respond to a request for all OpenStack Glare API versions. :param req: user request object :return: list of supported API versions """ version_objs = [ { 'version': '1.0', 'status': 'STABLE', 'links': _LINKS, 'media-type': 'application/vnd.openstack.artifacts-1.0', }, { 'version': '1.1', 'status': 'EXPERIMENTAL', 'links': _LINKS, 'media-type': 'application/vnd.openstack.artifacts-1.1', }] response = webob.Response(request=req, status=http_client.MULTIPLE_CHOICES, content_type='application/json') response.body = jsonutils.dump_as_bytes(dict(versions=version_objs)) return response glare-0.5.0/glare/cmd/000077500000000000000000000000001317401036700144455ustar00rootroot00000000000000glare-0.5.0/glare/cmd/__init__.py000066400000000000000000000036441317401036700165650ustar00rootroot00000000000000# Copyright 2013 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import os import sys import oslo_utils.strutils as strutils from glare import i18n try: import dns # noqa except ImportError: dnspython_installed = False else: dnspython_installed = True def fix_greendns_ipv6(): if dnspython_installed: # All of this is because if dnspython is present in your environment # then eventlet monkeypatches socket.getaddrinfo() with an # implementation which doesn't work for IPv6. What we're checking here # is that the magic environment variable was set when the import # happened. nogreendns = 'EVENTLET_NO_GREENDNS' flag = os.environ.get(nogreendns, '') if 'eventlet' in sys.modules and not strutils.bool_from_string(flag): msg = i18n._("It appears that the eventlet module has been " "imported prior to setting %s='yes'. It is currently " "necessary to disable eventlet.greendns " "if using ipv6 since eventlet.greendns currently " "breaks with ipv6 addresses. Please ensure that " "eventlet is not imported prior to this being set.") raise ImportError(msg % nogreendns) os.environ[nogreendns] = 'yes' i18n.enable_lazy() fix_greendns_ipv6() glare-0.5.0/glare/cmd/api.py000077500000000000000000000051501317401036700155740ustar00rootroot00000000000000#!/usr/bin/env python # # Copyright (c) 2016 Mirantis, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Glare (Glare Artifact Repository) API service. """ import os import sys import eventlet from oslo_utils import encodeutils eventlet.patcher.monkey_patch(all=False, socket=True, time=True, select=True, thread=True, os=True, MySQLdb=True) # If ../glare/__init__.py exists, add ../ to Python search path, so that # it will override what happens to be installed in /usr/(local/)lib/python... possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]), os.pardir, os.pardir)) if os.path.exists(os.path.join(possible_topdir, 'glare', '__init__.py')): sys.path.insert(0, possible_topdir) import glance_store from oslo_config import cfg from oslo_log import log as logging from osprofiler import initializer from glare.common import config from glare.common import exception from glare.common import wsgi from glare import notification CONF = cfg.CONF CONF.import_group("profiler", "glare.common.wsgi") logging.register_options(CONF) KNOWN_EXCEPTIONS = (RuntimeError, exception.WorkerCreationFailure, glance_store.exceptions.BadStoreConfiguration) def fail(e): global KNOWN_EXCEPTIONS return_code = KNOWN_EXCEPTIONS.index(type(e)) + 1 sys.stderr.write("ERROR: %s\n" % encodeutils.exception_to_unicode(e)) sys.exit(return_code) def main(): try: config.parse_args() wsgi.set_eventlet_hub() logging.setup(CONF, 'glare') notification.set_defaults() if CONF.profiler.enabled: initializer.init_from_conf( conf=CONF, context={}, project="glare", service="api", host=CONF.bind_host ) server = wsgi.Server(initialize_glance_store=True) server.start(config.load_paste_app('glare-api'), default_port=9494) server.wait() except KNOWN_EXCEPTIONS as e: fail(e) if __name__ == '__main__': main() glare-0.5.0/glare/cmd/db_manage.py000077500000000000000000000046561317401036700167320ustar00rootroot00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg from oslo_db import options from glare.db.migration import migration CONF = cfg.CONF options.set_defaults(CONF) class DBCommand(object): def upgrade(self, config): migration.upgrade(CONF.command.revision, config=config) def downgrade(self, config): migration.downgrade(CONF.command.revision, config=config) def revision(self, config): migration.revision(CONF.command.message, CONF.command.autogenerate, config=config) def stamp(self, config): migration.stamp(CONF.command.revision, config=config) def version(self, config): print(migration.version()) def add_command_parsers(subparsers): command_object = DBCommand() parser = subparsers.add_parser('upgrade') parser.set_defaults(func=command_object.upgrade) parser.add_argument('--revision', nargs='?') parser = subparsers.add_parser('downgrade') parser.set_defaults(func=command_object.downgrade) parser.add_argument('--revision', nargs='?') parser = subparsers.add_parser('stamp') parser.add_argument('--revision', nargs='?') parser.set_defaults(func=command_object.stamp) parser = subparsers.add_parser('revision') parser.add_argument('-m', '--message') parser.add_argument('--autogenerate', action='store_true') parser.set_defaults(func=command_object.revision) parser = subparsers.add_parser('version') parser.set_defaults(func=command_object.version) command_opt = cfg.SubCommandOpt('command', title='Command', help='Available commands', handler=add_command_parsers) CONF.register_cli_opt(command_opt) def main(): config = migration.get_alembic_config() CONF(project='glare') CONF.command.func(config) if __name__ == '__main__': main() glare-0.5.0/glare/cmd/scrubber.py000066400000000000000000000042451317401036700166330ustar00rootroot00000000000000#!/usr/bin/env python # Copyright 2017 - Nokia Networks # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Glare Scrub Service """ import os import sys # If ../glare/__init__.py exists, add ../ to Python search path, so that # it will override what happens to be installed in /usr/(local/)lib/python... possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]), os.pardir, os.pardir)) if os.path.exists(os.path.join(possible_topdir, 'glare', '__init__.py')): sys.path.insert(0, possible_topdir) import eventlet import glance_store from oslo_config import cfg from oslo_log import log as logging from glare.common import config from glare import scrubber eventlet.patcher.monkey_patch(all=False, socket=True, time=True, select=True, thread=True, os=True) CONF = cfg.CONF logging.register_options(CONF) CONF.set_default(name='use_stderr', default=True) def main(): CONF.register_cli_opts(scrubber.scrubber_cmd_cli_opts, group='scrubber') CONF.register_opts(scrubber.scrubber_cmd_opts, group='scrubber') try: config.parse_args() logging.setup(CONF, 'glare') glance_store.register_opts(config.CONF) glance_store.create_stores(config.CONF) glance_store.verify_default_store() app = scrubber.Scrubber() if CONF.scrubber.daemon: server = scrubber.Daemon(CONF.scrubber.wakeup_time) server.start(app) server.wait() else: app.run() except RuntimeError as e: sys.exit("ERROR: %s" % e) if __name__ == '__main__': main() glare-0.5.0/glare/common/000077500000000000000000000000001317401036700151725ustar00rootroot00000000000000glare-0.5.0/glare/common/__init__.py000066400000000000000000000000001317401036700172710ustar00rootroot00000000000000glare-0.5.0/glare/common/config.py000066400000000000000000000116611317401036700170160ustar00rootroot00000000000000# Copyright 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Routines for configuring Glare. """ import logging.config import logging.handlers import os from oslo_config import cfg from oslo_log import log as logging from oslo_policy import policy from paste import deploy from glare.i18n import _ paste_deploy_opts = [ cfg.StrOpt('flavor', help=_('Partial name of a pipeline in your paste configuration ' 'file with the service name removed. For example, if ' 'your paste section name is ' '[pipeline:glare-keystone] use the value ' '"keystone"')), cfg.StrOpt('config_file', help=_('Name of the paste configuration file.')), ] common_opts = [ cfg.StrOpt('digest_algorithm', default='sha256', help=_(""" Digest algorithm to use for digital signature. Provide a string value representing the digest algorithm to use for generating digital signatures. By default, ``sha256`` is used. To get a list of the available algorithms supported by the version of OpenSSL on your platform, run the command: ``openssl list-message-digest-algorithms``. Examples are 'sha1', 'sha256', and 'sha512'. Possible values: * An OpenSSL message digest algorithm identifier Relation options: * None """)), ] CONF = cfg.CONF CONF.register_opts(paste_deploy_opts, group='paste_deploy') CONF.register_opts(common_opts) policy.Enforcer(CONF) def parse_args(args=None, usage=None, default_config_files=None): CONF(args=args, project='glare', usage=usage, default_config_files=default_config_files) def _get_deployment_flavor(flavor=None): """Retrieve the paste_deploy.flavor config item, formatted appropriately for appending to the application name. :param flavor: if specified, use this setting rather than the paste_deploy.flavor configuration setting """ if not flavor: flavor = CONF.paste_deploy.flavor return '' if not flavor else ('-' + flavor) def _get_paste_config_path(): paste_suffix = '-paste.ini' conf_suffix = '.conf' if CONF.config_file: # Assume paste config is in a paste.ini file corresponding # to the last config file path = CONF.config_file[-1].replace(conf_suffix, paste_suffix) else: path = CONF.prog + paste_suffix return CONF.find_file(os.path.basename(path)) def _get_deployment_config_file(): """Retrieve the deployment_config_file config item, formatted as an absolute pathname. """ path = CONF.paste_deploy.config_file if not path: path = _get_paste_config_path() if not path: msg = _("Unable to locate paste config file for %s.") % CONF.prog raise RuntimeError(msg) return os.path.abspath(path) def load_paste_app(app_name, flavor=None, conf_file=None): """Builds and returns a WSGI app from a paste config file. We assume the last config file specified in the supplied ConfigOpts object is the paste config file, if conf_file is None. :param app_name: name of the application to load :param flavor: name of the variant of the application to load :param conf_file: path to the paste config file :raises: RuntimeError when config file cannot be located or application cannot be loaded from config file """ # append the deployment flavor to the application name, # in order to identify the appropriate paste pipeline app_name += _get_deployment_flavor(flavor) if not conf_file: conf_file = _get_deployment_config_file() logger = logging.getLogger(__name__) try: logger.debug("Loading %(app_name)s from %(conf_file)s", {'conf_file': conf_file, 'app_name': app_name}) app = deploy.loadapp("config:%s" % conf_file, name=app_name) # Log the options used when starting if we're in debug mode... if CONF.debug: CONF.log_opt_values(logger, logging.DEBUG) return app except (LookupError, ImportError) as e: msg = (_("Unable to load %(app_name)s from " "configuration file %(conf_file)s." "\nGot: %(e)r") % {'app_name': app_name, 'conf_file': conf_file, 'e': e}) logger.error(msg) raise RuntimeError(msg) glare-0.5.0/glare/common/exception.py000066400000000000000000000116521317401036700175470ustar00rootroot00000000000000# Copyright 2016 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_log import log as logging import six from glare.i18n import _ LOG = logging.getLogger(__name__) class GlareException(Exception): """Base Glare Exception class. To correctly use this class, inherit from it and define a 'message' property. That message will get printf'd with the keyword arguments provided to the constructor. """ message = _("An unknown exception occurred") def __init__(self, message=None, **kwargs): if message: self.message = message self.kwargs = kwargs if self.kwargs: self.message = self.message % kwargs LOG.error(self.message) super(GlareException, self).__init__(self.message) def __unicode__(self): return six.text_type(self.message) class BadRequest(GlareException): message = _("Bad request") class InvalidParameterValue(BadRequest): message = _("Invalid filter value ") class InvalidFilterOperatorValue(BadRequest): msg = _("Unable to filter by unknown operator.") class InvalidVersion(GlareException): message = _("Provided version is invalid") class NotAcceptable(GlareException): message = _("Not acceptable") class InvalidGlobalAPIVersion(NotAcceptable): message = _("Version %(req_ver)s is not supported by the API. Minimum " "is %(min_ver)s and maximum is %(max_ver)s.") class VersionNotFoundForAPIMethod(GlareException): message = _("API version %(version)s is not supported on this method.") class ApiVersionsIntersect(GlareException): message = _("Version of %(name)s %(min_ver)s %(max_ver)s intersects " "with another versions.") class Unauthorized(GlareException): message = _('You are not authenticated') class Forbidden(GlareException): message = _("You are not authorized to complete this action.") class PolicyException(Forbidden): message = _("Policy check for %(policy_name)s " "failed with user credentials.") class NotFound(GlareException): message = _("An object with the specified identifier was not found.") class TypeNotFound(NotFound): message = _("Glare type with name '%(name)s' was not found.") class IncorrectArtifactType(GlareException): message = _("Artifact type is incorrect: %(explanation)s") class ArtifactNotFound(NotFound): message = _("Artifact with type name '%(type_name)s' and id '%(id)s' was " "not found.") class RequestTimeout(GlareException): message = _("The client did not produce a request within the time " "that the server was prepared to wait.") class Conflict(GlareException): message = _("The request could not be completed due to a conflict " "with the current state of the resource.") class Gone(GlareException): message = _("The requested resource is no longer available at the " "server and no forwarding address is known.") class PreconditionFailed(GlareException): message = _("The precondition given in one or more of the request-header " "fields evaluated to false when it was tested on the server.") class RequestEntityTooLarge(GlareException): message = _("The server is refusing to process a request because the " "request entity is larger than the server is willing or " "able to process.") class RequestRangeNotSatisfiable(GlareException): message = _("The request included a Range request-header field, and none " "of the range-specifier values in this field overlap the " "current extent of the selected resource, and the request " "did not include an If-Range request-header field.") class Locked(GlareException): message = _('The resource is locked.') class FailedDependency(GlareException): message = _('The method could not be performed because the requested ' 'action depended on another action and that action failed.') class UnsupportedMediaType(GlareException): message = _("Unsupported media type.") class SIGHUPInterrupt(GlareException): message = _("System SIGHUP signal received.") class WorkerCreationFailure(GlareException): message = _("Server worker creation failed: %(reason)s.") class DBNotAllowed(GlareException): msg_fmt = _('This operation is not allowed with current DB') glare-0.5.0/glare/common/policy.py000066400000000000000000000137261317401036700170540ustar00rootroot00000000000000# Copyright 2011-2016 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Glare policy operations inspired by Nova implementation.""" from oslo_config import cfg from oslo_log import log as logging from oslo_policy import policy from glare.common import exception CONF = cfg.CONF LOG = logging.getLogger(__name__) _ENFORCER = None artifact_policy_rules = [ policy.RuleDefault('context_is_admin', 'role:admin'), policy.RuleDefault('admin_or_owner', 'is_admin:True or project_id:%(owner)s'), policy.RuleDefault("artifact:type_list", "", "Policy to request list of artifact types"), policy.RuleDefault("artifact:create", "", "Policy to create artifact."), policy.RuleDefault("artifact:update_public", "'public':%(visibility)s and rule:context_is_admin " "or not 'public':%(visibility)s", "Policy to update public artifact"), policy.RuleDefault("artifact:update", "rule:admin_or_owner and " "rule:artifact:update_public", "Policy to update artifact"), policy.RuleDefault("artifact:activate", "rule:admin_or_owner", "Policy to activate artifact"), policy.RuleDefault("artifact:reactivate", "rule:context_is_admin", "Policy to reactivate artifact"), policy.RuleDefault("artifact:deactivate", "rule:context_is_admin", "Policy to update artifact"), policy.RuleDefault("artifact:publish", "rule:context_is_admin", "Policy to publish artifact"), policy.RuleDefault("artifact:get", "", "Policy to get artifact definition"), policy.RuleDefault("artifact:list", "", "Policy to list artifacts"), policy.RuleDefault("artifact:delete_public", "'public':%(visibility)s and rule:context_is_admin " "or not 'public':%(visibility)s", "Policy to delete public artifacts"), policy.RuleDefault("artifact:delete_deactivated", "'deactivated':%(status)s and rule:context_is_admin " "or not 'deactivated':%(status)s", "Policy to delete deactivated artifacts"), policy.RuleDefault("artifact:delete", "rule:admin_or_owner and " "rule:artifact:delete_public and " "rule:artifact:delete_deactivated", "Policy to delete artifacts"), policy.RuleDefault("artifact:set_location", "rule:admin_or_owner", "Policy to set custom location for artifact blob"), policy.RuleDefault("artifact:set_internal_location", "rule:context_is_admin", "Policy to set internal location for artifact blob"), policy.RuleDefault("artifact:upload", "rule:admin_or_owner", "Policy to upload blob for artifact"), policy.RuleDefault("artifact:download_deactivated", "'deactivated':%(status)s and rule:context_is_admin " "or not 'deactivated':%(status)s", "Policy to download blob from deactivated artifact"), policy.RuleDefault("artifact:download", "rule:admin_or_owner and " "rule:artifact:download_deactivated", "Policy to download blob from artifact"), policy.RuleDefault("artifact:delete_blob", "rule:admin_or_owner", "Policy to delete blob with external location " "from artifact"), policy.RuleDefault("artifact:set_quotas", "rule:context_is_admin", "Policy to set quotas for projects"), policy.RuleDefault("artifact:list_all_quotas", "rule:context_is_admin", "Policy to list all quotas for all projects"), policy.RuleDefault("artifact:list_project_quotas", "project_id:%(project_id)s or rule:context_is_admin", "Policy to get info about project quotas"), ] def list_rules(): return artifact_policy_rules def init(use_conf=True): """Init an Enforcer class. """ global _ENFORCER if not _ENFORCER: _ENFORCER = policy.Enforcer(CONF, use_conf=use_conf) _ENFORCER.register_defaults(list_rules()) return _ENFORCER def reset(): global _ENFORCER if _ENFORCER: _ENFORCER.clear() _ENFORCER = None def authorize(policy_name, target, context, do_raise=True): """Method checks that user action can be executed according to policies. :param policy_name: policy name :param target: :param do_raise :param context: :return: True if check passed """ creds = context.to_policy_values() result = init().authorize( policy_name, target, creds, do_raise=do_raise, exc=exception.PolicyException, policy_name=policy_name) LOG.debug("Policy %(policy)s check %(result)s for request %(request_id)s", {'policy': policy_name, 'result': 'passed' if result else 'failed', 'request_id': context.request_id}) return result def check_is_admin(context): """Whether or not roles contains 'admin' role according to policy setting. """ return authorize('context_is_admin', {}, context, do_raise=False) glare-0.5.0/glare/common/semver_db.py000066400000000000000000000137371317401036700175250ustar00rootroot00000000000000# Copyright (c) 2015 Mirantis, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import operator import semantic_version from sqlalchemy.orm.properties import CompositeProperty from sqlalchemy import sql from glare.common import exception from glare.i18n import _ MAX_COMPONENT_LENGTH = pow(2, 16) - 1 MAX_NUMERIC_PRERELEASE_LENGTH = 6 class DBVersion(object): def __init__(self, components_long, prerelease, build): """Creates a DBVersion object out of 3 component fields. This initializer is supposed to be called from SQLAlchemy if 3 database columns are mapped to this composite field. :param components_long: a 64-bit long value, containing numeric components of the version :param prerelease: a prerelease label of the version, optionally preformatted with leading zeroes in numeric-only parts of the label :param build: a build label of the version """ version_string = '%s.%s.%s' % _long_to_components(components_long) if prerelease: version_string += '-' + _strip_leading_zeroes_from_prerelease( prerelease) if build: version_string += '+' + build self.version = semantic_version.Version(version_string) def __repr__(self): return str(self.version) def __eq__(self, other): return (isinstance(other, DBVersion) and other.version == self.version) def __ne__(self, other): return (not isinstance(other, DBVersion) or self.version != other.version) def __composite_values__(self): long_version = _version_to_long(self.version) prerelease = _add_leading_zeroes_to_prerelease(self.version.prerelease) build = '.'.join(self.version.build) if self.version.build else None return long_version, prerelease, build def parse(version_string): version = semantic_version.Version.coerce(version_string) return DBVersion(_version_to_long(version), '.'.join(version.prerelease), '.'.join(version.build)) def _check_limit(value): if value > MAX_COMPONENT_LENGTH: message = _("Version component is too " "large (%d max)") % MAX_COMPONENT_LENGTH raise exception.InvalidVersion(message) def _version_to_long(version): """Converts the numeric part of the semver version into the 64-bit long value using the following logic: * major version is stored in first 16 bits of the value * minor version is stored in next 16 bits * patch version is stored in following 16 bits * next 2 bits are used to store the flag: if the version has pre-release label then these bits are 00, otherwise they are 11. Intermediate values of the flag (01 and 10) are reserved for future usage. * last 14 bits of the value are reserved for future usage The numeric components of version are checked so their value does not exceed 16 bits. :param version: a semantic_version.Version object """ _check_limit(version.major) _check_limit(version.minor) _check_limit(version.patch) major = version.major << 48 minor = version.minor << 32 patch = version.patch << 16 flag = 0 if version.prerelease else 2 flag <<= 14 return major | minor | patch | flag def _long_to_components(value): major = value >> 48 minor = (value - (major << 48)) >> 32 patch = (value - (major << 48) - (minor << 32)) >> 16 return str(major), str(minor), str(patch) def _add_leading_zeroes_to_prerelease(label_tuple): if label_tuple is None: return None res = [] for component in label_tuple: if component.isdigit(): if len(component) > MAX_NUMERIC_PRERELEASE_LENGTH: message = _("Prerelease numeric component is too large " "(%d characters " "max)") % MAX_NUMERIC_PRERELEASE_LENGTH raise exception.InvalidVersion(message) res.append(component.rjust(MAX_NUMERIC_PRERELEASE_LENGTH, '0')) else: res.append(component) return '.'.join(res) def _strip_leading_zeroes_from_prerelease(string_value): res = [] for component in string_value.split('.'): if component.isdigit(): val = component.lstrip('0') if len(val) == 0: # Corner case: when the component is just '0' val = '0' # it will be stripped completely, so restore it res.append(val) else: res.append(component) return '.'.join(res) strict_op_map = { operator.ge: operator.gt, operator.le: operator.lt } class VersionComparator(CompositeProperty.Comparator): def _get_comparison(self, values, op): columns = self.__clause_element__().clauses if op in strict_op_map: stricter_op = strict_op_map[op] else: stricter_op = op return sql.or_(stricter_op(columns[0], values[0]), sql.and_(columns[0] == values[0], op(columns[1], values[1]))) def __gt__(self, other): return self._get_comparison(other.__composite_values__(), operator.gt) def __ge__(self, other): return self._get_comparison(other.__composite_values__(), operator.ge) def __lt__(self, other): return self._get_comparison(other.__composite_values__(), operator.lt) def __le__(self, other): return self._get_comparison(other.__composite_values__(), operator.le) glare-0.5.0/glare/common/store_api.py000066400000000000000000000143551317401036700175410ustar00rootroot00000000000000# Copyright (c) 2016 Mirantis, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from glance_store import backend from glance_store import exceptions as store_exc from oslo_config import cfg from oslo_log import log as logging from glare.common import exception from glare.common import utils from glare.store import database CONF = cfg.CONF LOG = logging.getLogger(__name__) database_api = database.DatabaseStoreAPI() # we restrict several schemes because of potential security bugs. # read more about the bugs here: # https://bugs.launchpad.net/glance/+bug/942118 # https://bugs.launchpad.net/glance/+bug/1400966 # https://bugs.launchpad.net/glance/+bug/1334196 RESTRICTED_URI_SCHEMES = ('file', 'filesystem', 'swift+config', 'sql') error_map = [{'catch': store_exc.NotFound, 'raise': exception.NotFound}, {'catch': store_exc.UnknownScheme, 'raise': exception.BadRequest}, {'catch': store_exc.BadStoreUri, 'raise': exception.BadRequest}, {'catch': store_exc.Duplicate, 'raise': exception.Conflict}, {'catch': store_exc.StorageFull, 'raise': exception.Forbidden}, {'catch': store_exc.StorageWriteDenied, 'raise': exception.Forbidden}, {'catch': store_exc.Forbidden, 'raise': exception.Forbidden}, {'catch': store_exc.Invalid, 'raise': exception.BadRequest}, {'catch': store_exc.BadStoreConfiguration, 'raise': exception.GlareException}, {'catch': store_exc.RemoteServiceUnavailable, 'raise': exception.BadRequest}, {'catch': store_exc.HasSnapshot, 'raise': exception.Conflict}, {'catch': store_exc.InUseByStore, 'raise': exception.Conflict}, {'catch': store_exc.BackendException, 'raise': exception.GlareException}, {'catch': store_exc.GlanceStoreException, 'raise': exception.GlareException}] @utils.error_handler(error_map) def save_blob_to_store(blob_id, blob, context, max_size, store_type=None, verifier=None): """Save file to specified store type and return location info to the user. :param store_type: type of the store, None means save to default store. :param blob_id: id of blob :param blob: blob file iterator :param context: user context :param verifier:signature verified :return: tuple of values: (location_uri, size, checksums) """ data = utils.LimitingReader(utils.CooperativeReader(blob), max_size) LOG.debug('Start uploading blob %s.', blob_id) if store_type == 'database': location = database_api.add_to_backend( blob_id, data, context, verifier) else: (location, size, md5checksum, __) = backend.add_to_backend( CONF, blob_id, data, 0, store_type, context, verifier) LOG.debug('Uploading of blob %s is finished.', blob_id) checksums = {"md5": data.md5.hexdigest(), "sha1": data.sha1.hexdigest(), "sha256": data.sha256.hexdigest()} return location, data.bytes_read, checksums @utils.error_handler(error_map) def save_blobs_to_store(blobs, context, max_size, store_type=None, verifier=None): """Save several files to specified store. :param store_type: type of the store, None means save to default store. :param blobs: list of tuples (blob_data_id, data) :param context: user context :param verifier:signature verified :return: dict {blob_data_id: (location_uri, size, checksums)} """ # wrap data in CooperativeReader blobs = [(blob_data_id, utils.LimitingReader(utils.CooperativeReader(data), max_size)) for (blob_data_id, data) in blobs] if store_type == 'database': locations = database_api.add_to_backend_batch(blobs, context, verifier) else: locations = [] for blob_data_id, data in blobs: (location, __, __, __) = backend.add_to_backend( CONF, blob_data_id, data, 0, store_type, context, verifier) locations.append(location) # combine location, size and checksums together res = {} for i in range(len(locations)): data = blobs[i][1] checksums = {"md5": data.md5.hexdigest(), "sha1": data.sha1.hexdigest(), "sha256": data.sha256.hexdigest()} res[blobs[i][0]] = (locations[i], data.bytes_read, checksums) return res @utils.error_handler(error_map) def load_from_store(uri, context): """Load file from store backend. :param uri: blob uri :param context: user context :return: file iterator """ if uri.startswith("sql://"): return utils.BlobIterator( database_api.get_from_store(uri, context)) return backend.get_from_backend(uri=uri, context=context)[0] @utils.error_handler(error_map) def delete_blob(uri, context): """Delete blob from backend store. :param uri: blob uri :param context: user context """ if uri.startswith("sql://"): return database_api.delete_from_store(uri, context) return backend.delete_from_backend(uri, context) def get_known_schemes(): return list(backend.get_known_schemes()) + ['sql'] def read_data(flobj, limit=16777216): """Read data into memory from the file-like object. :param flobj: file-like object that contains data :param limit: max file size that can be read into memory :return: string with data from the object """ bytes_read = 0 data = b'' for chunk in flobj: bytes_read += len(chunk) if bytes_read > limit: raise exception.RequestEntityTooLarge() data += chunk return data glare-0.5.0/glare/common/utils.py000066400000000000000000000562651317401036700167220ustar00rootroot00000000000000# Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # Copyright 2014 SoftLayer Technologies, Inc. # Copyright 2015 Mirantis, Inc # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ System-level utilities and helper functions. """ try: from eventlet import sleep except ImportError: from time import sleep from eventlet.green import socket import hashlib import os import re import glance_store from OpenSSL import crypto from oslo_config import cfg from oslo_log import log as logging from oslo_utils import encodeutils from oslo_utils import excutils from oslo_utils import timeutils from oslo_utils import uuidutils from oslo_versionedobjects import fields import requests import six from glare.common import exception from glare.i18n import _ from glare.objects.meta import fields as glare_fields CONF = cfg.CONF LOG = logging.getLogger(__name__) GLARE_TEST_SOCKET_FD_STR = 'GLARE_TEST_SOCKET_FD' def cooperative_iter(iter): """Return an iterator which schedules after each iteration. This can prevent eventlet thread starvation. :param iter: an iterator to wrap """ try: for chunk in iter: sleep(0) yield chunk except Exception as err: with excutils.save_and_reraise_exception(): LOG.error("Error: cooperative_iter exception %s", err) def cooperative_read(fd): """Wrap a file descriptor's read with a partial function which schedules after each read. This can prevent eventlet thread starvation. :param fd: a file descriptor to wrap """ def readfn(*args): result = fd.read(*args) sleep(0) return result return readfn MAX_COOP_READER_BUFFER_SIZE = 134217728 # 128M seems like a sane buffer limit class CooperativeReader(object): """An eventlet thread friendly class for reading in blob data. When accessing data either through the iterator or the read method we perform a sleep to allow a co-operative yield. When there is more than one blob being uploaded/downloaded this prevents eventlet thread starvation, ie allows all threads to be scheduled periodically rather than having the same thread be continuously active. """ def __init__(self, fd): """:param fd: Underlying blob file object """ self.fd = fd self.iterator = None # NOTE(markwash): if the underlying supports read(), overwrite the # default iterator-based implementation with cooperative_read which # is more straightforward if hasattr(fd, 'read'): self.read = cooperative_read(fd) else: self.iterator = None self.buffer = b'' self.position = 0 def read(self, length=None): """Return the requested amount of bytes, fetching the next chunk of the underlying iterator when needed. This is replaced with cooperative_read in __init__ if the underlying fd already supports read(). """ if length is None: if len(self.buffer) - self.position > 0: # if no length specified but some data exists in buffer, # return that data and clear the buffer result = self.buffer[self.position:] self.buffer = b'' self.position = 0 return str(result) else: # otherwise read the next chunk from the underlying iterator # and return it as a whole. Reset the buffer, as subsequent # calls may specify the length try: if self.iterator is None: self.iterator = self.__iter__() return next(self.iterator) except StopIteration: return '' finally: self.buffer = b'' self.position = 0 else: result = bytearray() while len(result) < length: if self.position < len(self.buffer): to_read = length - len(result) chunk = self.buffer[self.position:self.position + to_read] result.extend(chunk) # This check is here to prevent potential OOM issues if # this code is called with unreasonably high values of read # size. Currently it is only called from the HTTP clients # of Glare backend stores, which use httplib for data # streaming, which has readsize hardcoded to 8K, so this # check should never fire. Regardless it still worths to # make the check, as the code may be reused somewhere else. if len(result) >= MAX_COOP_READER_BUFFER_SIZE: raise exception.RequestEntityTooLarge() self.position += len(chunk) else: try: if self.iterator is None: self.iterator = self.__iter__() self.buffer = next(self.iterator) self.position = 0 except StopIteration: self.buffer = b'' self.position = 0 return bytes(result) return bytes(result) def __iter__(self): return cooperative_iter(self.fd.__iter__()) class LimitingReader(object): """Reader designed to fail when reading blob data past the configured allowable amount. """ def __init__(self, data, limit): """ :param data: Underlying blob data object :param limit: maximum number of bytes the reader should allow """ self.data = data self.limit = limit self.bytes_read = 0 self.md5 = hashlib.md5() self.sha1 = hashlib.sha1() self.sha256 = hashlib.sha256() def __iter__(self): for chunk in self.data: self.bytes_read += len(chunk) if self.bytes_read > self.limit: raise exception.RequestEntityTooLarge() else: yield chunk def read(self, length=None): res = self.data.read() if length is None else self.data.read(length) len_result = len(res) self.bytes_read += len_result if len_result: self.md5.update(res) self.sha1.update(res) self.sha256.update(res) if self.bytes_read > self.limit: message = _("The server is refusing to process a request because" " the request entity is larger than the server is" " willing or able to process - %s bytes.") % self.limit raise exception.RequestEntityTooLarge(message=message) return res def validate_key_cert(key_file, cert_file): try: error_key_name = "private key" error_filename = key_file with open(key_file, 'r') as keyfile: key_str = keyfile.read() key = crypto.load_privatekey(crypto.FILETYPE_PEM, key_str) error_key_name = "certificate" error_filename = cert_file with open(cert_file, 'r') as certfile: cert_str = certfile.read() cert = crypto.load_certificate(crypto.FILETYPE_PEM, cert_str) except IOError as ioe: raise RuntimeError(_("There is a problem with your %(error_key_name)s " "%(error_filename)s. Please verify it." " Error: %(ioe)s") % {'error_key_name': error_key_name, 'error_filename': error_filename, 'ioe': ioe}) except crypto.Error as ce: raise RuntimeError(_("There is a problem with your %(error_key_name)s " "%(error_filename)s. Please verify it. OpenSSL" " error: %(ce)s") % {'error_key_name': error_key_name, 'error_filename': error_filename, 'ce': ce}) try: data = uuidutils.generate_uuid() # On Python 3, explicitly encode to UTF-8 to call crypto.sign() which # requires bytes. Otherwise, it raises a deprecation warning (and # will raise an error later). data = encodeutils.to_utf8(data) digest = CONF.digest_algorithm if digest == 'sha1': LOG.warning( 'The FIPS (FEDERAL INFORMATION PROCESSING STANDARDS)' ' state that the SHA-1 is not suitable for' ' general-purpose digital signature applications (as' ' specified in FIPS 186-3) that require 112 bits of' ' security. The default value is sha1 in Kilo for a' ' smooth upgrade process, and it will be updated' ' with sha256 in next release(L).') out = crypto.sign(key, data, digest) crypto.verify(cert, out, data, digest) except crypto.Error as ce: raise RuntimeError(_("There is a problem with your key pair. " "Please verify that cert %(cert_file)s and " "key %(key_file)s belong together. OpenSSL " "error %(ce)s") % {'cert_file': cert_file, 'key_file': key_file, 'ce': ce}) def get_test_suite_socket(): global GLARE_TEST_SOCKET_FD_STR if GLARE_TEST_SOCKET_FD_STR in os.environ: fd = int(os.environ[GLARE_TEST_SOCKET_FD_STR]) sock = socket.fromfd(fd, socket.AF_INET, socket.SOCK_STREAM) if six.PY2: sock = socket.SocketType(_sock=sock) sock.listen(CONF.backlog) del os.environ[GLARE_TEST_SOCKET_FD_STR] os.close(fd) return sock return None try: REGEX_4BYTE_UNICODE = re.compile(u'[\U00010000-\U0010ffff]') except re.error: # UCS-2 build case REGEX_4BYTE_UNICODE = re.compile(u'[\uD800-\uDBFF][\uDC00-\uDFFF]') def no_4byte_params(f): """Checks that no 4 byte unicode characters are allowed in dicts' keys/values and string's parameters. """ def wrapper(*args, **kwargs): def _is_match(some_str): return (isinstance(some_str, six.text_type) and REGEX_4BYTE_UNICODE.findall(some_str) != []) def _check_dict(data_dict): # a dict of dicts has to be checked recursively for key, value in data_dict.items(): if isinstance(value, dict): _check_dict(value) else: if _is_match(key): msg = _("Property names can't contain 4 byte unicode.") raise exception.BadRequest(msg) if _is_match(value): msg = (_("%s can't contain 4 byte unicode characters.") % key.title()) raise exception.BadRequest(msg) for data_dict in [arg for arg in args if isinstance(arg, dict)]: _check_dict(data_dict) # now check args for str values for arg in args: if _is_match(arg): msg = _("Param values can't contain 4 byte unicode.") raise exception.BadRequest(msg) # check kwargs as well, as params are passed as kwargs via # registry calls _check_dict(kwargs) return f(*args, **kwargs) return wrapper def stash_conf_values(): """Make a copy of some of the current global CONF's settings. Allows determining if any of these values have changed when the config is reloaded. """ conf = { 'bind_host': CONF.bind_host, 'bind_port': CONF.bind_port, 'tcp_keepidle': CONF.cert_file, 'backlog': CONF.backlog, 'key_file': CONF.key_file, 'cert_file': CONF.cert_file, 'enabled_artifact_types': CONF.enabled_artifact_types, 'custom_artifact_types_modules': CONF.custom_artifact_types_modules } return conf def split_filter_op(expression): """Split operator from threshold in an expression. Designed for use on a comparative-filtering query field. When no operator is found, default to an equality comparison. :param expression: the expression to parse :return: a tuple (operator, threshold) parsed from expression """ left, sep, right = expression.partition(':') if sep: # If the expression is a date of the format ISO 8601 like # CCYY-MM-DDThh:mm:ss+hh:mm and has no operator, it should # not be partitioned, and a default operator of eq should be # assumed. try: timeutils.parse_isotime(expression) op = 'eq' threshold = expression except ValueError: op = left threshold = right else: op = 'eq' # default operator threshold = left # NOTE stevelle decoding escaped values may be needed later return op, threshold def validate_quotes(value): """Validate filter values Validation opening/closing quotes in the expression. """ open_quotes = True for i in range(len(value)): if value[i] == '"': if i and value[i - 1] == '\\': continue if open_quotes: if i and value[i - 1] != ',': msg = _("Invalid filter value %s. There is no comma " "before opening quotation mark.") % value raise exception.InvalidParameterValue(message=msg) else: if i + 1 != len(value) and value[i + 1] != ",": msg = _("Invalid filter value %s. There is no comma " "after closing quotation mark.") % value raise exception.InvalidParameterValue(message=msg) open_quotes = not open_quotes if not open_quotes: msg = _("Invalid filter value %s. The quote is not closed.") % value raise exception.InvalidParameterValue(message=msg) def split_filter_value_for_quotes(value): """Split filter values Split values by commas and quotes for 'in' operator, according api-wg. """ validate_quotes(value) tmp = re.compile(r''' "( # if found a double-quote [^\"\\]* # take characters either non-quotes or backslashes (?:\\. # take backslashes and character after it [^\"\\]*)* # take characters either non-quotes or backslashes ) # before double-quote ",? # a double-quote with comma maybe | ([^,]+),? # if not found double-quote take any non-comma # characters with comma maybe | , # if we have only comma take empty string ''', re.VERBOSE) return [val[0] or val[1] for val in re.findall(tmp, value)] class error_handler(object): def __init__(self, error_map, default_exception=None): """Init method of the class. :param error_map: dict of exception that can be raised in func and exceptions that must be raised for these exceptions. For example, if sqlalchemy NotFound might be raised and we need re-raise it as glare NotFound exception then error_map must contain {"catch": SQLAlchemyNotFound, "raise": exceptions.NotFound} :param default_exception: default exception that must be raised if exception that cannot be found in error map was raised :return: func """ self.error_map = error_map self.default_exception = default_exception def __call__(self, f): """Decorator that catches exception that came from func or method. :param f: target func """ def new_function(*args, **kwargs): try: return f(*args, **kwargs) except Exception as e: for map_record in self.error_map: if isinstance(e, map_record['catch']): raise map_record['raise'](str(e)) else: if self.default_exception: raise self.default_exception(str(e)) else: raise return new_function def get_schema_type(attr): if isinstance(attr, fields.IntegerField) or attr is fields.Integer: return 'integer' elif isinstance(attr, fields.FloatField) or attr is fields.Float: return 'number' elif isinstance(attr, fields.FlexibleBooleanField) \ or attr is fields.FlexibleBoolean: return 'boolean' elif isinstance(attr, glare_fields.List): return 'array' elif isinstance(attr, (glare_fields.Dict, glare_fields.BlobField)): return 'object' return 'string' def get_glare_type(attr): if isinstance(attr, fields.IntegerField): return 'Integer' elif isinstance(attr, fields.FloatField): return 'Float' elif isinstance(attr, fields.FlexibleBooleanField): return 'Boolean' elif isinstance(attr, fields.DateTimeField): return 'DateTime' elif isinstance(attr, glare_fields.BlobField): return 'Blob' elif isinstance(attr, glare_fields.Link): return 'Link' elif isinstance(attr, glare_fields.List): return _get_element_type(attr.element_type) + 'List' elif isinstance(attr, glare_fields.Dict): return _get_element_type(attr.element_type) + 'Dict' return 'String' def _get_element_type(element_type): if element_type is fields.FlexibleBooleanField: return 'Boolean' elif element_type is fields.Integer: return 'Integer' elif element_type is fields.Float: return 'Float' elif element_type is glare_fields.BlobFieldType: return 'Blob' elif element_type is glare_fields.LinkFieldType: return 'Link' return 'String' class BlobIterator(object): """Reads data from a blob, one chunk at a time. """ def __init__(self, data, chunk_size=65536): self.chunk_size = chunk_size self.data = data def __iter__(self): bytes_left = len(self.data) i = 0 while bytes_left > 0: data = self.data[i * self.chunk_size:(i + 1) * self.chunk_size] bytes_left -= len(data) yield data raise StopIteration() def validate_status_transition(af, from_status, to_status): if from_status == 'deleted': msg = _("Cannot change status if artifact is deleted.") raise exception.Forbidden(msg) if to_status == 'active': if from_status == 'drafted': for name, type_obj in af.fields.items(): if type_obj.required_on_activate and getattr(af, name) is None: msg = _("'%s' field value must be set before " "activation.") % name raise exception.Forbidden(msg) elif to_status == 'drafted': if from_status != 'drafted': msg = _("Cannot change status to 'drafted'") % from_status raise exception.Forbidden(msg) elif to_status == 'deactivated': if from_status not in ('active', 'deactivated'): msg = _("Cannot deactivate artifact if it's not active.") raise exception.Forbidden(msg) elif to_status == 'deleted': msg = _("Cannot delete artifact with PATCH requests. Use special " "API to do this.") raise exception.Forbidden(msg) else: msg = _("Unknown artifact status: %s.") % to_status raise exception.BadRequest(msg) def validate_visibility_transition(af, from_visibility, to_visibility): if to_visibility == 'private': if from_visibility != 'private': msg = _("Cannot make artifact private again.") raise exception.Forbidden() elif to_visibility == 'public': if af.status != 'active': msg = _("Cannot change visibility to 'public' if artifact" " is not active.") raise exception.Forbidden(msg) else: msg = _("Unknown artifact visibility: %s.") % to_visibility raise exception.BadRequest(msg) def validate_change_allowed(af, field_name): """Validate if fields can be set for the artifact.""" if field_name not in af.fields: msg = _("Cannot add new field '%s' to artifact.") % field_name raise exception.BadRequest(msg) if af.status not in ('active', 'drafted'): msg = _("Forbidden to change fields " "if artifact is not active or drafted.") raise exception.Forbidden(message=msg) if af.fields[field_name].system is True: msg = _("Forbidden to specify system field %s. It is not " "available for modifying by users.") % field_name raise exception.Forbidden(msg) if af.status == 'active' and not af.fields[field_name].mutable: msg = (_("Forbidden to change field '%s' after activation.") % field_name) raise exception.Forbidden(message=msg) def initialize_glance_store(): """Initialize glance store.""" glance_store.register_opts(CONF) set_glance_store_config_defaults() glance_store.create_stores(CONF) glance_store.verify_default_store() def set_glance_store_config_defaults(): # By default glance and glare share common place to store data. # To prevent possible collisions we have to set other glance_store default # values for various backends. cfg.set_defaults(glance_store.backend._list_opts()[1][1], # default '/var/lib/glance/images filesystem_store_datadir='/var/lib/glare/artifacts', # default 'images' rbd_store_pool='artifacts', # default '/openstack_glance vmware_store_image_dir='/openstack_glare', # default 'glance' swift_store_container='glare') def get_system_ca_file(): """Return path to system default CA file.""" # Standard CA file locations for Debian/Ubuntu, RedHat/Fedora, # Suse, FreeBSD/OpenBSD, MacOSX, and the bundled ca ca_path = ['/etc/ssl/certs/ca-certificates.crt', '/etc/pki/tls/certs/ca-bundle.crt', '/etc/ssl/ca-bundle.pem', '/etc/ssl/cert.pem', '/System/Library/OpenSSL/certs/cacert.pem', requests.certs.where()] for ca in ca_path: LOG.debug("Looking for ca file %s", ca) if os.path.exists(ca): LOG.debug("Using ca file %s", ca) return ca LOG.warning("System ca file could not be found.") glare-0.5.0/glare/common/wsgi.py000066400000000000000000000760131317401036700165240ustar00rootroot00000000000000# Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # Copyright 2010 OpenStack Foundation # Copyright 2014 IBM Corp. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Utility methods for working with WSGI servers """ from __future__ import print_function import errno import functools import os import signal import sys import time import eventlet from eventlet.green import socket from eventlet.green import ssl import eventlet.greenio import eventlet.wsgi from oslo_concurrency import processutils from oslo_config import cfg from oslo_log import log as logging from oslo_serialization import jsonutils from oslo_utils import encodeutils from oslo_utils import strutils from osprofiler import opts as profiler_opts import routes import routes.middleware import six import webob.dec import webob.exc from webob import multidict from glare.common import exception as glare_exc from glare.common import utils from glare import i18n from glare.i18n import _ bind_opts = [ cfg.HostAddressOpt('bind_host', default='0.0.0.0', help=_('Address to bind the server. Useful when ' 'selecting a particular network interface.')), cfg.PortOpt('bind_port', help=_('The port on which the server will listen.')), ] socket_opts = [ cfg.IntOpt('backlog', default=4096, help=_('The backlog value that will be used when creating the ' 'TCP listener socket.')), cfg.IntOpt('tcp_keepidle', default=600, help=_('The value for the socket option TCP_KEEPIDLE. This is ' 'the time in seconds that the connection must be idle ' 'before TCP starts sending keepalive probes.')), cfg.StrOpt('ca_file', help=_('CA certificate file to use to verify ' 'connecting clients.')), cfg.StrOpt('cert_file', help=_('Certificate file to use when starting API ' 'server securely.')), cfg.StrOpt('key_file', help=_('Private key file to use when starting API ' 'server securely.')), ] eventlet_opts = [ cfg.IntOpt('workers', default=0, min=0, help=_('The number of child process workers that will be ' 'created to service requests. The default will be ' 'equal to the number of CPUs available.')), cfg.IntOpt('max_header_line', default=16384, min=0, help=_('Maximum line size of message headers to be accepted. ' 'max_header_line may need to be increased when using ' 'large tokens (typically those generated by the ' 'Keystone v3 API with big service catalogs')), cfg.BoolOpt('http_keepalive', default=True, help=_('If False, server will return the header ' '"Connection: close", ' 'If True, server will return "Connection: Keep-Alive" ' 'in its responses. In order to close the client socket ' 'connection explicitly after the response is sent and ' 'read successfully by the client, you simply have to ' 'set this option to False when you create a wsgi ' 'server.')), cfg.IntOpt('client_socket_timeout', default=900, min=0, help=_('Timeout for client connections\' socket operations. ' 'If an incoming connection is idle for this number of ' 'seconds it will be closed. A value of \'0\' means ' 'wait forever.')), ] LOG = logging.getLogger(__name__) CONF = cfg.CONF CONF.register_opts(bind_opts) CONF.register_opts(socket_opts) CONF.register_opts(eventlet_opts) profiler_opts.set_defaults(CONF) ASYNC_EVENTLET_THREAD_POOL_LIST = [] def get_num_workers(): """Return the configured number of workers.""" if CONF.workers == 0: # 0 implies the number of CPUs return processutils.get_worker_count() return CONF.workers def get_bind_addr(default_port=None): """Return the host and port to bind to.""" return (CONF.bind_host, CONF.bind_port or default_port) def ssl_wrap_socket(sock): """Wrap an existing socket in SSL :param sock: non-SSL socket to wrap :returns: An SSL wrapped socket """ utils.validate_key_cert(CONF.key_file, CONF.cert_file) ssl_kwargs = { 'server_side': True, 'certfile': CONF.cert_file, 'keyfile': CONF.key_file, 'cert_reqs': ssl.CERT_NONE, } if CONF.ca_file: ssl_kwargs['ca_certs'] = CONF.ca_file ssl_kwargs['cert_reqs'] = ssl.CERT_REQUIRED return ssl.wrap_socket(sock, **ssl_kwargs) def get_socket(default_port): """Bind socket to bind ip:port in conf :param default_port: port to bind to if none is specified in conf :returns: a socket object as returned from socket.listen or ssl.wrap_socket if conf specifies cert_file """ bind_addr = get_bind_addr(default_port) # TODO(jaypipes): eventlet's greened socket module does not actually # support IPv6 in getaddrinfo(). We need to get around this in the # future or monitor upstream for a fix address_family = [ addr[0] for addr in socket.getaddrinfo(bind_addr[0], bind_addr[1], socket.AF_UNSPEC, socket.SOCK_STREAM) if addr[0] in (socket.AF_INET, socket.AF_INET6) ][0] use_ssl = CONF.key_file or CONF.cert_file if use_ssl and (not CONF.key_file or not CONF.cert_file): raise RuntimeError(_("When running server in SSL mode, you must " "specify both a cert_file and key_file " "option value in your configuration file")) sock = utils.get_test_suite_socket() retry_until = time.time() + 30 while not sock and time.time() < retry_until: try: sock = eventlet.listen(bind_addr, backlog=CONF.backlog, family=address_family) except socket.error as err: if err.args[0] != errno.EADDRINUSE: raise eventlet.sleep(0.1) if not sock: raise RuntimeError(_("Could not bind to %(host)s:%(port)s after" " trying for 30 seconds") % {'host': bind_addr[0], 'port': bind_addr[1]}) return sock def set_eventlet_hub(): try: eventlet.hubs.use_hub('poll') except Exception: try: eventlet.hubs.use_hub('selects') except Exception: msg = _("eventlet 'poll' nor 'selects' hubs are available " "on this platform") raise glare_exc.WorkerCreationFailure( reason=msg) def get_asynchronous_eventlet_pool(size=1000): """Return eventlet pool to caller. Also store pools created in global list, to wait on it after getting signal for graceful shutdown. :param size: eventlet pool size :returns: eventlet pool """ global ASYNC_EVENTLET_THREAD_POOL_LIST pool = eventlet.GreenPool(size=size) # Add pool to global ASYNC_EVENTLET_THREAD_POOL_LIST ASYNC_EVENTLET_THREAD_POOL_LIST.append(pool) return pool class Server(object): """Server class to manage multiple WSGI sockets and applications. This class requires initialize_glance_store set to True if glance store needs to be initialized. """ def __init__(self, threads=1000, initialize_glance_store=False): os.umask(0o27) # ensure files are created with the correct privileges self._logger = logging.getLogger("eventlet.wsgi.server") self.threads = threads self.children = set() self.stale_children = set() self.running = True self.initialize_glance_store = initialize_glance_store self.pgid = os.getpid() try: os.setpgid(self.pgid, self.pgid) except OSError: self.pgid = 0 def hup(self, *args): """Reloads configuration files with zero down time """ signal.signal(signal.SIGHUP, signal.SIG_IGN) raise glare_exc.SIGHUPInterrupt def kill_children(self, *args): """Kills the entire process group.""" signal.signal(signal.SIGTERM, signal.SIG_IGN) signal.signal(signal.SIGINT, signal.SIG_IGN) signal.signal(signal.SIGCHLD, signal.SIG_IGN) self.running = False os.killpg(self.pgid, signal.SIGTERM) def start(self, application, default_port): """Run a WSGI server with the given application. :param application: The application to be run in the WSGI server :param default_port: Port to bind to if none is specified in conf """ self.application = application self.default_port = default_port self.configure() self.start_wsgi() def start_wsgi(self): workers = get_num_workers() if workers is None: # Useful for profiling, test, debug etc. self.pool = self.create_pool() self.pool.spawn_n(self._single_run, self.application, self.sock) return else: LOG.info("Starting %d workers", workers) signal.signal(signal.SIGTERM, self.kill_children) signal.signal(signal.SIGINT, self.kill_children) signal.signal(signal.SIGHUP, self.hup) while len(self.children) < workers: self.run_child() def create_pool(self): return get_asynchronous_eventlet_pool(size=self.threads) def _remove_children(self, pid): if pid in self.children: self.children.remove(pid) LOG.info('Removed dead child %s', pid) elif pid in self.stale_children: self.stale_children.remove(pid) LOG.info('Removed stale child %s', pid) else: LOG.warning('Unrecognised child %s', pid) def _verify_and_respawn_children(self, pid, status): if len(self.stale_children) == 0: LOG.debug('No stale children') if os.WIFEXITED(status) and os.WEXITSTATUS(status) != 0: LOG.error('Not respawning child %d, cannot ' 'recover from termination', pid) if not self.children and not self.stale_children: LOG.info('All workers have terminated. Exiting') self.running = False else: if len(self.children) < get_num_workers(): self.run_child() def wait_on_children(self): while self.running: try: pid, status = os.wait() if os.WIFEXITED(status) or os.WIFSIGNALED(status): self._remove_children(pid) self._verify_and_respawn_children(pid, status) except OSError as err: if err.errno not in (errno.EINTR, errno.ECHILD): raise except KeyboardInterrupt: LOG.info('Caught keyboard interrupt. Exiting.') break except glare_exc.SIGHUPInterrupt: self.reload() continue eventlet.greenio.shutdown_safe(self.sock) self.sock.close() LOG.debug('Exited') def configure(self, old_conf=None, has_changed=None): """Apply configuration settings :param old_conf: Cached old configuration settings (if any) :param has_changed: callable to determine if a parameter has changed """ eventlet.wsgi.MAX_HEADER_LINE = CONF.max_header_line self.client_socket_timeout = CONF.client_socket_timeout or None # determine if we need to reload artifact type definitions if old_conf is not None and ( has_changed('enabled_artifact_types') or has_changed('custom_artifact_types_modules')): from glare import engine engine.Engine.registry.reset_registry() engine.Engine.registry.register_all_artifacts() self.configure_socket(old_conf, has_changed) if self.initialize_glance_store: utils.initialize_glance_store() def reload(self): """Reload and re-apply configuration settings Existing child processes are sent a SIGHUP signal and will exit after completing existing requests. New child processes, which will have the updated configuration, are spawned. This allows preventing interruption to the service. """ def _has_changed(old, new, param): old = old.get(param) new = getattr(new, param) return new != old old_conf = utils.stash_conf_values() has_changed = functools.partial(_has_changed, old_conf, CONF) CONF.reload_config_files() os.killpg(self.pgid, signal.SIGHUP) self.stale_children = self.children self.children = set() # Ensure any logging config changes are picked up logging.setup(CONF, 'glare') self.configure(old_conf, has_changed) self.start_wsgi() def wait(self): """Wait until all servers have completed running.""" try: if self.children: self.wait_on_children() else: self.pool.waitall() except KeyboardInterrupt: pass def run_child(self): def child_hup(*args): """Shuts down child processes, existing requests are handled.""" signal.signal(signal.SIGHUP, signal.SIG_IGN) eventlet.wsgi.is_accepting = False self.sock.close() pid = os.fork() if pid == 0: signal.signal(signal.SIGHUP, child_hup) signal.signal(signal.SIGTERM, signal.SIG_DFL) # ignore the interrupt signal to avoid a race whereby # a child worker receives the signal before the parent # and is respawned unnecessarily as a result signal.signal(signal.SIGINT, signal.SIG_IGN) # The child has no need to stash the unwrapped # socket, and the reference prevents a clean # exit on sighup self._sock = None self.run_server() LOG.info('Child %d exiting normally', os.getpid()) # self.pool.waitall() is now called in wsgi's server so # it's safe to exit here sys.exit(0) else: LOG.info('Started child %s', pid) self.children.add(pid) def run_server(self): """Run a WSGI server.""" eventlet.wsgi.HttpProtocol.default_request_version = "HTTP/1.0" self.pool = self.create_pool() try: eventlet.wsgi.server(self.sock, self.application, log=self._logger, custom_pool=self.pool, debug=False, keepalive=CONF.http_keepalive, socket_timeout=self.client_socket_timeout) except socket.error as err: if err.args[0] != errno.EINVAL: raise # waiting on async pools if ASYNC_EVENTLET_THREAD_POOL_LIST: for pool in ASYNC_EVENTLET_THREAD_POOL_LIST: pool.waitall() def _single_run(self, application, sock): """Start a WSGI server in a new green thread.""" LOG.info("Starting single process server") eventlet.wsgi.server(sock, application, custom_pool=self.pool, log=self._logger, debug=False, keepalive=CONF.http_keepalive, socket_timeout=self.client_socket_timeout) def configure_socket(self, old_conf=None, has_changed=None): """Ensure a socket exists and is appropriately configured. This function is called on start up, and can also be called in the event of a configuration reload. When called for the first time a new socket is created. If reloading and either bind_host or bind_port have been changed the existing socket must be closed and a new socket opened (laws of physics). In all other cases (bind_host/bind_port have not changed) the existing socket is reused. :param old_conf: Cached old configuration settings (if any) :param has_changed: callable to determine if a parameter has changed """ # Do we need a fresh socket? new_sock = (old_conf is None or ( has_changed('bind_host') or has_changed('bind_port'))) # Will we be using https? use_ssl = not (not CONF.cert_file or not CONF.key_file) # Were we using https before? old_use_ssl = (old_conf is not None and not ( not old_conf.get('key_file') or not old_conf.get('cert_file'))) # Do we now need to perform an SSL wrap on the socket? wrap_sock = use_ssl is True and (old_use_ssl is False or new_sock) # Do we now need to perform an SSL unwrap on the socket? unwrap_sock = use_ssl is False and old_use_ssl is True if new_sock: self._sock = None if old_conf is not None: self.sock.close() _sock = get_socket(self.default_port) _sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) # sockets can hang around forever without keepalive _sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1) self._sock = _sock if wrap_sock: self.sock = ssl_wrap_socket(self._sock) if unwrap_sock or new_sock and not use_ssl: self.sock = self._sock # Pick up newly deployed certs if old_conf is not None and use_ssl is True and old_use_ssl is True: if has_changed('cert_file') or has_changed('key_file'): utils.validate_key_cert(CONF.key_file, CONF.cert_file) if has_changed('cert_file'): self.sock.certfile = CONF.cert_file if has_changed('key_file'): self.sock.keyfile = CONF.key_file if new_sock or (old_conf is not None and has_changed('tcp_keepidle')): # This option isn't available in the OS X version of eventlet if hasattr(socket, 'TCP_KEEPIDLE'): self.sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPIDLE, CONF.tcp_keepidle) if old_conf is not None and has_changed('backlog'): self.sock.listen(CONF.backlog) class APIMapper(routes.Mapper): """Handle route matching when url is '' because routes.Mapper returns an error in this case. """ def routematch(self, url=None, environ=None): if url is "": result = self._match("", environ) return result[0], result[1] return routes.Mapper.routematch(self, url, environ) class RejectMethodController(object): def reject(self, req, allowed_methods, *args, **kwargs): LOG.debug("The method %s is not allowed for this resource", req.environ['REQUEST_METHOD']) raise webob.exc.HTTPMethodNotAllowed( headers=[('Allow', allowed_methods)]) class Router(object): """WSGI middleware that maps incoming requests to WSGI apps. """ def __init__(self, mapper): """Create a router for the given routes.Mapper. Each route in `mapper` must specify a 'controller', which is a WSGI app to call. You'll probably want to specify an 'action' as well and have your controller be a wsgi.Controller, who will route the request to the action method. Examples: mapper = routes.Mapper() sc = ServerController() # Explicit mapping of one route to a controller+action mapper.connect(None, "/svrlist", controller=sc, action="list") # Actions are all implicitly defined mapper.resource("server", "servers", controller=sc) # Pointing to an arbitrary WSGI app. You can specify the # {path_info:.*} parameter so the target app can be handed just that # section of the URL. mapper.connect(None, "/v1.0/{path_info:.*}", controller=BlogApp()) """ mapper.redirect("", "/") self.map = mapper self._router = routes.middleware.RoutesMiddleware(self._dispatch, self.map) @classmethod def factory(cls, global_conf, **local_conf): return cls(APIMapper()) @webob.dec.wsgify def __call__(self, req): """Route the incoming request to a controller based on self.map. If no match, return either a 404(Not Found) or 501(Not Implemented). """ return self._router @staticmethod @webob.dec.wsgify def _dispatch(req): """Called by self._router after matching the incoming request to a route and putting the information into req.environ. Either returns 404, 501, or the routed WSGI app's response. """ match = req.environ['wsgiorg.routing_args'][1] if not match: implemented_http_methods = ['GET', 'HEAD', 'POST', 'PUT', 'DELETE', 'PATCH'] if req.environ['REQUEST_METHOD'] not in implemented_http_methods: return webob.exc.HTTPNotImplemented() else: return webob.exc.HTTPNotFound() app = match['controller'] return app class Request(webob.Request): """Add some OpenStack API-specific logic to the base webob.Request.""" def best_match_content_type(self): """Determine the requested response content-type.""" supported = ('application/json',) bm = self.accept.best_match(supported) return bm or 'application/json' def best_match_language(self): """Determines best available locale from the Accept-Language header. :returns: the best language match or None if the 'Accept-Language' header was not available in the request. """ if not self.accept_language: return None langs = i18n.get_available_languages('glare') return self.accept_language.best_match(langs) def get_content_range(self): """Return the `Range` in a request.""" range_str = self.headers.get('Content-Range') if range_str is not None: range_ = webob.byterange.ContentRange.parse(range_str) if range_ is None: msg = _('Malformed Content-Range header: %s') % range_str raise webob.exc.HTTPBadRequest(explanation=msg) return range_ class JSONRequestDeserializer(object): valid_transfer_encoding = frozenset(['chunked', 'compress', 'deflate', 'gzip', 'identity']) httpverb_may_have_body = frozenset({'POST', 'PUT', 'PATCH'}) @classmethod def is_valid_encoding(cls, request): request_encoding = request.headers.get('transfer-encoding', '').lower() return request_encoding in cls.valid_transfer_encoding @classmethod def is_valid_method(cls, request): return request.method.upper() in cls.httpverb_may_have_body def has_body(self, request): """Returns whether a Webob.Request object will possess an entity body. :param request: Webob.Request object """ if self.is_valid_encoding(request) and self.is_valid_method(request): request.is_body_readable = True return True if request.content_length is not None and request.content_length > 0: return True return False @staticmethod def _sanitizer(obj): """Sanitizer method that will be passed to jsonutils.loads.""" return obj def from_json(self, datastring): try: jsondata = jsonutils.loads(datastring, object_hook=self._sanitizer) if not isinstance(jsondata, (dict, list)): msg = _('Unexpected body type. Expected list/dict.') raise webob.exc.HTTPBadRequest(explanation=msg) return jsondata except ValueError: msg = _('Malformed JSON in request body.') raise webob.exc.HTTPBadRequest(explanation=msg) def default(self, request): if self.has_body(request): return {'body': self.from_json(request.body)} else: return {} class JSONResponseSerializer(object): def _sanitizer(self, obj): """Sanitizer method that will be passed to jsonutils.dumps.""" if hasattr(obj, "to_dict"): return obj.to_dict() if isinstance(obj, multidict.MultiDict): return obj.mixed() return jsonutils.to_primitive(obj) def to_json(self, data): return jsonutils.dump_as_bytes(data, default=self._sanitizer) def default(self, response, result): response.content_type = 'application/json' body = self.to_json(result) body = encodeutils.to_utf8(body) response.body = body def translate_exception(req, e): """Translates all translatable elements of the given exception.""" # The RequestClass attribute in the webob.dec.wsgify decorator # does not guarantee that the request object will be a particular # type; this check is therefore necessary. if not hasattr(req, "best_match_language"): return e locale = req.best_match_language() if isinstance(e, webob.exc.HTTPError): e.explanation = i18n.translate(e.explanation, locale) e.detail = i18n.translate(e.detail, locale) if getattr(e, 'body_template', None): e.body_template = i18n.translate(e.body_template, locale) return e class Resource(object): """WSGI app that handles (de)serialization and controller dispatch. Reads routing information supplied by RoutesMiddleware and calls the requested action method upon its deserializer, controller, and serializer. Those three objects may implement any of the basic controller action methods (create, update, show, index, delete) along with any that may be specified in the api router. A 'default' method may also be implemented to be used in place of any non-implemented actions. Deserializer methods must accept a request argument and return a dictionary. Controller methods must accept a request argument. Additionally, they must also accept keyword arguments that represent the keys returned by the Deserializer. They may raise a webob.exc exception or return a dict, which will be serialized by requested content type. """ def __init__(self, controller, deserializer=None, serializer=None): """ :param controller: object that implement methods created by routes lib :param deserializer: object that supports webob request deserialization through controller-like actions :param serializer: object that supports webob response serialization through controller-like actions """ self.controller = controller self.serializer = serializer or JSONResponseSerializer() self.deserializer = deserializer or JSONRequestDeserializer() @webob.dec.wsgify(RequestClass=Request) def __call__(self, request): """WSGI method that controls (de)serialization and method dispatch.""" action_args = self.get_action_args(request.environ) action = action_args.pop('action', None) body_reject = strutils.bool_from_string( action_args.pop('body_reject', None)) try: if body_reject and self.deserializer.has_body(request): msg = _('A body is not expected with this request.') raise webob.exc.HTTPBadRequest(explanation=msg) deserialized_request = self.dispatch(self.deserializer, action, request) action_args.update(deserialized_request) action_result = self.dispatch(self.controller, action, request, **action_args) except webob.exc.WSGIHTTPException as e: exc_info = sys.exc_info() e = translate_exception(request, e) six.reraise(type(e), e, exc_info[2]) except glare_exc.GlareException: raise except UnicodeDecodeError: msg = _("Error decoding your request. Either the URL or the " "request body contained characters that could not be " "decoded by Glare") raise webob.exc.HTTPBadRequest(explanation=msg) except Exception as e: LOG.exception("Caught error: %s", encodeutils.exception_to_unicode(e)) response = webob.exc.HTTPInternalServerError(explanation=str(e)) return response try: response = webob.Response(request=request) self.dispatch(self.serializer, action, response, action_result) # encode all headers in response to utf-8 to prevent unicode errors for name, value in list(response.headers.items()): if six.PY2 and isinstance(value, six.text_type): response.headers[name] = encodeutils.safe_encode(value) return response except webob.exc.WSGIHTTPException as e: return translate_exception(request, e) except webob.exc.HTTPException as e: return e except glare_exc.GlareException: raise # return unserializable result (typically a webob exc) except Exception: return action_result def dispatch(self, obj, action, *args, **kwargs): """Find action-specific method on self and call it.""" try: method = getattr(obj, action) except AttributeError: method = getattr(obj, 'default') return method(*args, **kwargs) def get_action_args(self, request_environment): """Parse dictionary created by routes library.""" try: args = request_environment['wsgiorg.routing_args'][1].copy() except Exception: return {} args.pop("controller", None) args.pop("format", None) return args glare-0.5.0/glare/db/000077500000000000000000000000001317401036700142675ustar00rootroot00000000000000glare-0.5.0/glare/db/__init__.py000066400000000000000000000000001317401036700163660ustar00rootroot00000000000000glare-0.5.0/glare/db/artifact_api.py000066400000000000000000000140521317401036700172710ustar00rootroot00000000000000# Copyright (c) 2016 Mirantis, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Database API for all artifact types""" from oslo_db import exception as db_exception from oslo_log import log as logging from retrying import retry from glare.db.sqlalchemy import api from glare import locking LOG = logging.getLogger(__name__) def _retry_on_connection_error(exc): """Function to retry a DB API call if connection error was received.""" if isinstance(exc, db_exception.DBConnectionError): LOG.warning("Connection error detected. Retrying...") return True return False class ArtifactAPI(object): def _serialize_values(self, values): new_values = {} if 'tags' in values: new_values['tags'] = values.pop('tags') if values['tags'] else [] for key, value in values.items(): if key in api.BASE_ARTIFACT_PROPERTIES: new_values[key] = value else: new_values.setdefault('properties', {})[key] = value return new_values @retry(retry_on_exception=_retry_on_connection_error, wait_fixed=1000, stop_max_attempt_number=20) def save(self, context, artifact_id, values): """Save artifact values in database :param artifact_id: id of artifact that needs to be updated :param context: user context :param values: values that needs to be updated :return: dict of updated artifact values """ session = api.get_session() return api.create_or_update( context, artifact_id, self._serialize_values(values), session) @retry(retry_on_exception=_retry_on_connection_error, wait_fixed=1000, stop_max_attempt_number=20) def update_blob(self, context, artifact_id, values): """Create and update blob records in db :param artifact_id: id of artifact that needs to be updated :param context: user context :param values: blob values that needs to be updated :return: dict of updated artifact values """ session = api.get_session() return api.create_or_update( context, artifact_id, {'blobs': values}, session) @retry(retry_on_exception=_retry_on_connection_error, wait_fixed=1000, stop_max_attempt_number=20) def delete(self, context, artifact_id): """Delete artifacts from db :param context: user context :param artifact_id: id of artifact that needs to be deleted """ session = api.get_session() api.delete(context, artifact_id, session) @retry(retry_on_exception=_retry_on_connection_error, wait_fixed=1000, stop_max_attempt_number=20) def get(self, context, artifact_id): """Return artifact values from database :param context: user context :param artifact_id: id of the artifact :return: dict of artifact values """ session = api.get_session() return api.get(context, artifact_id, session) @retry(retry_on_exception=_retry_on_connection_error, wait_fixed=1000, stop_max_attempt_number=20) def list(self, context, filters, marker, limit, sort, latest): """List artifacts from db :param context: user request context :param filters: filter conditions from url :param marker: id of first artifact where we need to start artifact lookup :param limit: max number of items in list :param sort: sort conditions :param latest: flag that indicates, that only artifacts with highest versions should be returned in output :return: list of artifacts. Each artifact is represented as dict of values. """ session = api.get_session() return api.get_all(context=context, session=session, filters=filters, marker=marker, limit=limit, sort=sort, latest=latest) @retry(retry_on_exception=_retry_on_connection_error, wait_fixed=1000, stop_max_attempt_number=20) def count_artifact_number(self, context, type_name=None): """Count the number of artifacts for the tenant. :param context: user context :param type_name: name of specific artifact type to count artifacts. If None count artifacts of all types. :return: number of artifacts for given tenant """ session = api.get_session() return api.count_artifact_number(context, session, type_name) @retry(retry_on_exception=_retry_on_connection_error, wait_fixed=1000, stop_max_attempt_number=20) def calculate_uploaded_data(self, context, type_name=None): """Calculate the amount of uploaded data for tenant. :param context: user context :param type_name: name of specific artifact type to calculate data. If None calculate data of artifacts of all types. :return: amount of uploaded data for given user """ session = api.get_session() return api.calculate_uploaded_data(context, session, type_name) class ArtifactLockApi(locking.LockApiBase): @retry(retry_on_exception=_retry_on_connection_error, wait_fixed=1000, stop_max_attempt_number=20) def create_lock(self, context, lock_key): session = api.get_session() return api.create_lock(context, lock_key, session) @retry(retry_on_exception=_retry_on_connection_error, wait_fixed=1000, stop_max_attempt_number=20) def delete_lock(self, context, lock_id): session = api.get_session() api.delete_lock(context, lock_id, session) glare-0.5.0/glare/db/migration/000077500000000000000000000000001317401036700162605ustar00rootroot00000000000000glare-0.5.0/glare/db/migration/__init__.py000066400000000000000000000000001317401036700203570ustar00rootroot00000000000000glare-0.5.0/glare/db/migration/alembic.ini000066400000000000000000000016761317401036700203670ustar00rootroot00000000000000# A generic, single database configuration. [alembic] # path to migration scripts script_location = glare/db/migration/alembic_migrations # template used to generate migration files # file_template = %%(rev)s_%%(slug)s # max length of characters to apply to the # "slug" field #truncate_slug_length = 40 # set to 'true' to run the environment during # the 'revision' command, regardless of autogenerate # revision_environment = false sqlalchemy.url = # Logging configuration [loggers] keys = root,sqlalchemy,alembic [handlers] keys = console [formatters] keys = generic [logger_root] level = WARN handlers = console qualname = [logger_sqlalchemy] level = WARN handlers = qualname = sqlalchemy.engine [logger_alembic] level = INFO handlers = qualname = alembic [handler_console] class = StreamHandler args = (sys.stderr,) level = NOTSET formatter = generic [formatter_generic] format = %(levelname)-5.5s [%(name)s] %(message)s datefmt = %H:%M:%Sglare-0.5.0/glare/db/migration/alembic_migrations/000077500000000000000000000000001317401036700221105ustar00rootroot00000000000000glare-0.5.0/glare/db/migration/alembic_migrations/README000066400000000000000000000007461317401036700227770ustar00rootroot00000000000000Please see https://alembic.readthedocs.org/en/latest/index.html for general documentation To create alembic migrations use: $ glare-db-manage revision --message --autogenerate Stamp db with most recent migration version, without actually running migrations $ glare-db-manage stamp --revision head Upgrade can be performed by: $ glare-db-manage upgrade $ glare-db-manage upgrade --revision head Downgrading db: $ glare-db-manage downgrade $ glare-db-manage downgrade --revision base glare-0.5.0/glare/db/migration/alembic_migrations/env.py000066400000000000000000000025451317401036700232600ustar00rootroot00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from alembic import context from glare.db.sqlalchemy import api from glare.db.sqlalchemy import models # this is the Alembic Config object, which provides # access to the values within the .ini file in use. config = context.config # add your model's MetaData object here # for 'autogenerate' support # from myapp import mymodel target_metadata = models.BASE.metadata def run_migrations_online(): """Run migrations in 'online' mode. In this scenario we need to create an Engine and associate a connection with the context. """ engine = api.get_engine() with engine.connect() as connection: context.configure(connection=connection, target_metadata=target_metadata) with context.begin_transaction(): context.run_migrations() run_migrations_online() glare-0.5.0/glare/db/migration/alembic_migrations/script.py.mako000066400000000000000000000017651317401036700247250ustar00rootroot00000000000000# Copyright ${create_date.year} OpenStack Foundation. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. """${message} Revision ID: ${up_revision} Revises: ${down_revision} Create Date: ${create_date} """ # revision identifiers, used by Alembic. revision = ${repr(up_revision)} down_revision = ${repr(down_revision)} from alembic import op import sqlalchemy as sa ${imports if imports else ""} def upgrade(): ${upgrades if upgrades else "pass"} def downgrade(): ${downgrades if downgrades else "pass"}glare-0.5.0/glare/db/migration/alembic_migrations/versions/000077500000000000000000000000001317401036700237605ustar00rootroot00000000000000glare-0.5.0/glare/db/migration/alembic_migrations/versions/001_initial_version.py000066400000000000000000000135171317401036700301170ustar00rootroot00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. """Initial version Revision ID: 001 Revises: None Create Date: 2016-08-18 12:28:37.372366 """ # revision identifiers, used by Alembic. revision = '001' down_revision = None from alembic import op import sqlalchemy as sa MYSQL_ENGINE = 'InnoDB' MYSQL_CHARSET = 'utf8' def upgrade(): op.create_table( 'glare_artifacts', sa.Column('id', sa.String(36), primary_key=True, nullable=False), sa.Column('name', sa.String(255), nullable=False), sa.Column('type_name', sa.String(255), nullable=False), sa.Column('version_prefix', sa.BigInteger(), nullable=False), sa.Column('version_suffix', sa.String(255)), sa.Column('version_meta', sa.String(255)), sa.Column('description', sa.Text()), sa.Column('visibility', sa.String(32), nullable=False), sa.Column('status', sa.String(32), nullable=False), sa.Column('owner', sa.String(255)), sa.Column('created_at', sa.DateTime(), nullable=False), sa.Column('updated_at', sa.DateTime(), nullable=False), sa.Column('activated_at', sa.DateTime()), sa.PrimaryKeyConstraint('id'), mysql_engine=MYSQL_ENGINE, mysql_charset=MYSQL_CHARSET ) op.create_index('ix_glare_artifact_name_and_version', 'glare_artifacts', ['name', 'version_prefix', 'version_suffix'] ) op.create_index('ix_glare_artifact_type', 'glare_artifacts', ['type_name'] ) op.create_index('ix_glare_artifact_status', 'glare_artifacts', ['status'] ) op.create_index('ix_glare_artifact_owner', 'glare_artifacts', ['owner'] ) op.create_index('ix_glare_artifact_visibility', 'glare_artifacts', ['visibility'] ) op.create_table( 'glare_artifact_tags', sa.Column('id', sa.String(36), primary_key=True, nullable=False), sa.Column('artifact_id', sa.String(36), sa.ForeignKey('glare_artifacts.id'), nullable=False), sa.Column('value', sa.String(255), nullable=False), sa.PrimaryKeyConstraint('id'), mysql_engine=MYSQL_ENGINE, mysql_charset=MYSQL_CHARSET ) op.create_index('ix_glare_artifact_tags_artifact_id', 'glare_artifact_tags', ['artifact_id'] ) op.create_index('ix_glare_artifact_tags_artifact_id_tag_value', 'glare_artifact_tags', ['artifact_id', 'value'] ) op.create_table( 'glare_artifact_blobs', sa.Column('id', sa.String(36), primary_key=True, nullable=False), sa.Column('artifact_id', sa.String(36), sa.ForeignKey('glare_artifacts.id'), nullable=False), sa.Column('size', sa.BigInteger()), sa.Column('md5', sa.String(32)), sa.Column('sha1', sa.String(40)), sa.Column('sha256', sa.String(64)), sa.Column('name', sa.String(255), nullable=False), sa.Column('status', sa.String(32), nullable=False), sa.Column('external', sa.Boolean()), sa.Column('url', sa.Text()), sa.Column('key_name', sa.String(2048)), sa.Column('content_type', sa.String(255)), sa.PrimaryKeyConstraint('id'), mysql_engine=MYSQL_ENGINE, mysql_charset=MYSQL_CHARSET ) op.create_index('ix_glare_artifact_blobs_artifact_id', 'glare_artifact_blobs', ['artifact_id'] ) op.create_index('ix_glare_artifact_blobs_name', 'glare_artifact_blobs', ['name'] ) op.create_table( 'glare_artifact_properties', sa.Column('id', sa.String(36), primary_key=True, nullable=False), sa.Column('artifact_id', sa.String(36), sa.ForeignKey('glare_artifacts.id'), nullable=False), sa.Column('name', sa.String(255), nullable=False), sa.Column('string_value', sa.String(20000)), sa.Column('int_value', sa.Integer()), sa.Column('numeric_value', sa.Numeric()), sa.Column('bool_value', sa.Boolean()), sa.Column('position', sa.Integer()), sa.Column('key_name', sa.String(255)), sa.PrimaryKeyConstraint('id'), mysql_engine=MYSQL_ENGINE, mysql_charset=MYSQL_CHARSET ) op.create_index('ix_glare_artifact_properties_artifact_id', 'glare_artifact_properties', ['artifact_id'] ) op.create_index('ix_glare_artifact_properties_name', 'glare_artifact_properties', ['name'] ) op.create_table( 'glare_artifact_locks', sa.Column('id', sa.String(255), primary_key=True, nullable=False), sa.PrimaryKeyConstraint('id'), mysql_engine=MYSQL_ENGINE, mysql_charset=MYSQL_CHARSET ) def downgrade(): op.drop_table('glare_artifact_locks') op.drop_table('glare_artifact_properties') op.drop_table('glare_artifact_blobs') op.drop_table('glare_artifact_tags') op.drop_table('glare_artifacts') # end Alembic commands # glare-0.5.0/glare/db/migration/alembic_migrations/versions/002_add_acquired_at_column.py000066400000000000000000000030051317401036700313570ustar00rootroot00000000000000# Copyright 2016 OpenStack Foundation. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. """Add acquired_at column Revision ID: 002 Revises: 001 Create Date: 2016-10-05 16:03:43.207147 """ # revision identifiers, used by Alembic. revision = '002' down_revision = '001' from alembic import op import sqlalchemy as sa MYSQL_ENGINE = 'InnoDB' MYSQL_CHARSET = 'utf8' def upgrade(): op.drop_table('glare_artifact_locks') op.create_table( 'glare_artifact_locks', sa.Column('id', sa.String(255), primary_key=True, nullable=False), sa.Column('acquired_at', sa.DateTime(), nullable=False), sa.PrimaryKeyConstraint('id'), mysql_engine=MYSQL_ENGINE, mysql_charset=MYSQL_CHARSET ) def downgrade(): op.drop_table('glare_artifact_locks') op.create_table( 'glare_artifact_locks', sa.Column('id', sa.String(255), primary_key=True, nullable=False), sa.PrimaryKeyConstraint('id'), mysql_engine=MYSQL_ENGINE, mysql_charset=MYSQL_CHARSET ) glare-0.5.0/glare/db/migration/alembic_migrations/versions/003_add_database_blob_storage.py000066400000000000000000000027751317401036700320250ustar00rootroot00000000000000# Copyright 2017 OpenStack Foundation. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. """Add acquired_at column Revision ID: 003 Revises: 002 Create Date: 2017-01-10 12:53:25.108149 """ # revision identifiers, used by Alembic. revision = '003' down_revision = '002' from alembic import op import sqlalchemy as sa from sqlalchemy.dialects import mysql MYSQL_ENGINE = 'InnoDB' MYSQL_CHARSET = 'utf8' def upgrade(): op.create_table( 'glare_blob_data', sa.Column('id', sa.String(255), primary_key=True, nullable=False), # Because of strange behavior of mysql LargeBinary is converted to # BLOB instead of LONGBLOB. So we have to fix it explicitly with # 'with_variant' call. sa.Column( 'data', sa.LargeBinary().with_variant(mysql.LONGBLOB(), 'mysql'), nullable=False), sa.PrimaryKeyConstraint('id'), mysql_engine=MYSQL_ENGINE, mysql_charset=MYSQL_CHARSET ) def downgrade(): op.drop_table('glare_blob_data') glare-0.5.0/glare/db/migration/alembic_migrations/versions/004_add_quota_tables.py000066400000000000000000000024411317401036700302110ustar00rootroot00000000000000# Copyright 2017 OpenStack Foundation. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. """Add quota tables Revision ID: 004 Revises: 003 Create Date: 2017-07-29 14:32:33.717353 """ # revision identifiers, used by Alembic. revision = '004' down_revision = '003' from alembic import op import sqlalchemy as sa MYSQL_ENGINE = 'InnoDB' MYSQL_CHARSET = 'utf8' def upgrade(): op.create_table( 'glare_quotas', sa.Column('project_id', sa.String(255), primary_key=True), sa.Column('quota_name', sa.String(32), primary_key=True), sa.Column('quota_value', sa.BigInteger(), nullable=False), sa.PrimaryKeyConstraint('project_id', 'quota_name'), mysql_engine=MYSQL_ENGINE, mysql_charset=MYSQL_CHARSET ) def downgrade(): op.drop_table('glare_quotas') glare-0.5.0/glare/db/migration/migration.py000066400000000000000000000052401317401036700206240ustar00rootroot00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import os import alembic from alembic import config as alembic_config from alembic import migration as alembic_migration from glare.db.sqlalchemy import api as db_api def get_alembic_config(): path = os.path.join(os.path.dirname(__file__), 'alembic.ini') config = alembic_config.Config(path) config.set_main_option('script_location', 'glare.db.migration:alembic_migrations') return config def version(engine=None): """Returns current database version.""" engine = engine or db_api.get_engine() with engine.connect() as conn: context = alembic_migration.MigrationContext.configure(conn) return context.get_current_revision() def upgrade(revision, config=None): """Used for upgrading database. :param revision: Desired database version :type revision: string """ revision = revision or 'head' config = config or get_alembic_config() alembic.command.upgrade(config, revision or 'head') def downgrade(revision, config=None): """Used for downgrading database. :param revision: Desired database version7 :type revision: string """ revision = revision or 'base' config = config or get_alembic_config() return alembic.command.downgrade(config, revision) def stamp(revision, config=None): """Stamps database with provided revision. Don't run any migrations. :param revision: Should match one from repository or head - to stamp database with most recent revision :type revision: string """ config = config or get_alembic_config() return alembic.command.stamp(config, revision=revision) def revision(message=None, autogenerate=False, config=None): """Creates template for migration. :param message: Text that will be used for migration title :type message: string :param autogenerate: If True - generates diff based on current database state :type autogenerate: bool """ config = config or get_alembic_config() return alembic.command.revision(config, message=message, autogenerate=autogenerate) glare-0.5.0/glare/db/sqlalchemy/000077500000000000000000000000001317401036700164315ustar00rootroot00000000000000glare-0.5.0/glare/db/sqlalchemy/__init__.py000066400000000000000000000000001317401036700205300ustar00rootroot00000000000000glare-0.5.0/glare/db/sqlalchemy/api.py000066400000000000000000000646161317401036700175710ustar00rootroot00000000000000# Copyright (c) 2016 Mirantis, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import hashlib import operator import threading from oslo_config import cfg from oslo_db import exception as db_exception from oslo_db import options from oslo_db.sqlalchemy import session from oslo_log import log as os_logging from oslo_utils import timeutils import osprofiler.sqlalchemy from retrying import retry import six import sqlalchemy from sqlalchemy import and_ import sqlalchemy.exc from sqlalchemy import exists from sqlalchemy import func from sqlalchemy import or_ import sqlalchemy.orm as orm from sqlalchemy.orm import aliased from sqlalchemy.orm import joinedload from glare.common import exception from glare.common import semver_db from glare.common import utils from glare.db.sqlalchemy import models from glare.i18n import _ LOG = os_logging.getLogger(__name__) CONF = cfg.CONF CONF.import_group("profiler", "glare.common.wsgi") options.set_defaults(CONF) BASE_ARTIFACT_PROPERTIES = ('id', 'visibility', 'created_at', 'updated_at', 'activated_at', 'owner', 'status', 'description', 'name', 'type_name', 'version') _FACADE = None _LOCK = threading.Lock() def _retry_on_deadlock(exc): """Decorator to retry a DB API call if Deadlock was received.""" if isinstance(exc, db_exception.DBDeadlock): LOG.warning("Deadlock detected. Retrying...") return True return False def _create_facade_lazily(): global _LOCK, _FACADE if _FACADE is None: with _LOCK: if _FACADE is None: _FACADE = session.EngineFacade.from_config(CONF) if CONF.profiler.enabled and CONF.profiler.trace_sqlalchemy: osprofiler.sqlalchemy.add_tracing(sqlalchemy, _FACADE.get_engine(), "db") return _FACADE def get_engine(): facade = _create_facade_lazily() return facade.get_engine() def get_session(autocommit=True, expire_on_commit=False): facade = _create_facade_lazily() return facade.get_session(autocommit=autocommit, expire_on_commit=expire_on_commit) def setup_db(): engine = get_engine() models.register_models(engine) def drop_db(): engine = get_engine() models.unregister_models(engine) @retry(retry_on_exception=_retry_on_deadlock, wait_fixed=500, stop_max_attempt_number=50) def delete(context, artifact_id, session): with session.begin(): session.query(models.Artifact).filter_by(id=artifact_id).delete() def _drop_protected_attrs(model_class, values): """Removed protected attributes from values dictionary using the models __protected_attributes__ field. """ for attr in model_class.__protected_attributes__: if attr in values: del values[attr] @retry(retry_on_exception=_retry_on_deadlock, wait_fixed=500, stop_max_attempt_number=50) @utils.no_4byte_params def create_or_update(context, artifact_id, values, session): with session.begin(): _drop_protected_attrs(models.Artifact, values) if artifact_id is None: # create new artifact artifact = models.Artifact() artifact.id = values.pop('id') else: # update the existing artifact artifact = _get(context, artifact_id, session) if 'version' in values: values['version'] = semver_db.parse(values['version']) if 'tags' in values: tags = values.pop('tags') artifact.tags = _do_tags(artifact, tags) if 'properties' in values: properties = values.pop('properties', {}) artifact.properties = _do_properties(artifact, properties) if 'blobs' in values: blobs = values.pop('blobs') artifact.blobs = _do_blobs(artifact, blobs) artifact.updated_at = timeutils.utcnow() if 'status' in values: if session.query(exists().where(and_( models.ArtifactBlob.status == 'saving', models.ArtifactBlob.artifact_id == artifact_id)) ).one()[0]: raise exception.Conflict( "You cannot change artifact status if it has " "uploading blobs.") if values['status'] == 'active': artifact.activated_at = timeutils.utcnow() artifact.update(values) artifact.save(session=session) LOG.debug("Response from the database was received.") return artifact.to_dict() def _get(context, artifact_id, session): try: query = _do_artifacts_query(context, session).filter_by( id=artifact_id) artifact = query.one() except orm.exc.NoResultFound: msg = _("Artifact with id=%s not found.") % artifact_id LOG.warning(msg) raise exception.ArtifactNotFound(msg) return artifact def get(context, artifact_id, session): return _get(context, artifact_id, session).to_dict() def get_all(context, session, filters=None, marker=None, limit=None, sort=None, latest=False): """List all visible artifacts :param filters: dict of filter keys and values. :param marker: artifact id after which to start page :param limit: maximum number of artifacts to return :param sort: a tuple (key, dir, type) where key is an attribute by which results should be sorted, dir is a direction: 'asc' or 'desc', and type is type of the attribute: 'bool', 'string', 'numeric' or 'int' or None if attribute is base. :param latest: flag that indicates, that only artifacts with highest versions should be returned in output """ artifacts = _get_all( context, session, filters, marker, limit, sort, latest) return [af.to_dict() for af in artifacts] def _apply_latest_filter(context, session, query, basic_conds, tag_conds, prop_conds): # Subquery to fetch max version suffix for a group (name, # version_prefix) ver_suffix_subq = _apply_query_base_filters( session.query( models.Artifact.name, models.Artifact.version_prefix, func.max(models.Artifact.version_suffix).label( 'max_suffix')).group_by( models.Artifact.name, models.Artifact.version_prefix), context) ver_suffix_subq = _apply_user_filters( ver_suffix_subq, basic_conds, tag_conds, prop_conds).subquery() # Subquery to fetch max version prefix for a name group ver_prefix_subq = _apply_query_base_filters( session.query(models.Artifact.name, func.max( models.Artifact.version_prefix).label('max_prefix')).group_by( models.Artifact.name), context) ver_prefix_subq = _apply_user_filters( ver_prefix_subq, basic_conds, tag_conds, prop_conds).subquery() # Combine two subqueries together joining them with Artifact table query = query.join( ver_prefix_subq, and_(models.Artifact.name == ver_prefix_subq.c.name, models.Artifact.version_prefix == ver_prefix_subq.c.max_prefix)).join( ver_suffix_subq, and_(models.Artifact.name == ver_suffix_subq.c.name, models.Artifact.version_prefix == ver_suffix_subq.c.version_prefix, models.Artifact.version_suffix == ver_suffix_subq.c.max_suffix) ) return query def _apply_user_filters(query, basic_conds, tag_conds, prop_conds): if basic_conds: for basic_condition in basic_conds: query = query.filter(and_(*basic_condition)) if tag_conds: for tag_condition in tag_conds: query = query.join(models.ArtifactTag, aliased=True).filter( and_(*tag_condition)) if prop_conds: for prop_condition in prop_conds: query = query.join(models.ArtifactProperty, aliased=True).filter( and_(*prop_condition)) return query def _get_all(context, session, filters=None, marker=None, limit=None, sort=None, latest=False): filters = filters or {} query = _do_artifacts_query(context, session) basic_conds, tag_conds, prop_conds = _do_query_filters(filters) query = _apply_user_filters(query, basic_conds, tag_conds, prop_conds) if latest: query = _apply_latest_filter(context, session, query, basic_conds, tag_conds, prop_conds) marker_artifact = None if marker is not None: marker_artifact = get(context, marker, session) query = _do_paginate_query(query=query, limit=limit, marker=marker_artifact, sort=sort) return query.all() def _do_paginate_query(query, marker=None, limit=None, sort=None): # Add sorting number_of_custom_props = 0 for sort_key, sort_dir, sort_type in sort: try: sort_dir_func = { 'asc': sqlalchemy.asc, 'desc': sqlalchemy.desc, }[sort_dir] except KeyError: msg = _("Unknown sort direction, must be 'desc' or 'asc'.") raise exception.BadRequest(msg) # Note(mfedosin): Workaround to deal with situation that sqlalchemy # cannot work with composite keys correctly if sort_key == 'version': query = query.order_by(sort_dir_func(models.Artifact.version_prefix))\ .order_by(sort_dir_func(models.Artifact.version_suffix))\ .order_by(sort_dir_func(models.Artifact.version_meta)) elif sort_key in BASE_ARTIFACT_PROPERTIES: # sort by generic property query = query.order_by(sort_dir_func(getattr(models.Artifact, sort_key))) else: # sort by custom property number_of_custom_props += 1 if number_of_custom_props > 1: msg = _("For performance sake it's not allowed to sort by " "more than one custom property with this db backend.") raise exception.BadRequest(msg) prop_table = aliased(models.ArtifactProperty) query = ( query.join(prop_table). filter(prop_table.name == sort_key). order_by(sort_dir_func(getattr(prop_table, sort_type + '_value')))) # Add pagination if marker is not None: marker_values = [] for sort_key, __, __ in sort: v = marker.get(sort_key, None) marker_values.append(v) # Build up an array of sort criteria as in the docstring criteria_list = [] for i in range(len(sort)): crit_attrs = [] for j in range(i): value = marker_values[j] if sort[j][0] in BASE_ARTIFACT_PROPERTIES: if sort[j][0] == 'version': value = semver_db.parse(value) crit_attrs.append([getattr(models.Artifact, sort[j][0]) == value]) else: conds = [models.ArtifactProperty.name == sort[j][0]] conds.extend([getattr(models.ArtifactProperty, sort[j][2] + '_value') == value]) crit_attrs.append(conds) value = marker_values[i] sort_dir_func = operator.gt if sort[i][1] == 'asc' else operator.lt if sort[i][0] in BASE_ARTIFACT_PROPERTIES: if sort[i][0] == 'version': value = semver_db.parse(value) crit_attrs.append([sort_dir_func(getattr(models.Artifact, sort[i][0]), value)]) else: query = query.join(models.ArtifactProperty, aliased=True) conds = [models.ArtifactProperty.name == sort[i][0]] conds.extend([sort_dir_func(getattr(models.ArtifactProperty, sort[i][2] + '_value'), value)]) crit_attrs.append(conds) criteria = [and_(*crit_attr) for crit_attr in crit_attrs] criteria_list.append(criteria) criteria_list = [and_(*cr) for cr in criteria_list] query = query.filter(or_(*criteria_list)) if limit is not None: query = query.limit(limit) return query def _do_artifacts_query(context, session): """Build the query to get all artifacts based on the context""" query = session.query(models.Artifact) query = (query.options(joinedload(models.Artifact.properties)). options(joinedload(models.Artifact.tags)). options(joinedload(models.Artifact.blobs))) return _apply_query_base_filters(query, context) def _apply_query_base_filters(query, context): # If admin, return everything. if context.is_admin: return query # If anonymous user, return only public artifacts. # However, if context.tenant has a value, return both # public and private artifacts of the owner. if context.tenant is not None: query = query.filter( or_(models.Artifact.owner == context.tenant, models.Artifact.visibility == 'public')) else: query = query.filter( models.Artifact.visibility == 'public') return query op_mappings = { 'eq': operator.eq, 'gt': operator.gt, 'gte': operator.ge, 'lt': operator.lt, 'lte': operator.le, 'neq': operator.ne, } def _do_query_filters(filters): basic_conds = [] tag_conds = [] prop_conds = [] for field_name, key_name, op, field_type, value in filters: if field_name == 'tags': tags = utils.split_filter_value_for_quotes(value) for tag in tags: tag_conds.append([models.ArtifactTag.value == tag]) elif field_name == 'tags-any': tags = utils.split_filter_value_for_quotes(value) tag_conds.append([models.ArtifactTag.value.in_(tags)]) elif field_name in BASE_ARTIFACT_PROPERTIES: if op != 'in': fn = op_mappings[op] if field_name == 'version': value = semver_db.parse(value) basic_conds.append([fn(getattr(models.Artifact, field_name), value)]) else: if field_name == 'version': value = [semver_db.parse(val) for val in value] basic_conds.append( [or_(*[ models.Artifact.version == ver for ver in value])]) else: basic_conds.append( [getattr(models.Artifact, field_name).in_(value)]) else: conds = [models.ArtifactProperty.name == field_name] if key_name is not None: if op == 'eq' or value is not None: conds.extend( [models.ArtifactProperty.key_name == key_name]) elif op == 'in': conds.extend( [models.ArtifactProperty.key_name.in_(key_name)]) if value is not None: if op != 'in': fn = op_mappings[op] conds.extend([fn(getattr(models.ArtifactProperty, field_type + '_value'), value)]) else: conds.extend([getattr(models.ArtifactProperty, field_type + '_value').in_(value)]) prop_conds.append(conds) return basic_conds, tag_conds, prop_conds def _do_tags(artifact, new_tags): tags_to_update = [] # don't touch existing tags for tag in artifact.tags: if tag.value in new_tags: tags_to_update.append(tag) new_tags.remove(tag.value) # add new tags for tag in new_tags: db_tag = models.ArtifactTag() db_tag.value = tag tags_to_update.append(db_tag) return tags_to_update def _get_prop_type(value): if isinstance(value, bool): return 'bool_value' if isinstance(value, int): return 'int_value' if isinstance(value, six.string_types): return 'string_value' if isinstance(value, float): return 'numeric_value' def _create_property(prop_name, prop_value, position=None, key_name=None): db_prop = models.ArtifactProperty() db_prop.name = prop_name setattr(db_prop, _get_prop_type(prop_value), prop_value) db_prop.position = position db_prop.key_name = key_name return db_prop def _do_properties(artifact, new_properties): props_to_update = [] # don't touch the existing properties for prop in artifact.properties: if prop.name not in new_properties: props_to_update.append(prop) for prop_name, prop_value in new_properties.items(): if prop_value is None: continue if isinstance(prop_value, list): for pos, list_prop in enumerate(prop_value): for prop in artifact.properties: if prop.name == prop_name and pos == prop.position: if getattr(prop, _get_prop_type( list_prop)) != list_prop: setattr(prop, _get_prop_type(list_prop), list_prop) props_to_update.append(prop) break else: props_to_update.append( _create_property(prop_name, list_prop, position=pos) ) elif isinstance(prop_value, dict): for dict_key, dict_val in prop_value.items(): for prop in artifact.properties: if prop.name == prop_name and prop.key_name == dict_key: if getattr(prop, _get_prop_type(dict_val)) != dict_val: setattr(prop, _get_prop_type(dict_val), dict_val) props_to_update.append(prop) break else: props_to_update.append( _create_property(prop_name, dict_val, key_name=dict_key) ) elif prop_value is not None: for prop in artifact.properties: if prop.name == prop_name: setattr(prop, _get_prop_type(prop_value), prop_value) props_to_update.append(prop) break else: props_to_update.append(_create_property( prop_name, prop_value)) return props_to_update def _update_blob_values(blob, values): for elem in ('size', 'md5', 'sha1', 'sha256', 'url', 'external', 'status', 'content_type'): setattr(blob, elem, values[elem]) return blob def _do_blobs(artifact, new_blobs): blobs_to_update = [] # don't touch the existing blobs for blob in artifact.blobs: if blob.name not in new_blobs: blobs_to_update.append(blob) for blob_name, blob_value in new_blobs.items(): if blob_value is None: continue if isinstance(blob_value.get('status'), str): for blob in artifact.blobs: if blob.name == blob_name: _update_blob_values(blob, blob_value) blobs_to_update.append(blob) break else: blob = models.ArtifactBlob() blob.name = blob_name _update_blob_values(blob, blob_value) blobs_to_update.append(blob) else: for dict_key, dict_val in blob_value.items(): for blob in artifact.blobs: if blob.name == blob_name and blob.key_name == dict_key: _update_blob_values(blob, dict_val) blobs_to_update.append(blob) break else: blob = models.ArtifactBlob() blob.name = blob_name blob.key_name = dict_key _update_blob_values(blob, dict_val) blobs_to_update.append(blob) return blobs_to_update def count_artifact_number(context, session, type_name=None): """Return a number of artifacts for tenant.""" query = session.query(func.count(models.Artifact.id)).filter( models.Artifact.owner == context.tenant) if type_name is not None: query = query.filter(models.Artifact.type_name == type_name) return query.order_by(None).scalar() or 0 def calculate_uploaded_data(context, session, type_name=None): """Return the amount of uploaded data for tenant.""" query = session.query( func.sum(models.ArtifactBlob.size)).join( models.Artifact, aliased=True).filter( models.Artifact.owner == context.tenant) if type_name is not None: query = query.filter(models.Artifact.type_name == type_name) return query.order_by(None).scalar() or 0 def _generate_quota_id(project_id, quota_name, type_name=None): quota_id = b"%s:%s" % (project_id.encode(), quota_name.encode()) if type_name is not None: quota_id += b":%s" % type_name.encode() return hashlib.md5(quota_id).hexdigest() @retry(retry_on_exception=_retry_on_deadlock, wait_fixed=500, stop_max_attempt_number=50) @utils.no_4byte_params def set_quotas(values, session): """Create new quota instances in database""" with session.begin(): for project_id, project_quotas in values.items(): # reset all project quotas session.query(models.ArtifactQuota).filter( models.ArtifactQuota.project_id == project_id).delete() # generate new quotas for quota_name, quota_value in project_quotas.items(): q = models.ArtifactQuota() q.project_id = project_id q.quota_name = quota_name q.quota_value = quota_value session.add(q) # save all quotas session.flush() @retry(retry_on_exception=_retry_on_deadlock, wait_fixed=500, stop_max_attempt_number=50) def get_all_quotas(session, project_id=None): """List all available quotas.""" query = session.query(models.ArtifactQuota) if project_id is not None: query = query.filter( models.ArtifactQuota.project_id == project_id) quotas = query.order_by(models.ArtifactQuota.project_id).all() res = {} for quota in quotas: res.setdefault( quota.project_id, {})[quota.quota_name] = quota.quota_value return res @retry(retry_on_exception=_retry_on_deadlock, wait_fixed=500, stop_max_attempt_number=50) @utils.no_4byte_params def create_lock(context, lock_key, session): """Try to create lock record.""" with session.begin(): existing = session.query(models.ArtifactLock).get(lock_key) if existing is None: try: lock = models.ArtifactLock() lock.id = lock_key lock.save(session=session) return lock.id except (sqlalchemy.exc.IntegrityError, db_exception.DBDuplicateEntry): msg = _("Cannot lock an item with key %s. " "Lock already acquired by other request") % lock_key raise exception.Conflict(msg) else: if timeutils.is_older_than(existing.acquired_at, 5): existing.acquired_at = timeutils.utcnow() existing.save(session) return existing.id else: msg = _("Cannot lock an item with key %s. " "Lock already acquired by other request") % lock_key raise exception.Conflict(msg) @retry(retry_on_exception=_retry_on_deadlock, wait_fixed=500, stop_max_attempt_number=50) def delete_lock(context, lock_id, session): with session.begin(): session.query(models.ArtifactLock).filter_by(id=lock_id).delete() @retry(retry_on_exception=_retry_on_deadlock, wait_fixed=500, stop_max_attempt_number=50) def save_blob_data(context, blob_data_id, data, session): """Save blob data to database.""" with session.begin(): blob_data = models.ArtifactBlobData() blob_data.id = blob_data_id blob_data.data = data.read() blob_data.save(session=session) return "sql://" + blob_data.id @retry(retry_on_exception=_retry_on_deadlock, wait_fixed=500, stop_max_attempt_number=50) def save_blob_data_batch(context, blobs, session): """Perform batch uploading to database.""" with session.begin(): locations = [] # blobs is a list of tuples (blob_data_id, data) for blob_data_id, data in blobs: blob_data = models.ArtifactBlobData() blob_data.id = blob_data_id blob_data.data = data.read() session.add(blob_data) locations.append("sql://" + blob_data.id) session.flush() return locations @retry(retry_on_exception=_retry_on_deadlock, wait_fixed=500, stop_max_attempt_number=50) def get_blob_data(context, uri, session): """Download blob data from database.""" blob_data_id = uri[6:] try: blob_data = session.query( models.ArtifactBlobData).filter_by(id=blob_data_id).one() except orm.exc.NoResultFound: msg = _("Cannot find a blob data with id %s.") % blob_data_id raise exception.NotFound(msg) return blob_data.data @retry(retry_on_exception=_retry_on_deadlock, wait_fixed=500, stop_max_attempt_number=50) def delete_blob_data(context, uri, session): """Delete blob data from database.""" with session.begin(): blob_data_id = uri[6:] session.query( models.ArtifactBlobData).filter_by(id=blob_data_id).delete() glare-0.5.0/glare/db/sqlalchemy/models.py000066400000000000000000000235651317401036700203010ustar00rootroot00000000000000# Copyright (c) 2016 Mirantis, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_db.sqlalchemy import models from oslo_utils import timeutils from oslo_utils import uuidutils from sqlalchemy import BigInteger from sqlalchemy import Boolean from sqlalchemy import Column from sqlalchemy import DateTime from sqlalchemy.ext import declarative from sqlalchemy import ForeignKey from sqlalchemy import Index from sqlalchemy import Integer from sqlalchemy import LargeBinary from sqlalchemy import Numeric from sqlalchemy.orm import backref from sqlalchemy.orm import composite from sqlalchemy.orm import relationship from sqlalchemy import String from sqlalchemy import Text from glare.common import semver_db BASE = declarative.declarative_base() class ArtifactBase(models.ModelBase): """Base class for Artifact Models.""" __table_args__ = {'mysql_engine': 'InnoDB', 'mysql_charset': 'utf8'} __table_initialized__ = False def save(self, session=None): from glare.db.sqlalchemy import api as db_api super(ArtifactBase, self).save(session or db_api.get_session()) def keys(self): return self.__dict__.keys() def values(self): return self.__dict__.values() def items(self): return self.__dict__.items() def to_dict(self): d = {} for c in self.__table__.columns: d[c.name] = self[c.name] return d def _parse_property_value(prop): columns = [ 'int_value', 'string_value', 'bool_value', 'numeric_value'] for prop_type in columns: if getattr(prop, prop_type) is not None: return getattr(prop, prop_type) def _parse_blob_value(blob): return { "id": blob.id, "url": blob.url, "status": blob.status, "external": blob.external, "md5": blob.md5, "sha1": blob.sha1, "sha256": blob.sha256, "size": blob.size, "content_type": blob.content_type } class Artifact(BASE, ArtifactBase): __tablename__ = 'glare_artifacts' __table_args__ = ( Index('ix_glare_artifact_name_and_version', 'name', 'version_prefix', 'version_suffix'), Index('ix_glare_artifact_type', 'type_name'), Index('ix_glare_artifact_status', 'status'), Index('ix_glare_artifact_owner', 'owner'), Index('ix_glare_artifact_visibility', 'visibility'), {'mysql_engine': 'InnoDB', 'mysql_charset': 'utf8'}) __protected_attributes__ = set(["created_at", "updated_at"]) id = Column(String(36), primary_key=True, default=lambda: uuidutils.generate_uuid()) name = Column(String(255), nullable=False) type_name = Column(String(255), nullable=False) version_prefix = Column(BigInteger().with_variant(Integer, "sqlite"), nullable=False) version_suffix = Column(String(255)) version_meta = Column(String(255)) version = composite(semver_db.DBVersion, version_prefix, version_suffix, version_meta, comparator_factory=semver_db.VersionComparator) description = Column(Text()) visibility = Column(String(32), nullable=False) status = Column(String(32), nullable=False) owner = Column(String(255)) created_at = Column(DateTime, default=lambda: timeutils.utcnow(), nullable=False) updated_at = Column(DateTime, default=lambda: timeutils.utcnow(), nullable=False, onupdate=lambda: timeutils.utcnow()) activated_at = Column(DateTime) def to_dict(self): d = super(Artifact, self).to_dict() d.pop('version_prefix') d.pop('version_suffix') d.pop('version_meta') d['version'] = str(self.version) # parse tags tags = [] for tag in self.tags: tags.append(tag.value) d['tags'] = tags # parse properties for prop in self.properties: prop_value = _parse_property_value(prop) if prop.position is not None: if prop.name not in d: # create new list d[prop.name] = [] # insert value in position d[prop.name].insert(prop.position, prop_value) elif prop.key_name is not None: if prop.name not in d: # create new dict d[prop.name] = {} # insert value in the dict d[prop.name][prop.key_name] = prop_value else: # make scalar d[prop.name] = prop_value # parse blobs for blob in self.blobs: blob_value = _parse_blob_value(blob) if blob.key_name is not None: if blob.name not in d: # create new dict d[blob.name] = {} # insert value in the dict d[blob.name][blob.key_name] = blob_value else: # make scalar d[blob.name] = blob_value return d class ArtifactTag(BASE, ArtifactBase): __tablename__ = 'glare_artifact_tags' __table_args__ = (Index('ix_glare_artifact_tags_artifact_id_tag_value', 'artifact_id', 'value'), Index('ix_glare_artifact_tags_artifact_id', 'artifact_id'), {'mysql_engine': 'InnoDB', 'mysql_charset': 'utf8'},) id = Column(String(36), primary_key=True, nullable=False, default=lambda: uuidutils.generate_uuid()) artifact_id = Column(String(36), ForeignKey('glare_artifacts.id'), nullable=False) artifact = relationship(Artifact, backref=backref('tags', cascade="all, delete-orphan")) value = Column(String(255), nullable=False) class ArtifactProperty(BASE, ArtifactBase): __tablename__ = 'glare_artifact_properties' __table_args__ = ( Index('ix_glare_artifact_properties_artifact_id', 'artifact_id'), Index('ix_glare_artifact_properties_name', 'name'), {'mysql_engine': 'InnoDB', 'mysql_charset': 'utf8'},) id = Column(String(36), primary_key=True, nullable=False, default=lambda: uuidutils.generate_uuid()) artifact_id = Column(String(36), ForeignKey('glare_artifacts.id'), nullable=False) artifact = relationship(Artifact, backref=backref('properties', cascade="all, delete-orphan")) name = Column(String(255), nullable=False) string_value = Column(String(20000)) int_value = Column(Integer) numeric_value = Column(Numeric) bool_value = Column(Boolean) position = Column(Integer) key_name = Column(String(255)) class ArtifactBlob(BASE, ArtifactBase): __tablename__ = 'glare_artifact_blobs' __table_args__ = ( Index('ix_glare_artifact_blobs_artifact_id', 'artifact_id'), Index('ix_glare_artifact_blobs_name', 'name'), {'mysql_engine': 'InnoDB', 'mysql_charset': 'utf8'},) id = Column(String(36), primary_key=True, nullable=False, default=lambda: uuidutils.generate_uuid()) artifact_id = Column(String(36), ForeignKey('glare_artifacts.id'), nullable=False) name = Column(String(255), nullable=False) size = Column(BigInteger().with_variant(Integer, "sqlite")) md5 = Column(String(32)) sha1 = Column(String(40)) sha256 = Column(String(64)) external = Column(Boolean) url = Column(Text) status = Column(String(32), nullable=False) key_name = Column(String(2048)) content_type = Column(String(255)) artifact = relationship(Artifact, backref=backref('blobs', cascade="all, delete-orphan")) class ArtifactLock(BASE, ArtifactBase): __tablename__ = 'glare_artifact_locks' __table_args__ = ( {'mysql_engine': 'InnoDB', 'mysql_charset': 'utf8'},) id = Column(String(255), primary_key=True, nullable=False) acquired_at = Column( DateTime, nullable=False, default=lambda: timeutils.utcnow()) class ArtifactBlobData(BASE, ArtifactBase): __tablename__ = 'glare_blob_data' __table_args__ = ( {'mysql_engine': 'InnoDB', 'mysql_charset': 'utf8'},) id = Column(String(255), primary_key=True, nullable=False) data = Column(LargeBinary(length=(2 ** 32) - 1), nullable=False) class ArtifactQuota(BASE, ArtifactBase): __tablename__ = 'glare_quotas' __table_args__ = ( {'mysql_engine': 'InnoDB', 'mysql_charset': 'utf8'},) project_id = Column(String(255), primary_key=True) quota_name = Column(String(32), primary_key=True) quota_value = Column(BigInteger().with_variant(Integer, "sqlite"), nullable=False) def register_models(engine): """Create database tables for all models with the given engine.""" models = (Artifact, ArtifactTag, ArtifactProperty, ArtifactBlob, ArtifactLock, ArtifactQuota) for model in models: model.metadata.create_all(engine) def unregister_models(engine): """Drop database tables for all models with the given engine.""" models = (ArtifactQuota, ArtifactLock, ArtifactBlob, ArtifactProperty, ArtifactTag, Artifact) for model in models: model.metadata.drop_all(engine) glare-0.5.0/glare/engine.py000066400000000000000000000774511317401036700155370ustar00rootroot00000000000000# Copyright 2016 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from copy import deepcopy from eventlet import tpool import jsonpatch from oslo_config import cfg from oslo_log import log as logging from oslo_utils import excutils from oslo_utils import timeutils from oslo_utils import uuidutils import six.moves.urllib.parse as urlparse from glare.common import exception from glare.common import policy from glare.common import store_api from glare.common import utils from glare.db import artifact_api from glare.i18n import _ from glare import locking from glare.notification import Notifier from glare.objects.meta import registry from glare import quota CONF = cfg.CONF LOG = logging.getLogger(__name__) class Engine(object): """Engine is responsible for executing different helper operations when processing incoming requests from Glare API. Engine receives incoming data and does the following: - check basic policy permissions; - requests artifact definition from artifact type registry; - check access permission(ro, rw); - lock artifact for update if needed; - pass data to base artifact type to execute all business logic operations with database; - check quotas during upload; - call operations pre- and post- hooks; - notify other users about finished operation. Engine should not include any business logic and validation related to artifacts types. Engine should not know any internal details of artifact type, because this part of the work is done by Base artifact type. """ def __init__(self): # register all artifact types registry.ArtifactRegistry.register_all_artifacts() # generate all schemas and quotas self.schemas = {} self.config_quotas = { 'max_artifact_number': CONF.max_artifact_number, 'max_uploaded_data': CONF.max_uploaded_data } for name, type_list in registry.ArtifactRegistry.obj_classes().items(): type_name = type_list[0].get_type_name() self.schemas[type_name] = registry.ArtifactRegistry.\ get_artifact_type(type_name).gen_schemas() type_conf_section = getattr(CONF, 'artifact_type:' + type_name) if type_conf_section.max_artifact_number != -1: self.config_quotas['max_artifact_number:' + type_name] = \ type_conf_section.max_artifact_number if type_conf_section.max_uploaded_data != -1: self.config_quotas['max_uploaded_data:' + type_name] = \ type_conf_section.max_uploaded_data lock_engine = locking.LockEngine(artifact_api.ArtifactLockApi()) def _create_scoped_lock(self, context, type_name, name, version, owner, visibility='private'): """Create scoped lock for artifact.""" # validate that artifact doesn't exist for the scope filters = [('name', 'eq:' + name), ('version', 'eq:' + version)] if visibility == 'public': filters.extend([('visibility', 'public')]) elif visibility == 'private': filters.extend([('owner', 'eq:' + owner), ('visibility', 'private')]) scope_id = "%s:%s:%s" % (type_name, name, version) if visibility != 'public': scope_id += ':%s' % owner lock = self.lock_engine.acquire(context, scope_id) try: if len(self.list(context, type_name, filters)) > 0: msg = _("Artifact with this name and version is already " "exists for this scope.") raise exception.Conflict(msg) except Exception: with excutils.save_and_reraise_exception(logger=LOG): self.lock_engine.release(lock) return lock @staticmethod def _show_artifact(ctx, type_name, artifact_id, read_only=False): """Return artifact requested by user. Check access permissions and policies. :param ctx: user context :param type_name: artifact type name :param artifact_id: id of the artifact to be updated :param read_only: flag, if set to True only read access is checked, if False then engine checks if artifact can be modified by the user """ artifact_type = registry.ArtifactRegistry.get_artifact_type(type_name) # only artifact is available for class users af = artifact_type.show(ctx, artifact_id) if not read_only: if not ctx.is_admin and ctx.tenant != af.owner or ctx.read_only: raise exception.Forbidden() LOG.debug("Artifact %s acquired for read-write access", artifact_id) else: LOG.debug("Artifact %s acquired for read-only access", artifact_id) return af def show_type_schemas(self, context, type_name=None): policy.authorize("artifact:type_list", {}, context) if type_name is None: return self.schemas if type_name not in self.schemas: msg = _("Artifact type %s does not exist") % type_name raise exception.NotFound(message=msg) return self.schemas[type_name] def _apply_patch(self, context, af, patch): # This function is a collection of hacks and workarounds to make # json patch apply changes to artifact object. action_names = ['update'] af_dict = af.to_dict() policy.authorize('artifact:update', af_dict, context) af.pre_update_hook(context, af) try: for operation in patch._ops: # apply the change to make sure that it's correct af_dict = operation.apply(af_dict) # format of location is "/key/value" or just "/key" # first case symbolizes that we have dict or list insertion, # second, that we work with a field itself. items = operation.location.split('/', 2) field_name = items[1] if af.is_blob(field_name) or af.is_blob_dict(field_name): msg = _("Cannot add blob with this request. " "Use special Blob API for that.") raise exception.BadRequest(msg) if len(items) == 2 and operation.operation['op'] == 'remove': msg = _("Cannot remove field '%s' from " "artifact.") % field_name raise exception.BadRequest(msg) # work with hooks and define action names if field_name == 'visibility': utils.validate_visibility_transition( af, from_visibility=af.visibility, to_visibility=af_dict['visibility'] ) if af_dict['visibility'] == 'public': policy.authorize( 'artifact:publish', af_dict, context) af.pre_publish_hook(context, af) action_names.append('publish') elif field_name == 'status': utils.validate_status_transition( af, from_status=af.status, to_status=af_dict['status']) if af_dict['status'] == 'deactivated': policy.authorize( 'artifact:deactivate', af_dict, context) af.pre_deactivate_hook(context, af) action_names.append('deactivate') elif af_dict['status'] == 'active': if af.status == 'deactivated': policy.authorize( 'artifact:reactivate', af_dict, context) af.pre_reactivate_hook(context, af) action_names.append('reactivate') else: policy.authorize( 'artifact:activate', af_dict, context) af.pre_activate_hook(context, af) action_names.append('activate') else: utils.validate_change_allowed(af, field_name) old_val = getattr(af, field_name) setattr(af, field_name, af_dict[field_name]) new_val = getattr(af, field_name) if new_val == old_val: # No need to save value to db if it's not changed af.obj_reset_changes([field_name]) except (jsonpatch.JsonPatchException, jsonpatch.JsonPointerException, TypeError) as e: raise exception.BadRequest(message=str(e)) return action_names def create(self, context, type_name, values): """Create artifact record in Glare. :param context: user context :param type_name: artifact type name :param values: dict with artifact fields :return: dict representation of created artifact """ action_name = "artifact:create" policy.authorize(action_name, values, context) artifact_type = registry.ArtifactRegistry.get_artifact_type(type_name) version = values.get('version', artifact_type.DEFAULT_ARTIFACT_VERSION) init_values = { 'id': uuidutils.generate_uuid(), 'name': values.pop('name'), 'version': version, 'owner': context.tenant, 'created_at': timeutils.utcnow(), 'updated_at': timeutils.utcnow() } af = artifact_type.init_artifact(context, init_values) # acquire scoped lock and execute artifact create with self._create_scoped_lock(context, type_name, af.name, af.version, context.tenant): quota.verify_artifact_count(context, type_name) for field_name, value in values.items(): if af.is_blob(field_name) or af.is_blob_dict(field_name): msg = _("Cannot add blob with this request. " "Use special Blob API for that.") raise exception.BadRequest(msg) utils.validate_change_allowed(af, field_name) setattr(af, field_name, value) artifact_type.pre_create_hook(context, af) af = af.create(context) artifact_type.post_create_hook(context, af) # notify about new artifact Notifier.notify(context, action_name, af) # return artifact to the user return af.to_dict() def save(self, context, type_name, artifact_id, patch): """Update artifact with json patch. Apply patch to artifact and validate artifact before updating it in database. If there is request for visibility or status change then call specific method for that. :param context: user context :param type_name: name of artifact type :param artifact_id: id of the artifact to be updated :param patch: json patch object :return: dict representation of updated artifact """ lock_key = "%s:%s" % (type_name, artifact_id) with self.lock_engine.acquire(context, lock_key): af = self._show_artifact(context, type_name, artifact_id) af.obj_reset_changes() action_names = self._apply_patch(context, af, patch) updates = af.obj_changes_to_primitive() LOG.debug("Update diff successfully calculated for artifact " "%(af)s %(diff)s", {'af': artifact_id, 'diff': updates}) if not updates: return af.to_dict() if any(i in updates for i in ('name', 'version', 'visibility')): # to change an artifact scope it's required to set a lock first with self._create_scoped_lock( context, type_name, updates.get('name', af.name), updates.get('version', af.version), af.owner, updates.get('visibility', af.visibility)): af = af.save(context) else: af = af.save(context) # call post hooks for all operations when data is written in db and # send broadcast notifications for action_name in action_names: getattr(af, 'post_%s_hook' % action_name)(context, af) Notifier.notify(context, 'artifact:' + action_name, af) return af.to_dict() def show(self, context, type_name, artifact_id): """Show detailed artifact info. :param context: user context :param type_name: Artifact type name :param artifact_id: id of artifact to show :return: definition of requested artifact """ policy.authorize("artifact:get", {}, context) af = self._show_artifact(context, type_name, artifact_id, read_only=True) return af.to_dict() @staticmethod def list(context, type_name, filters, marker=None, limit=None, sort=None, latest=False): """Return list of artifacts requested by user. :param context: user context :param type_name: Artifact type name :param filters: filters that need to be applied to artifact :param marker: the artifact that considered as begin of the list so all artifacts before marker (including marker itself) will not be added to artifact list :param limit: maximum number of items in list :param sort: sorting options :param latest: flag that indicates, that only artifacts with highest versions should be returned in output :return: list of artifact definitions """ policy.authorize("artifact:list", {}, context) artifact_type = registry.ArtifactRegistry.get_artifact_type(type_name) # return list to the user af_list = [af.to_dict() for af in artifact_type.list(context, filters, marker, limit, sort, latest)] return af_list @staticmethod def _delete_blobs(context, af, blobs): for name, blob in blobs.items(): if af.is_blob(name): if not blob['external']: try: store_api.delete_blob(blob['url'], context=context) except exception.NotFound: # data has already been removed pass af.db_api.update_blob(context, af.id, {name: None}) elif af.is_blob_dict(name): upd_blob = deepcopy(blob) for key, val in blob.items(): if not val['external']: try: store_api.delete_blob(val['url'], context=context) except exception.NotFound: pass del upd_blob[key] af.db_api.update_blob(context, af.id, {name: upd_blob}) def delete(self, context, type_name, artifact_id): """Delete artifact from Glare. :param context: User context :param type_name: Artifact type name :param artifact_id: id of artifact to delete """ af = self._show_artifact(context, type_name, artifact_id) action_name = 'artifact:delete' policy.authorize(action_name, af.to_dict(), context) af.pre_delete_hook(context, af) blobs = af.delete(context, af) delayed_delete = getattr( CONF, 'artifact_type:' + type_name).delayed_delete # use global parameter if delayed delete isn't set per artifact type if delayed_delete is None: delayed_delete = CONF.delayed_delete if not delayed_delete: if blobs: # delete blobs one by one self._delete_blobs(context, af, blobs) LOG.info("Blobs successfully deleted for artifact %s", af.id) # delete artifact itself af.db_api.delete(context, af.id) af.post_delete_hook(context, af) Notifier.notify(context, action_name, af) @staticmethod def _get_blob_info(af, field_name, blob_key=None): """Return requested blob info.""" if blob_key: if not af.is_blob_dict(field_name): msg = _("%s is not a blob dict") % field_name raise exception.BadRequest(msg) return getattr(af, field_name).get(blob_key) else: if not af.is_blob(field_name): msg = _("%s is not a blob") % field_name raise exception.BadRequest(msg) return getattr(af, field_name, None) @staticmethod def _save_blob_info(context, af, field_name, blob_key, value): """Save blob instance in database.""" if blob_key is not None: # Insert blob value in the folder folder = getattr(af, field_name) if value is not None: folder[blob_key] = value else: del folder[blob_key] value = folder return af.update_blob(context, af.id, field_name, value) @staticmethod def _generate_blob_name(field_name, blob_key=None): return "%s[%s]" % (field_name, blob_key) if blob_key else field_name def add_blob_location(self, context, type_name, artifact_id, field_name, location, blob_meta, blob_key=None): """Add external/internal location to blob. :param context: user context :param type_name: name of artifact type :param artifact_id: id of the artifact to be updated :param field_name: name of blob or blob dict field :param location: blob url :param blob_meta: dictionary containing blob metadata like md5 checksum :param blob_key: if field_name is blob dict it specifies key in this dict :return: dict representation of updated artifact """ blob_name = self._generate_blob_name(field_name, blob_key) location_type = blob_meta.pop('location_type', 'external') if location_type == 'external': action_name = 'artifact:set_location' elif location_type == 'internal': scheme = urlparse.urlparse(location).scheme if scheme in store_api.RESTRICTED_URI_SCHEMES: msg = _("Forbidden to set internal locations with " "scheme '%s'") % scheme raise exception.Forbidden(msg) if scheme not in store_api.get_known_schemes(): msg = _("Unknown scheme '%s'") % scheme raise exception.BadRequest(msg) action_name = 'artifact:set_internal_location' else: msg = _("Invalid location type: %s") % location_type raise exception.BadRequest(msg) blob = {'url': location, 'size': None, 'md5': blob_meta.get("md5"), 'sha1': blob_meta.get("sha1"), 'id': uuidutils.generate_uuid(), 'sha256': blob_meta.get("sha256"), 'status': 'active', 'external': location_type == 'external', 'content_type': None} lock_key = "%s:%s" % (type_name, artifact_id) with self.lock_engine.acquire(context, lock_key): af = self._show_artifact(context, type_name, artifact_id) policy.authorize(action_name, af.to_dict(), context) if self._get_blob_info(af, field_name, blob_key): msg = _("Blob %(blob)s already exists for artifact " "%(af)s") % {'blob': field_name, 'af': af.id} raise exception.Conflict(message=msg) utils.validate_change_allowed(af, field_name) af.pre_add_location_hook( context, af, field_name, location, blob_key) af = self._save_blob_info(context, af, field_name, blob_key, blob) LOG.info("External location %(location)s has been created " "successfully for artifact %(artifact)s blob %(blob)s", {'location': location, 'artifact': af.id, 'blob': blob_name}) af.post_add_location_hook(context, af, field_name, blob_key) Notifier.notify(context, action_name, af) return af.to_dict() def _calculate_allowed_space(self, context, af, field_name, content_length=None, blob_key=None): """Calculate the maximum amount of data user can upload to the blob.""" # As a default we take the maximum blob size blob_name = self._generate_blob_name(field_name, blob_key) max_blob_size = af.get_max_blob_size(field_name) if blob_key is not None: # For folders we also compare it with the maximum folder size blobs_dict = getattr(af, field_name) overall_folder_size = sum( blob["size"] for blob in blobs_dict.values() if blob["size"] is not None) available_folder_space = af.get_max_folder_size( field_name) - overall_folder_size # always non-negative max_blob_size = min(max_blob_size, available_folder_space) # check quotas quota_size = quota.verify_uploaded_data_amount( context, af.get_type_name(), content_length) if content_length is None: # if no content_length was provided we have to allocate # all allowed space for the blob. It's minimum of max blob size # and available quota limit. -1 means that user don't have upload # limits. size = max_blob_size if quota_size == -1 else min( max_blob_size, quota_size) else: if content_length > max_blob_size: msg = _("Can't upload %(content_length)d bytes of data to " "blob %(blob_name)s. Its max allowed size is " "%(max_blob_size)d") % { 'content_length': content_length, 'blob_name': blob_name, 'max_blob_size': max_blob_size} raise exception.RequestEntityTooLarge(msg) size = content_length return size def upload_blob(self, context, type_name, artifact_id, field_name, fd, content_type, content_length=None, blob_key=None): """Upload Artifact blob. :param context: user context :param type_name: name of artifact type :param artifact_id: id of the artifact to be updated :param field_name: name of blob or blob dict field :param fd: file descriptor that Glare uses to upload the file :param content_type: data content-type :param content_length: amount of data user wants to upload :param blob_key: if field_name is blob dict it specifies key in this dictionary :return: dict representation of updated artifact """ blob_name = self._generate_blob_name(field_name, blob_key) blob_id = uuidutils.generate_uuid() blob_info = {'url': None, 'size': None, 'md5': None, 'sha1': None, 'sha256': None, 'id': blob_id, 'status': 'saving', 'external': False, 'content_type': content_type} # Step 1. Initialize blob lock_key = "%s:%s" % (type_name, artifact_id) with self.lock_engine.acquire(context, lock_key): af = self._show_artifact(context, type_name, artifact_id) action_name = "artifact:upload" policy.authorize(action_name, af.to_dict(), context) # create an an empty blob instance in db with 'saving' status if self._get_blob_info(af, field_name, blob_key): msg = _("Blob %(blob)s already exists for artifact " "%(af)s") % {'blob': field_name, 'af': af.id} raise exception.Conflict(message=msg) utils.validate_change_allowed(af, field_name) blob_info['size'] = self._calculate_allowed_space( context, af, field_name, content_length, blob_key) af = self._save_blob_info( context, af, field_name, blob_key, blob_info) LOG.debug("Parameters validation for artifact %(artifact)s blob " "upload passed for blob %(blob_name)s. " "Start blob uploading to backend.", {'artifact': af.id, 'blob_name': blob_name}) # Step 2. Call pre_upload_hook and upload data to the store try: try: # call upload hook first if hasattr(af, 'validate_upload'): LOG.warning("Method 'validate_upload' was deprecated. " "Please use 'pre_upload_hook' instead.") fd, path = tpool.execute( af.validate_upload, context, af, field_name, fd) else: fd = tpool.execute(af.pre_upload_hook, context, af, field_name, blob_key, fd) except exception.GlareException: raise except Exception as e: raise exception.BadRequest(message=str(e)) default_store = getattr( CONF, 'artifact_type:' + type_name).default_store # use global parameter if default store isn't set per artifact type if default_store is None: default_store = CONF.glance_store.default_store location_uri, size, checksums = store_api.save_blob_to_store( blob_id, fd, context, blob_info['size'], store_type=default_store) blob_info.update({'url': location_uri, 'status': 'active', 'size': size}) blob_info.update(checksums) except Exception: # if upload failed remove blob from db and storage with excutils.save_and_reraise_exception(logger=LOG): self._save_blob_info( context, af, field_name, blob_key, None) LOG.info("Successfully finished blob uploading for artifact " "%(artifact)s blob field %(blob)s.", {'artifact': af.id, 'blob': blob_name}) # Step 3. Change blob status to 'active' with self.lock_engine.acquire(context, lock_key): af = af.show(context, artifact_id) af = self._save_blob_info( context, af, field_name, blob_key, blob_info) af.post_upload_hook(context, af, field_name, blob_key) Notifier.notify(context, action_name, af) return af.to_dict() def download_blob(self, context, type_name, artifact_id, field_name, blob_key=None): """Download binary data from Glare Artifact. :param context: user context :param type_name: name of artifact type :param artifact_id: id of the artifact to be updated :param field_name: name of blob or blob dict field :param blob_key: if field_name is blob dict it specifies key in this dict :return: file iterator for requested file """ af = self._show_artifact(context, type_name, artifact_id, read_only=True) policy.authorize("artifact:download", af.to_dict(), context) blob_name = self._generate_blob_name(field_name, blob_key) if af.status == 'deleted': msg = _("Cannot download data when artifact is deleted") raise exception.Forbidden(message=msg) blob = self._get_blob_info(af, field_name, blob_key) if blob is None: msg = _("No data found for blob %s") % blob_name raise exception.NotFound(message=msg) if blob['status'] != 'active': msg = _("%s is not ready for download") % blob_name raise exception.Conflict(message=msg) af.pre_download_hook(context, af, field_name, blob_key) meta = {'md5': blob.get('md5'), 'sha1': blob.get('sha1'), 'sha256': blob.get('sha256'), 'external': blob.get('external')} if blob['external']: data = {'url': blob['url']} else: data = store_api.load_from_store(uri=blob['url'], context=context) meta['size'] = blob.get('size') meta['content_type'] = blob.get('content_type') try: # call download hook in the end data = af.post_download_hook( context, af, field_name, blob_key, data) except exception.GlareException: raise except Exception as e: raise exception.BadRequest(message=str(e)) return data, meta def delete_external_blob(self, context, type_name, artifact_id, field_name, blob_key=None): """Delete artifact blob with external location. :param context: user context :param type_name: name of artifact type :param artifact_id: id of artifact with the blob to delete :param field_name: name of blob or blob dict field :param blob_key: if field_name is blob dict it specifies key in this dictionary """ af = self._show_artifact(context, type_name, artifact_id) action_name = 'artifact:delete_blob' policy.authorize(action_name, af.to_dict(), context) blob_name = self._generate_blob_name(field_name, blob_key) blob = self._get_blob_info(af, field_name, blob_key) if blob is None: msg = _("Blob %s wasn't found for artifact") % blob_name raise exception.NotFound(message=msg) if not blob['external']: msg = _("Blob %s is not external") % blob_name raise exception.Forbidden(message=msg) af = self._save_blob_info(context, af, field_name, blob_key, None) Notifier.notify(context, action_name, af) return af.to_dict() @staticmethod def set_quotas(context, values): """Set quota records in Glare. :param context: user request context :param values: dict with quota values to set """ action_name = "artifact:set_quotas" policy.authorize(action_name, {}, context) qs = quota.set_quotas(values) Notifier.notify(context, action_name, qs) def list_all_quotas(self, context): """Get detailed info about all available quotas. :param context: user request context :return: dict with definitions of redefined quotas for all projects and global defaults """ action_name = "artifact:list_all_quotas" policy.authorize(action_name, {}, context) return { 'quotas': quota.list_quotas(), 'global_quotas': self.config_quotas } def list_project_quotas(self, context, project_id=None): """Get detailed info about project quotas. :param context: user request context :param project_id: id of the project for which to show quotas :return: definition of requested quotas for the project """ project_id = project_id or context.tenant action_name = "artifact:list_project_quotas" policy.authorize(action_name, {'project_id': project_id}, context) qs = self.config_quotas.copy() qs.update(quota.list_quotas(project_id)[project_id]) return {project_id: qs} glare-0.5.0/glare/hacking/000077500000000000000000000000001317401036700153065ustar00rootroot00000000000000glare-0.5.0/glare/hacking/__init__.py000066400000000000000000000000001317401036700174050ustar00rootroot00000000000000glare-0.5.0/glare/hacking/checks.py000066400000000000000000000121111317401036700171140ustar00rootroot00000000000000# Copyright (c) 2014 OpenStack Foundation. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import re """ Guidelines for writing new hacking checks - Use only for Glare-specific tests. OpenStack general tests should be submitted to the common 'hacking' module. - Pick numbers in the range G3xx. Find the current test with the highest allocated number and then pick the next value. If nova has an N3xx code for that test, use the same number. - Keep the test method code in the source file ordered based on the G3xx value. - List the new rule in the top level HACKING.rst file - Add test cases for each new rule to glare/tests/test_hacking.py """ asse_trueinst_re = re.compile( r"(.)*assertTrue\(isinstance\((\w|\.|\'|\"|\[|\])+, " "(\w|\.|\'|\"|\[|\])+\)\)") asse_equal_type_re = re.compile( r"(.)*assertEqual\(type\((\w|\.|\'|\"|\[|\])+\), " "(\w|\.|\'|\"|\[|\])+\)") asse_equal_end_with_none_re = re.compile( r"(.)*assertEqual\((\w|\.|\'|\"|\[|\])+, None\)") asse_equal_start_with_none_re = re.compile( r"(.)*assertEqual\(None, (\w|\.|\'|\"|\[|\])+\)") unicode_func_re = re.compile(r"(\s|\W|^)unicode\(") _all_log_levels = {'debug', 'error', 'info', 'warning', 'critical', 'exception'} # Since _Lx have been removed, we just need to check _() translated_logs = re.compile( r"(.)*LOG\.(%(level)s)\(\s*_\(" % {'level': '|'.join(_all_log_levels)}) dict_constructor_with_list_copy_re = re.compile(r".*\bdict\((\[)?(\(|\[)") def assert_true_instance(logical_line): """Check for assertTrue(isinstance(a, b)) sentences G316 """ if asse_trueinst_re.match(logical_line): yield (0, "G316: assertTrue(isinstance(a, b)) sentences not allowed") def assert_equal_type(logical_line): """Check for assertEqual(type(A), B) sentences G317 """ if asse_equal_type_re.match(logical_line): yield (0, "G317: assertEqual(type(A), B) sentences not allowed") def assert_equal_none(logical_line): """Check for assertEqual(A, None) or assertEqual(None, A) sentences G318 """ res = (asse_equal_start_with_none_re.match(logical_line) or asse_equal_end_with_none_re.match(logical_line)) if res: yield (0, "G318: assertEqual(A, None) or assertEqual(None, A) " "sentences not allowed") def no_translate_logs(logical_line): """Check for use of LOG.*(_( G319 """ if translated_logs.match(logical_line): yield (0, "G319: Don't translate logs") def no_direct_use_of_unicode_function(logical_line): """Check for use of unicode() builtin G320 """ if unicode_func_re.match(logical_line): yield(0, "G320: Use six.text_type() instead of unicode()") def check_no_contextlib_nested(logical_line): msg = ("G327: contextlib.nested is deprecated since Python 2.7. See " "https://docs.python.org/2/library/contextlib.html#contextlib." "nested for more information.") if ("with contextlib.nested(" in logical_line or "with nested(" in logical_line): yield(0, msg) def dict_constructor_with_list_copy(logical_line): msg = ("G328: Must use a dict comprehension instead of a dict constructor " "with a sequence of key-value pairs.") if dict_constructor_with_list_copy_re.match(logical_line): yield (0, msg) def check_python3_xrange(logical_line): if re.search(r"\bxrange\s*\(", logical_line): yield(0, "G329: Do not use xrange. Use range, or six.moves.range for " "large loops.") def check_python3_no_iteritems(logical_line): msg = ("G330: Use six.iteritems() or dict.items() instead of " "dict.iteritems().") if re.search(r".*\.iteritems\(\)", logical_line): yield(0, msg) def check_python3_no_iterkeys(logical_line): msg = ("G331: Use six.iterkeys() or dict.keys() instead of " "dict.iterkeys().") if re.search(r".*\.iterkeys\(\)", logical_line): yield(0, msg) def check_python3_no_itervalues(logical_line): msg = ("G332: Use six.itervalues() or dict.values instead of " "dict.itervalues().") if re.search(r".*\.itervalues\(\)", logical_line): yield(0, msg) def factory(register): register(assert_true_instance) register(assert_equal_type) register(assert_equal_none) register(no_translate_logs) register(no_direct_use_of_unicode_function) register(check_no_contextlib_nested) register(dict_constructor_with_list_copy) register(check_python3_xrange) register(check_python3_no_iteritems) register(check_python3_no_iterkeys) register(check_python3_no_itervalues) glare-0.5.0/glare/i18n.py000066400000000000000000000014421317401036700150340ustar00rootroot00000000000000# Copyright 2014 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_i18n import * # noqa _translators = TranslatorFactory(domain='glare') # The primary translation function using the well-known name "_" _ = _translators.primary glare-0.5.0/glare/locking.py000066400000000000000000000076111317401036700157070ustar00rootroot00000000000000# Copyright 2016 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_log import log as logging LOG = logging.getLogger(__name__) class LockApiBase(object): """Lock Api Base class that responsible for acquiring/releasing locks.""" def create_lock(self, context, lock_key): """Acquire lock for current user. :param context: user context :param lock_key: unique lock identifier that defines lock scope :return: lock internal identifier """ raise NotImplementedError() def delete_lock(self, context, lock_id): """Delete acquired user lock. :param context: user context :param lock_id: lock internal identifier """ raise NotImplementedError() class Lock(object): """Object that stores lock context for users. This class is internal and used only in lock engine, so users shouldn't use this class directly. """ def __init__(self, context, lock_id, lock_key, release_method): """Initialize lock context.""" self.context = context self.lock_id = lock_id self.lock_key = lock_key self.release = release_method def __enter__(self): return self def __exit__(self, exc_type, exc_val, exc_tb): # TODO(kairat) catch all exceptions here self.release(self) class LockEngine(object): """Glare lock engine. Defines how artifact updates must be synchronized with each other. When some user obtains a lock for the same artifact then other user cannot request that lock and gets a Conflict error. """ # NOTE(kairat): Lock Engine also allows to encapsulate lock logic in one # place so we can potentially add tooz functionality in future to Glare. # Right now there are troubles with locks in Galera (especially in mysql) # and zookeeper requires additional work from IT engineers. So we need # support production ready DB locks in our implementation. MAX_LOCK_LENGTH = 255 def __init__(self, lock_api): """Initialize lock engine with some lock api. :param lock_api: api that allows to create/delete locks """ # NOTE(kairat): lock_api is db_api now but it might be # replaced with DLM in near future. self.lock_api = lock_api def acquire(self, context, lock_key): """Acquire lock for artifact. If there is some other lock with the same key then raise Conflict Error. :param context: user context :param lock_key: lock key :return: lock definition """ if lock_key is not None and len(lock_key) < self.MAX_LOCK_LENGTH: lock_id = self.lock_api.create_lock(context, lock_key) LOG.debug("Lock %(lock_id)s acquired for lock_key %(lock_key)s", {'lock_id': lock_id, 'lock_key': lock_key}) else: lock_id = None LOG.debug("No lock for lock_key %s", lock_key) return Lock(context, lock_id, lock_key, self.release) def release(self, lock): """Release lock for artifact. :param lock: Lock object """ if lock.lock_id is not None: self.lock_api.delete_lock(lock.context, lock.lock_id) LOG.debug("Lock %(lock_id)s released for lock_key %(key)s", {'lock_id': lock.lock_id, 'key': lock.lock_key}) glare-0.5.0/glare/notification.py000066400000000000000000000044671317401036700167550ustar00rootroot00000000000000# Copyright (c) 2016 Mirantis, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg from oslo_log import log as logging import oslo_messaging CONF = cfg.CONF LOG = logging.getLogger(__name__) notifier_opts = [ cfg.HostAddressOpt('glare_publisher_id', default="artifact.localhost", help='Default publisher_id for outgoing ' 'Glare notifications.')] CONF.register_opts(notifier_opts) def get_transport(): return oslo_messaging.get_notification_transport(CONF) def set_defaults(control_exchange='glare'): oslo_messaging.set_transport_defaults(control_exchange) class Notifier(object): """Simple interface to receive Glare notifier.""" SERVICE_NAME = 'artifact' GLARE_NOTIFIER = None @classmethod def _get_notifier(cls): if cls.GLARE_NOTIFIER is None: cls.GLARE_NOTIFIER = oslo_messaging.Notifier( get_transport(), publisher_id=CONF.glare_publisher_id) return cls.GLARE_NOTIFIER @classmethod def notify(cls, context, event_type, body, level='INFO'): """Notify Glare listeners with some useful info. :param context: User request context :param event_type: type of event :param body: notification payload :param level: notification level ("INFO", "WARN", "ERROR", etc) """ af_notifier = cls._get_notifier() method = getattr(af_notifier, level.lower()) if hasattr(body, 'to_notification'): body = body.to_notification() method({}, "%s.%s" % (cls.SERVICE_NAME, event_type), body) LOG.debug('Notification event %(event)s send successfully for ' 'request %(request)s', {'event': event_type, 'request': context.request_id}) glare-0.5.0/glare/objects/000077500000000000000000000000001317401036700153335ustar00rootroot00000000000000glare-0.5.0/glare/objects/__init__.py000066400000000000000000000000001317401036700174320ustar00rootroot00000000000000glare-0.5.0/glare/objects/all.py000066400000000000000000000034451317401036700164630ustar00rootroot00000000000000# Copyright (c) 2016 Mirantis, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_versionedobjects import fields from glare.common import exception from glare.objects import base from glare.objects.meta import registry from glare.objects.meta import wrappers Field = wrappers.Field.init class All(base.BaseArtifact): """Artifact type that allows to get artifacts regardless of their type""" fields = { 'type_name': Field(fields.StringField, description="Name of artifact type."), } @classmethod def create(cls, context): raise exception.Forbidden("This type is read only.") def save(self, context): raise exception.Forbidden("This type is read only.") @classmethod def delete(cls, context, af): raise exception.Forbidden("This type is read only.") @classmethod def update_blob(cls, context, af_id, field_name, values): raise exception.Forbidden("This type is read only.") @classmethod def get_type_name(cls): return "all" def to_dict(self): # Use specific method of artifact type to convert it to dict values = self.obj_to_primitive()['versioned_object.data'] return registry.ArtifactRegistry.get_artifact_type( self.type_name).format_all(values) glare-0.5.0/glare/objects/base.py000066400000000000000000000661731317401036700166340ustar00rootroot00000000000000# Copyright 2016 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg from oslo_log import log as logging from oslo_versionedobjects import base from oslo_versionedobjects import fields from glare.common import exception from glare.common import utils from glare.db import artifact_api from glare.i18n import _ from glare.objects.meta import fields as glare_fields from glare.objects.meta import validators from glare.objects.meta import wrappers global_artifact_opts = [ cfg.IntOpt('max_uploaded_data', default=-1, # disabled min=-1, help=_("Defines how many bytes of data user can upload to " "storage. This parameter is global and doesn't take " "into account data of what type was uploaded. " "Value -1 means no limit.")), cfg.IntOpt('max_artifact_number', default=-1, # disabled min=-1, help=_("Defines how many artifacts user can have. This " "parameter is global and doesn't take " "into account artifacts of what type were created. " "Value -1 means no limit.")), cfg.BoolOpt('delayed_delete', default=False, help=_("If False defines that artifacts must be deleted " "immediately after the user call. Otherwise they just " "will be marked as deleted so they can be scrubbed " "by some other tool in the background.")), ] CONF = cfg.CONF CONF.register_opts(global_artifact_opts) LOG = logging.getLogger(__name__) class BaseArtifact(base.VersionedObject): """BaseArtifact is a central place in Glare. It execute Glare business logic operations and checks in like: 1) Check if artifact satisfies all requirements and can be activated 2) Check that artifact is not deactivated and download blobs ... BaseArtifact interacts with database and saves/request artifact info from specified database API. Base Artifact is an abstract class so all concrete classes must be inherited from that class. Concrete classes must define custom fields in addition to BaseArtifact fields and db_api that must be used for interaction with database. """ OBJ_PROJECT_NAMESPACE = 'glare' DEFAULT_ARTIFACT_VERSION = '0.0.0' STATUS = ('drafted', 'active', 'deactivated', 'deleted') Field = wrappers.Field.init DictField = wrappers.DictField.init ListField = wrappers.ListField.init Blob = wrappers.BlobField.init fields = { 'id': Field(fields.StringField, system=True, validators=[validators.UUID()], nullable=False, sortable=True, description="Artifact UUID."), 'name': Field(fields.StringField, required_on_activate=False, nullable=False, sortable=True, validators=[validators.MinStrLen(1)], description="Artifact Name."), 'owner': Field(fields.StringField, system=True, required_on_activate=False, nullable=False, sortable=True, description="ID of user/tenant who " "uploaded artifact."), 'status': Field(fields.StringField, default='drafted', nullable=False, sortable=True, mutable=True, validators=[validators.AllowedValues(STATUS)], description="Artifact status."), 'created_at': Field(fields.DateTimeField, system=True, nullable=False, sortable=True, description="Datetime when artifact has " "been created."), 'updated_at': Field(fields.DateTimeField, system=True, nullable=False, sortable=True, mutable=True, description="Datetime when artifact has " "been updated last time."), 'activated_at': Field(fields.DateTimeField, system=True, required_on_activate=False, sortable=True, description="Datetime when artifact has became " "active."), 'description': Field(fields.StringField, mutable=True, required_on_activate=False, default="", validators=[validators.MaxStrLen(4096)], filter_ops=[], description="Artifact description."), 'tags': ListField(fields.String, mutable=True, required_on_activate=False, # tags are filtered without any operators filter_ops=[], validators=[validators.Unique(convert_to_set=True)], element_validators=[ validators.ForbiddenChars([',', '/']), validators.MinStrLen(1) ], description="List of tags added to Artifact."), 'metadata': DictField(fields.String, required_on_activate=False, element_validators=[validators.MinStrLen(1)], description="Key-value dict with useful " "information about an artifact."), 'visibility': Field(fields.StringField, default='private', nullable=False, sortable=True, mutable=True, validators=[validators.AllowedValues( ['private', 'public'])], description="Artifact visibility that defines " "if artifact can be available to " "other users."), 'version': Field(glare_fields.VersionField, required_on_activate=False, default=DEFAULT_ARTIFACT_VERSION, nullable=False, sortable=True, validators=[validators.Version()], description="Artifact version(semver).") } common_artifact_type_opts = [ cfg.IntOpt('max_uploaded_data', min=-1, default=-1, help=_("Defines how many bytes of data of this type user " "can upload to storage. Value -1 means no limit.")), cfg.IntOpt('max_artifact_number', min=-1, default=-1, help=_("Defines how many artifacts of this type user can " "have. Value -1 means no limit.")), cfg.BoolOpt('delayed_delete', help=_( "If False defines that artifacts must be deleted " "immediately after the user call. Otherwise they just " "will be marked as deleted so they can be scrubbed " "by some other tool in the background. " "Redefines global parameter of the same name " "from [DEFAULT] section.")), cfg.StrOpt('default_store', choices=('file', 'filesystem', 'http', 'https', 'swift', 'swift+http', 'swift+https', 'swift+config', 'rbd', 'sheepdog', 'cinder', 'vsphere', 'database'), help=_(""" The default scheme to use for storing artifacts of this type. Provide a string value representing the default scheme to use for storing artifact data. If not set, Glare uses default_store parameter from [glance_store] section. NOTE: The value given for this configuration option must be a valid scheme for a store registered with the ``stores`` configuration option. Possible values: * file * filesystem * http * https * swift * swift+http * swift+https * swift+config * rbd * sheepdog * cinder * vsphere * database """)) ] artifact_type_opts = [] @classmethod def list_artifact_type_opts(cls): return cls.artifact_type_opts + cls.common_artifact_type_opts db_api = artifact_api.ArtifactAPI() @classmethod def is_blob(cls, field_name): """Helper to check that a field is a blob. :param field_name: name of the field :return: True if the field is a blob, False otherwise """ return isinstance(cls.fields.get(field_name), glare_fields.BlobField) @classmethod def is_blob_dict(cls, field_name): """Helper to check that field is a blob dict. :param field_name: name of the field :return: True if the field is a blob dict, False otherwise """ return (isinstance(cls.fields.get(field_name), glare_fields.Dict) and cls.fields[field_name].element_type == glare_fields.BlobFieldType) @classmethod def init_artifact(cls, context, values): """Initialize an empty versioned object with values. Initialize vo object with default values and values specified by user. Also reset all changes of initialized object so user can track own changes. :param context: user context :param values: values needs to be set :return: artifact with initialized values """ af = cls(context) # setup default values for all non specified fields default_fields = [] for field in af.fields: if field not in values: default_fields.append(field) if default_fields: af.obj_set_defaults(*default_fields) # apply values specified by user for name, value in values.items(): setattr(af, name, value) return af @classmethod def get_type_name(cls): """Return type name that allows to find artifact type in Glare Type name allows to find artifact type definition in Glare registry. :return: string that identifies current artifact type """ raise NotImplementedError() def create(self, context): """Create new artifact in Glare repo. :param context: user context :return: created artifact object """ values = self.obj_changes_to_primitive() values['type_name'] = self.get_type_name() LOG.debug("Sending request to create artifact of type '%(type_name)s'." " New values are %(values)s", {'type_name': self.get_type_name(), 'values': values}) af_vals = self.db_api.save(context, None, values) return self.init_artifact(context, af_vals) def save(self, context): """Save artifact in Glare repo. :param context: user context :return: updated artifact object """ values = self.obj_changes_to_primitive() LOG.debug("Sending request to update artifact '%(af_id)s'. " "New values are %(values)s", {'af_id': self.id, 'values': values}) updated_af = self.db_api.save(context, self.id, values) return self.init_artifact(context, updated_af) @classmethod def show(cls, context, artifact_id): """Return Artifact from Glare repo :param context: user context :param artifact_id: id of requested artifact :return: requested artifact object """ af = cls.db_api.get(context, artifact_id) return cls.init_artifact(context, af) @classmethod def _get_field_type(cls, obj): """Get string representation of field type for filters.""" if isinstance(obj, fields.IntegerField) or obj is fields.Integer: return 'int' elif isinstance(obj, fields.FloatField) or obj is fields.Float: return 'numeric' elif isinstance(obj, fields.FlexibleBooleanField) or \ obj is fields.FlexibleBoolean: return 'bool' return 'string' @classmethod def _parse_sort_values(cls, sort): """Prepare sorting parameters for database.""" new_sort = [] for key, direction in sort: if key not in cls.fields: msg = _("The field %s doesn't exist.") % key raise exception.BadRequest(msg) # check if field can be sorted if not cls.fields[key].sortable: msg = _("The field %s is not sortable.") % key raise exception.BadRequest(msg) new_sort.append((key, direction, cls._get_field_type( cls.fields.get(key)))) return new_sort @classmethod def _validate_filter_ops(cls, filter_name, op): field = cls.fields.get(filter_name) if op not in field.filter_ops: msg = (_("Unsupported filter type '%(key)s'." "The following filters are supported " "%(filters)s") % { 'key': op, 'filters': str(field.filter_ops)}) raise exception.BadRequest(message=msg) @classmethod def _parse_filter_values(cls, filters): # input format for filters is list of tuples: # (filter_name, filter_value) # output format for filters is list of tuples: # (field_name, key_name, op, field_type, value) new_filters = [] for filter_name, filter_value in filters: if filter_name in ('tags-any', 'tags'): if ':' in filter_value: msg = _("Tags are filtered without operator") raise exception.BadRequest(msg) new_filters.append( (filter_name, None, None, None, filter_value)) continue key_name = None if '.' in filter_name: filter_name, key_name = filter_name.rsplit('.', 1) if not isinstance(cls.fields.get(filter_name), glare_fields.Dict): msg = _("Field %s is not Dict") % filter_name raise exception.BadRequest(msg) if cls.fields.get(filter_name) is None: msg = _("Unable filter '%s'") % filter_name raise exception.BadRequest(msg) field_type = cls.fields.get(filter_name) if isinstance(field_type, glare_fields.List) or isinstance( field_type, glare_fields.Dict) and key_name is not None: field_type = field_type.element_type try: op, val = utils.split_filter_op(filter_value) if isinstance(field_type, glare_fields.Dict): if op not in ['eq', 'in']: msg = (_("Unsupported filter type '%s'. The following " "filters are supported: eq, in") % op) raise exception.BadRequest(message=msg) if op == 'in': new_filters.append(( filter_name, utils.split_filter_value_for_quotes( val), op, None, None)) else: new_filters.append(( filter_name, val, op, None, None)) else: cls._validate_filter_ops(filter_name, op) if op == 'in': value = [field_type.coerce(cls(), filter_name, value) for value in utils.split_filter_value_for_quotes(val)] else: value = field_type.coerce(cls(), filter_name, val) new_filters.append( (filter_name, key_name, op, cls._get_field_type(field_type), value)) except ValueError: msg = _("Invalid filter value: %s") % str(val) raise exception.BadRequest(msg) return new_filters @classmethod def list(cls, context, filters=None, marker=None, limit=None, sort=None, latest=False): """Return list of artifacts requested by user. :param context: user context :param filters: filters that need to be applied to artifact :param marker: the artifact that considered as begin of the list so all artifacts before marker (including marker itself) will not be added to artifact list :param limit: maximum number of items in the list :param sort: sorting options :param latest: flag that indicates, that only artifacts with highest versions should be returned in output :return: list of artifact objects """ default_sort_parameters = ( ('created_at', 'desc', None), ('id', 'asc', None)) # Parse sort parameters and update them with defaults sort = [] if sort is None else cls._parse_sort_values(sort) for default_sort in default_sort_parameters: for s in sort: # If the default sort parameter already in the list - skip it if s[0] == default_sort[0]: break else: sort.append(default_sort) default_filter_parameters = [ ('status', None, 'neq', None, 'deleted')] if cls.get_type_name() != 'all': default_filter_parameters.append( ('type_name', None, 'eq', None, cls.get_type_name())) # Parse filter parameters and update them with defaults filters = [] if filters is None else cls._parse_filter_values(filters) for default_filter in default_filter_parameters: if default_filter not in filters: filters.append(default_filter) return [cls.init_artifact(context, af) for af in cls.db_api.list( context, filters, marker, limit, sort, latest)] @classmethod def delete(cls, context, af): """Delete artifact and all its blobs from Glare. :param context: user context :param af: artifact object targeted for deletion """ # marking artifact as deleted cls.db_api.save(context, af.id, {'status': 'deleted'}) # collect all uploaded blobs blobs = {} for name in af.fields: if cls.is_blob(name) or cls.is_blob_dict(name): field = getattr(af, name) if field: blobs[name] = field LOG.debug("Marked artifact %(artifact)s as deleted.", {'artifact': af.id}) return blobs @classmethod def get_max_blob_size(cls, field_name): """Get the maximum allowed blob size in bytes. :param field_name: blob or blob dict field name :return: maximum blob size in bytes """ return getattr(cls.fields[field_name], 'max_blob_size') @classmethod def get_max_folder_size(cls, field_name): """Get the maximum allowed folder size in bytes. :param field_name: folder (blob dict) field name :return: maximum folder size in bytes """ return getattr(cls.fields[field_name], 'max_folder_size') @classmethod def update_blob(cls, context, af_id, field_name, values): """Update blob info in database. :param context: user context :param af_id: id of modified artifact :param field_name: blob or blob dict field name :param values: updated blob values :return: updated artifact definition in Glare """ af_upd = cls.db_api.update_blob(context, af_id, {field_name: values}) return cls.init_artifact(context, af_upd) # Next comes a collection of hooks for various operations @classmethod def pre_create_hook(cls, context, af): pass @classmethod def post_create_hook(cls, context, af): pass @classmethod def pre_update_hook(cls, context, af): pass @classmethod def post_update_hook(cls, context, af): pass @classmethod def pre_activate_hook(cls, context, af): pass @classmethod def post_activate_hook(cls, context, af): pass @classmethod def pre_publish_hook(cls, context, af): pass @classmethod def post_publish_hook(cls, context, af): pass @classmethod def pre_deactivate_hook(cls, context, af): pass @classmethod def post_deactivate_hook(cls, context, af): pass @classmethod def pre_reactivate_hook(cls, context, af): pass @classmethod def post_reactivate_hook(cls, context, af): pass @classmethod def pre_upload_hook(cls, context, af, field_name, blob_key, fd): return fd @classmethod def post_upload_hook(cls, context, af, field_name, blob_key): pass @classmethod def pre_add_location_hook( cls, context, af, field_name, blob_key, location): pass @classmethod def post_add_location_hook(cls, context, af, field_name, blob_key): pass @classmethod def pre_download_hook(cls, context, af, field_name, blob_key): pass @classmethod def post_download_hook(cls, context, af, field_name, blob_key, fd): return fd @classmethod def pre_delete_hook(cls, context, af): pass @classmethod def post_delete_hook(cls, context, af): pass @classmethod def format_all(cls, values): """Specify output format for 'all' artifact meta-type :param values: dict with values that need to be formatted """ return values def to_notification(self): """Return notification body that can be send to listeners. :return: dict with notification information """ return { 'type': self.get_type_name(), 'id': self.id, 'description': self.description, 'name': self.name, 'version': self.version, 'visibility': self.visibility, 'status': self.status, 'created_at': self.created_at, 'updated_at': self.updated_at, 'activated_at': self.activated_at, 'owner': self.owner } def to_dict(self): """Convert oslo versioned object to dictionary. :return: dict with field names and field values """ return self.obj_to_primitive()['versioned_object.data'] def obj_changes_to_primitive(self): changes = self.obj_get_changes() res = {} for key, val in changes.items(): if val is not None and hasattr(val, 'to_primitive'): res[key] = val.to_primitive() else: res[key] = val return res @classmethod def _schema_field(cls, field, field_name=''): field_type = utils.get_schema_type(field) schema = {} # generate schema for validators for val in getattr(field, 'validators', []): schema.update(val.to_jsonschema()) schema['type'] = (field_type if not field.nullable else [field_type, 'null']) schema['glareType'] = utils.get_glare_type(field) output_blob_schema = { 'type': ['object', 'null'], 'properties': { 'size': {'type': ['number', 'null']}, 'md5': {'type': ['string', 'null']}, 'sha1': {'type': ['string', 'null']}, 'sha256': {'type': ['string', 'null']}, 'external': {'type': 'boolean'}, 'status': {'type': 'string', 'enum': list( glare_fields.BlobFieldType.BLOB_STATUS)}, 'content_type': {'type': 'string'}, }, 'required': ['size', 'md5', 'sha1', 'sha256', 'external', 'status', 'content_type'], 'additionalProperties': False } if field.system: schema['readOnly'] = True if isinstance(field, glare_fields.Dict): element_type = utils.get_schema_type(field.element_type) property_validators = schema.pop('propertyValidators', []) if field.element_type is glare_fields.BlobFieldType: schema['additionalProperties'] = output_blob_schema else: if schema.get('properties'): properties = {} required = schema.pop('required', []) for key in schema.pop('properties'): properties[key] = { 'type': (element_type if key in required else [element_type, 'null'])} for val in property_validators: properties[key].update(val) schema['properties'] = properties schema['additionalProperties'] = False else: schema['additionalProperties'] = {'type': element_type} for val in property_validators: schema['additionalProperties'].update(val) if isinstance(field, glare_fields.List): items_validators = schema.pop('itemValidators', []) schema['items'] = { 'type': utils.get_schema_type(field.element_type)} for val in items_validators: schema['items'].update(val) if isinstance(field, glare_fields.BlobField): schema.update(output_blob_schema) if isinstance(field, fields.DateTimeField): schema['format'] = 'date-time' if field_name == 'status': schema['enum'] = cls.STATUS if field.description: schema['description'] = field.description if field.mutable: schema['mutable'] = True if field.sortable: schema['sortable'] = True if not field.required_on_activate: schema['required_on_activate'] = False if field._default is not None: schema['default'] = field._default schema['filter_ops'] = field.filter_ops return schema @classmethod def gen_schemas(cls): """Return json schema representation of the artifact type.""" schemas_prop = {} for field_name, field in cls.fields.items(): schemas_prop[field_name] = cls._schema_field( field, field_name=field_name) schemas = {'properties': schemas_prop, 'name': cls.get_type_name(), 'version': cls.VERSION, 'title': 'Artifact type %s of version %s' % (cls.get_type_name(), cls.VERSION), 'type': 'object', 'required': ['name']} return schemas glare-0.5.0/glare/objects/heat_environment.py000066400000000000000000000016771317401036700212650ustar00rootroot00000000000000# Copyright 2016 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from glare.objects import base from glare.objects.meta import wrappers Blob = wrappers.BlobField.init class HeatEnvironment(base.BaseArtifact): fields = { 'environment': Blob(description="Heat Environment text body."), } @classmethod def get_type_name(cls): return "heat_environments" glare-0.5.0/glare/objects/heat_template.py000066400000000000000000000037411317401036700205260ustar00rootroot00000000000000# Copyright 2016 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_versionedobjects import fields from glare.objects import base from glare.objects.meta import fields as glare_fields from glare.objects.meta import wrappers Field = wrappers.Field.init Blob = wrappers.BlobField.init Dict = wrappers.DictField.init Folder = wrappers.FolderField.init class HeatTemplate(base.BaseArtifact): fields = { 'environments': Dict(glare_fields.LinkFieldType, mutable=True, description="References to Heat Environments " "that can be used with current " "template."), 'template': Blob(description="Heat template body."), 'nested_templates': Folder(description="Dict of nested templates " "where key is the name of " "template and value is " "nested template body."), 'default_envs': Dict(fields.String, mutable=True, description="Default environments that can be " "applied to the template if no " "environments specified by user.") } @classmethod def get_type_name(cls): return "heat_templates" glare-0.5.0/glare/objects/image.py000066400000000000000000000110611317401036700167660ustar00rootroot00000000000000# Copyright 2016 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_versionedobjects import fields from glare.objects import base from glare.objects.meta import validators from glare.objects.meta import wrappers Field = wrappers.Field.init Blob = wrappers.BlobField.init class Image(base.BaseArtifact): fields = { 'container_format': Field(fields.StringField, validators=[validators.AllowedValues( ['ami', 'ari', 'aki', 'bare', 'ovf', 'ova', 'docker'])], description="Image container format."), 'disk_format': Field(fields.StringField, validators=[validators.AllowedValues( ['ami', 'ari', 'aki', 'vhd', 'vhdx', 'vmdk', 'raw', 'qcow2', 'vdi', 'iso'])], description="Image disk format."), 'min_ram': Field(fields.IntegerField, required_on_activate=False, validators=[validators.MinNumberSize(0)], description="Minimal RAM required to boot image."), 'min_disk': Field(fields.IntegerField, required_on_activate=False, validators=[validators.MinNumberSize(0)], description="Minimal disk space " "required to boot image."), 'image': Blob(max_blob_size=1073741824000, required_on_activate=False, description="Image binary."), 'kernel_id': Field(fields.StringField, required_on_activate=False, validators=[validators.UUID()], description="ID of image stored in Glare that " "should be used as the kernel when " "booting an AMI-style image."), 'ramdisk_id': Field(fields.StringField, required_on_activate=False, validators=[validators.UUID()], description="ID of image stored in Glare that " "should be used as the ramdisk when " "booting an AMI-style image."), 'instance_uuid': Field(fields.StringField, required_on_activate=False, description="Metadata which can be used to " "record which instance this image " "is associated with. " "(Informational only, does not " "create an instance snapshot.)"), 'architecture': Field(fields.StringField, required_on_activate=False, description="Operating system architecture as " "specified in http://docs.openstack." "org/trunk/openstack-compute/admin/" "content/adding-images.html"), 'os_distro': Field(fields.StringField, required_on_activate=False, description="Common name of operating system " "distribution as specified in " "http://docs.openstack.org/trunk/" "openstack-compute/admin/content/" "adding-images.html"), 'os_version': Field(fields.StringField, required_on_activate=False, description="Operating system version as " "specified by the distributor"), } @classmethod def get_type_name(cls): return "images" glare-0.5.0/glare/objects/meta/000077500000000000000000000000001317401036700162615ustar00rootroot00000000000000glare-0.5.0/glare/objects/meta/__init__.py000066400000000000000000000013651317401036700203770ustar00rootroot00000000000000# Copyright 2017 Nokia # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from glare.objects.meta import wrappers # for backward compatibility when 'wrappers' module was called 'attribute' attribute = wrappers glare-0.5.0/glare/objects/meta/fields.py000066400000000000000000000146161317401036700201110ustar00rootroot00000000000000# Copyright (c) 2016 Mirantis, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import jsonschema from jsonschema import exceptions as json_exceptions from oslo_versionedobjects import fields import semantic_version import six import six.moves.urllib.parse as urlparse import six.moves.urllib.request as urlrequest from glare.common import exception from glare.i18n import _ class Version(fields.FieldType): @staticmethod def coerce(obj, field, value): return str(semantic_version.Version.coerce(str(value))) class VersionField(fields.AutoTypedField): AUTO_TYPE = Version() class BlobFieldType(fields.FieldType): """Blob field contains reference to blob location. """ BLOB_STATUS = (SAVING, ACTIVE) = ('saving', 'active') BLOB_SCHEMA = { 'type': 'object', 'properties': { 'url': {'type': ['string', 'null'], 'format': 'uri', 'maxLength': 2048}, 'size': {'type': ['number', 'null']}, 'md5': {'type': ['string', 'null']}, 'sha1': {'type': ['string', 'null']}, 'sha256': {'type': ['string', 'null']}, 'external': {'type': 'boolean'}, 'id': {'type': 'string'}, 'status': {'type': 'string', 'enum': list(BLOB_STATUS)}, 'content_type': {'type': ['string', 'null']}, }, 'required': ['url', 'size', 'md5', 'sha1', 'sha256', 'external', 'status', 'id', 'content_type'] } @staticmethod def coerce(obj, field, value): """Validate and store blob info inside oslo.vo""" if not isinstance(value, dict): raise ValueError(_("Blob value must be dict. Got %s type instead") % type(value)) try: jsonschema.validate(value, BlobFieldType.BLOB_SCHEMA) except json_exceptions.ValidationError as e: raise ValueError(e) return value @staticmethod def to_primitive(obj, field, value): prim = {key: val for key, val in value.items() if key != 'id'} if not value.get('external'): url = '/artifacts/%(name)s/%(id)s/' % { "name": obj.get_type_name(), 'id': obj.id } blob_path = field.split('[') url = url + blob_path[0] if len(blob_path) > 1: url += '/%s' % blob_path[1][1:-2] prim['url'] = url return prim class BlobField(fields.AutoTypedField): AUTO_TYPE = BlobFieldType() class LinkFieldType(fields.FieldType): """Link field specifies Artifact dependency on other artifact or some external resource. From technical perspective it is just soft link to Glare Artifact or https/http resource. So Artifact users can download the referenced file by that link. """ @staticmethod def is_external(link): return link.startswith('http') @staticmethod def get_type_name(link): url = link.split('/') if len(url) == 4: return url[2] else: raise ValueError(_("It is not possible to " "extract type_name from link %s"), link) @staticmethod def coerce(obj, field, value): # to remove the existing link user sets its value to None, # we have to consider this case. if value is None: return value # check that value is string if not isinstance(value, six.string_types): raise ValueError(_('A string is required in field %(field)s, ' 'not a %(type)s') % {'field': field, 'type': type(value).__name__}) # determine if link is external or internal external = LinkFieldType.is_external(value) # validate link itself if external: link = urlparse.urlparse(value) if link.scheme not in ('http', 'https'): raise ValueError(_('Only http and https requests ' 'are allowed in url %s') % value) try: with urlrequest.urlopen(value) as data: data.read(1) except Exception: raise ValueError( _('Link %(link)s is not valid in field ' '%(field)s. The link must be either valid url or ' 'reference to artifact. Example: ' 'http://glarehost:9494/artifacts//' '' ) % {'link': value, 'field': field}) else: result = value.split('/') if len(result) != 4 or result[1] != 'artifacts': raise ValueError( _('Link %(link)s is not valid in field ' '%(field)s. The link must be either valid url or ' 'reference to artifact. Example: ' '/artifacts//' ) % {'link': value, 'field': field}) # try to find the referenced artifact try: obj.db_api.get(obj.obj_context, result[3]) except exception.NotFound: raise ValueError( _("Link %(link)s is not valid in field %(field)s, because " "artifact with id %(art_id)s doesn't exist" ) % {'link': value, 'field': field, 'art_id': result[3]} ) return value class Link(fields.AutoTypedField): AUTO_TYPE = LinkFieldType() class List(fields.AutoTypedField): def __init__(self, element_type, **kwargs): self.AUTO_TYPE = fields.List(element_type()) super(List, self).__init__(**kwargs) class Dict(fields.AutoTypedField): def __init__(self, element_type, **kwargs): self.AUTO_TYPE = fields.Dict(element_type()) super(Dict, self).__init__(**kwargs) glare-0.5.0/glare/objects/meta/file_utils.py000066400000000000000000000137201317401036700207750ustar00rootroot00000000000000# Copyright 2017 Nokia # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Contains additional file utils that may be useful for upload hooks.""" import os import tempfile import zipfile from oslo_config import cfg from oslo_log import log as logging from oslo_utils import excutils from oslo_utils import uuidutils from glare.common import store_api from glare.common import utils from glare.objects.meta import fields as glare_fields CONF = cfg.CONF LOG = logging.getLogger(__name__) INMEMORY_OBJECT_SIZE_LIMIT = 134217728 # 128 megabytes def create_temporary_file(stream, suffix=''): """Create a temporary local file from a stream. :param stream: stream of bytes to be stored in a temporary file :param suffix: (optional) file name suffix """ tfd, path = tempfile.mkstemp(suffix=suffix) while True: data = stream.read(100000) if data == b'': # end of file reached break os.write(tfd, data) tfile = os.fdopen(tfd, "rb") return tfile, path def extract_zip_to_temporary_folder(tfile): """Create temporary folder and extract all file contents there. :param tfile: zip archive to be extracted """ zip_ref = zipfile.ZipFile(tfile, 'r') tdir = tempfile.mkdtemp() zip_ref.extractall(tdir) zip_ref.close() return tdir def unpack_zip_archive_to_artifact_folder(context, af, zip_ref, folder_name): """Unpack zip archive to artifact folder. :param context: user context :param af: artifact object :param zip_ref: zip archive to be extracted :param folder_name: name of the artifact folder where to extract data """ file_dict = {} blobs = [] for name in zip_ref.namelist(): if not name.endswith('/'): blob_id = uuidutils.generate_uuid() # create an an empty blob instance in db with 'saving' status blob = {'url': None, 'size': None, 'md5': None, 'sha1': None, 'sha256': None, 'status': 'saving', 'id': blob_id, 'external': False, 'content_type': 'application/octet-stream'} file_dict[name] = blob blobs.append((blob_id, utils.BlobIterator(zip_ref.read(name)))) af = af.update_blob(context, af.id, folder_name, file_dict) default_store = getattr( CONF, 'artifact_type:' + af.get_type_name()).default_store # use global parameter if default store isn't set per artifact type if default_store is None: default_store = CONF.glance_store.default_store # try to perform blob uploading to storage backend try: blobs_info = store_api.save_blobs_to_store( blobs, context, af.get_max_blob_size(folder_name), default_store) for name in zip_ref.namelist(): if not name.endswith('/'): location_uri, size, checksums = blobs_info[ file_dict[name]['id']] # update blob info and activate it file_dict[name].update({'url': location_uri, 'status': 'active', 'size': size}) file_dict[name].update(checksums) except Exception: # if upload failed remove blob from db and storage with excutils.save_and_reraise_exception(logger=LOG): af.update_blob(context, af.id, folder_name, None) af.update_blob(context, af.id, folder_name, file_dict) def upload_content_file(context, af, data, blob_dict, key_name, content_type='application/octet-stream'): """Upload a file to a blob dictionary. :param context: user context :param af: artifact object :param data: bytes that need to be stored in the blob dictionary :param blob_dict: name of the blob_dictionary field :param key_name: name of key in the dictionary :param content_type: (optional) specifies mime type of uploading data """ blob_id = uuidutils.generate_uuid() # create an an empty blob instance in db with 'saving' status blob = {'url': None, 'size': None, 'md5': None, 'sha1': None, 'sha256': None, 'status': glare_fields.BlobFieldType.SAVING, 'external': False, 'content_type': content_type, 'id': blob_id} getattr(af, blob_dict)[key_name] = blob af = af.update_blob(context, af.id, blob_dict, getattr(af, blob_dict)) # try to perform blob uploading to storage backend try: default_store = getattr( CONF, 'artifact_type:' + af.get_type_name()).default_store # use global parameter if default store isn't set per artifact type if default_store is None: default_store = CONF.glance_store.default_store location_uri, size, checksums = store_api.save_blob_to_store( blob_id, data, context, af.get_max_blob_size(blob_dict), default_store) except Exception: # if upload failed remove blob from db and storage with excutils.save_and_reraise_exception(logger=LOG): del getattr(af, blob_dict)[key_name] af = af.update_blob(context, af.id, blob_dict, getattr(af, blob_dict)) # update blob info and activate it blob.update({'url': location_uri, 'status': glare_fields.BlobFieldType.ACTIVE, 'size': size}) blob.update(checksums) getattr(af, blob_dict)[key_name] = blob af.update_blob(context, af.id, blob_dict, getattr(af, blob_dict)) glare-0.5.0/glare/objects/meta/registry.py000066400000000000000000000115471317401036700205130ustar00rootroot00000000000000# Copyright 2016 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import collections import importlib import pkgutil import sys from oslo_config import cfg from oslo_config import types as conf_types from oslo_log import log as logging from oslo_versionedobjects import base as vo_base from glare.common import exception from glare.i18n import _ from glare.objects import base CONF = cfg.CONF LOG = logging.getLogger(__name__) registry_options = [ cfg.ListOpt('enabled_artifact_types', default=['heat_templates', 'heat_environments', 'murano_packages', 'tosca_templates', 'images'], item_type=conf_types.String(), help=_("List of enabled artifact types that will be " "available to user")), cfg.ListOpt('custom_artifact_types_modules', default=[], item_type=conf_types.String(), help=_("List of custom user modules with artifact types that " "will be uploaded by Glare dynamically during service " "startup.")) ] CONF.register_opts(registry_options) def import_submodules(module): """Import all submodules of a module. :param module: Package name :return: list of imported modules """ package = sys.modules[module] return [ importlib.import_module(module + '.' + name) for loader, name, is_pkg in pkgutil.walk_packages(package.__path__)] def import_modules_list(modules): custom_module_list = [] for module_name in modules: try: custom_module_list.append(importlib.import_module(module_name)) except Exception as e: LOG.exception(e) LOG.error("Cannot import custom artifact type from module " "%(module_name)%s. Error: %(error)s", {'module_name': module_name, 'error': str(e)}) return custom_module_list def get_subclasses(module, base_class): subclasses = [] for name in dir(module): obj = getattr(module, name) try: if issubclass(obj, base_class) and obj != base_class: subclasses.append(obj) except TypeError: pass return subclasses class ArtifactRegistry(vo_base.VersionedObjectRegistry): """Artifact Registry is responsible for registration of artifacts and returning appropriate artifact types based on artifact type name. """ enabled_types = {} @classmethod def register_all_artifacts(cls): """Register all artifacts in Glare.""" # get all submodules in glare.objects # please note that we registering trusted modules first # and applying custom modules after that to allow custom modules # to specify custom logic inside modules = (import_submodules('glare.objects') + import_modules_list( CONF.custom_artifact_types_modules)) # get all versioned object classes in module supported_types = [] for module in modules: supported_types.extend(get_subclasses(module, base.BaseArtifact)) for type_name in set(CONF.enabled_artifact_types + ['all']): for af_type in supported_types: if type_name == af_type.get_type_name(): if af_type != 'all': CONF.register_opts( af_type.list_artifact_type_opts(), group='artifact_type:' + type_name) cls.register(af_type) break else: raise exception.TypeNotFound(name=type_name) # Fill enabled_types for name, af_type in cls.obj_classes().items(): cls.enabled_types[af_type[0].get_type_name()] = af_type[0] @classmethod def get_artifact_type(cls, type_name): """Return artifact type based on artifact type name. :param type_name: name of artifact type :return: artifact class """ if type_name not in cls.enabled_types: raise exception.TypeNotFound(name=type_name) return cls.enabled_types[type_name] @classmethod def reset_registry(cls): """Resets all registered artifact type classes.""" cls._registry._obj_classes = collections.defaultdict(list) glare-0.5.0/glare/objects/meta/validators.py000066400000000000000000000262101317401036700210040ustar00rootroot00000000000000# Copyright 2016 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import abc import re import uuid from oslo_log import log as logging from oslo_versionedobjects import fields import six from glare.common import exception from glare.i18n import _ from glare.objects.meta import fields as glare_fields LOG = logging.getLogger(__name__) @six.add_metaclass(abc.ABCMeta) class Validator(object): """Common interface for all validators.""" @staticmethod @abc.abstractmethod def get_allowed_types(): raise NotImplementedError() def check_type_allowed(self, field_type): if not issubclass(field_type, self.get_allowed_types()): # try to check if field_type is correct # in case of element_type passed allowed_field_types = tuple(type(field.AUTO_TYPE) for field in self.get_allowed_types() if hasattr(field, 'AUTO_TYPE')) if not issubclass(field_type, allowed_field_types): raise exception.IncorrectArtifactType( _("%(type)s is not allowed for validator " "%(val)s. Allowed types are %(allowed)s.") % { "type": str(field_type), "val": str(self.__class__), "allowed": str(self.get_allowed_types())}) def to_jsonschema(self): return {} @abc.abstractmethod def __call__(self, value): raise NotImplemented class UUID(Validator): @staticmethod def get_allowed_types(): return fields.StringField, def __call__(self, value): uuid.UUID(value, version=4) def to_jsonschema(self): return {'pattern': ('^([0-9a-fA-F]){8}-([0-9a-fA-F]){4}-([0-9a-fA-F])' '{4}-([0-9a-fA-F]){4}-([0-9a-fA-F]){12}$')} class AllowedValues(Validator): def __init__(self, allowed_values): self.allowed_values = allowed_values @staticmethod def get_allowed_types(): return fields.StringField, fields.IntegerField, fields.FloatField def __call__(self, value): if value not in self.allowed_values: raise ValueError(_("Value must be one of the following: %s") % ', '.join(map(str, self.allowed_values))) def to_jsonschema(self): return {'enum': self.allowed_values} class Version(Validator): @staticmethod def get_allowed_types(): return glare_fields.VersionField, def __call__(self, value): pass def to_jsonschema(self): return {'pattern': ('/^([0-9]+)\.([0-9]+)\.([0-9]+)(?:-([0-9A-Za-z-]' '+(?:\.[0-9A-Za-z-]+)*))?(?:\+[0-9A-Za-z-]+)?$/')} class Regex(Validator): def __init__(self, pattern): self.pattern = re.compile(pattern) @staticmethod def get_allowed_types(): return fields.StringField, def __call__(self, value): if not self.pattern.match(value): raise ValueError def to_jsonschema(self): return {'pattern': self.pattern.pattern} @six.add_metaclass(abc.ABCMeta) class SizeValidator(Validator): def __init__(self, size): self.size = size class MaxStrLen(SizeValidator): @staticmethod def get_allowed_types(): return fields.StringField, def __call__(self, value): l = len(value) if l > self.size: raise ValueError( _("String length must be less than %(size)d. " "Current length: %(cur)d") % {'size': self.size, 'cur': l}) def to_jsonschema(self): return {'maxLength': self.size} class MinStrLen(SizeValidator): @staticmethod def get_allowed_types(): return fields.StringField, def __call__(self, value): l = len(value) if l < self.size: raise ValueError( _("String length must be less than %(size)d. " "Current length: %(cur)d") % {'size': self.size, 'cur': l}) def to_jsonschema(self): return {'minLength': self.size} class ForbiddenChars(Validator): def __init__(self, forbidden_chars): self.forbidden_chars = forbidden_chars @staticmethod def get_allowed_types(): return fields.StringField, def __call__(self, value): for fc in self.forbidden_chars: if fc in value: raise ValueError( _("Forbidden character %(char)c found in string " "%(string)s") % {"char": fc, "string": value}) def to_jsonschema(self): return {'pattern': '^[^%s]+$' % ''.join(self.forbidden_chars)} @six.add_metaclass(abc.ABCMeta) class MaxSize(SizeValidator): def __call__(self, value): l = len(value) if l > self.size: raise ValueError( _("Number of items must be less than " "%(size)d. Current size: %(cur)d") % {'size': self.size, 'cur': l}) class MaxDictSize(MaxSize): @staticmethod def get_allowed_types(): return glare_fields.Dict, def to_jsonschema(self): return {'maxProperties': self.size} class MaxListSize(MaxSize): @staticmethod def get_allowed_types(): return glare_fields.List, def to_jsonschema(self): return {'maxItems': self.size} @six.add_metaclass(abc.ABCMeta) class MinSize(SizeValidator): def __call__(self, value): l = len(value) if l < self.size: raise ValueError( _("Number of items must be greater than " "%(size)d. Current size: %(cur)d") % {'size': self.size, 'cur': l}) class MinDictSize(MinSize): @staticmethod def get_allowed_types(): return glare_fields.Dict, def to_jsonschema(self): return {'minProperties': self.size} class MinListSize(MinSize): @staticmethod def get_allowed_types(): return glare_fields.List, def to_jsonschema(self): return {'minItems': self.size} class MaxNumberSize(SizeValidator): def __call__(self, value): if value > self.size: raise ValueError("Number is too big: %d. Max allowed number is " "%d" % (value, self.size)) @staticmethod def get_allowed_types(): return fields.IntegerField, fields.FloatField def to_jsonschema(self): return {'maximum': self.size} class MinNumberSize(SizeValidator): def __call__(self, value): if value < self.size: raise ValueError("Number is too small: %d. Min allowed number is " "%d" % (value, self.size)) @staticmethod def get_allowed_types(): return fields.IntegerField, fields.FloatField def to_jsonschema(self): return {'minimum': self.size} class Unique(Validator): def __init__(self, convert_to_set=False): self.convert_to_set = convert_to_set @staticmethod def get_allowed_types(): return glare_fields.List, def __call__(self, value): if self.convert_to_set: value[:] = list(set(value)) elif len(value) != len(set(value)): raise ValueError(_("List items %s must be unique.") % value) def to_jsonschema(self): return {'uniqueItems': True} class AllowedDictKeys(Validator): def __init__(self, allowed_keys): self.allowed_items = allowed_keys @staticmethod def get_allowed_types(): return glare_fields.Dict, def __call__(self, value): for item in value: if item not in self.allowed_items: raise ValueError(_("Key %(item)s is not allowed in dict. " "Allowed key values: %(allowed)s") % {"item": item, "allowed": ', '.join(self.allowed_items)}) def to_jsonschema(self): return { 'properties': {prop: {} for prop in self.allowed_items}, } class RequiredDictKeys(Validator): def __init__(self, required_keys): self.required_items = required_keys @staticmethod def get_allowed_types(): return glare_fields.Dict, def __call__(self, value): for item in self.required_items: if item not in value: raise ValueError(_("Key \"%(item)s\" is required in " "dictionary %(value)s.") % {"item": item, "value": ''.join( '{}:{}, '.format(key, val) for key, val in value.items())}) def to_jsonschema(self): return {'required': list(self.required_items)} class MaxDictKeyLen(SizeValidator): @staticmethod def get_allowed_types(): return glare_fields.Dict, def __call__(self, value): for key in value: if len(str(key)) > self.size: raise ValueError(_("Dict key length %(key)s must be less than " "%(size)d.") % {'key': key, 'size': self.size}) class MinDictKeyLen(SizeValidator): @staticmethod def get_allowed_types(): return glare_fields.Dict, def __call__(self, value): for key in value: if len(str(key)) < self.size: raise ValueError(_("Dict key length %(key)s must be bigger " "than %(size)d.") % {'key': key, 'size': self.size}) @six.add_metaclass(abc.ABCMeta) class ElementValidator(Validator): def __init__(self, validators): self.validators = validators class ListElementValidator(ElementValidator): @staticmethod def get_allowed_types(): return glare_fields.List, def __call__(self, value): for v in value: for validator in self.validators: validator(v) def to_jsonschema(self): return {'itemValidators': [ val.to_jsonschema() for val in self.validators ]} class DictElementValidator(ElementValidator): @staticmethod def get_allowed_types(): return glare_fields.Dict, def __call__(self, value): for v in value.values(): for validator in self.validators: validator(v) def to_jsonschema(self): return {'propertyValidators': [ val.to_jsonschema() for val in self.validators ]} glare-0.5.0/glare/objects/meta/wrappers.py000066400000000000000000000253231317401036700205030ustar00rootroot00000000000000# Copyright (c) 2016 Mirantis, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """This file contains classes that wrap nat""" from oslo_versionedobjects import fields from glare.common import exception as exc from glare.objects.meta import fields as glare_fields from glare.objects.meta import validators as val_lib FILTERS = ( FILTER_EQ, FILTER_NEQ, FILTER_IN, FILTER_GT, FILTER_GTE, FILTER_LT, FILTER_LTE) = ('eq', 'neq', 'in', 'gt', 'gte', 'lt', 'lte') DEFAULT_MAX_BLOB_SIZE = 10485760 # 10 Megabytes DEFAULT_MAX_FOLDER_SIZE = 2673868800 # 2550 Megabytes class Field(object): def __init__(self, field_class, mutable=False, required_on_activate=True, system=False, validators=None, nullable=True, default=None, sortable=False, filter_ops=None, description=""): """Init and validate field. Each artifact field has several common properties: :param required_on_activate: boolean value indicating if the field value should be specified for the artifact before activation (Default:True). :param mutable: boolean value indicating if the field value may be changed after the artifact is activated. (Default: False) :param system: boolean value indicating if the field value cannot be edited by user (Default: False). :param sortable: boolean value indicating if there is a possibility to sort by this fields's values. (Default: False) Only fields of 4 primitive types may be sortable: integer, string, float and boolean. :param default: a default value for the field may be specified (Default: None). :param validators: a list of objects. When user sets a value to the field with additional validators Glare checks them before setting the value and raises ValueError if at least one of the requirements is not satisfied. :param filter_ops: a list of available filter operators for the field. There are seven available operators: 'eq', 'neq', 'lt', 'lte', 'gt', 'gte', 'in'. """ if not issubclass(field_class, fields.AutoTypedField): raise exc.IncorrectArtifactType( "Field class %s must be sub-class of AutoTypedField." % field_class) self.validators = validators or [] for v in self.validators: v.check_type_allowed(field_class) if isinstance(v, val_lib.MaxStrLen): if v.size > 255 and sortable: raise exc.IncorrectArtifactType( "It's forbidden to make field %(field)s " "sortable if string length can be more than 255 " "symbols. Maximal allowed length now: %(max)d" % {"field": str(field_class), 'max': v.size}) self.field_class = field_class self.nullable = nullable self.default = default self.vo_props = ['nullable', 'default'] self.mutable = mutable self.required_on_activate = required_on_activate self.system = system self.sortable = sortable try: default_ops = self.get_allowed_filter_ops(self.element_type) except AttributeError: default_ops = self.get_allowed_filter_ops(field_class) if filter_ops is None: self.filter_ops = default_ops else: for op in filter_ops: if op not in default_ops: raise exc.IncorrectArtifactType( "Incorrect filter operator '%s'. " "Only %s are allowed" % (op, ', '.join(default_ops))) self.filter_ops = filter_ops self.field_props = ['mutable', 'required_on_activate', 'system', 'sortable', 'filter_ops', 'description'] self.description = description @staticmethod def get_allowed_filter_ops(field): if field in (fields.StringField, fields.String): return [FILTER_EQ, FILTER_NEQ, FILTER_IN] elif field in (fields.IntegerField, fields.Integer, fields.FloatField, fields.Float, glare_fields.VersionField): return FILTERS elif field in (fields.FlexibleBooleanField, fields.FlexibleBoolean, glare_fields.Link, glare_fields.LinkFieldType): return [FILTER_EQ, FILTER_NEQ] elif field in (glare_fields.BlobField, glare_fields.BlobFieldType): return [] elif field is fields.DateTimeField: return [FILTER_LT, FILTER_GT] def get_default_validators(self): default = [] if issubclass(self.field_class, fields.StringField): # check if fields is string if not any(isinstance(v, val_lib.MaxStrLen) for v in self.validators) and \ not any(isinstance(v, val_lib.AllowedValues) for v in self.validators): default.append(val_lib.MaxStrLen(255)) return default def get_field(self): # init the field vo_props = {prop_name: getattr(self, prop_name) for prop_name in self.vo_props} field = self.field_class(**vo_props) # setup custom field properties field_props = {prop_name: getattr(self, prop_name) for prop_name in self.field_props} for prop, value in field_props.items(): setattr(field, prop, value) # apply custom validators vals = self.validators for def_val in self.get_default_validators(): for val in self.validators: if type(val) is type(def_val): break else: vals.append(def_val) def wrapper(coerce_func): def coerce_wrapper(obj, field, value): try: val = coerce_func(obj, field, value) if val is not None: for check_func in vals: check_func(val) return val except (KeyError, ValueError, TypeError) as e: msg = "Type: %s. Field: %s. Exception: %s" % ( obj.get_type_name(), field, str(e)) raise exc.BadRequest(message=msg) return coerce_wrapper field.coerce = wrapper(field.coerce) field.validators = vals return field @classmethod def init(cls, *args, **kwargs): """Fabric to build fields.""" return cls(*args, **kwargs).get_field() class CompoundField(Field): def __init__(self, field_class, element_type, element_validators=None, **kwargs): if element_type is None: raise exc.IncorrectArtifactType("'element_type' must be set for " "compound type.") self.element_type = element_type super(CompoundField, self).__init__(field_class, **kwargs) self.vo_props.append('element_type') self.field_props.append('element_type') self.element_validators = element_validators or [] if self.sortable: raise exc.IncorrectArtifactType("'sortable' must be False for " "compound type.") def get_element_validators(self): default_vals = [] if issubclass(self.element_type, fields.String): # check if fields is string if not any(isinstance(v, val_lib.MaxStrLen) for v in self.element_validators): default_vals.append(val_lib.MaxStrLen(255)) vals = default_vals + self.element_validators for v in vals: v.check_type_allowed(self.element_type) return default_vals + self.element_validators class ListField(CompoundField): def __init__(self, element_type, max_size=255, **kwargs): if 'default' not in kwargs: kwargs['default'] = [] if element_type is glare_fields.BlobField: raise exc.IncorrectArtifactType("List of blobs is not allowed " "to be specified in artifact.") super(ListField, self).__init__(glare_fields.List, element_type, **kwargs) self.validators.append(val_lib.MaxListSize(max_size)) def get_default_validators(self): default_vals = [] elem_val = val_lib.ListElementValidator( super(ListField, self).get_element_validators()) default_vals.append(elem_val) return default_vals class DictField(CompoundField): def __init__(self, element_type, max_size=255, **kwargs): if 'default' not in kwargs: kwargs['default'] = {} super(DictField, self).__init__(glare_fields.Dict, element_type, **kwargs) self.validators.append(val_lib.MaxDictSize(max_size)) def get_default_validators(self): default_vals = [] elem_val = val_lib.DictElementValidator( super(DictField, self).get_element_validators()) default_vals.append(elem_val) default_vals.append(val_lib.MaxDictKeyLen(255)) default_vals.append(val_lib.MinDictKeyLen(1)) return default_vals class BlobField(Field): def __init__(self, max_blob_size=DEFAULT_MAX_BLOB_SIZE, **kwargs): super(BlobField, self).__init__( field_class=glare_fields.BlobField, **kwargs) self.max_blob_size = int(max_blob_size) self.field_props.append('max_blob_size') class FolderField(DictField): def __init__(self, max_blob_size=DEFAULT_MAX_BLOB_SIZE, max_folder_size=DEFAULT_MAX_FOLDER_SIZE, **kwargs): super(FolderField, self).__init__( element_type=glare_fields.BlobFieldType, **kwargs) self.max_blob_size = int(max_blob_size) self.max_folder_size = int(max_folder_size) self.field_props.append('max_blob_size') self.field_props.append('max_folder_size') # Classes below added for backward compatibility. They shouldn't be used Attribute = Field CompoundAttribute = CompoundField ListAttribute = ListField DictAttribute = DictField BlobAttribute = BlobField BlobDictAttribute = FolderField glare-0.5.0/glare/objects/murano_package.py000066400000000000000000000047221317401036700206660ustar00rootroot00000000000000# Copyright 2016 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_versionedobjects import fields from glare.objects import base from glare.objects.meta import fields as glare_fields from glare.objects.meta import validators from glare.objects.meta import wrappers Field = wrappers.Field.init Blob = wrappers.BlobField.init List = wrappers.ListField.init Dict = wrappers.DictField.init class MuranoPackage(base.BaseArtifact): fields = { 'package': Blob(required_on_activate=False, description="Murano Package binary.", max_blob_size=104857600), 'type': Field(fields.StringField, validators=[validators.AllowedValues( ['Application', 'Library'])], default='Application', description="Package type."), 'display_name': Field(fields.StringField, mutable=True, description="Package name in human-readable " "format."), 'categories': List(fields.String, mutable=True, description="List of categories specified " "for the package."), 'class_definitions': List(fields.String, validators=[validators.Unique()], description="List of class definitions in " "the package."), 'inherits': Dict(fields.String), 'keywords': List(fields.String, mutable=True), 'dependencies': List(glare_fields.LinkFieldType, required_on_activate=False, description="List of package dependencies for " "this package."), } @classmethod def get_type_name(cls): return "murano_packages" glare-0.5.0/glare/objects/secret.py000066400000000000000000000062361317401036700172010ustar00rootroot00000000000000# Copyright 2017 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from glare.objects import base as base_artifact from glare.objects.meta import validators from glare.objects.meta import wrappers from oslo_versionedobjects import fields Field = wrappers.Field.init Blob = wrappers.BlobField.init Dict = wrappers.DictField.init Folder = wrappers.FolderField.init class Secret(base_artifact.BaseArtifact): """The purpose this glare artifact, Secret, is to enable the user to store 'secret' data such as: Private key, Certificate, Password, SSH keys Etc. """ VERSION = '1.0' @classmethod def get_type_name(cls): return "secrets" fields = { 'payload': Blob( # The encrypted secret data description="The secret's data to be stored" ), 'payload_content_encoding': Field( fields.StringField, required_on_activate=False, default="base64", filter_ops=[], validators=[validators.AllowedValues(["base64"])], description="Required if payload is encoded. " "The encoding used for the payload to be" " able to include it in the JSON request " "(only base64 supported)" ), 'secret_type': Field( fields.StringField, required_on_activate=False, default="opaque", sortable=True, filter_ops=(wrappers.FILTER_EQ,), validators=[validators.AllowedValues([ "symmetric", "public", "private", "passphrase", "certificate", "opaque"])], description="Used to indicate the type of secret being stored", ), 'algorithm': Field( fields.StringField, required_on_activate=False, filter_ops=(wrappers.FILTER_EQ,), description="Metadata provided by a user or system for" " informational purposes" ), 'bit_length': Field( fields.IntegerField, required_on_activate=False, sortable=True, validators=[validators.MinNumberSize(1)], description="Metadata provided by a user or system" " for informational purposes." " Value must be greater than zero." ), 'mode': Field( fields.StringField, required_on_activate=False, filter_ops=(wrappers.FILTER_EQ,), description="Metadata provided by a user or" " system for informational purposes."), } glare-0.5.0/glare/objects/tosca_template.py000066400000000000000000000021621317401036700207120ustar00rootroot00000000000000# Copyright 2016 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_versionedobjects import fields from glare.objects import base from glare.objects.meta import wrappers Field = wrappers.Field.init Blob = wrappers.BlobField.init class TOSCATemplate(base.BaseArtifact): fields = { 'template_format': Field(fields.StringField, description="TOSCA template format."), 'template': Blob(description="TOSCA template body.") } @classmethod def get_type_name(cls): return "tosca_templates" glare-0.5.0/glare/opts.py000066400000000000000000000042211317401036700152400ustar00rootroot00000000000000# Copyright (c) 2014 OpenStack Foundation. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. __all__ = [ 'list_artifacts_opts' ] import copy import itertools from osprofiler import opts as profiler import glare.api.middleware.context import glare.api.middleware.keycloak_auth import glare.api.v1.resource import glare.api.versions import glare.common.config import glare.common.wsgi import glare.notification import glare.objects.base from glare.objects.meta import registry import glare.scrubber _artifacts_opts = [ (None, list(itertools.chain( glare.api.middleware.context.context_opts, glare.api.v1.resource.list_configs, glare.api.versions.versions_opts, glare.common.config.common_opts, glare.common.wsgi.bind_opts, glare.common.wsgi.eventlet_opts, glare.common.wsgi.socket_opts, glare.notification.notifier_opts, glare.objects.base.global_artifact_opts, registry.registry_options))), profiler.list_opts()[0], ('paste_deploy', glare.common.config.paste_deploy_opts), ('keycloak_oidc', glare.api.middleware.keycloak_auth.keycloak_oidc_opts), ('scrubber', glare.scrubber.scrubber_opts + glare.scrubber.scrubber_cmd_opts + glare.scrubber.scrubber_cmd_cli_opts) ] registry.ArtifactRegistry.register_all_artifacts() for af_type in registry.ArtifactRegistry.obj_classes().values(): _artifacts_opts.append( ('artifact_type:' + af_type[0].get_type_name(), af_type[0].list_artifact_type_opts())) def list_artifacts_opts(): """Return a list of oslo_config options available in Glare""" return [(g, copy.deepcopy(o)) for g, o in _artifacts_opts] glare-0.5.0/glare/quota.py000066400000000000000000000124271317401036700154130ustar00rootroot00000000000000# Copyright 2017 - Nokia Networks # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from oslo_config import cfg from glare.common import exception from glare.db.sqlalchemy import api from glare.i18n import _ CONF = cfg.CONF def verify_artifact_count(context, type_name): """Verify if user can upload data based on his quota limits. :param context: user context :param type_name: name of artifact type """ global_limit = CONF.max_artifact_number type_limit = getattr( CONF, 'artifact_type:' + type_name).max_artifact_number # update limits if they were reassigned for project project_id = context.tenant quotas = list_quotas(project_id).get(project_id, {}) if 'max_artifact_number' in quotas: global_limit = quotas['max_artifact_number'] if 'max_artifact_number:' + type_name in quotas: type_limit = quotas['max_artifact_number:' + type_name] session = api.get_session() if global_limit != -1: # the whole amount of created artifacts whole_number = api.count_artifact_number(context, session) if whole_number >= global_limit: msg = _("Can't create artifact because of global quota " "limit is %(global_limit)d artifacts. " "You have %(whole_number)d artifact(s).") % { 'global_limit': global_limit, 'whole_number': whole_number} raise exception.Forbidden(msg) if type_limit != -1: # the amount of artifacts for specific type type_number = api.count_artifact_number( context, session, type_name) if type_number >= type_limit: msg = _("Can't create artifact because of quota limit for " "artifact type '%(type_name)s' is %(type_limit)d " "artifacts. You have %(type_number)d artifact(s) " "of this type.") % { 'type_name': type_name, 'type_limit': type_limit, 'type_number': type_number} raise exception.Forbidden(msg) def verify_uploaded_data_amount(context, type_name, data_amount=None): """Verify if user can upload data based on his quota limits. :param context: user context :param type_name: name of artifact type :param data_amount: number of bytes user wants to upload. Value None means that user hasn't specified data amount. In this case don't raise an exception, but just return the amount of data he is able to upload. :return: number of bytes user can upload if data_amount isn't specified """ global_limit = CONF.max_uploaded_data type_limit = getattr(CONF, 'artifact_type:' + type_name).max_uploaded_data # update limits if they were reassigned for project project_id = context.tenant quotas = list_quotas(project_id).get(project_id, {}) if 'max_uploaded_data' in quotas: global_limit = quotas['max_uploaded_data'] if 'max_uploaded_data:' + type_name in quotas: type_limit = quotas['max_uploaded_data:' + type_name] session = api.get_session() res = -1 if global_limit != -1: # the whole amount of created artifacts whole_number = api.calculate_uploaded_data(context, session) if data_amount is None: res = global_limit - whole_number elif whole_number + data_amount > global_limit: msg = _("Can't upload %(data_amount)d byte(s) because of global " "quota limit: %(global_limit)d. " "You have %(whole_number)d bytes uploaded.") % { 'data_amount': data_amount, 'global_limit': global_limit, 'whole_number': whole_number} raise exception.RequestEntityTooLarge(msg) if type_limit != -1: # the amount of artifacts for specific type type_number = api.calculate_uploaded_data( context, session, type_name) if data_amount is None: available = type_limit - type_number res = available if res == -1 else min(res, available) elif type_number + data_amount > type_limit: msg = _("Can't upload %(data_amount)d byte(s) because of " "quota limit for artifact type '%(type_name)s': " "%(type_limit)d. You have %(type_number)d bytes " "uploaded for this type.") % { 'data_amount': data_amount, 'type_name': type_name, 'type_limit': type_limit, 'type_number': type_number} raise exception.RequestEntityTooLarge(msg) return res def set_quotas(values): session = api.get_session() api.set_quotas(values, session) def list_quotas(project_id=None): session = api.get_session() return api.get_all_quotas(session, project_id) glare-0.5.0/glare/scrubber.py000066400000000000000000000141021317401036700160610ustar00rootroot00000000000000# Copyright 2017 - Nokia Networks # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import eventlet from oslo_config import cfg from oslo_log import log as logging from glare.api.middleware import context from glare.common import exception from glare.common import store_api from glare.db.sqlalchemy import api as db_api from glare.i18n import _ LOG = logging.getLogger(__name__) scrubber_opts = [ cfg.IntOpt('scrub_time', default=0, min=0, help=_(""" The amount of time, in seconds, to delay artifact scrubbing. When delayed delete is turned on, an artifact is put into ``deleted`` state upon deletion until the scrubber deletes its data. Typically, soon after the artifact is put into ``deleted`` state, it is available for scrubbing. However, scrubbing can be delayed until a later point using this configuration option. This option denotes the time period an artifact spends in ``deleted`` state before it is available for scrubbing. It is important to realize that this has storage implications. The larger the ``scrub_time``, the longer the time to reclaim backend storage from deleted artifacts. Possible values: * Any non-negative integer Related options: * ``delayed_delete`` """)), cfg.IntOpt('scrub_pool_size', default=1, min=1, help=_(""" The size of thread pool to be used for scrubbing artifacts. When there are a large number of artifacts to scrub, it is beneficial to scrub artifacts in parallel so that the scrub queue stays in control and the backend storage is reclaimed in a timely fashion. This configuration option denotes the maximum number of artifacts to be scrubbed in parallel. The default value is one, which signifies serial scrubbing. Any value above one indicates parallel scrubbing. Possible values: * Any non-zero positive integer Related options: * ``delayed_delete`` """)), ] scrubber_cmd_opts = [ cfg.IntOpt('wakeup_time', default=300, min=0, help=_(""" Time interval, in seconds, between scrubber runs in daemon mode. Scrubber can be run either as a cron job or daemon. When run as a daemon, this configuration time specifies the time period between two runs. When the scrubber wakes up, it fetches and scrubs all ``deleted`` artifacts that are available for scrubbing after taking ``scrub_time`` into consideration. If the ``wakeup_time`` is set to a large number, there may be a large number of artifacts to be scrubbed for each run. Also, this impacts how quickly the backend storage is reclaimed. Possible values: * Any non-negative integer Related options: * ``daemon`` * ``delayed_delete`` """)) ] scrubber_cmd_cli_opts = [ cfg.BoolOpt('daemon', short='D', default=False, help=_(""" Run scrubber as a daemon. This boolean configuration option indicates whether scrubber should run as a long-running process that wakes up at regular intervals to scrub artifacts. The wake up interval can be specified using the configuration option ``wakeup_time``. If this configuration option is set to ``False``, which is the default value, scrubber runs once to scrub artifacts and exits. In this case, if the operator wishes to implement continuous scrubbing of artifacts, scrubber needs to be scheduled as a cron job. Possible values: * True * False Related options: * ``wakeup_time`` """)) ] CONF = cfg.CONF CONF.register_opts(scrubber_opts, group='scrubber') class Daemon(object): def __init__(self, wakeup_time=300, threads=100): LOG.info("Starting Daemon: wakeup_time=%(wakeup_time)s " "threads=%(threads)s", {'wakeup_time': wakeup_time, 'threads': threads}) self.wakeup_time = wakeup_time self.event = eventlet.event.Event() # This pool is used for periodic instantiation of scrubber self.daemon_pool = eventlet.greenpool.GreenPool(threads) def start(self, application): self._run(application) def wait(self): try: self.event.wait() except KeyboardInterrupt: LOG.info("Daemon Shutdown on KeyboardInterrupt") def _run(self, application): LOG.debug("Running scrubber application") self.daemon_pool.spawn_n(application.run, self.event) eventlet.spawn_after(self.wakeup_time, self._run, application) LOG.debug("Next run scheduled in %s seconds", self.wakeup_time) class Scrubber(object): def __init__(self): self.context = context.RequestContext() self.context.is_admin = True self.pool = eventlet.greenpool.GreenPool( CONF.scrubber.scrub_pool_size) def run(self, event=None): while True: artifacts = db_api._get_all( context=self.context, session=db_api.get_session(), limit=CONF.scrubber.scrub_pool_size, sort=[], filters=[('status', None, 'eq', None, 'deleted')]) if not artifacts: break self.pool.imap(self._scrub_artifact, artifacts) @staticmethod def _scrub_artifact(af): LOG.info("Begin scrubbing of artifact %s", af.id) for blob in af.blobs: if not blob.external: try: store_api.delete_blob(blob.url, context=context) except exception.NotFound: # data has already been removed pass LOG.info("Blobs successfully deleted for artifact %s", af.id) # delete artifact itself db_api.delete(context, af.id, db_api.get_session()) LOG.info("Artifact %s was scrubbed", af.id) glare-0.5.0/glare/store/000077500000000000000000000000001317401036700150365ustar00rootroot00000000000000glare-0.5.0/glare/store/__init__.py000066400000000000000000000000001317401036700171350ustar00rootroot00000000000000glare-0.5.0/glare/store/base_api.py000066400000000000000000000027151317401036700171600ustar00rootroot00000000000000# Copyright 2017 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. class BaseStoreAPI(object): def add_to_backend(self, context, blob_id, data, verifier=None): """Save data to database store type and return location info :param blob_id: id of artifact :param data: file iterator :param context: user context :param verifier:signature verified :return: database location uri """ raise NotImplementedError() def get_from_store(self, uri, context): """Load file from database store :param uri: blob uri :param context: user context :return: file iterator """ raise NotImplementedError() def delete_from_store(self, uri, context): """Delete blob from database store :param uri: blob uri :param context: user context """ raise NotImplementedError() glare-0.5.0/glare/store/database.py000066400000000000000000000026711317401036700171620ustar00rootroot00000000000000# Copyright 2017 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from glare.db.sqlalchemy import api as db_api from glare.store import base_api class DatabaseStoreAPI(base_api.BaseStoreAPI): """Class that stores all data in sql database.""" def add_to_backend(self, blob_id, data, context, verifier=None): session = db_api.get_session() return db_api.save_blob_data(context, blob_id, data, session) def add_to_backend_batch(self, blobs, context, verifier=None): session = db_api.get_session() return db_api.save_blob_data_batch(context, blobs, session) def get_from_store(self, uri, context): session = db_api.get_session() return db_api.get_blob_data(context, uri, session) def delete_from_store(self, uri, context): session = db_api.get_session() return db_api.delete_blob_data(context, uri, session) glare-0.5.0/glare/tests/000077500000000000000000000000001317401036700150445ustar00rootroot00000000000000glare-0.5.0/glare/tests/__init__.py000066400000000000000000000000001317401036700171430ustar00rootroot00000000000000glare-0.5.0/glare/tests/etc/000077500000000000000000000000001317401036700156175ustar00rootroot00000000000000glare-0.5.0/glare/tests/etc/policy.json000066400000000000000000000000021317401036700200010ustar00rootroot00000000000000{}glare-0.5.0/glare/tests/functional/000077500000000000000000000000001317401036700172065ustar00rootroot00000000000000glare-0.5.0/glare/tests/functional/__init__.py000066400000000000000000000624671317401036700213360ustar00rootroot00000000000000# Copyright 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Base test class for running non-stubbed tests (functional tests) The FunctionalTest class contains helper methods for starting Glare server, grabbing the logs of each, cleaning up pidfiles, and spinning down the server. """ import atexit import datetime import errno import os import platform import shutil import signal import socket import sys import tempfile import time import eventlet import fixtures from oslo_log import log as logging from oslo_serialization import jsonutils # NOTE(jokke): simplified transition to py3, behaves like py2 xrange from six.moves import range import six.moves.urllib.parse as urlparse import testtools from glare.api.v1 import resource from glare.api.v1 import router from glare.common import utils from glare.common import wsgi from glare.db.sqlalchemy import api as db_api from glare import tests as glare_tests from glare.tests import utils as test_utils execute, get_unused_port = test_utils.execute, test_utils.get_unused_port tracecmd_osmap = {'Linux': 'strace', 'FreeBSD': 'truss'} eventlet.patcher.monkey_patch() class Server(object): """Class used to easily manage starting and stopping a server during functional test runs. """ def __init__(self, test_dir, port, sock=None): """Creates a new Server object. :param test_dir: The directory where all test stuff is kept. This is passed from the FunctionalTestCase. :param port: The port to start a server up on. """ self.debug = True self.no_venv = False self.test_dir = test_dir self.bind_port = port self.conf_file_name = None self.conf_base = None self.paste_conf_base = None self.exec_env = None self.deployment_flavor = '' self.needs_database = False self.log_file = None self.sock = sock self.fork_socket = True self.process_pid = None self.server_module = None self.stop_kill = False def write_conf(self, **kwargs): """Writes the configuration file for the server to its intended destination. Returns the name of the configuration file and the over-ridden config content (may be useful for populating error messages). """ if not self.conf_base: raise RuntimeError("Subclass did not populate config_base!") conf_override = self.__dict__.copy() if kwargs: conf_override.update(**kwargs) # A config file and paste.ini to use just for this test...we don't want # to trample on currently-running Glare servers, now do we? conf_dir = os.path.join(self.test_dir, 'etc') conf_filepath = os.path.join(conf_dir, "%s.conf" % self.server_name) if os.path.exists(conf_filepath): os.unlink(conf_filepath) paste_conf_filepath = conf_filepath.replace(".conf", "-paste.ini") if os.path.exists(paste_conf_filepath): os.unlink(paste_conf_filepath) test_utils.safe_mkdirs(conf_dir) def override_conf(filepath, overridden): with open(filepath, 'w') as conf_file: conf_file.write(overridden) conf_file.flush() return conf_file.name overridden_core = self.conf_base % conf_override self.conf_file_name = override_conf(conf_filepath, overridden_core) overridden_paste = '' if self.paste_conf_base: overridden_paste = self.paste_conf_base % conf_override override_conf(paste_conf_filepath, overridden_paste) overridden = ('==Core config==\n%s\n==Paste config==\n%s' % (overridden_core, overridden_paste)) return self.conf_file_name, overridden def start(self, expect_exit=True, expected_exitcode=0, **kwargs): """Starts the server. Any kwargs passed to this method will override the configuration value in the conf file used in starting the servers. """ # Ensure the configuration file is written self.write_conf(**kwargs) self.create_database() cmd = ("%(server_module)s --config-file %(conf_file_name)s" % {"server_module": self.server_module, "conf_file_name": self.conf_file_name}) cmd = "%s -m %s" % (sys.executable, cmd) # close the sock and release the unused port closer to start time if self.exec_env: exec_env = self.exec_env.copy() else: exec_env = {} pass_fds = set() if self.sock: if not self.fork_socket: self.sock.close() self.sock = None else: fd = os.dup(self.sock.fileno()) exec_env[utils.GLARE_TEST_SOCKET_FD_STR] = str(fd) pass_fds.add(fd) self.sock.close() self.process_pid = test_utils.fork_exec(cmd, logfile=os.devnull, exec_env=exec_env, pass_fds=pass_fds) self.stop_kill = not expect_exit if self.pid_file: with open(self.pid_file, 'w') as pf: pf.write('%d\n' % self.process_pid) if not expect_exit: rc = 0 try: os.kill(self.process_pid, 0) except OSError: raise RuntimeError("The process did not start") else: rc = test_utils.wait_for_fork( self.process_pid, expected_exitcode=expected_exitcode) # avoid an FD leak if self.sock: os.close(fd) self.sock = None return (rc, '', '') def reload(self, expect_exit=True, expected_exitcode=0, **kwargs): """Start and stop the service to reload Any kwargs passed to this method will override the configuration value in the conf file used in starting the servers. """ self.stop() return self.start(expect_exit=expect_exit, expected_exitcode=expected_exitcode, **kwargs) def create_database(self): """Create database if required for this server""" if self.needs_database: conf_dir = os.path.join(self.test_dir, 'etc') test_utils.safe_mkdirs(conf_dir) conf_filepath = os.path.join(conf_dir, 'glare.conf') glare_db_env = 'GLARE_DB_TEST_SQLITE_FILE' if glare_db_env in os.environ: # use the empty db created and cached as a tempfile # instead of spending the time creating a new one db_location = os.environ[glare_db_env] os.system('cp %s %s/tests.sqlite' % (db_location, self.test_dir)) else: cmd = ('%s -m glare.cmd.db_manage --config-file %s upgrade' % (sys.executable, conf_filepath)) execute(cmd, no_venv=self.no_venv, exec_env=self.exec_env, expect_exit=True) # copy the clean db to a temp location so that it # can be reused for future tests (osf, db_location) = tempfile.mkstemp() os.close(osf) os.system('cp %s/tests.sqlite %s' % (self.test_dir, db_location)) os.environ[glare_db_env] = db_location # cleanup the temp file when the test suite is # complete def _delete_cached_db(): try: os.remove(os.environ[glare_db_env]) except Exception: glare_tests.logger.exception( "Error cleaning up the file %s" % os.environ[glare_db_env]) atexit.register(_delete_cached_db) def stop(self): """Spin down the server.""" if not self.process_pid: raise Exception('why is this being called? %s' % self.server_name) if self.stop_kill: os.kill(self.process_pid, signal.SIGTERM) rc = test_utils.wait_for_fork(self.process_pid, raise_error=False) return (rc, '', '') def dump_log(self, name): log = logging.getLogger(name) if not self.log_file or not os.path.exists(self.log_file): return with open(self.log_file, 'r') as fptr: for line in fptr: log.info(line.strip()) class GlareServer(Server): """Server object that starts/stops/manages Glare server""" def __init__(self, test_dir, port, policy_file, delayed_delete=False, pid_file=None, sock=None, **kwargs): super(GlareServer, self).__init__(test_dir, port, sock=sock) self.server_name = 'glare' self.server_module = 'glare.cmd.api' self.default_store = kwargs.get("default_store", "file") self.key_file = "" self.cert_file = "" self.blob_dir = os.path.join(self.test_dir, "artifacts") self.pid_file = pid_file or os.path.join(self.test_dir, "glare.pid") self.log_file = os.path.join(self.test_dir, "glare.log") self.delayed_delete = delayed_delete self.workers = 1 self.policy_file = policy_file self.policy_default_rule = 'default' self.disable_path = None self.needs_database = True default_sql_connection = 'sqlite:////%s/tests.sqlite' % self.test_dir self.sql_connection = os.environ.get('GLARE_TEST_SQL_CONNECTION', default_sql_connection) self.lock_path = self.test_dir self.enabled_artifact_types = '' self.custom_artifact_types_modules = '' self.max_uploaded_data = '1099511627776' self.max_artifact_number = '100' self.artifact_type_section = '' self.conf_base = """[DEFAULT] debug = %(debug)s default_log_levels = eventlet.wsgi.server=DEBUG bind_host = 127.0.0.1 bind_port = %(bind_port)s key_file = %(key_file)s cert_file = %(cert_file)s log_file = %(log_file)s delayed_delete = %(delayed_delete)s workers = %(workers)s lock_path = %(lock_path)s enabled_artifact_types = %(enabled_artifact_types)s custom_artifact_types_modules = %(custom_artifact_types_modules)s max_uploaded_data = %(max_uploaded_data)s max_artifact_number = %(max_artifact_number)s [oslo_policy] policy_file = %(policy_file)s policy_default_rule = %(policy_default_rule)s [paste_deploy] flavor = %(deployment_flavor)s [glance_store] filesystem_store_datadir=%(blob_dir)s default_store = %(default_store)s [database] connection = %(sql_connection)s %(artifact_type_section)s """ self.paste_conf_base = """[pipeline:glare-api] pipeline = faultwrapper versionnegotiation trustedauth glarev1api [pipeline:glare-api-noauth] pipeline = faultwrapper versionnegotiation context glarev1api [app:glarev1api] paste.app_factory = glare.tests.functional:TestRouter.factory [filter:faultwrapper] paste.filter_factory = glare.api.middleware.fault:GlareFaultWrapperFilter.factory [filter:versionnegotiation] paste.filter_factory = glare.api.middleware.version_negotiation: GlareVersionNegotiationFilter.factory [filter:context] paste.filter_factory = glare.api.middleware.context:ContextMiddleware.factory [filter:trustedauth] paste.filter_factory = glare.api.middleware.context:TrustedAuthMiddleware.factory """ class ScrubberDaemon(Server): """ Server object that starts/stops/manages the Scrubber server """ def __init__(self, test_dir, policy_file, daemon=False, **kwargs): # NOTE(jkoelker): Set the port to 0 since we actually don't listen super(ScrubberDaemon, self).__init__(test_dir, 0) self.server_name = 'scrubber' self.server_module = 'glare.cmd.%s' % self.server_name self.daemon = daemon self.blob_dir = os.path.join(self.test_dir, "artifacts") self.scrub_time = 5 self.pid_file = os.path.join(self.test_dir, "scrubber.pid") self.log_file = os.path.join(self.test_dir, "scrubber.log") self.lock_path = self.test_dir default_sql_connection = 'sqlite:////%s/tests.sqlite' % self.test_dir self.sql_connection = os.environ.get('GLARE_TEST_SQL_CONNECTION', default_sql_connection) self.policy_file = policy_file self.policy_default_rule = 'default' self.conf_base = """[DEFAULT] debug = %(debug)s log_file = %(log_file)s [scrubber] daemon = %(daemon)s wakeup_time = 2 scrub_time = %(scrub_time)s [glance_store] filesystem_store_datadir=%(blob_dir)s [oslo_policy] policy_file = %(policy_file)s policy_default_rule = %(policy_default_rule)s [database] connection = %(sql_connection)s idle_timeout = 3600 """ def start(self, expect_exit=True, expected_exitcode=0, **kwargs): if 'daemon' in kwargs: expect_exit = False return super(ScrubberDaemon, self).start( expect_exit=expect_exit, expected_exitcode=expected_exitcode, **kwargs) class FunctionalTest(test_utils.BaseTestCase): """Base test class for any test that wants to test the actual servers and clients and not just the stubbed out interfaces """ inited = False disabled = False launched_servers = [] def setUp(self): super(FunctionalTest, self).setUp() self.test_dir = self.useFixture(fixtures.TempDir()).path self.api_protocol = 'http' self.glare_port, glare_sock = test_utils.get_unused_port_and_socket() self.include_scrubber = False self.tracecmd = tracecmd_osmap.get(platform.system()) conf_dir = os.path.join(self.test_dir, 'etc') test_utils.safe_mkdirs(conf_dir) self.copy_data_file('policy.json', conf_dir) self.policy_file = os.path.join(conf_dir, 'policy.json') self.glare_server = GlareServer(self.test_dir, self.glare_port, self.policy_file, sock=glare_sock) self.scrubber_daemon = ScrubberDaemon(self.test_dir, self.policy_file) self.pid_files = [self.glare_server.pid_file, self.scrubber_daemon.pid_file] self.files_to_destroy = [] self.launched_servers = [] def tearDown(self): if not self.disabled: self.cleanup() # We destroy the test data store between each test case, # and recreate it, which ensures that we have no side-effects # from the tests self._reset_database(self.glare_server.sql_connection) super(FunctionalTest, self).tearDown() self.glare_server.dump_log('glare_server') self.scrubber_daemon.dump_log('scrubber_daemon') def set_policy_rules(self, rules): with open(self.policy_file, 'w') as fap: fap.write(jsonutils.dumps(rules)) def _reset_database(self, conn_string): conn_pieces = urlparse.urlparse(conn_string) if conn_string.startswith('sqlite'): # We leave behind the sqlite DB for failing tests to aid # in diagnosis, as the file size is relatively small and # won't interfere with subsequent tests as it's in a per- # test directory (which is blown-away if the test is green) pass elif conn_string.startswith('mysql'): # We can execute the MySQL client to destroy and re-create # the MYSQL database, which is easier and less error-prone # than using SQLAlchemy to do this via MetaData...trust me. database = conn_pieces.path.strip('/') loc_pieces = conn_pieces.netloc.split('@') host = loc_pieces[1] auth_pieces = loc_pieces[0].split(':') user = auth_pieces[0] password = "" if len(auth_pieces) > 1: if auth_pieces[1].strip(): password = "-p%s" % auth_pieces[1] sql = ("drop database if exists %(database)s; " "create database %(database)s;") % {'database': database} cmd = ("mysql -u%(user)s %(password)s -h%(host)s " "-e\"%(sql)s\"") % {'user': user, 'password': password, 'host': host, 'sql': sql} exitcode, out, err = execute(cmd) self.assertEqual(0, exitcode) def cleanup(self): """Makes sure anything we created or started up in the tests are destroyed or spun down """ # NOTE(jbresnah) call stop on each of the servers instead of # checking the pid file. stop() will wait until the child # server is dead. This eliminates the possibility of a race # between a child process listening on a port actually dying # and a new process being started servers = [self.glare_server, self.scrubber_daemon] for s in servers: try: s.stop() except Exception: pass for f in self.files_to_destroy: if os.path.exists(f): os.unlink(f) def start_server(self, server, expect_launch, expect_exit=True, expected_exitcode=0, **kwargs): """Starts a server on an unused port. Any kwargs passed to this method will override the configuration value in the conf file used in starting the server. :param server: the server to launch :param expect_launch: true iff the server is expected to successfully start :param expect_exit: true iff the launched process is expected to exit in a timely fashion :param expected_exitcode: expected exitcode from the launcher """ self.cleanup() # Start up the requested server exitcode, out, err = server.start(expect_exit=expect_exit, expected_exitcode=expected_exitcode, **kwargs) if expect_exit: self.assertEqual(expected_exitcode, exitcode, "Failed to spin up the requested server. " "Got: %s" % err) self.launched_servers.append(server) launch_msg = self.wait_for_servers([server], expect_launch) self.assertTrue(launch_msg is None, launch_msg) def start_with_retry(self, server, port_name, max_retries, expect_launch=True, **kwargs): """Starts a server, with retries if the server launches but fails to start listening on the expected port. :param server: the server to launch :param port_name: the name of the port attribute :param max_retries: the maximum number of attempts :param expect_launch: true iff the server is expected to successfully start :param expect_exit: true iff the launched process is expected to exit in a timely fashion """ launch_msg = None for i in range(max_retries): exitcode, out, err = server.start(expect_exit=not expect_launch, **kwargs) name = server.server_name self.assertEqual(0, exitcode, "Failed to spin up the %s server. " "Got: %s" % (name, err)) launch_msg = self.wait_for_servers([server], expect_launch) if launch_msg: server.stop() server.bind_port = get_unused_port() setattr(self, port_name, server.bind_port) else: self.launched_servers.append(server) break self.assertTrue(launch_msg is None, launch_msg) def start_servers(self, **kwargs): """Starts the Glare server on unused port. Any kwargs passed to this method will override the configuration value in the conf file used in starting the servers. """ self.cleanup() self.start_with_retry(self.glare_server, 'glare_port', 3, **kwargs) if self.include_scrubber: exitcode, out, err = self.scrubber_daemon.start(**kwargs) self.assertEqual(0, exitcode, "Failed to spin up the Scrubber daemon. " "Got: %s" % err) def ping_server(self, port): """Simple ping on the port. If responsive, return True, else return False. :note We use raw sockets, not ping here, since ping uses ICMP and has no concept of ports... """ s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) try: s.connect(("127.0.0.1", port)) return True except socket.error: return False finally: s.close() def wait_for_servers(self, servers, expect_launch=True, timeout=30): """Tight loop, waiting for the given server port(s) to be available. Returns when all are pingable. There is a timeout on waiting for the servers to come up. :param servers: Glare server ports to ping :param expect_launch: Optional, true iff the server(s) are expected to successfully start :param timeout: Optional, defaults to 30 seconds :returns: None if launch expectation is met, otherwise an assertion message """ now = datetime.datetime.now() timeout_time = now + datetime.timedelta(seconds=timeout) replied = [] while (timeout_time > now): pinged = 0 for server in servers: if self.ping_server(server.bind_port): pinged += 1 if server not in replied: replied.append(server) if pinged == len(servers): msg = 'Unexpected server launch status' return None if expect_launch else msg now = datetime.datetime.now() time.sleep(0.05) failed = list(set(servers) - set(replied)) msg = 'Unexpected server launch status for: ' for f in failed: msg += ('%s, ' % f.server_name) if os.path.exists(f.pid_file): pid = f.process_pid trace = f.pid_file.replace('.pid', '.trace') if self.tracecmd: cmd = '%s -p %d -o %s' % (self.tracecmd, pid, trace) try: execute(cmd, raise_error=False, expect_exit=False) except OSError as e: if e.errno == errno.ENOENT: raise RuntimeError('No executable found for "%s" ' 'command.' % self.tracecmd) else: raise time.sleep(0.5) if os.path.exists(trace): msg += ('\n%s:\n%s\n' % (self.tracecmd, open(trace).read())) self.add_log_details(failed) return msg if expect_launch else None def stop_server(self, server): """Called to stop a single server in a normal fashion. :param server: the server to stop """ # Spin down the requested server server.stop() def stop_servers(self): self.stop_server(self.glare_server) if self.include_scrubber: self.stop_server(self.scrubber_daemon) self._reset_database(self.glare_server.sql_connection) def run_sql_cmd(self, sql): """Provides a crude mechanism to run manual SQL commands for backend DB verification within the functional tests. The raw result set is returned. """ engine = db_api.get_engine() return engine.execute(sql) def copy_data_file(self, file_name, dst_dir): src_file_name = os.path.join('glare/tests/etc', file_name) shutil.copy(src_file_name, dst_dir) dst_file_name = os.path.join(dst_dir, file_name) return dst_file_name def add_log_details(self, servers=None): logs = [s.log_file for s in (servers or self.launched_servers)] for log in logs: if os.path.exists(log): testtools.content.attach_file(self, log) class TestRouter(router.API): def _get_artifacts_resource(self): deserializer = resource.RequestDeserializer() serializer = resource.ResponseSerializer() controller = resource.ArtifactsController() return wsgi.Resource(controller, deserializer, serializer) glare-0.5.0/glare/tests/functional/base.py000066400000000000000000000146211317401036700204760ustar00rootroot00000000000000# Copyright (c) 2016 Mirantis, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_serialization import jsonutils from oslo_utils import uuidutils import requests from glare.tests import functional def sort_results(lst, target='name'): return sorted(lst, key=lambda x: x[target]) class TestArtifact(functional.FunctionalTest): enabled_types = (u'sample_artifact', u'images', u'heat_templates', u'heat_environments', u'tosca_templates', u'murano_packages', u'all') users = { 'user1': { 'id': uuidutils.generate_uuid(), 'tenant_id': uuidutils.generate_uuid(), 'token': uuidutils.generate_uuid(), 'role': 'member' }, 'user2': { 'id': uuidutils.generate_uuid(), 'tenant_id': uuidutils.generate_uuid(), 'token': uuidutils.generate_uuid(), 'role': 'member' }, 'admin': { 'id': uuidutils.generate_uuid(), 'tenant_id': uuidutils.generate_uuid(), 'token': uuidutils.generate_uuid(), 'role': 'admin' }, 'anonymous': { 'id': None, 'tenant_id': None, 'token': None, 'role': None } } def setUp(self): super(TestArtifact, self).setUp() self.set_user('user1') self.glare_server.deployment_flavor = 'noauth' self.glare_server.enabled_artifact_types = ','.join( self.enabled_types) self.glare_server.custom_artifact_types_modules = ( 'glare.tests.sample_artifact') self.start_servers(**self.__dict__.copy()) def tearDown(self): self.stop_servers() self._reset_database(self.glare_server.sql_connection) super(TestArtifact, self).tearDown() def _url(self, path): if path.startswith('/schemas') or \ path.startswith('/quotas') or \ path.startswith('/project-quotas'): return 'http://127.0.0.1:%d%s' % (self.glare_port, path) else: return 'http://127.0.0.1:%d/artifacts%s' % (self.glare_port, path) def set_user(self, username): if username not in self.users: raise KeyError self.current_user = username def _headers(self, custom_headers=None): base_headers = { 'X-Identity-Status': 'Confirmed', 'X-Auth-Token': self.users[self.current_user]['token'], 'X-User-Id': self.users[self.current_user]['id'], 'X-Tenant-Id': self.users[self.current_user]['tenant_id'], 'X-Project-Id': self.users[self.current_user]['tenant_id'], 'X-Roles': self.users[self.current_user]['role'], } base_headers.update(custom_headers or {}) return base_headers def create_artifact(self, data=None, status=201, type_name='sample_artifact'): return self.post('/' + type_name, data or {}, status=status) def _check_artifact_method(self, method, url, data=None, status=200, headers=None): if not headers: headers = self._headers() else: headers = self._headers(headers) headers.setdefault("Content-Type", "application/json") if 'application/json' in headers['Content-Type'] and data is not None: data = jsonutils.dumps(data) response = getattr(requests, method)(self._url(url), headers=headers, data=data) self.assertEqual(status, response.status_code, response.text) if status >= 400: return response.text if ("application/json" in response.headers["content-type"] or "application/schema+json" in response.headers["content-type"]): return jsonutils.loads(response.text) return response.text def post(self, url, data=None, status=201, headers=None): return self._check_artifact_method("post", url, data, status=status, headers=headers) def get(self, url, status=200, headers=None): return self._check_artifact_method("get", url, status=status, headers=headers) def delete(self, url, status=204, headers=None): return self._check_artifact_method("delete", url, status=status, headers=headers) def patch(self, url, data, status=200, headers=None): if headers is None: headers = {} if 'Content-Type' not in headers: headers.update({'Content-Type': 'application/json-patch+json'}) return self._check_artifact_method("patch", url, data, status=status, headers=headers) def put(self, url, data=None, status=200, headers=None): return self._check_artifact_method("put", url, data, status=status, headers=headers) # the test cases below are written in accordance with use cases # each test tries to cover separate use case in Glare # all code inside each test tries to cover all operators and data # involved in use case execution # each tests represents part of artifact lifecycle # so we can easily define where is the failed code make_active = [{"op": "replace", "path": "/status", "value": "active"}] make_deactivated = [{"op": "replace", "path": "/status", "value": "deactivated"}] make_public = [{"op": "replace", "path": "/visibility", "value": "public"}] def admin_action(self, artifact_id, body, status=200, type_name='sample_artifact'): cur_user = self.current_user self.set_user('admin') url = '/%s/%s' % (type_name, artifact_id) af = self.patch(url=url, data=body, status=status) self.set_user(cur_user) return af glare-0.5.0/glare/tests/functional/test_all.py000066400000000000000000000104431317401036700213710ustar00rootroot00000000000000# Copyright (c) 2016 Mirantis, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from glare.tests.functional import base class TestAll(base.TestArtifact): def test_all(self): for type_name in self.enabled_types: if type_name == 'all': continue for i in range(3): for j in range(3): self.create_artifact( data={'name': '%s_%d' % (type_name, i), 'version': '%d' % j, 'tags': ['tag%s' % i]}, type_name=type_name) # get all possible artifacts url = '/all?sort=name:asc&limit=100' res = self.get(url=url, status=200)['all'] self.assertEqual(54, len(res)) # get artifacts with latest versions url = '/all?version=latest&sort=name:asc' res = self.get(url=url, status=200)['all'] self.assertEqual(18, len(res)) for art in res: self.assertEqual('2.0.0', art['version']) # get images only url = '/all?type_name=images&sort=name:asc' res = self.get(url=url, status=200)['all'] self.assertEqual(9, len(res)) for art in res: self.assertEqual('images', art['type_name']) # get images and heat_templates url = '/all?type_name=in:images,heat_templates&sort=name:asc' res = self.get(url=url, status=200)['all'] self.assertEqual(18, len(res)) for art in res: self.assertIn(art['type_name'], ('images', 'heat_templates')) def test_all_readonlyness(self): self.create_artifact(data={'name': 'all'}, type_name='all', status=403) art = self.create_artifact(data={'name': 'image'}, type_name='images') url = '/all/%s' % art['id'] # update 'all' is forbidden data = [{ "op": "replace", "path": "/description", "value": "text" }] self.patch(url=url, data=data, status=403) # activation is forbidden data = [{ "op": "replace", "path": "/status", "value": "active" }] self.patch(url=url, data=data, status=403) # publishing is forbidden data = [{ "op": "replace", "path": "/visibility", "value": "public" }] self.patch(url=url, data=data, status=403) # get is okay new_art = self.get(url=url) self.assertEqual(new_art['id'], art['id']) def test_format_all(self): # Test that we used right output formatting for each type art1 = self.create_artifact(data={'name': 'aaa'}) # Sample artifact adds metadata that contains its name in upper case self.assertEqual('AAA', art1['__some_meta_information__']) # 'Image' doesn't art2 = self.create_artifact( data={'name': 'aaa'}, type_name='images') self.assertEqual('aaa', art2['name']) # fetch all artifacts url = '/all?sort=created_at:asc' res = self.get(url=url, status=200)['all'] self.assertEqual(2, len(res)) self.assertEqual('sample_artifact', res[0]['type_name']) self.assertEqual('AAA', res[0]['__some_meta_information__']) self.assertEqual('images', res[1]['type_name']) self.assertNotIn('__some_meta_information__', res[1]) # fetch artifacts by id url = '/all/%s' % art1['id'] res = self.get(url=url, status=200) self.assertEqual('sample_artifact', res['type_name']) self.assertEqual('AAA', res['__some_meta_information__']) url = '/all/%s' % art2['id'] res = self.get(url=url, status=200) self.assertEqual('images', res['type_name']) self.assertNotIn('__some_meta_information__', res) glare-0.5.0/glare/tests/functional/test_database_store.py000066400000000000000000000162721317401036700236070ustar00rootroot00000000000000# Copyright 2017 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import hashlib import requests from glare.tests.functional import base class TestMultiStore(base.TestArtifact): def setUp(self): base.functional.FunctionalTest.setUp(self) self.set_user('user1') self.glare_server.deployment_flavor = 'noauth' self.glare_server.enabled_artifact_types = 'sample_artifact' self.glare_server.custom_artifact_types_modules = ( 'glare.tests.sample_artifact') self.glare_server.artifact_type_section = """ [artifact_type:sample_artifact] delayed_delete = False default_store = database """ self.start_servers(**self.__dict__.copy()) def test_blob_dicts(self): # Getting empty artifact list url = '/sample_artifact' response = self.get(url=url, status=200) expected = {'first': '/artifacts/sample_artifact', 'sample_artifact': [], 'schema': '/schemas/sample_artifact'} self.assertEqual(expected, response) # Create a test artifact art = self.create_artifact(status=201, data={'name': 'test', 'version': '1.0', 'string_required': '123'}) self.assertIsNotNone(art['id']) # Get the artifact which should have a generated id and status # 'drafted' url = '/sample_artifact/%s' % art['id'] art_1 = self.get(url=url, status=200) self.assertIsNotNone(art_1['id']) self.assertEqual('drafted', art_1['status']) # Upload data to blob dict headers = {'Content-Type': 'application/octet-stream'} data = "data" * 100 self.put(url=url + '/dict_of_blobs/new_blob', data=data, status=200, headers=headers) # Download data from blob dict self.assertEqual(data, self.get(url=url + '/dict_of_blobs/new_blob', status=200)) # download blob from undefined dict property self.get(url=url + '/not_a_dict/not_a_blob', status=400) def test_blob_upload(self): # create artifact with blob data = 'data' self.create_artifact( data={'name': 'test_af', 'blob': data, 'version': '0.0.1'}, status=400) art = self.create_artifact(data={'name': 'test_af', 'version': '0.0.1', 'string_required': 'test'}) url = '/sample_artifact/%s' % art['id'] headers = {'Content-Type': 'application/octet-stream'} # upload to non-existing property self.put(url=url + '/blob_non_exist', data=data, status=400, headers=headers) # upload too big value big_data = "this is the smallest big data" self.put(url=url + '/small_blob', data=big_data, status=413, headers=headers) # upload correct blob value self.put(url=url + '/small_blob', data=big_data[:2], headers=headers) # Upload artifact via different user self.set_user('user2') self.put(url=url + '/blob', data=data, status=404, headers=headers) # Upload file to the artifact self.set_user('user1') art = self.put(url=url + '/blob', data=data, status=200, headers=headers) self.assertEqual('active', art['blob']['status']) self.assertEqual('application/octet-stream', art['blob']['content_type']) self.assertIn('url', art['blob']) self.assertNotIn('id', art['blob']) # reUpload file to artifact self.put(url=url + '/blob', data=data, status=409, headers=headers) # upload blob dict self.put(url + '/dict_of_blobs/test_key', data=data, headers=headers) # test re-upload failed self.put(url + '/dict_of_blobs/test_key', data=data, headers=headers, status=409) # upload few other blobs to the dict for elem in ('aaa', 'bbb', 'ccc', 'ddd'): self.put(url + '/dict_of_blobs/' + elem, data=data, headers=headers) # upload to active artifact self.patch(url, self.make_active) self.put(url + '/dict_of_blobs/key2', data=data, status=403, headers=headers) self.delete(url) def test_blob_download(self): data = 'some_arbitrary_testing_data' art = self.create_artifact(data={'name': 'test_af', 'version': '0.0.1'}) url = '/sample_artifact/%s' % art['id'] # download not uploaded blob self.get(url=url + '/blob', status=404) # download blob from not existing artifact self.get(url=url + '1/blob', status=404) # download blob from undefined property self.get(url=url + '/not_a_blob', status=400) headers = {'Content-Type': 'application/octet-stream'} art = self.put(url=url + '/blob', data=data, status=200, headers=headers) self.assertEqual('active', art['blob']['status']) md5 = hashlib.md5(data.encode('UTF-8')).hexdigest() sha1 = hashlib.sha1(data.encode('UTF-8')).hexdigest() sha256 = hashlib.sha256(data.encode('UTF-8')).hexdigest() self.assertEqual(md5, art['blob']['md5']) self.assertEqual(sha1, art['blob']['sha1']) self.assertEqual(sha256, art['blob']['sha256']) # check that content-length is in response response = requests.get(self._url(url + '/blob'), headers=self._headers()) self.assertEqual('27', response.headers["content-length"]) # check that all checksums are in response self.assertEqual('0825587cc011b7e76381b65e19d5ec27', response.headers["Content-MD5"]) self.assertEqual('89eb4b969b721ba8c3aff18ad7d69454f651a697', response.headers["X-Openstack-Glare-Content-SHA1"]) self.assertEqual('bbfd48c7ec792fc462e58232d4d9f407' 'ecefb75cc9e9823336166556b499ea4d', response.headers["X-Openstack-Glare-Content-SHA256"]) blob_data = self.get(url=url + '/blob') self.assertEqual(data, blob_data) # download artifact via admin self.set_user('admin') blob_data = self.get(url=url + '/blob') self.assertEqual(data, blob_data) # try to download blob via different user self.set_user('user2') self.get(url=url + '/blob', status=404) glare-0.5.0/glare/tests/functional/test_quotas.py000066400000000000000000000656751317401036700221560ustar00rootroot00000000000000# Copyright 2017 - Nokia Networks # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from glare.tests.functional import base class TestQuotasAPI(base.TestArtifact): """Test quotas REST API.""" def setUp(self): base.functional.FunctionalTest.setUp(self) self.glare_server.deployment_flavor = 'noauth' self.glare_server.max_uploaded_data = '10000' self.glare_server.max_artifact_number = '150' self.glare_server.enabled_artifact_types = 'images,' \ 'heat_templates,' \ 'murano_packages,' \ 'sample_artifact' self.glare_server.custom_artifact_types_modules = ( 'glare.tests.sample_artifact') self.glare_server.artifact_type_section = """ [artifact_type:sample_artifact] max_uploaded_data = 3000 [artifact_type:images] max_uploaded_data = 15000 max_artifact_number = 30 [artifact_type:heat_templates] max_artifact_number = 150 [artifact_type:murano_packages] max_uploaded_data = 10000 max_artifact_number = 100 """ self.start_servers(**self.__dict__.copy()) def test_quota_api_wrong(self): self.set_user('admin') url = '/quotas' # try to set wrong values values = [{"project1": "value1"}] self.put(url=url, data=values, status=400) # no quota name values = [ { "project_id": "project1", "project_quotas": [ { "quota_value": 10 } ] } ] self.put(url=url, data=values, status=400) # no quota value values = [ { "project_id": "project1", "project_quotas": [ { "quota_name": "max_artifact_number", } ] } ] self.put(url=url, data=values, status=400) # no project id values = [ { "project_quotas": [ { "quota_name": "max_artifact_number", "quota_value": 10 } ] } ] self.put(url=url, data=values, status=400) # no project quotas values = [ { "project_id": "project1", } ] self.put(url=url, data=values, status=400) # quota name has more than 1 : values = [ { "project_id": "project1", "project_quotas": [ { "quota_name": "max:artifact:number", "quota_value": 10 } ] } ] self.put(url=url, data=values, status=400) # too long quota name values = [ { "project_id": "project1", "project_quotas": [ { "quota_name": "a" * 256, "quota_value": 10 } ] } ] self.put(url=url, data=values, status=400) # too long project name values = [ { "project_id": "a" * 256, "project_quotas": [ { "quota_name": "max_artifact_number", "quota_value": 10 } ] } ] self.put(url=url, data=values, status=400) # negative quota value less than -1 values = [ { "project_id": "project1", "project_quotas": [ { "quota_name": "max_artifact_number", "quota_value": -2 } ] } ] self.put(url=url, data=values, status=400) # non-integer quota value values = [ { "project_id": "project1", "project_quotas": [ { "quota_name": "max_artifact_number", "quota_value": "AAA" } ] } ] self.put(url=url, data=values, status=400) values = [ { "project_id": "project1", "project_quotas": [ { "quota_name": "max_artifact_number", "quota_value": 10.5 } ] } ] self.put(url=url, data=values, status=400) @staticmethod def _deserialize_quotas(quotas): values = {} for item in quotas: project_id = item['project_id'] values[project_id] = {} for quota in item['project_quotas']: values[project_id][quota['quota_name']] = quota['quota_value'] return values def test_quota_api(self): self.set_user('admin') user1_tenant_id = self.users['user1']['tenant_id'] user2_tenant_id = self.users['user2']['tenant_id'] admin_tenant_id = self.users['admin']['tenant_id'] values = [ { "project_id": user1_tenant_id, "project_quotas": [ { "quota_name": "max_artifact_number:images", "quota_value": 3 }, { "quota_name": "max_artifact_number:heat_templates", "quota_value": 15 }, { "quota_name": "max_artifact_number:murano_packages", "quota_value": 10 }, { "quota_name": "max_artifact_number", "quota_value": 10 } ] }, { "project_id": user2_tenant_id, "project_quotas": [ { "quota_name": "max_artifact_number", "quota_value": 10 } ] }, { "project_id": admin_tenant_id, "project_quotas": [ { "quota_name": "max_artifact_number", "quota_value": 10 } ] } ] url = '/quotas' # define several quotas self.put(url=url, data=values) # get all quotas res = self.get(url=url) global_quotas = res['global_quotas'] self.assertEqual({ 'max_artifact_number': 150, 'max_artifact_number:heat_templates': 150, 'max_artifact_number:images': 30, 'max_artifact_number:murano_packages': 100, 'max_uploaded_data': 10000, 'max_uploaded_data:images': 15000, 'max_uploaded_data:murano_packages': 10000, 'max_uploaded_data:sample_artifact': 3000}, global_quotas) self.assertEqual(self._deserialize_quotas(values), self._deserialize_quotas(res['quotas'])) # get user1 quotas res = self._deserialize_quotas(self.get( url='/project-quotas/' + user1_tenant_id)) self.assertEqual({user1_tenant_id: { 'max_artifact_number': 10, 'max_artifact_number:heat_templates': 15, 'max_artifact_number:images': 3, 'max_artifact_number:murano_packages': 10, 'max_uploaded_data': 10000, 'max_uploaded_data:images': 15000, 'max_uploaded_data:murano_packages': 10000, 'max_uploaded_data:sample_artifact': 3000}}, res) # get admin quotas res = self._deserialize_quotas(self.get(url='/project-quotas')) self.assertEqual({admin_tenant_id: { 'max_artifact_number': 10, 'max_artifact_number:heat_templates': 150, 'max_artifact_number:images': 30, 'max_artifact_number:murano_packages': 100, 'max_uploaded_data': 10000, 'max_uploaded_data:images': 15000, 'max_uploaded_data:murano_packages': 10000, 'max_uploaded_data:sample_artifact': 3000}}, res) # user1 can't set quotas self.set_user('user1') self.put(url=url, data=values, status=403) self.get(url=url, status=403) # user1 can get his quotas res = self._deserialize_quotas(self.get(url='/project-quotas')) self.assertEqual({user1_tenant_id: { 'max_artifact_number': 10, 'max_artifact_number:heat_templates': 15, 'max_artifact_number:images': 3, 'max_artifact_number:murano_packages': 10, 'max_uploaded_data': 10000, 'max_uploaded_data:images': 15000, 'max_uploaded_data:murano_packages': 10000, 'max_uploaded_data:sample_artifact': 3000}}, res) # user1 can't get user2 quotas self.get(url='/project-quotas/' + user2_tenant_id, status=403) class TestStaticQuotas(base.TestArtifact): """Test static quota limits.""" def setUp(self): base.functional.FunctionalTest.setUp(self) self.set_user('user1') self.glare_server.deployment_flavor = 'noauth' self.glare_server.max_uploaded_data = '1000' self.glare_server.max_artifact_number = '10' self.glare_server.enabled_artifact_types = 'images,' \ 'heat_templates,' \ 'murano_packages,' \ 'sample_artifact' self.glare_server.custom_artifact_types_modules = ( 'glare.tests.sample_artifact') self.glare_server.artifact_type_section = """ [artifact_type:sample_artifact] default_store = database max_uploaded_data = 300 [artifact_type:images] max_uploaded_data = 1500 max_artifact_number = 3 [artifact_type:heat_templates] max_artifact_number = 15 [artifact_type:murano_packages] max_uploaded_data = 1000 max_artifact_number = 10 """ self.start_servers(**self.__dict__.copy()) def test_count_artifact_number(self): # initially there are no artifacts result = self.get('/all') self.assertEqual([], result['all']) # create 3 images for user1 for i in range(3): img = self.create_artifact( data={'name': 'img%d' % i}, type_name='images') # creation of another image fails because of artifact type limit self.create_artifact( data={'name': 'img4'}, type_name='images', status=403) # create 7 murano packages for i in range(7): self.create_artifact( data={'name': 'mp%d' % i}, type_name='murano_packages') # creation of another package fails because of global limit self.create_artifact( data={'name': 'mp8'}, type_name='murano_packages', status=403) # delete an image and create another murano package work self.delete('/images/%s' % img['id']) self.create_artifact( data={'name': 'mp8'}, type_name='murano_packages') # admin can create his own artifacts self.set_user('admin') for i in range(10): self.create_artifact( data={'name': 'ht%d' % i}, type_name='heat_templates') # creation of another heat template fails because of global limit self.create_artifact( data={'name': 'ht11'}, type_name='heat_templates', status=403) def test_calculate_uploaded_data(self): headers = {'Content-Type': 'application/octet-stream'} # initially there are no artifacts result = self.get('/all') self.assertEqual([], result['all']) # create 2 sample artifacts for user1 art1 = self.create_artifact(data={'name': 'art1'}) art2 = self.create_artifact(data={'name': 'art2'}) # create 2 images for user1 img1 = self.create_artifact(data={'name': 'img1'}, type_name='images') img2 = self.create_artifact(data={'name': 'img2'}, type_name='images') # upload to art1 fails now because of type limit data = 'a' * 301 self.put(url='/sample_artifact/%s/blob' % art1['id'], data=data, status=413, headers=headers) # upload to img1 fails now because of global limit data = 'a' * 1001 self.put(url='/images/%s/image' % img1['id'], data=data, status=413, headers=headers) # upload 300 bytes to 'blob' of art1 data = 'a' * 300 self.put(url='/sample_artifact/%s/blob' % art1['id'], data=data, headers=headers) # upload another blob to art1 fails because of type limit self.put(url='/sample_artifact/%s/dict_of_blobs/blob' % art1['id'], data='a', status=413, headers=headers) # upload to art2 fails now because of type limit self.put(url='/sample_artifact/%s/dict_of_blobs/blob' % art2['id'], data='a', status=413, headers=headers) # delete art1 and check that upload to art2 works data = 'a' * 300 self.delete('/sample_artifact/%s' % art1['id']) self.put(url='/sample_artifact/%s/dict_of_blobs/blob' % art2['id'], data=data, headers=headers) # upload 700 bytes to img1 works data = 'a' * 700 self.put(url='/images/%s/image' % img1['id'], data=data, headers=headers) # upload to img2 fails because of global limit self.put(url='/images/%s/image' % img2['id'], data='a', status=413, headers=headers) # admin can upload data to his images self.set_user('admin') img1 = self.create_artifact(data={'name': 'img1'}, type_name='images') data = 'a' * 1000 self.put(url='/images/%s/image' % img1['id'], data=data, headers=headers) class TestDynamicQuotas(base.TestArtifact): """Test dynamic quota limits.""" def setUp(self): base.functional.FunctionalTest.setUp(self) self.glare_server.deployment_flavor = 'noauth' self.glare_server.enabled_artifact_types = 'images,' \ 'heat_templates,' \ 'murano_packages,' \ 'sample_artifact' self.glare_server.custom_artifact_types_modules = ( 'glare.tests.sample_artifact') self.start_servers(**self.__dict__.copy()) def test_count_artifact_number(self): self.set_user('admin') user1_tenant_id = self.users['user1']['tenant_id'] admin_tenant_id = self.users['admin']['tenant_id'] values = [ { "project_id": user1_tenant_id, "project_quotas": [ { "quota_name": "max_artifact_number:images", "quota_value": 3 }, { "quota_name": "max_artifact_number:heat_templates", "quota_value": 15 }, { "quota_name": "max_artifact_number:murano_packages", "quota_value": 10 }, { "quota_name": "max_artifact_number", "quota_value": 10 } ] }, { "project_id": admin_tenant_id, "project_quotas": [ { "quota_name": "max_artifact_number", "quota_value": 10 } ] } ] url = '/quotas' # define several quotas self.put(url=url, data=values) self.set_user('user1') # initially there are no artifacts result = self.get('/all') self.assertEqual([], result['all']) # create 3 images for user1 for i in range(3): img = self.create_artifact( data={'name': 'img%d' % i}, type_name='images') # creation of another image fails because of artifact type limit self.create_artifact( data={'name': 'img4'}, type_name='images', status=403) # create 7 murano packages for i in range(7): self.create_artifact( data={'name': 'mp%d' % i}, type_name='murano_packages') # creation of another package fails because of global limit self.create_artifact( data={'name': 'mp8'}, type_name='murano_packages', status=403) # delete an image and create another murano package work self.delete('/images/%s' % img['id']) self.create_artifact( data={'name': 'mp8'}, type_name='murano_packages') # admin can create his own artifacts self.set_user('admin') for i in range(10): self.create_artifact( data={'name': 'ht%d' % i}, type_name='heat_templates') # creation of another heat template fails because of global limit self.create_artifact( data={'name': 'ht11'}, type_name='heat_templates', status=403) # disable global limit for user1 and try to create 15 heat templates values = [ { "project_id": user1_tenant_id, "project_quotas": [ { "quota_name": "max_artifact_number:images", "quota_value": 3 }, { "quota_name": "max_artifact_number:heat_templates", "quota_value": 15 }, { "quota_name": "max_artifact_number:murano_packages", "quota_value": 10 }, { "quota_name": "max_artifact_number", "quota_value": -1 } ] } ] url = '/quotas' self.put(url=url, data=values) self.set_user("user1") for i in range(15): self.create_artifact( data={'name': 'ht%d' % i}, type_name='heat_templates') # creation of another heat template fails because of type limit self.create_artifact( data={'name': 'ht16'}, type_name='heat_templates', status=403) self.set_user("admin") # disable type limit for heat templates and create 1 heat templates values = [ { "project_id": user1_tenant_id, "project_quotas": [ { "quota_name": "max_artifact_number:images", "quota_value": 3 }, { "quota_name": "max_artifact_number:heat_templates", "quota_value": -1 }, { "quota_name": "max_artifact_number:murano_packages", "quota_value": 10 }, { "quota_name": "max_artifact_number", "quota_value": -1 } ] } ] url = '/quotas' self.put(url=url, data=values) # now user1 can create another heat template self.set_user("user1") self.create_artifact( data={'name': 'ht16'}, type_name='heat_templates') def test_calculate_uploaded_data(self): self.set_user('admin') user1_tenant_id = self.users['user1']['tenant_id'] admin_tenant_id = self.users['admin']['tenant_id'] values = [ { "project_id": user1_tenant_id, "project_quotas": [ { "quota_name": "max_uploaded_data:images", "quota_value": 1500 }, { "quota_name": "max_uploaded_data:sample_artifact", "quota_value": 300 }, { "quota_name": "max_uploaded_data:murano_packages", "quota_value": 1000 }, { "quota_name": "max_uploaded_data", "quota_value": 1000 } ] }, { "project_id": admin_tenant_id, "project_quotas": [ { "quota_name": "max_uploaded_data", "quota_value": 1000 } ] } ] url = '/quotas' # define several quotas self.put(url=url, data=values) headers = {'Content-Type': 'application/octet-stream'} self.set_user('user1') # initially there are no artifacts result = self.get('/all') self.assertEqual([], result['all']) # create 2 sample artifacts for user1 art1 = self.create_artifact(data={'name': 'art1'}) art2 = self.create_artifact(data={'name': 'art2'}) # create 3 images for user1 img1 = self.create_artifact(data={'name': 'img1'}, type_name='images') img2 = self.create_artifact(data={'name': 'img2'}, type_name='images') img3 = self.create_artifact(data={'name': 'img3'}, type_name='images') # upload to art1 fails now because of type limit data = 'a' * 301 self.put(url='/sample_artifact/%s/blob' % art1['id'], data=data, status=413, headers=headers) # upload to img1 fails now because of global limit data = 'a' * 1001 self.put(url='/images/%s/image' % img1['id'], data=data, status=413, headers=headers) # upload 300 bytes to 'blob' of art1 data = 'a' * 300 self.put(url='/sample_artifact/%s/blob' % art1['id'], data=data, headers=headers) # upload another blob to art1 fails because of type limit self.put(url='/sample_artifact/%s/dict_of_blobs/blob' % art1['id'], data='a', status=413, headers=headers) # upload to art2 fails now because of type limit self.put(url='/sample_artifact/%s/dict_of_blobs/blob' % art2['id'], data='a', status=413, headers=headers) # delete art1 and check that upload to art2 works data = 'a' * 300 self.delete('/sample_artifact/%s' % art1['id']) self.put(url='/sample_artifact/%s/dict_of_blobs/blob' % art2['id'], data=data, headers=headers) # upload 700 bytes to img1 works data = 'a' * 700 self.put(url='/images/%s/image' % img1['id'], data=data, headers=headers) # upload to img2 fails because of global limit self.put(url='/images/%s/image' % img2['id'], data='a', status=413, headers=headers) # admin can upload data to his images self.set_user('admin') img1 = self.create_artifact(data={'name': 'img1'}, type_name='images') data = 'a' * 1000 self.put(url='/images/%s/image' % img1['id'], data=data, headers=headers) # disable global limit and try upload data from user1 again values = [ { "project_id": user1_tenant_id, "project_quotas": [ { "quota_name": "max_uploaded_data:images", "quota_value": 1500 }, { "quota_name": "max_uploaded_data:sample_artifact", "quota_value": 300 }, { "quota_name": "max_uploaded_data:murano_packages", "quota_value": 1000 }, { "quota_name": "max_uploaded_data", "quota_value": -1 } ] } ] url = '/quotas' self.put(url=url, data=values) self.set_user("user1") data = 'a' * 800 self.put(url='/images/%s/image' % img2['id'], data=data, headers=headers) # uploading more fails because of image type limit data = 'a' self.put(url='/images/%s/image' % img3['id'], data=data, headers=headers, status=413) # disable type limit and try upload data from user1 again self.set_user("admin") values = [ { "project_id": user1_tenant_id, "project_quotas": [ { "quota_name": "max_uploaded_data:images", "quota_value": -1 }, { "quota_name": "max_uploaded_data:sample_artifact", "quota_value": 300 }, { "quota_name": "max_uploaded_data:murano_packages", "quota_value": 1000 }, { "quota_name": "max_uploaded_data", "quota_value": -1 } ] } ] url = '/quotas' self.put(url=url, data=values) self.set_user("user1") data = 'a' * 1000 self.put(url='/images/%s/image' % img3['id'], data=data, headers=headers) glare-0.5.0/glare/tests/functional/test_sample_artifact.py000066400000000000000000003047571317401036700237750ustar00rootroot00000000000000# Copyright 2016 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import hashlib import uuid from oslo_serialization import jsonutils import requests from glare.tests.functional import base def sort_results(lst, target='name'): return sorted(lst, key=lambda x: x[target]) class TestList(base.TestArtifact): def test_list_marker_and_limit(self): # Create artifacts art_list = [self.create_artifact({'name': 'name%s' % i, 'version': '1.0', 'tags': ['tag%s' % i], 'int1': 1024 + i, 'float1': 123.456, 'str1': 'bugaga', 'bool1': True}) for i in range(5)] # sort by 'next' url url = '/sample_artifact?limit=1&sort=int1:asc,name:desc' result = self.get(url=url) self.assertEqual([art_list[0]], result['sample_artifact']) marker = result['next'] result = self.get(url=marker[10:]) self.assertEqual([art_list[1]], result['sample_artifact']) # sort by custom marker url = '/sample_artifact?sort=int1:asc&marker=%s' % art_list[1]['id'] result = self.get(url=url) self.assertEqual(art_list[2:], result['sample_artifact']) url = '/sample_artifact?sort=int1:desc&marker=%s' % art_list[1]['id'] result = self.get(url=url) self.assertEqual(art_list[:1], result['sample_artifact']) url = '/sample_artifact' \ '?sort=float1:asc,name:desc&marker=%s' % art_list[1]['id'] result = self.get(url=url) self.assertEqual([art_list[0]], result['sample_artifact']) # paginate by name in desc order with limit 2 url = '/sample_artifact?limit=2&sort=name:desc' result = self.get(url=url) self.assertEqual(art_list[4:2:-1], result['sample_artifact']) marker = result['next'] result = self.get(url=marker[10:]) self.assertEqual(art_list[2:0:-1], result['sample_artifact']) marker = result['next'] result = self.get(url=marker[10:]) self.assertEqual([art_list[0]], result['sample_artifact']) def test_list_base_filters(self): # Create artifact art_list = [self.create_artifact({'name': 'name%s' % i, 'version': '1.0', 'tags': ['tag%s' % i], 'int1': 1024, 'float1': 123.456, 'str1': 'bugaga', 'bool1': True}) for i in range(5)] public_art = self.create_artifact({'name': 'name5', 'version': '1.0', 'tags': ['tag4', 'tag5'], 'int1': 2048, 'float1': 987.654, 'str1': 'lalala', 'bool1': False, 'string_required': '123'}) url = '/sample_artifact/%s' % public_art['id'] data = [{ "op": "replace", "path": "/status", "value": "active" }] self.patch(url=url, data=data, status=200) public_art = self.admin_action(public_art['id'], self.make_public) art_list.append(public_art) art_list.sort(key=lambda x: x['name']) url = '/sample_artifact?str1=bla:empty' self.get(url=url, status=400) url = '/sample_artifact?str1=bla:empty' self.get(url=url, status=400) url = '/sample_artifact?name=name0' result = sort_results(self.get(url=url)['sample_artifact']) self.assertEqual([art_list[0]], result) url = '/sample_artifact?tags=tag4' result = sort_results(self.get(url=url)['sample_artifact']) self.assertEqual(art_list[4:], result) url = '/sample_artifact?name=eq:name0' result = sort_results(self.get(url=url)['sample_artifact']) self.assertEqual(art_list[:1], result) url = '/sample_artifact?str1=eq:bugaga' result = sort_results(self.get(url=url)['sample_artifact']) self.assertEqual(art_list[:5], result) url = '/sample_artifact?int1=eq:2048' result = sort_results(self.get(url=url)['sample_artifact']) self.assertEqual(art_list[5:], result) url = '/sample_artifact?float1=eq:123.456' result = sort_results(self.get(url=url)['sample_artifact']) self.assertEqual(art_list[:5], result) url = '/sample_artifact?name=neq:name0' result = sort_results(self.get(url=url)['sample_artifact']) self.assertEqual(art_list[1:], result) url = '/sample_artifact?name=in:name,name0' result = sort_results(self.get(url=url)['sample_artifact']) self.assertEqual(art_list[:1], result) url = '/sample_artifact?name=in:not_exist,name0' result = sort_results(self.get(url=url)['sample_artifact']) self.assertEqual(art_list[:1], result) url = '/sample_artifact?name=not_exist' result = self.get(url=url)['sample_artifact'] self.assertEqual([], result) url = '/sample_artifact?name=bla:name1' self.get(url=url, status=400) url = '/sample_artifact?name=' self.get(url=url, status=400) url = '/sample_artifact?name=eq:' self.get(url=url, status=400) url = '/sample_artifact?tags=tag4,tag5' result = sort_results(self.get(url=url)['sample_artifact']) self.assertEqual(art_list[5:], result) url = '/sample_artifact?tags-any=tag4' result = sort_results(self.get(url=url)['sample_artifact']) self.assertEqual(art_list[4:], result) url = '/sample_artifact?tags=tag4,tag_not_exist,tag5' result = self.get(url=url)['sample_artifact'] self.assertEqual([], result) url = '/sample_artifact?tags-any=tag4,tag_not_exist,tag5' result = sort_results(self.get(url=url)['sample_artifact']) self.assertEqual(art_list[4:], result) url = '/sample_artifact?tags=tag_not_exist,tag_not_exist_1' result = self.get(url=url)['sample_artifact'] self.assertEqual([], result) url = '/sample_artifact?tags' result = sort_results(self.get(url=url)['sample_artifact']) self.assertEqual(art_list, result) url = '/sample_artifact?tags=' result = sort_results(self.get(url=url)['sample_artifact']) self.assertEqual(art_list, result) url = '/sample_artifact?tags=eq:tag0' self.get(url=url, status=400) url = '/sample_artifact?tags=bla:tag0' self.get(url=url, status=400) url = '/sample_artifact?tags=neq:tag1' self.get(url=url, status=400) url = '/sample_artifact?visibility=private' result = sort_results(self.get(url=url)['sample_artifact']) self.assertEqual(art_list[:5], result) url = '/sample_artifact?visibility=public' result = sort_results(self.get(url=url)['sample_artifact']) self.assertEqual(art_list[5:], result) url = '/sample_artifact?visibility=eq:private' result = sort_results(self.get(url=url)['sample_artifact']) self.assertEqual(art_list[:5], result) url = '/sample_artifact?visibility=eq:public' result = sort_results(self.get(url=url)['sample_artifact']) self.assertEqual(art_list[5:], result) url = '/sample_artifact?visibility=neq:private' result = sort_results(self.get(url=url)['sample_artifact']) self.assertEqual(art_list[5:], result) url = '/sample_artifact?visibility=neq:public' result = sort_results(self.get(url=url)['sample_artifact']) self.assertEqual(art_list[:5], result) url = '/sample_artifact?visibility=blabla' self.get(url=url, status=400) url = '/sample_artifact?visibility=neq:blabla' self.get(url=url, status=400) url = '/sample_artifact?name=eq:name0&name=name1&tags=tag1' result = self.get(url=url)['sample_artifact'] self.assertEqual([], result) url = '/sample_artifact?int1=gt:2000' result = sort_results(self.get(url=url)['sample_artifact']) self.assertEqual(art_list[5:], result) url = '/sample_artifact?int1=lte:1024' result = sort_results(self.get(url=url)['sample_artifact']) self.assertEqual(art_list[:5], result) url = '/sample_artifact?int1=gt:1000&int1=lt:2000' result = sort_results(self.get(url=url)['sample_artifact']) self.assertEqual(art_list[:5], result) url = '/sample_artifact?int1=lt:2000' result = sort_results(self.get(url=url)['sample_artifact']) self.assertEqual(art_list[:5], result) url = '/sample_artifact?float1=gt:200.000' result = sort_results(self.get(url=url)['sample_artifact']) self.assertEqual(art_list[5:], result) url = '/sample_artifact?float1=gt:100.00&float1=lt:200.00' result = sort_results(self.get(url=url)['sample_artifact']) self.assertEqual(art_list[:5], result) url = '/sample_artifact?float1=lt:200.00' result = sort_results(self.get(url=url)['sample_artifact']) self.assertEqual(art_list[:5], result) url = '/sample_artifact?float1=lt:200' result = sort_results(self.get(url=url)['sample_artifact']) self.assertEqual(art_list[:5], result) url = '/sample_artifact?float1=lte:123.456' result = sort_results(self.get(url=url)['sample_artifact']) self.assertEqual(art_list[:5], result) url = '/sample_artifact?bool1=True' result = sort_results(self.get(url=url)['sample_artifact']) self.assertEqual(art_list[:5], result) url = '/sample_artifact?bool1=False' result = sort_results(self.get(url=url)['sample_artifact']) self.assertEqual(art_list[5:], result) def test_artifact_list_dict_filters(self): lists_of_str = [ ['aaa', 'bbb', 'ccc'], ['aaa', 'bbb'], ['aaa', 'ddd'], ['bbb'], ['ccc'] ] dicts_of_str = [ {'aaa': 'z', 'bbb': 'z', 'ccc': 'z'}, {'aaa': 'z', 'bbb': 'z'}, {'aaa': 'z', 'ddd': 'z'}, {'bbb': 'z'}, {'ccc': 'z'} ] art_list = [self.create_artifact({'name': 'name%s' % i, 'version': '1.0', 'tags': ['tag%s' % i], 'int1': 1024, 'float1': 123.456, 'str1': 'bugaga', 'bool1': True, 'list_of_str': lists_of_str[i], 'dict_of_str': dicts_of_str[i]}) for i in range(5)] # test list filters url = '/sample_artifact?list_of_str=aaa&sort=name' result = sort_results(self.get(url=url)['sample_artifact']) self.assertEqual(art_list[:3], result) url = '/sample_artifact?list_of_str=ccc&sort=name' result = sort_results(self.get(url=url)['sample_artifact']) self.assertEqual([art_list[0], art_list[4]], result) url = '/sample_artifact?list_of_str=eee&sort=name' result = sort_results(self.get(url=url)['sample_artifact']) self.assertEqual([], result) # test dict filters url = '/sample_artifact?dict_of_str=aaa&sort=name' result = sort_results(self.get(url=url)['sample_artifact']) self.assertEqual(art_list[:3], result) url = '/sample_artifact?dict_of_str=ccc&sort=name' result = sort_results(self.get(url=url)['sample_artifact']) self.assertEqual([art_list[0], art_list[4]], result) url = '/sample_artifact?dict_of_str=eee&sort=name' result = sort_results(self.get(url=url)['sample_artifact']) self.assertEqual([], result) def test_list_dict_prop_filters(self): # Create artifact art_list = [self.create_artifact({'name': 'name0', 'version': '1.0', 'dict_of_str': {'pr1': 'val1'}}), self.create_artifact({'name': 'name1', 'version': '1.0', 'dict_of_str': {'pr1': 'val1', 'pr2': 'val2'}}), self.create_artifact({'name': 'name2', 'version': '1.0', 'dict_of_str': {'pr3': 'val3'}}), self.create_artifact({'name': 'name3', 'version': '1.0', 'dict_of_str': {'pr3': 'val1'}, 'dict_of_int': {"1": 10, "2": 20}}), self.create_artifact({'name': 'name4', 'version': '1.0', 'dict_of_str': {}, 'dict_of_int': {"2": 20, "3": 30}}), ] art_list.sort(key=lambda x: x['name']) url = '/sample_artifact?dict_of_str.pr1=val1' result = sort_results(self.get(url=url)['sample_artifact']) self.assertEqual(art_list[:2], result) url = '/sample_artifact?dict_of_int.1=10' result = sort_results(self.get(url=url)['sample_artifact']) self.assertEqual(art_list[3:4], result) url = '/sample_artifact?dict_of_str.pr1=val999' result = sort_results(self.get(url=url)['sample_artifact']) self.assertEqual([], result) url = '/sample_artifact?dict_of_str.pr1=eq:val1' result = sort_results(self.get(url=url)['sample_artifact']) self.assertEqual(art_list[:2], result) url = '/sample_artifact?dict_of_str.' result = sort_results(self.get(url=url)['sample_artifact']) self.assertEqual([], result) for op in ['gt', 'gte', 'lt', 'lte']: url = '/sample_artifact?dict_of_str.pr3=%s:val3' % op self.get(url=url, status=400) url = '/sample_artifact?dict_of_str.pr3=blabla:val3' self.get(url=url, status=400) url = '/sample_artifact?dict_of_str.pr1=' result = sort_results(self.get(url=url)['sample_artifact']) self.assertEqual([], result) url = '/sample_artifact?dict_of_str.pr1=' result = sort_results(self.get(url=url)['sample_artifact']) self.assertEqual([], result) url = '/sample_artifact?dict_of_str' self.assertEqual([], result) url = '/sample_artifact?dict_of_str.pr3=blabla:val3' self.get(url=url, status=400) url = '/sample_artifact?list_of_str.pr3=blabla:val3' self.get(url=url, status=400) url = '/sample_artifact?dict_of_str.bla=val1' result = sort_results(self.get(url=url)['sample_artifact']) self.assertEqual([], result) url = '/sample_artifact?dict_of_int.1=lala' self.get(url=url, status=400) def test_list_sorted(self): art_list = [self.create_artifact({'name': 'name%s' % i, 'version': '1.0', 'tags': ['tag%s' % i], 'int1': i, 'float1': 123.456 + (-0.9) ** i, 'str1': 'bugaga', 'bool1': True, 'list_of_int': [11, 22, - i], 'dict_of_int': {'one': 4 * i, 'two': (-2) ** i}}) for i in range(5)] # sorted by string 'asc' url = '/sample_artifact?sort=name:asc' result = self.get(url=url) expected = sort_results(art_list) self.assertEqual(expected, result['sample_artifact']) # sorted by string 'desc' url = '/sample_artifact?sort=name:desc' result = self.get(url=url) expected = sort_results(art_list) expected.reverse() self.assertEqual(expected, result['sample_artifact']) # sorted by int 'asc' url = '/sample_artifact?sort=int1:asc' result = self.get(url=url) expected = sort_results(art_list, target='int1') self.assertEqual(expected, result['sample_artifact']) # sorted by int 'desc' url = '/sample_artifact?sort=int1:desc' result = self.get(url=url) expected = sort_results(art_list, target='int1') expected.reverse() self.assertEqual(expected, result['sample_artifact']) # sorted by float 'asc' url = '/sample_artifact?sort=float1:asc' result = self.get(url=url) expected = sort_results(art_list, target='float1') self.assertEqual(expected, result['sample_artifact']) # sorted by float 'desc' url = '/sample_artifact?sort=float1:desc' result = self.get(url=url) expected = sort_results(art_list, target='float1') expected.reverse() self.assertEqual(expected, result['sample_artifact']) # sorted by unsorted 'asc' url = '/sample_artifact?sort=bool1:asc' self.get(url=url, status=400) # sorted by unsorted 'desc' url = '/sample_artifact?sort=bool1:desc' self.get(url=url, status=400) # sorted by non-existent 'asc' url = '/sample_artifact?sort=non_existent:asc' self.get(url=url, status=400) # sorted by non-existent 'desc' url = '/sample_artifact?sort=non_existent:desc' self.get(url=url, status=400) # sorted by invalid op url = '/sample_artifact?sort=name:invalid_op' self.get(url=url, status=400) # sorted without op url = '/sample_artifact?sort=name' result = self.get(url=url) expected = sort_results(art_list) expected.reverse() self.assertEqual(expected, result['sample_artifact']) # sorted by list url = '/sample_artifact?sort=list_of_int:asc' self.get(url=url, status=400) # sorted by dict url = '/sample_artifact?sort=dict_of_int:asc' self.get(url=url, status=400) # sorted by element of dict url = '/sample_artifact?sort=dict_of_int.one:asc' self.get(url=url, status=400) # sorted by any prop url = '/sample_artifact?sort=name:asc,int1:desc' result = self.get(url=url) expected = sort_results(sort_results(art_list), target='int1') self.assertEqual(expected, result['sample_artifact']) def test_list_versions(self): # Create artifacts with versions version_list = ['1.0', '1.1', '2.0.0', '2.0.1-beta', '2.0.1', '20.0'] # Create artifact art_list = [self.create_artifact({'name': 'name', 'version': version_list[i - 1], 'tags': ['tag%s' % i], 'int1': 2048, 'float1': 123.456, 'str1': 'bugaga', 'bool1': True}) for i in range(1, 7)] public_art = self.create_artifact( {'name': 'name', 'tags': ['tag4', 'tag5'], 'int1': 1024, 'float1': 987.654, 'str1': 'lalala', 'bool1': False, 'string_required': '123'}) url = '/sample_artifact/%s' % public_art['id'] data = [{ "op": "replace", "path": "/status", "value": "active" }] self.patch(url=url, data=data, status=200) public_art = self.admin_action(public_art['id'], self.make_public) art_list.insert(0, public_art) expected_result = sort_results(art_list, target='version') url = '/sample_artifact' result = sort_results(self.get(url=url)['sample_artifact'], target='version') self.assertEqual(expected_result, result) # Creating an artifact with existing version fails self.create_artifact( {'name': 'name', 'version': '1.0', 'tags': ['tag1'], 'int1': 2048, 'float1': 123.456, 'str1': 'bugaga', 'bool1': True}, status=409) url = '/sample_artifact?name=name&version=gte:2.0.0' result = sort_results(self.get(url=url)['sample_artifact'], target='version') self.assertEqual(expected_result[3:], result) url = ('/sample_artifact?' 'name=name&version=gte:1.1&version=lt:2.0.1-beta') result = sort_results(self.get(url=url)['sample_artifact'], target='version') self.assertEqual(expected_result[2:4], result) # Filtering by version without name is ok url = '/sample_artifact?version=gte:2.0.0' self.get(url=url, status=200) # Several name filters with version is ok url = '/sample_artifact?name=name&name=anothername&version=gte:2.0.0' self.get(url=url, status=200) # Filtering by version with name filter op different from 'eq' url = '/sample_artifact?version=gte:2.0.0&name=neq:name' self.get(url=url, status=200) # Sorting by version 'asc' url = '/sample_artifact?name=name&sort=version:asc' result = self.get(url=url)['sample_artifact'] self.assertEqual(art_list, result) # Sorting by version 'desc' url = '/sample_artifact?name=name&sort=version:desc' result = self.get(url=url)['sample_artifact'] self.assertEqual(list(reversed(art_list)), result) def test_list_latest_filter(self): # Create artifacts with versions group1_versions = ['1.0', '20.0', '2.0.0', '2.0.1-beta', '2.0.1'] group2_versions = ['1', '1000.0.1-beta', '99.0', '1000.0.1-alpha', '1000.0.1'] for i in range(5): self.create_artifact( {'name': 'group1', 'version': group1_versions[i], 'tags': ['tag%s' % i], 'int1': 2048 + i, 'float1': 123.456, 'str1': 'bugaga', "string_required": "test_str", 'bool1': True}) self.create_artifact( {'name': 'group2', 'version': group2_versions[i], 'tags': ['tag%s' % i], 'int1': 2048 + i, 'float1': 123.456, 'str1': 'bugaga', "string_required": "test_str", 'bool1': True}) url = '/sample_artifact?version=latest&sort=name:asc' res = self.get(url=url, status=200)['sample_artifact'] self.assertEqual(2, len(res)) self.assertEqual('20.0.0', res[0]['version']) self.assertEqual('1000.0.1', res[1]['version']) self.patch('/sample_artifact/' + res[0]['id'], self.make_active) url = '/sample_artifact?version=latest&sort=name:asc&status=drafted' res = self.get(url=url, status=200)['sample_artifact'] self.assertEqual(2, len(res)) self.assertEqual('2.0.1', res[0]['version']) self.assertEqual('1000.0.1', res[1]['version']) url = '/sample_artifact?version=latest&sort=name:asc&int1=2050' res = self.get(url=url, status=200)['sample_artifact'] self.assertEqual(2, len(res)) self.assertEqual('2.0.0', res[0]['version']) self.assertEqual('99.0.0', res[1]['version']) url = '/sample_artifact?version=latest&name=group1' res = self.get(url=url, status=200)['sample_artifact'] self.assertEqual(1, len(res)) self.assertEqual('20.0.0', res[0]['version']) url = '/sample_artifact?version=latest&name=group2' res = self.get(url=url, status=200)['sample_artifact'] self.assertEqual(1, len(res)) self.assertEqual('1000.0.1', res[0]['version']) def test_list_support_unicode_filters(self): unicode_text = u'\u041f\u0420\u0418\u0412\u0415\u0422' art1 = self.create_artifact(data={'name': unicode_text}) self.assertEqual(unicode_text, art1['name']) mixed_text = u'la\u041f' art2 = self.create_artifact(data={'name': mixed_text}) self.assertEqual(mixed_text, art2['name']) headers = {'Content-Type': 'text/html; charset=UTF-8'} url = u'/sample_artifact?name=\u041f\u0420\u0418\u0412\u0415\u0422' response_url = u'/artifacts/sample_artifact?name=' \ u'%D0%9F%D0%A0%D0%98%D0%92%D0%95%D0%A2' result = self.get(url=url, headers=headers) self.assertEqual(art1, result['sample_artifact'][0]) self.assertEqual(response_url, result['first']) class TestBlobs(base.TestArtifact): def test_blob_dicts(self): # Getting empty artifact list url = '/sample_artifact' response = self.get(url=url, status=200) expected = {'first': '/artifacts/sample_artifact', 'sample_artifact': [], 'schema': '/schemas/sample_artifact'} self.assertEqual(expected, response) # Create a test artifact art = self.create_artifact(status=201, data={'name': 'test', 'version': '1.0', 'string_required': '123'}) self.assertIsNotNone(art['id']) # Get the artifact which should have a generated id and status # 'drafted' url = '/sample_artifact/%s' % art['id'] art_1 = self.get(url=url, status=200) self.assertIsNotNone(art_1['id']) self.assertEqual('drafted', art_1['status']) # Upload data to blob dict headers = {'Content-Type': 'application/octet-stream'} data = "data" * 100 blob_name = 'blob_name' * 100 self.put(url=url + '/dict_of_blobs/' + blob_name, data=data, status=200, headers=headers) # Download data from blob dict self.assertEqual(data, self.get(url=url + '/dict_of_blobs/' + blob_name, status=200)) # Download blob from undefined dict property self.get(url=url + '/not_a_dict/not_a_blob', status=400) # Blob url is generated right art = self.get(url=url, status=200) exp_blob_url = '/artifacts' + url + '/dict_of_blobs/' + blob_name self.assertEqual(exp_blob_url, art['dict_of_blobs'][blob_name]['url']) def test_blob_upload(self): # create artifact with blob data = 'data' self.create_artifact( data={'name': 'test_af', 'blob': data, 'version': '0.0.1'}, status=400) art = self.create_artifact(data={'name': 'test_af', 'version': '0.0.1', 'string_required': 'test'}) url = '/sample_artifact/%s' % art['id'] headers = {'Content-Type': 'application/octet-stream', 'Content-Length': '4'} # upload to non-existing property self.put(url=url + '/blob_non_exist', data=data, status=400, headers=headers) # upload too big value big_data = "this is the smallest big data" self.put(url=url + '/small_blob', data=big_data, status=413, headers=headers) # upload correct blob value self.put(url=url + '/small_blob', data=big_data[:2], headers=headers) # Upload artifact via different user self.set_user('user2') self.put(url=url + '/blob', data=data, status=404, headers=headers) # Upload file to the artifact self.set_user('user1') art = self.put(url=url + '/blob', data=data, status=200, headers=headers) self.assertEqual('active', art['blob']['status']) self.assertEqual('application/octet-stream', art['blob']['content_type']) self.assertIn('url', art['blob']) self.assertNotIn('id', art['blob']) # Blob url is generated right exp_blob_url = '/artifacts' + url + '/blob' self.assertEqual(exp_blob_url, art['blob']['url']) # reUpload file to artifact self.put(url=url + '/blob', data=data, status=409, headers=headers) # upload blob dict self.put(url + '/dict_of_blobs/test_key', data=data, headers=headers) # test re-upload failed self.put(url + '/dict_of_blobs/test_key', data=data, headers=headers, status=409) # upload few other blobs to the dict for elem in ('aaa', 'bbb', 'ccc', 'ddd'): self.put(url + '/dict_of_blobs/' + elem, data=data, headers=headers) # upload to active artifact self.patch(url, self.make_active) self.put(url + '/dict_of_blobs/key2', data=data, status=403, headers=headers) self.delete(url) def test_blob_download(self): data = 'some_arbitrary_testing_data' art = self.create_artifact(data={'name': 'test_af', 'version': '0.0.1'}) url = '/sample_artifact/%s' % art['id'] # download not uploaded blob self.get(url=url + '/blob', status=404) # download blob from not existing artifact self.get(url=url + '1/blob', status=404) # download blob from undefined property self.get(url=url + '/not_a_blob', status=400) headers = {'Content-Type': 'application/octet-stream'} art = self.put(url=url + '/blob', data=data, status=200, headers=headers) self.assertEqual('active', art['blob']['status']) md5 = hashlib.md5(data.encode('UTF-8')).hexdigest() sha1 = hashlib.sha1(data.encode('UTF-8')).hexdigest() sha256 = hashlib.sha256(data.encode('UTF-8')).hexdigest() self.assertEqual(md5, art['blob']['md5']) self.assertEqual(sha1, art['blob']['sha1']) self.assertEqual(sha256, art['blob']['sha256']) # check that content-length is in response response = requests.get(self._url(url + '/blob'), headers=self._headers()) self.assertEqual('27', response.headers["content-length"]) # check that all checksums are in response response = requests.get(self._url(url + '/blob'), headers=self._headers()) self.assertEqual('0825587cc011b7e76381b65e19d5ec27', response.headers["Content-MD5"]) self.assertEqual('89eb4b969b721ba8c3aff18ad7d69454f651a697', response.headers["X-Openstack-Glare-Content-SHA1"]) self.assertEqual('bbfd48c7ec792fc462e58232d4d9f407' 'ecefb75cc9e9823336166556b499ea4d', response.headers["X-Openstack-Glare-Content-SHA256"]) blob_data = self.get(url=url + '/blob') self.assertEqual(data, blob_data) # download artifact via admin self.set_user('admin') blob_data = self.get(url=url + '/blob') self.assertEqual(data, blob_data) # try to download blob via different user self.set_user('user2') self.get(url=url + '/blob', status=404) def test_blob_add_custom_location(self): # Create artifact art = self.create_artifact({'name': 'name5', 'version': '1.0', 'tags': ['tag1', 'tag2', 'tag3'], 'int1': 2048, 'float1': 987.654, 'str1': 'lalala', 'bool1': False, 'string_required': '123'}) self.assertIsNotNone(art['id']) # Create auxiliary artifact and upload data there aux = self.create_artifact({'name': 'auxiliary'}) url = '/sample_artifact/%s/blob' % aux['id'] data = b'a' * 1000 self.put(url=url, data=data) data_url = self._url(url) # Set custom location url = '/sample_artifact/%s' % art['id'] body = jsonutils.dumps( {'url': data_url, 'md5': "fake", 'sha1': "fake_sha", "sha256": "fake_sha256"}) headers = {'Content-Type': 'application/vnd+openstack.glare-custom-location+json'} self.put(url=url + '/blob', data=body, status=200, headers=headers) # test re-add failed self.put(url=url + '/blob', data=body, status=409, headers=headers) # add to non-existing property self.put(url=url + '/blob_non_exist', data=body, status=400, headers=headers) # Get the artifact, blob property should have status 'active' art = self.get(url=url, status=200) self.assertEqual('active', art['blob']['status']) self.assertEqual('fake', art['blob']['md5']) self.assertEqual('fake_sha', art['blob']['sha1']) self.assertEqual('fake_sha256', art['blob']['sha256']) self.assertIsNone(art['blob']['size']) self.assertIsNone(art['blob']['content_type']) self.assertEqual(data_url, art['blob']['url']) self.assertNotIn('id', art['blob']) # Set custom location url = '/sample_artifact/%s' % art['id'] self.put(url=url + '/dict_of_blobs/blob', data=body, status=200, headers=headers) # Get the artifact, blob property should have status 'active' art = self.get(url=url, status=200) self.assertEqual('active', art['dict_of_blobs']['blob']['status']) self.assertIsNotNone(art['dict_of_blobs']['blob']['md5']) self.assertIsNone(art['dict_of_blobs']['blob']['size']) self.assertIsNone(art['dict_of_blobs']['blob']['content_type']) self.assertEqual(data_url, art['dict_of_blobs']['blob']['url']) self.assertNotIn('id', art['dict_of_blobs']['blob']) # test re-add failed self.put(url=url + '/dict_of_blobs/blob', data=body, status=409, headers=headers) # test request failed with non-json containment self.put(url=url + '/dict_of_blobs/blob_incorrect', data="incorrect", status=400, headers=headers) # delete the artifact self.delete(url=url) def test_delete_external_blob(self): # Create artifact art = self.create_artifact({'name': 'name5', 'version': '1.0', 'tags': ['tag1', 'tag2', 'tag3'], 'int1': 2048, 'float1': 987.654, 'str1': 'lalala', 'bool1': False, 'string_required': '123'}) self.assertIsNotNone(art['id']) # Create auxiliary artifact and upload data there aux = self.create_artifact({'name': 'auxiliary'}) url = '/sample_artifact/%s/blob' % aux['id'] data = b'a' * 1000 self.put(url=url, data=data) data_url = self._url(url) # Set custom location url = '/sample_artifact/%s' % art['id'] body = jsonutils.dumps( {'url': data_url, 'md5': "fake", 'sha1': "fake_sha", "sha256": "fake_sha256"}) headers = {'Content-Type': 'application/vnd+openstack.glare-custom-location+json'} art = self.put(url=url + '/blob', data=body, status=200, headers=headers) self.assertEqual('active', art['blob']['status']) self.assertEqual('fake', art['blob']['md5']) self.assertEqual('fake_sha', art['blob']['sha1']) self.assertEqual('fake_sha256', art['blob']['sha256']) self.assertIsNone(art['blob']['size']) self.assertIsNone(art['blob']['content_type']) self.assertEqual(data_url, art['blob']['url']) self.assertNotIn('id', art['blob']) # Delete should work art = self.delete(url=url + '/blob', status=200) self.assertIsNone(art['blob']) # Deletion of empty blob fails self.delete(url=url + '/blob', status=404) # Deletion of non-blob field fails self.delete(url=url + '/int1', status=400) # Deletion ofn non-existing field fails self.delete(url=url + '/NONEXIST', status=400) # Upload data data = 'some_arbitrary_testing_data' headers = {'Content-Type': 'application/octet-stream'} art = self.put(url=url + '/blob', data=data, status=200, headers=headers) self.assertEqual('active', art['blob']['status']) md5 = hashlib.md5(data.encode('UTF-8')).hexdigest() sha1 = hashlib.sha1(data.encode('UTF-8')).hexdigest() sha256 = hashlib.sha256(data.encode('UTF-8')).hexdigest() self.assertEqual(md5, art['blob']['md5']) self.assertEqual(sha1, art['blob']['sha1']) self.assertEqual(sha256, art['blob']['sha256']) # Deletion of internal blob fails self.delete(url=url + '/blob', status=403) def test_delete_external_blob_dict(self): # Create artifact art = self.create_artifact({'name': 'name5', 'version': '1.0', 'tags': ['tag1', 'tag2', 'tag3'], 'int1': 2048, 'float1': 987.654, 'str1': 'lalala', 'bool1': False, 'string_required': '123'}) self.assertIsNotNone(art['id']) # Create auxiliary artifact and upload data there aux = self.create_artifact({'name': 'auxiliary'}) url = '/sample_artifact/%s/blob' % aux['id'] data = b'a' * 1000 self.put(url=url, data=data) data_url = self._url(url) # Set custom location url = '/sample_artifact/%s' % art['id'] body = jsonutils.dumps( {'url': data_url, 'md5': "fake", 'sha1': "fake_sha", "sha256": "fake_sha256"}) headers = {'Content-Type': 'application/vnd+openstack.glare-custom-location+json'} art = self.put(url=url + '/dict_of_blobs/blob', data=body, status=200, headers=headers) self.assertEqual('active', art['dict_of_blobs']['blob']['status']) self.assertEqual('fake', art['dict_of_blobs']['blob']['md5']) self.assertEqual('fake_sha', art['dict_of_blobs']['blob']['sha1']) self.assertEqual('fake_sha256', art['dict_of_blobs']['blob']['sha256']) self.assertIsNone(art['dict_of_blobs']['blob']['size']) self.assertIsNone(art['dict_of_blobs']['blob']['content_type']) self.assertEqual(data_url, art['dict_of_blobs']['blob']['url']) self.assertNotIn('id', art['dict_of_blobs']['blob']) # Delete should work art = self.delete(url=url + '/dict_of_blobs/blob', status=200) self.assertNotIn('blob', art['dict_of_blobs']) # Deletion of non-existing blob fails self.delete(url=url + '/dict_of_blobs/NONEXIST', status=404) # Upload data data = 'some_arbitrary_testing_data' headers = {'Content-Type': 'application/octet-stream'} art = self.put(url=url + '/dict_of_blobs/blob', data=data, status=200, headers=headers) self.assertEqual('active', art['dict_of_blobs']['blob']['status']) md5 = hashlib.md5(data.encode('UTF-8')).hexdigest() sha1 = hashlib.sha1(data.encode('UTF-8')).hexdigest() sha256 = hashlib.sha256(data.encode('UTF-8')).hexdigest() self.assertEqual(md5, art['dict_of_blobs']['blob']['md5']) self.assertEqual(sha1, art['dict_of_blobs']['blob']['sha1']) self.assertEqual(sha256, art['dict_of_blobs']['blob']['sha256']) # Deletion of internal blob fails self.delete(url=url + '/dict_of_blobs/blob', status=403) def test_internal_location(self): self.set_user('admin') # Create artifact art = self.create_artifact({'name': 'name5'}) self.assertIsNotNone(art['id']) url = '/sample_artifact/%s' % art['id'] headers = {'Content-Type': 'application/vnd+openstack.glare-custom-location+json'} # Setting locations with forbidden schemes fails forbidden_schemes = ('file', 'filesystem', 'swift+config', 'sql') for scheme in forbidden_schemes: body = jsonutils.dumps( {'md5': 'fake', 'sha1': 'fake_sha', 'sha256': 'fake_sha256', 'location_type': 'internal', 'url': scheme + '://FAKE_LOCATION.com'}) self.put(url=url + '/blob', data=body, status=403, headers=headers) # Setting locations with unknown schemes fail body = jsonutils.dumps( {'md5': 'fake', 'sha1': 'fake_sha', 'sha256': 'fake_sha256', 'location_type': 'internal', 'url': 'UNKNOWN://FAKE_LOCATION.com'}) self.put(url=url + '/blob', data=body, status=400, headers=headers) body = jsonutils.dumps( {'md5': 'fake', 'sha1': 'fake_sha', 'sha256': 'fake_sha256', 'location_type': 'internal', 'url': 'https://FAKE_LOCATION.com'}) art = self.put(url=url + '/blob', data=body, status=200, headers=headers) self.assertFalse(art['blob']['external']) self.assertEqual('active', art['blob']['status']) self.assertEqual('fake', art['blob']['md5']) self.assertEqual('fake_sha', art['blob']['sha1']) self.assertEqual('fake_sha256', art['blob']['sha256']) self.assertIsNone(art['blob']['size']) self.assertIsNone(art['blob']['content_type']) self.assertEqual('/artifacts/sample_artifact/%s/blob' % art['id'], art['blob']['url']) self.assertNotIn('id', art['blob']) class TestTags(base.TestArtifact): def test_tags(self): # Create artifact art = self.create_artifact({'name': 'name5', 'version': '1.0', 'tags': ['tag1', 'tag2', 'tag3'], 'int1': 2048, 'float1': 987.654, 'str1': 'lalala', 'bool1': False, 'string_required': '123'}) self.assertIsNotNone(art['id']) url = '/sample_artifact/%s' % art['id'] data = [{ "op": "replace", "path": "/status", "value": "active" }] art = self.patch(url=url, data=data, status=200) self.assertEqual('active', art['status']) art = self.admin_action(art['id'], self.make_public) self.assertEqual('public', art['visibility']) # only admins can update tags for public artifacts self.set_user("admin") # Check that tags created correctly url = '/sample_artifact/%s' % art['id'] resp = self.get(url=url, status=200) for tag in ['tag1', 'tag2', 'tag3']: self.assertIn(tag, resp['tags']) # Set new tag list to the art body = [{"op": "replace", "path": "/tags", "value": ["new_tag1", "new_tag2", "new_tag3"]}] resp = self.patch(url=url, data=body, status=200) for tag in ['new_tag1', 'new_tag2', 'new_tag3']: self.assertIn(tag, resp['tags']) # Delete all tags from the art body = [{"op": "replace", "path": "/tags", "value": []}] resp = self.patch(url=url, data=body, status=200) self.assertEqual([], resp['tags']) # Set new tags as null body = [{"op": "replace", "path": "/tags", "value": None}] resp = self.patch(url=url, data=body, status=200) self.assertEqual([], resp['tags']) # Get the list of tags resp = self.get(url=url, status=200) self.assertEqual([], resp['tags']) class TestArtifactOps(base.TestArtifact): def test_create(self): """All tests related to artifact creation""" # check that cannot create artifact for non-existent artifact type self.post('/incorrect_artifact', {"name": "t"}, status=404) # check that cannot accept non-json body self.post('/incorrect_artifact', "incorrect_body", status=400) # check that cannot accept incorrect content type self.post('/sample_artifact', {"name": "t"}, status=415, headers={"Content-Type": "application/octet-stream"}) # check that cannot create artifact without name self.create_artifact(data={"int1": 1024}, status=400) # check that cannot create artifact with too long name self.create_artifact(data={"name": "t" * 256}, status=400) # check that cannot create artifact with empty name self.create_artifact(data={"name": ""}, status=400) # check that can create af without version private_art = self.create_artifact( data={"name": "test_af", "string_required": "test_str"}) # check that default is set on artifact create uuid.UUID(private_art['id']) self.assertEqual('0.0.0', private_art['version']) self.assertEqual("default", private_art["system_attribute"]) self.assertEqual(self.users['user1']['tenant_id'], private_art['owner']) # check that cannot create artifact with invalid version self.create_artifact(data={"name": "test_af", "version": "dummy_version"}, status=400) # check that cannot create artifact with empty and long version self.create_artifact(data={"name": "test_af", "version": ""}, status=400) # check that cannot create artifact with empty and long version self.create_artifact(data={"name": "test_af", "version": "t" * 256}, status=400) # check that artifact artifact with the same name-version cannot # be created self.create_artifact(data={"name": "test_af"}, status=409) # check that we cannot create af with the same version but different # presentation self.create_artifact(data={"name": "test_af", "version": "0.0"}, status=409) # check that we can create artifact with different version and tags new_af = self.create_artifact( data={"name": "test_af", "version": "0.0.1", "tags": ["tag1", "tag2"]}) self.assertEqual({"tag1", "tag2"}, set(new_af["tags"])) # check that we cannot create artifact with visibility self.create_artifact(data={"name": "test_af", "version": "0.0.2", "visibility": "private"}, status=400) # check that we cannot create artifact with system property self.create_artifact(data={"name": "test_af", "version": "0.0.2", "system_attribute": "test"}, status=403) # check that we cannot specify blob in create self.create_artifact(data={"name": "test_af", "version": "0.0.2", "blob": { 'url': None, 'size': None, 'md5': None, 'status': 'saving', 'external': False}}, status=400) # check that anonymous user cannot create artifact self.set_user("anonymous") self.create_artifact(data={"name": "test_af", "version": "0.0.2"}, status=403) # check that another user can create artifact # with the same name version self.set_user("user2") some_af = self.create_artifact(data={"name": "test_af"}) # check we can create artifact with all available attributes # (except blobs and system) expected = { "name": "test_big_create", "link1": "/artifacts/sample_artifact/%s" % some_af['id'], "bool1": True, "int1": 2323, "float1": 0.1, "str1": "test", "list_of_str": ["test"], "list_of_int": [0], "dict_of_str": {"test": "test"}, "dict_of_int": {"test": 0}, "string_mutable": "test", "string_required": "test", } big_af = self.create_artifact(data=expected) actual = {} for k in expected: actual[k] = big_af[k] self.assertEqual(expected, actual) # check that we cannot access artifact from other user # check that active artifact is not available for other user url = '/sample_artifact/%s' % private_art['id'] self.get(url, status=404) # check we cannot create af with non-existing property self.create_artifact(data={"name": "test_af_ne", "non_exist": "non_exist"}, status=400) # activate and publish artifact to check that we can create # private artifact with the same name version self.set_user("user1") self.patch(url=url, data=self.make_active) self.admin_action(private_art['id'], self.make_public) self.create_artifact(data={"name": "test_af", "string_required": "test_str"}) def test_activate(self): # create artifact to update private_art = self.create_artifact( data={"name": "test_af", "version": "0.0.1"}) # cannot activate artifact without required for activate attributes url = '/sample_artifact/%s' % private_art['id'] self.patch(url=url, data=self.make_active, status=403) add_required = [{ "op": "replace", "path": "/string_required", "value": "string" }] self.patch(url=url, data=add_required) # can activate if body contains non status changes make_active_with_updates = self.make_active + [{"op": "replace", "path": "/description", "value": "test"}] active_art = self.patch(url=url, data=make_active_with_updates) private_art['status'] = 'active' private_art['activated_at'] = active_art['activated_at'] private_art['updated_at'] = active_art['updated_at'] private_art['string_required'] = 'string' private_art['description'] = 'test' self.assertEqual(private_art, active_art) # check that active artifact is not available for other user self.set_user("user2") self.get(url, status=404) self.set_user("user1") # test that activate is idempotent self.patch(url=url, data=self.make_active) # test activate deleted artifact self.delete(url=url) self.patch(url=url, data=self.make_active, status=404) def test_publish(self): # create artifact to update self.set_user('admin') private_art = self.create_artifact( data={"name": "test_af", "string_required": "test_str", "version": "0.0.1"}) url = '/sample_artifact/%s' % private_art['id'] # test that we cannot publish drafted artifact self.patch(url=url, data=self.make_public, status=403) self.patch(url=url, data=self.make_active) # test that cannot publish deactivated artifact self.patch(url, data=self.make_deactivated) self.patch(url, data=self.make_public, status=403) self.patch(url=url, data=self.make_active) # test that visibility can be specified in the request with # other updates make_public_with_updates = self.make_public + [ {"op": "replace", "path": "/string_mutable", "value": "test"}] self.patch(url=url, data=make_public_with_updates) # check public artifact public_art = self.patch(url=url, data=self.make_public) private_art['activated_at'] = public_art['activated_at'] private_art['visibility'] = 'public' private_art['status'] = 'active' private_art['updated_at'] = public_art['updated_at'] private_art['string_mutable'] = 'test' self.assertEqual(private_art, public_art) # check that public artifact available for simple user self.set_user("user1") self.get(url) self.set_user("admin") # test that artifact publish with the same name and version failed duplicate_art = self.create_artifact( data={"name": "test_af", "string_required": "test_str", "version": "0.0.1"}) dup_url = '/sample_artifact/%s' % duplicate_art['id'] # proceed with duplicate testing self.patch(url=dup_url, data=self.make_active) self.patch(url=dup_url, data=self.make_public, status=409) def test_delete(self): # try ro delete not existing artifact url = '/sample_artifact/111111' self.delete(url=url, status=404) # check that we can delete artifact with soft link art = self.create_artifact( data={"name": "test_af", "string_required": "test_str", "version": "0.0.1"}) artd = self.create_artifact( data={"name": "test_afd", "string_required": "test_str", "version": "0.0.1", "link1": '/artifacts/sample_artifact/%s' % art['id']}) url = '/sample_artifact/%s' % artd['id'] self.delete(url=url, status=204) # try to change status of artifact to deleting url = '/sample_artifact/%s' % art['id'] patch = [{'op': 'replace', 'value': 'deleting', 'path': '/status'}] self.patch(url=url, data=patch, status=400) # delete artifact via different user (non admin) self.set_user('user2') self.delete(url=url, status=404) # delete artifact via admin user self.set_user('admin') self.delete(url=url, status=204) # delete public artifact via different user self.set_user('user1') art = self.create_artifact( data={"name": "test_af", "string_required": "test_str", "version": "0.0.1"}) url = '/sample_artifact/%s' % art['id'] self.patch(url=url, data=self.make_active) self.admin_action(art['id'], self.make_public) self.set_user('user2') self.delete(url=url, status=403) self.set_user('user1') self.delete(url=url, status=403) self.set_user('admin') self.delete(url=url) # delete deactivated artifact art = self.create_artifact( data={"name": "test_af", "string_required": "test_str", "version": "0.0.1"}) url = '/sample_artifact/%s' % art['id'] self.patch(url=url, data=self.make_active) self.patch(url=url, data=self.make_deactivated) self.delete(url=url, status=204) self.get(url=url, status=404) self.assertEqual(0, len(self.get( url='/sample_artifact')['sample_artifact'])) def test_deactivate(self): # test artifact deactivate for non-active artifact private_art = self.create_artifact( data={"name": "test_af", "string_required": "test_str", "version": "0.0.1"}) url = '/sample_artifact/%s' % private_art['id'] self.admin_action(private_art['id'], self.make_deactivated, 403) self.patch(url, self.make_active) self.set_user('admin') # test can deactivate if there is something else in request make_deactived_with_updates = [ {"op": "replace", "path": "/description", "value": "test"}] + self.make_deactivated # test artifact deactivate success deactivated_art = self.admin_action( private_art['id'], make_deactived_with_updates) self.assertEqual("deactivated", deactivated_art["status"]) self.assertEqual("test", deactivated_art["description"]) # test deactivate is idempotent self.patch(url, self.make_deactivated) def test_reactivate(self): self.set_user('admin') private_art = self.create_artifact( data={"name": "test_af", "string_required": "test_str", "version": "0.0.1"}) url = '/sample_artifact/%s' % private_art['id'] self.patch(url, self.make_active) self.admin_action(private_art['id'], self.make_deactivated) # test can reactivate if there is something else in request make_reactived_with_updates = self.make_active + [ {"op": "replace", "path": "/description", "value": "test"}] # test artifact deactivate success reactivated_art = self.admin_action( private_art['id'], make_reactived_with_updates) self.assertEqual("active", reactivated_art["status"]) self.assertEqual("test", reactivated_art["description"]) class TestUpdate(base.TestArtifact): def test_update_artifact_before_activate(self): """Test updates for artifact before activation""" # create artifact to update private_art = self.create_artifact(data={"name": "test_af"}) url = '/sample_artifact/%s' % private_art['id'] # check we can update artifact change_version = [{ "op": "replace", "path": "/version", "value": "0.0.2" }] self.patch(url=url, data=change_version) # wrong patch format fails with 400 error invalid_patch = { "op": "replace", "path": "/version", "value": "0.0.2" } self.patch(url=url, data=invalid_patch, status=400) # check that we cannot update af if af with # the same name or version exists dup_version = self.create_artifact( data={"name": "test_af", "version": "0.0.1"}) dupv_url = '/sample_artifact/%s' % dup_version['id'] change_version_dup = [{ "op": "replace", "path": "/version", "value": "0.0.2" }] self.patch(url=dupv_url, data=change_version_dup, status=409) dup_name = self.create_artifact(data={"name": "test_name_af", "version": "0.0.2"}) dupn_url = '/sample_artifact/%s' % dup_name['id'] change_name = [{ "op": "replace", "path": "/name", "value": "test_af" }] self.patch(url=dupn_url, data=change_name, status=409) # check that we can update artifacts dup # after first artifact updated name and version change_version[0]['value'] = "0.0.3" self.patch(url=url, data=change_version) self.patch(url=dupn_url, data=change_name) # check that we can update artifact dupv to target version # also check that after deletion of artifact with the same name # version I can update dupv self.delete(dupn_url) self.patch(url=dupv_url, data=change_version_dup) # check we cannot update artifact with incorrect content-type self.patch(url, {}, status=415, headers={"Content-Type": "application/json"}) # check we cannot update tags with patch set_tags = [{ "op": "replace", "path": "/tags", "value": "test_af" }] self.patch(url, set_tags, status=400) # check we cannot update artifact with incorrect json-patch self.patch(url, "incorrect json patch", status=400) # check update is correct if there is no update no_name_update = [{ "op": "replace", "path": "/name", "value": "test_af" }] self.patch(url, no_name_update) # check add new property request rejected add_prop = [{ "op": "add", "path": "/string1", "value": "test_af" }] self.patch(url, add_prop, 400) # check delete property request rejected add_prop[0]["op"] = "remove" add_prop[0]["path"] = "/string_required" self.patch(url, add_prop, 400) # check we cannot update system attr with patch system_attr = [{ "op": "replace", "path": "/system_attribute", "value": "dummy" }] self.patch(url, system_attr, 403) # check cannot update blob attr with patch blob_attr = [{ "op": "replace", "path": "/blob", "value": {"name": "test_af", "version": "0.0.2", "blob": {'url': None, 'size': None, 'md5': None, 'status': 'saving', 'external': False}}}] self.patch(url, blob_attr, 400) blob_attr[0]["path"] = "/dict_of_blobs/-" blob_attr[0]["op"] = "add" self.patch(url, blob_attr, 400) # test update correctness for all attributes big_update_patch = [ {"op": "replace", "path": "/bool1", "value": True}, {"op": "replace", "path": "/int1", "value": 2323}, {"op": "replace", "path": "/float1", "value": 0.1}, {"op": "replace", "path": "/str1", "value": "test"}, {"op": "replace", "path": "/list_of_str", "value": ["test"]}, {"op": "replace", "path": "/list_of_int", "value": [0]}, {"op": "replace", "path": "/dict_of_str", "value": {"test": "test"}}, {"op": "replace", "path": "/dict_of_int", "value": {"test": 0}}, {"op": "replace", "path": "/string_mutable", "value": "test"}, {"op": "replace", "path": "/string_required", "value": "test"}, ] upd_af = self.patch(url, big_update_patch) for patch_item in big_update_patch: self.assertEqual(patch_item.get("value"), upd_af[patch_item.get("path")[1:]]) # check we can update private artifact # to the same name version as public artifact self.patch(url=url, data=self.make_active) self.admin_action(private_art['id'], self.make_public) self.patch(url=dupv_url, data=change_version) def test_update_after_activate_and_publish(self): # activate artifact private_art = self.create_artifact( data={"name": "test_af", "string_required": "test_str", "version": "0.0.1"}) url = '/sample_artifact/%s' % private_art['id'] self.patch(url=url, data=self.make_active) # test that immutable properties cannot be updated upd_immutable = [{ "op": "replace", "path": "/name", "value": "new_name" }] self.patch(url, upd_immutable, status=403) # test that mutable properties can be updated upd_mutable = [{ "op": "replace", "path": "/string_mutable", "value": "new_value" }] updated_af = self.patch(url, upd_mutable) self.assertEqual("new_value", updated_af["string_mutable"]) # test cannot update deactivated artifact upd_mutable[0]["value"] = "another_new_value" self.admin_action(private_art['id'], self.make_deactivated) # test that nobody(even admin) can publish deactivated artifact self.set_user("admin") self.patch(url, self.make_public, 403) self.set_user("user1") self.patch(url, upd_mutable, 403) self.admin_action(private_art['id'], self.make_active) # publish artifact self.admin_action(private_art['id'], self.make_public) # check we cannot update public artifact anymore self.patch(url, upd_mutable, status=403) self.patch(url, upd_mutable, status=403) # check that admin can update public artifact self.set_user("admin") self.patch(url, upd_mutable) def test_update_with_validators(self): data = {'name': 'test_af', 'version': '0.0.1', 'list_validators': ['a', 'b', 'c'], 'dict_validators': {'abc': 'a', 'def': 'b'}} art = self.create_artifact(data=data) url = '/sample_artifact/%s' % art['id'] # min int_validators value is 10 patch = [{"op": "replace", "path": "/int_validators", "value": 9}] self.patch(url=url, data=patch, status=400) # max int_validators value is 20 patch = [{"op": "replace", "path": "/int_validators", "value": 21}] self.patch(url=url, data=patch, status=400) # number 15 is okay patch = [{"op": "replace", "path": "/int_validators", "value": 15}] self.patch(url=url, data=patch, status=200) # max string length is 255 patch = [{"op": "replace", "path": "/str1", "value": 'd' * 256}] self.patch(url=url, data=patch, status=400) # 'cc' is not allowed value for the string patch = [{"op": "replace", "path": "/string_validators", "value": 'cc'}] self.patch(url=url, data=patch, status=400) # 'aa' is okay patch = [{"op": "replace", "path": "/string_validators", "value": 'aa'}] self.patch(url=url, data=patch) # 'bb' is okay too patch = [{"op": "replace", "path": "/string_validators", "value": 'bb'}] self.patch(url=url, data=patch) # even if 'c' * 11 is allowed value it exceeds MaxLen's 10 character # limit patch = [{"op": "replace", "path": "/string_validators", "value": 'c' * 11}] self.patch(url=url, data=patch, status=400) # string_regex format it '^([0-9a-fA-F]){8}$' patch = [{"op": "replace", "path": "/string_regex", "value": 'INVALID'}] self.patch(url=url, data=patch, status=400) patch = [{"op": "replace", "path": "/string_regex", "value": '167f808Z'}] self.patch(url=url, data=patch, status=400) patch = [{"op": "replace", "path": "/string_regex", "value": '167f80835'}] self.patch(url=url, data=patch, status=400) patch = [{"op": "replace", "path": "/string_regex", "value": '167f8083'}] self.patch(url=url, data=patch) # test list has 3 elements maximum patch = [{"op": "add", "path": "/list_validators/-", "value": 'd'}] self.patch(url=url, data=patch, status=400) patch = [{"op": "replace", "path": "/list_validators", "value": ['a', 'b', 'c', 'd']}] self.patch(url=url, data=patch, status=400) # test list values are unique patch = [{"op": "replace", "path": "/list_validators/2", "value": 'b'}] self.patch(url=url, data=patch, status=400) patch = [{"op": "replace", "path": "/list_validators", "value": ['a', 'b', 'b']}] self.patch(url=url, data=patch, status=400) # regular update works patch = [{"op": "replace", "path": "/list_validators/1", "value": 'd'}] af = self.patch(url=url, data=patch) self.assertEqual(af['list_validators'], ['a', 'd', 'c']) patch = [{"op": "replace", "path": "/list_validators", "value": ['c', 'b', 'a']}] af = self.patch(url=url, data=patch) self.assertEqual(af['list_validators'], ['c', 'b', 'a']) # test adding wrong key to dict patch = [{"op": "add", "path": "/dict_validators/aaa", "value": 'b'}] self.patch(url=url, data=patch, status=400) patch = [{"op": "replace", "path": "/dict_validators", "value": {'abc': 'a', 'def': 'b', 'aaa': 'c'}}] self.patch(url=url, data=patch, status=400) # test dict has 3 elements maximum patch = [{"op": "add", "path": "/dict_validators/ghi", "value": 'd'}] self.patch(url=url, data=patch) patch = [{"op": "add", "path": "/dict_validators/jkl", "value": 'd'}] self.patch(url=url, data=patch, status=400) patch = [{"op": "replace", "path": "/dict_validators", "value": {'abc': 'a', 'def': 'b', 'ghi': 'c', 'jkl': 'd'}}] self.patch(url=url, data=patch, status=400) # regular update works patch = [{"op": "replace", "path": "/dict_validators/abc", "value": "q"}] af = self.patch(url=url, data=patch) self.assertEqual(af['dict_validators'], {'abc': 'q', 'def': 'b', 'ghi': 'd'}) patch = [{"op": "replace", "path": "/dict_validators", "value": {'abc': 'l', 'def': 'x', 'ghi': 'z'}}] af = self.patch(url=url, data=patch) self.assertEqual(af['dict_validators'], {'abc': 'l', 'def': 'x', 'ghi': 'z'}) def test_update_base_fields(self): data = {'name': 'test_af', 'version': '0.0.1'} art = self.create_artifact(data=data) url = '/sample_artifact/%s' % art['id'] # INT # float to int patch = [{"op": "replace", "path": "/int1", "value": 1.1}] art = self.patch(url=url, data=patch) self.assertEqual(1, art['int1']) # str(int) to int patch = [{"op": "replace", "path": "/int1", "value": '2'}] art = self.patch(url=url, data=patch) self.assertEqual(2, art['int1']) # str(float) to int patch = [{"op": "replace", "path": "/int1", "value": '3.0'}] self.patch(url=url, data=patch, status=400) # str(int) to int patch = [{"op": "replace", "path": "/int1", "value": ''}] self.patch(url=url, data=patch, status=400) # empty list to int patch = [{"op": "replace", "path": "/int1", "value": []}] self.patch(url=url, data=patch, status=400) # empty dict to int patch = [{"op": "replace", "path": "/int1", "value": {}}] self.patch(url=url, data=patch, status=400) # bool to int patch = [{"op": "replace", "path": "/int1", "value": True}] art = self.patch(url=url, data=patch, status=200) self.assertEqual(1, art['int1']) patch = [{"op": "replace", "path": "/int1", "value": False}] art = self.patch(url=url, data=patch, status=200) self.assertEqual(0, art['int1']) # FLOAT # int to float patch = [{"op": "replace", "path": "/float1", "value": 1}] art = self.patch(url=url, data=patch, status=200) self.assertEqual(1.0, art['float1']) # str(int) to float patch = [{"op": "replace", "path": "/float1", "value": '2'}] art = self.patch(url=url, data=patch, status=200) self.assertEqual(2.0, art['float1']) # str(int) to float patch = [{"op": "replace", "path": "/float1", "value": []}] self.patch(url=url, data=patch, status=400) # str(int) to float patch = [{"op": "replace", "path": "/float1", "value": {}}] self.patch(url=url, data=patch, status=400) # str(bool) to float patch = [{"op": "replace", "path": "/float1", "value": 'True'}] self.patch(url=url, data=patch, status=400) # bool to float patch = [{"op": "replace", "path": "/float1", "value": True}] art = self.patch(url=url, data=patch, status=200) self.assertEqual(1.0, art['float1']) # str(float) to float patch = [{"op": "replace", "path": "/float1", "value": '3.0'}] art = self.patch(url=url, data=patch, status=200) self.assertEqual(3.0, art['float1']) # STRING # str to str patch = [{"op": "replace", "path": "/str1", "value": '3.0'}] art = self.patch(url=url, data=patch, status=200) self.assertEqual('3.0', art['str1']) # int to str patch = [{"op": "replace", "path": "/str1", "value": 1}] art = self.patch(url=url, data=patch, status=200) self.assertEqual('1', art['str1']) # float to str patch = [{"op": "replace", "path": "/str1", "value": 1.0}] art = self.patch(url=url, data=patch, status=200) self.assertEqual('1.0', art['str1']) # bool to str patch = [{"op": "replace", "path": "/str1", "value": True}] art = self.patch(url=url, data=patch, status=200) self.assertEqual('True', art['str1']) # empty list to str patch = [{"op": "replace", "path": "/str1", "value": []}] self.patch(url=url, data=patch, status=400) patch = [{"op": "replace", "path": "/str1", "value": {}}] self.patch(url=url, data=patch, status=400) # BOOL # int to bool patch = [{"op": "replace", "path": "/bool1", "value": 1}] art = self.patch(url=url, data=patch, status=200) self.assertEqual(True, art['bool1']) patch = [{"op": "replace", "path": "/bool1", "value": 0}] art = self.patch(url=url, data=patch, status=200) self.assertEqual(False, art['bool1']) # float to bool patch = [{"op": "replace", "path": "/bool1", "value": 2.1}] art = self.patch(url=url, data=patch, status=200) self.assertEqual(False, art['bool1']) patch = [{"op": "replace", "path": "/bool1", "value": 1.1}] art = self.patch(url=url, data=patch, status=200) self.assertEqual(False, art['bool1']) # string to bool patch = [{"op": "replace", "path": "/bool1", "value": '1'}] art = self.patch(url=url, data=patch, status=200) self.assertEqual(True, art['bool1']) patch = [{"op": "replace", "path": "/bool1", "value": ''}] art = self.patch(url=url, data=patch, status=200) self.assertEqual(False, art['bool1']) # [] to bool patch = [{"op": "replace", "path": "/bool1", "value": []}] art = self.patch(url=url, data=patch, status=200) self.assertEqual(False, art['bool1']) patch = [{"op": "replace", "path": "/bool1", "value": [1]}] art = self.patch(url=url, data=patch, status=200) self.assertEqual(False, art['bool1']) # {} to bool patch = [{"op": "replace", "path": "/bool1", "value": {}}] art = self.patch(url=url, data=patch, status=200) self.assertEqual(False, art['bool1']) patch = [{"op": "replace", "path": "/bool1", "value": {'1', 1}}] art = self.patch(url=url, data=patch, status=200) self.assertEqual(False, art['bool1']) # LIST OF STR AND INT # {} to list of str patch = [{"op": "replace", "path": "/list_of_str", "value": {}}] self.patch(url=url, data=patch, status=400) # [] to list of str patch = [{"op": "replace", "path": "/list_of_str", "value": []}] art = self.patch(url=url, data=patch, status=200) self.assertEqual([], art['list_of_str']) # list of int to list of str patch = [{"op": "replace", "path": "/list_of_str", "value": [1, 2, 3]}] art = self.patch(url=url, data=patch, status=200) self.assertEqual(['1', '2', '3'], art['list_of_str']) # list of bool to list of str patch = [{"op": "replace", "path": "/list_of_str", "value": [True, False, True]}] art = self.patch(url=url, data=patch, status=200) self.assertEqual(['True', 'False', 'True'], art['list_of_str']) # str to list of str patch = [{"op": "replace", "path": "/list_of_str", "value": '123'}] self.patch(url=url, data=patch, status=400) # int to list of str patch = [{"op": "replace", "path": "/list_of_str", "value": 11}] self.patch(url=url, data=patch, status=400) # bool to list of str patch = [{"op": "replace", "path": "/list_of_str", "value": True}] self.patch(url=url, data=patch, status=400) # Dict OF INT # [] to dict of int patch = [{"op": "replace", "path": "/dict_of_int", "value": []}] self.patch(url=url, data=patch, status=400) # {} to dict of int patch = [{"op": "replace", "path": "/dict_of_int", "value": {}}] art = self.patch(url=url, data=patch, status=200) self.assertEqual({}, art['dict_of_int']) # int to dict of int patch = [{"op": "replace", "path": "/dict_of_int", "value": 1}] self.patch(url=url, data=patch, status=400) # bool to dict of int patch = [{"op": "replace", "path": "/dict_of_int", "value": True}] self.patch(url=url, data=patch, status=400) # string to dict of int patch = [{"op": "replace", "path": "/dict_of_int", "value": 'aaa'}] self.patch(url=url, data=patch, status=400) def test_update_field_dict(self): art1 = self.create_artifact(data={"name": "art1"}) # create artifact without dict prop data = {'name': 'art_without_dict'} result = self.post(url='/sample_artifact', status=201, data=data) self.assertEqual({}, result['dict_of_str']) # create artifact with dict prop data = {'name': 'art_with_dict', 'dict_of_str': {'a': '1', 'b': '3'}} result = self.post(url='/sample_artifact', status=201, data=data) self.assertEqual({'a': '1', 'b': '3'}, result['dict_of_str']) # create artifact with empty dict data = {'name': 'art_with_empty_dict', 'dict_of_str': {}} result = self.post(url='/sample_artifact', status=201, data=data) self.assertEqual({}, result['dict_of_str']) # add element in invalid path data = [{'op': 'add', 'path': '/dict_of_str', 'value': 'val1'}] url = '/sample_artifact/%s' % art1['id'] self.patch(url=url, data=data, status=400) # add new element data = [{'op': 'add', 'path': '/dict_of_str/new', 'value': 'val1'}] url = '/sample_artifact/%s' % art1['id'] result = self.patch(url=url, data=data) self.assertEqual('val1', result['dict_of_str']['new']) # add existent element data = [{'op': 'add', 'path': '/dict_of_str/new', 'value': 'val_new'}] url = '/sample_artifact/%s' % art1['id'] result = self.patch(url=url, data=data) self.assertEqual('val_new', result['dict_of_str']['new']) # add element with empty key data = [{'op': 'add', 'path': '/dict_of_str/', 'value': 'val1'}] url = '/sample_artifact/%s' % art1['id'] self.patch(url=url, data=data, status=400) # replace element data = [{'op': 'replace', 'path': '/dict_of_str/new', 'value': 'val2'}] url = '/sample_artifact/%s' % art1['id'] result = self.patch(url=url, data=data) self.assertEqual('val2', result['dict_of_str']['new']) # replace non-existent element data = [{'op': 'replace', 'path': '/dict_of_str/non_exist', 'value': 'val2'}] url = '/sample_artifact/%s' % art1['id'] self.patch(url=url, data=data, status=400) # remove element data = [{'op': 'remove', 'path': '/dict_of_str/new', 'value': 'val2'}] url = '/sample_artifact/%s' % art1['id'] result = self.patch(url=url, data=data) self.assertIsNone(result['dict_of_str'].get('new')) # remove non-existent element data = [{'op': 'remove', 'path': '/dict_of_str/non_exist', 'value': 'val2'}] url = '/sample_artifact/%s' % art1['id'] self.patch(url=url, data=data, status=400) # set value data = [{'op': 'add', 'path': '/dict_of_str', 'value': {'key1': 'val1', 'key2': 'val2'}}] url = '/sample_artifact/%s' % art1['id'] result = self.patch(url=url, data=data) self.assertEqual({'key1': 'val1', 'key2': 'val2'}, result['dict_of_str']) # replace value data = [{'op': 'add', 'path': '/dict_of_str', 'value': {'key11': 'val1', 'key22': 'val2'}}] url = '/sample_artifact/%s' % art1['id'] result = self.patch(url=url, data=data) self.assertEqual({'key11': 'val1', 'key22': 'val2'}, result['dict_of_str']) # remove value data = [{'op': 'add', 'path': '/dict_of_str', 'value': {}}] url = '/sample_artifact/%s' % art1['id'] result = self.patch(url=url, data=data) self.assertEqual({}, result['dict_of_str']) # set an element of the wrong non-conversion type value data = [{'op': 'add', 'path': '/dict_of_str/wrong_type', 'value': [1, 2, 4]}] url = '/sample_artifact/%s' % art1['id'] self.patch(url=url, data=data, status=400) # set an element of the wrong conversion type value data = [{'op': 'add', 'path': '/dict_of_str/wrong_type', 'value': 1}] url = '/sample_artifact/%s' % art1['id'] result = self.patch(url=url, data=data) self.assertEqual('1', result['dict_of_str']['wrong_type']) # add element with None value data = [{'op': 'add', 'path': '/dict_of_blob/nane_value', 'value': None}] url = '/sample_artifact/%s' % art1['id'] self.patch(url=url, data=data, status=400) def test_update_field_list(self): art1 = self.create_artifact(data={"name": "art1"}) # create artifact without list prop data = {'name': 'art_without_list'} result = self.post(url='/sample_artifact', status=201, data=data) self.assertEqual([], result['list_of_str']) # create artifact with list prop data = {'name': 'art_with_list', 'list_of_str': ['a', 'b']} result = self.post(url='/sample_artifact', status=201, data=data) self.assertEqual(['a', 'b'], result['list_of_str']) # create artifact with empty list data = {'name': 'art_with_empty_list', 'list_of_str': []} result = self.post(url='/sample_artifact', status=201, data=data) self.assertEqual([], result['list_of_str']) # add value data = [{'op': 'add', 'path': '/list_of_str', 'value': ['b', 'd']}] url = '/sample_artifact/%s' % art1['id'] result = self.patch(url=url, data=data) self.assertEqual(['b', 'd'], result['list_of_str']) # replace value data = [{'op': 'replace', 'path': '/list_of_str', 'value': ['aa', 'dd']}] url = '/sample_artifact/%s' % art1['id'] result = self.patch(url=url, data=data) self.assertEqual(['aa', 'dd'], result['list_of_str']) # remove value data = [{'op': 'add', 'path': '/list_of_str', 'value': []}] url = '/sample_artifact/%s' % art1['id'] result = self.patch(url=url, data=data) self.assertEqual([], result['list_of_str']) # add new element on empty list self.assertEqual([], art1['list_of_str']) data = [{'op': 'add', 'path': '/list_of_str/-', 'value': 'val1'}] url = '/sample_artifact/%s' % art1['id'] result = self.patch(url=url, data=data) self.assertEqual(['val1'], result['list_of_str']) # add new element on index data = [{'op': 'add', 'path': '/list_of_str/0', 'value': 'val2'}] url = '/sample_artifact/%s' % art1['id'] result = self.patch(url=url, data=data) self.assertEqual(['val2', 'val1'], result['list_of_str']) # add new element on next index data = [{'op': 'add', 'path': '/list_of_str/1', 'value': 'val3'}] url = '/sample_artifact/%s' % art1['id'] result = self.patch(url=url, data=data) self.assertEqual(['val2', 'val3', 'val1'], result['list_of_str']) # add new element on default index data = [{'op': 'add', 'path': '/list_of_str/-', 'value': 'val4'}] url = '/sample_artifact/%s' % art1['id'] result = self.patch(url=url, data=data) self.assertEqual(['val2', 'val3', 'val1', 'val4'], result['list_of_str']) # add new element on non-existent index data = [{'op': 'add', 'path': '/list_of_str/10', 'value': 'val2'}] url = '/sample_artifact/%s' % art1['id'] self.patch(url=url, data=data, status=400) # replace element on index data = [{'op': 'replace', 'path': '/list_of_str/1', 'value': 'val_new'}] url = '/sample_artifact/%s' % art1['id'] result = self.patch(url=url, data=data) self.assertEqual(['val2', 'val_new', 'val1', 'val4'], result['list_of_str']) # replace element on default index data = [{'op': 'replace', 'path': '/list_of_str/-', 'value': 'val-'}] url = '/sample_artifact/%s' % art1['id'] self.patch(url=url, data=data, status=400) # replace new element on non-existent index data = [{'op': 'replace', 'path': '/list_of_str/99', 'value': 'val_new'}] url = '/sample_artifact/%s' % art1['id'] self.patch(url=url, data=data, status=400) # remove element on index data = [{'op': 'remove', 'path': '/list_of_str/1', 'value': 'val2'}] url = '/sample_artifact/%s' % art1['id'] result = self.patch(url=url, data=data) self.assertEqual(['val2', 'val1', 'val4'], result['list_of_str']) # remove element on default index data = [{'op': 'remove', 'path': '/list_of_str/-', 'value': 'val3'}] url = '/sample_artifact/%s' % art1['id'] self.patch(url=url, data=data, status=400) # remove new element on non-existent index data = [{'op': 'remove', 'path': '/list_of_str/999', 'value': 'val2'}] url = '/sample_artifact/%s' % art1['id'] self.patch(url=url, data=data, status=400) def test_update_remove_properties(self): data = { "name": "test_big_create", "version": "1.0.0", "bool1": True, "int1": 2323, "float1": 0.1, "str1": "test", "list_of_str": ["test1", "test2"], "list_of_int": [0, 1, 2], "dict_of_str": {"test": "test"}, "dict_of_int": {"test": 0}, "string_mutable": "test", "string_required": "test", } art1 = self.create_artifact(data=data) # remove the whole list of strings data = [{'op': 'replace', 'path': '/list_of_str', 'value': None}] url = '/sample_artifact/%s' % art1['id'] result = self.patch(url=url, data=data) self.assertEqual([], result['list_of_str']) # remove the whole list of ints data = [{'op': 'replace', 'path': '/list_of_int', 'value': None}] url = '/sample_artifact/%s' % art1['id'] result = self.patch(url=url, data=data) self.assertEqual([], result['list_of_int']) # remove the whole dict of strings data = [{'op': 'replace', 'path': '/dict_of_str', 'value': None}] url = '/sample_artifact/%s' % art1['id'] result = self.patch(url=url, data=data) self.assertEqual({}, result['dict_of_str']) # remove the whole dict of ints data = [{'op': 'replace', 'path': '/dict_of_int', 'value': None}] url = '/sample_artifact/%s' % art1['id'] result = self.patch(url=url, data=data) self.assertEqual({}, result['dict_of_int']) # remove bool1 data = [{'op': 'replace', 'path': '/bool1', 'value': None}] url = '/sample_artifact/%s' % art1['id'] result = self.patch(url=url, data=data) self.assertEqual(False, result['bool1']) # remove int1 data = [{'op': 'replace', 'path': '/int1', 'value': None}] url = '/sample_artifact/%s' % art1['id'] result = self.patch(url=url, data=data) self.assertIsNone(result['int1']) # remove float1 data = [{'op': 'replace', 'path': '/float1', 'value': None}] url = '/sample_artifact/%s' % art1['id'] result = self.patch(url=url, data=data) self.assertIsNone(result['float1']) # cannot remove id, because it's a system field data = [{'op': 'replace', 'path': '/id', 'value': None}] url = '/sample_artifact/%s' % art1['id'] self.patch(url=url, data=data, status=403) # cannot remove name data = [{'op': 'replace', 'path': '/name', 'value': None}] url = '/sample_artifact/%s' % art1['id'] self.patch(url=url, data=data, status=400) headers = {'Content-Type': 'application/octet-stream'} self.put(url=url + '/blob', data="d" * 1000, headers=headers) # cannot remove blob data = [{'op': 'replace', 'path': '/blob', 'value': None}] url = '/sample_artifact/%s' % art1['id'] self.patch(url=url, data=data, status=400) def test_update_malformed_json_patch(self): data = {'name': 'ttt'} art1 = self.create_artifact(data=data) data = [{'op': 'replace', 'path': None, 'value': 'aaa'}] url = '/sample_artifact/%s' % art1['id'] self.patch(url=url, data=data, status=400) data = [{'op': 'replace', 'path': '/', 'value': 'aaa'}] url = '/sample_artifact/%s' % art1['id'] self.patch(url=url, data=data, status=400) data = [{'op': 'replace', 'path': '//', 'value': 'aaa'}] url = '/sample_artifact/%s' % art1['id'] self.patch(url=url, data=data, status=400) data = [{'op': 'replace', 'path': 'name/', 'value': 'aaa'}] url = '/sample_artifact/%s' % art1['id'] self.patch(url=url, data=data, status=400) data = [{'op': 'replace', 'path': '*/*', 'value': 'aaa'}] url = '/sample_artifact/%s' % art1['id'] self.patch(url=url, data=data, status=400) data = [{'op': 'add', 'path': None, 'value': 'aaa'}] url = '/sample_artifact/%s' % art1['id'] self.patch(url=url, data=data, status=400) data = [{'op': 'add', 'path': '/', 'value': 'aaa'}] url = '/sample_artifact/%s' % art1['id'] self.patch(url=url, data=data, status=400) data = [{'op': 'add', 'path': '//', 'value': 'aaa'}] url = '/sample_artifact/%s' % art1['id'] self.patch(url=url, data=data, status=400) data = [{'op': 'add', 'path': 'name/', 'value': 'aaa'}] url = '/sample_artifact/%s' % art1['id'] self.patch(url=url, data=data, status=400) data = [{'op': 'add', 'path': '*/*', 'value': 'aaa'}] url = '/sample_artifact/%s' % art1['id'] self.patch(url=url, data=data, status=400) data = [{'op': 'add', 'path': '/name'}] url = '/sample_artifact/%s' % art1['id'] self.patch(url=url, data=data, status=400) data = [{'op': 'replace', 'path': None}] url = '/sample_artifact/%s' % art1['id'] self.patch(url=url, data=data, status=400) data = [{'op': 'replace', 'path': '/'}] url = '/sample_artifact/%s' % art1['id'] self.patch(url=url, data=data, status=400) data = [{'op': 'replace', 'path': '//'}] url = '/sample_artifact/%s' % art1['id'] self.patch(url=url, data=data, status=400) data = [{'op': 'replace', 'path': 'name/'}] url = '/sample_artifact/%s' % art1['id'] self.patch(url=url, data=data, status=400) data = [{'op': 'replace', 'path': '*/*'}] url = '/sample_artifact/%s' % art1['id'] self.patch(url=url, data=data, status=400) data = [{'op': 'no-op', 'path': '/name', 'value': 'aaa'}] url = '/sample_artifact/%s' % art1['id'] self.patch(url=url, data=data, status=400) class TestLinks(base.TestArtifact): def test_manage_links(self): some_af = self.create_artifact(data={"name": "test_af"}) dep_af = self.create_artifact(data={"name": "test_dep_af"}) dep_url = "/artifacts/sample_artifact/%s" % some_af['id'] # set valid link patch = [{"op": "replace", "path": "/link1", "value": dep_url}] url = '/sample_artifact/%s' % dep_af['id'] af = self.patch(url=url, data=patch) self.assertEqual(af['link1'], dep_url) # remove link from artifact patch = [{"op": "replace", "path": "/link1", "value": None}] af = self.patch(url=url, data=patch) self.assertIsNone(af['link1']) # try to set invalid link patch = [{"op": "replace", "path": "/link1", "value": "Invalid"}] self.patch(url=url, data=patch, status=400) # try to set link to non-existing artifact non_exiting_url = "/artifacts/sample_artifact/%s" % uuid.uuid4() patch = [{"op": "replace", "path": "/link1", "value": non_exiting_url}] self.patch(url=url, data=patch, status=400) def test_manage_dict_of_links(self): some_af = self.create_artifact(data={"name": "test_af"}) dep_af = self.create_artifact(data={"name": "test_dep_af"}) dep_url = "/artifacts/sample_artifact/%s" % some_af['id'] # set valid link patch = [{"op": "add", "path": "/dict_of_links/link1", "value": dep_url}] url = '/sample_artifact/%s' % dep_af['id'] af = self.patch(url=url, data=patch) self.assertEqual(af['dict_of_links']['link1'], dep_url) # remove link from artifact patch = [{"op": "remove", "path": "/dict_of_links/link1"}] af = self.patch(url=url, data=patch) self.assertNotIn('link1', af['dict_of_links']) # try to set invalid link patch = [{"op": "replace", "path": "/dict_of_links/link1", "value": "Invalid"}] self.patch(url=url, data=patch, status=400) # try to set link to non-existing artifact non_exiting_url = "/artifacts/sample_artifact/%s" % uuid.uuid4() patch = [{"op": "replace", "path": "/dict_of_links/link1", "value": non_exiting_url}] self.patch(url=url, data=patch, status=400) def test_manage_list_of_links(self): some_af = self.create_artifact(data={"name": "test_af"}) dep_af = self.create_artifact(data={"name": "test_dep_af"}) dep_url = "/artifacts/sample_artifact/%s" % some_af['id'] # set valid link patch = [{"op": "add", "path": "/list_of_links/-", "value": dep_url}] url = '/sample_artifact/%s' % dep_af['id'] af = self.patch(url=url, data=patch) self.assertEqual(af['list_of_links'][0], dep_url) # remove link from artifact patch = [{"op": "remove", "path": "/list_of_links/0"}] af = self.patch(url=url, data=patch) self.assertEqual(0, len(af['list_of_links'])) # try to set invalid link patch = [{"op": "add", "path": "/list_of_links/-", "value": "Invalid"}] self.patch(url=url, data=patch, status=400) # try to set link to non-existing artifact non_exiting_url = "/artifacts/sample_artifact/%s" % uuid.uuid4() patch = [{"op": "add", "path": "/list_of_links/-", "value": non_exiting_url}] self.patch(url=url, data=patch, status=400) glare-0.5.0/glare/tests/functional/test_schemas.py000066400000000000000000001257061317401036700222550ustar00rootroot00000000000000# Copyright 2016 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import jsonschema from glare.tests.functional import base fixture_base_props = { u'activated_at': { u'description': u'Datetime when artifact has became active.', u'filter_ops': [u'lt', u'gt'], u'format': u'date-time', u'glareType': u'DateTime', u'readOnly': True, u'required_on_activate': False, u'sortable': True, u'type': [u'string', u'null']}, u'created_at': { u'description': u'Datetime when artifact has been created.', u'filter_ops': [u'lt', u'gt'], u'format': u'date-time', u'glareType': u'DateTime', u'readOnly': True, u'sortable': True, u'type': u'string'}, u'description': {u'default': u'', u'description': u'Artifact description.', u'filter_ops': [], u'glareType': u'String', u'maxLength': 4096, u'mutable': True, u'required_on_activate': False, u'type': [u'string', u'null']}, u'id': {u'description': u'Artifact UUID.', u'filter_ops': [u'eq', u'neq', u'in'], u'glareType': u'String', u'maxLength': 255, u'pattern': u'^([0-9a-fA-F]){8}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}' u'-([0-9a-fA-F]){4}-([0-9a-fA-F]){12}$', u'readOnly': True, u'sortable': True, u'type': u'string'}, u'metadata': {u'additionalProperties': {u'maxLength': 255, u'minLength': 1, u'type': u'string'}, u'default': {}, u'description': u'Key-value dict with useful information ' u'about an artifact.', u'filter_ops': [u'eq', u'neq', u'in'], u'glareType': u'StringDict', u'maxProperties': 255, u'required_on_activate': False, u'type': [u'object', u'null']}, u'name': {u'description': u'Artifact Name.', u'filter_ops': [u'eq', u'neq', u'in'], u'glareType': u'String', u'maxLength': 255, u'minLength': 1, u'required_on_activate': False, u'sortable': True, u'type': u'string'}, u'owner': {u'description': u'ID of user/tenant who uploaded artifact.', u'filter_ops': [u'eq', u'neq', u'in'], u'glareType': u'String', u'maxLength': 255, u'readOnly': True, u'required_on_activate': False, u'sortable': True, u'type': u'string'}, u'status': {u'default': u'drafted', u'description': u'Artifact status.', u'enum': [u'drafted', u'active', u'deactivated', u'deleted'], u'filter_ops': [u'eq', u'neq', u'in'], u'glareType': u'String', u'mutable': True, u'sortable': True, u'type': u'string'}, u'tags': {u'default': [], u'description': u'List of tags added to Artifact.', u'filter_ops': [], u'glareType': u'StringList', u'items': {u'maxLength': 255, u'minLength': 1, u'pattern': u'^[^,/]+$', u'type': u'string'}, u'maxItems': 255, u'mutable': True, u'required_on_activate': False, u'type': [u'array', u'null'], u'uniqueItems': True}, u'updated_at': { u'description': u'Datetime when artifact has been updated last time.', u'filter_ops': [u'lt', u'gt'], u'format': u'date-time', u'glareType': u'DateTime', u'mutable': True, u'readOnly': True, u'sortable': True, u'type': u'string'}, u'version': {u'default': u'0.0.0', u'description': u'Artifact version(semver).', u'filter_ops': [u'eq', u'neq', u'in', u'gt', u'gte', u'lt', u'lte'], u'glareType': u'String', u'pattern': u'/^([0-9]+)\\.([0-9]+)\\.([0-9]+)(?:-' u'([0-9A-Za-z-]+(?:\\.[0-9A-Za-z-]+)*))?' u'(?:\\+[0-9A-Za-z-]+)?$/', u'required_on_activate': False, u'sortable': True, u'type': u'string'}, u'visibility': {u'default': u'private', u'description': u'Artifact visibility that defines if ' u'artifact can be available to other ' u'users.', u'enum': [u'private', u'public'], u'filter_ops': [u'eq', u'neq', u'in'], u'glareType': u'String', u'mutable': True, u'sortable': True, u'type': u'string'} } def generate_type_props(props): props.update(fixture_base_props) return props fixtures = { u'sample_artifact': { u'name': u'sample_artifact', u'properties': generate_type_props({ u'blob': {u'additionalProperties': False, u'description': u'I am Blob', u'filter_ops': [], u'glareType': u'Blob', u'mutable': True, u'properties': { u'md5': {u'type': [u'string', u'null']}, u'sha1': {u'type': [u'string', u'null']}, u'sha256': {u'type': [u'string', u'null']}, u'content_type': { u'type': u'string'}, u'external': { u'type': u'boolean'}, u'size': {u'type': [ u'number', u'null']}, u'status': { u'enum': [ u'saving', u'active'], u'type': u'string'}}, u'required': [u'size', u'md5', u'sha1', u'sha256', u'external', u'status', u'content_type'], u'required_on_activate': False, u'type': [u'object', u'null']}, u'bool1': {u'default': False, u'filter_ops': [u'eq'], u'glareType': u'Boolean', u'required_on_activate': False, u'type': [u'boolean', u'null']}, u'bool2': {u'default': False, u'filter_ops': [u'eq'], u'glareType': u'Boolean', u'required_on_activate': False, u'type': [u'boolean', u'null']}, u'link1': {u'filter_ops': [u'eq', u'neq'], u'glareType': u'Link', u'required_on_activate': False, u'type': [u'string', u'null']}, u'link2': {u'filter_ops': [u'eq', u'neq'], u'glareType': u'Link', u'required_on_activate': False, u'type': [u'string', u'null']}, u'dict_of_blobs': { u'additionalProperties': { u'additionalProperties': False, u'properties': { u'md5': {u'type': [u'string', u'null']}, u'sha1': {u'type': [u'string', u'null']}, u'sha256': {u'type': [u'string', u'null']}, u'content_type': { u'type': u'string'}, u'external': { u'type': u'boolean'}, u'size': { u'type': [ u'number', u'null']}, u'status': { u'enum': [ u'saving', u'active'], u'type': u'string'}}, u'required': [u'size', u'md5', u'sha1', u'sha256', u'external', u'status', u'content_type'], u'type': [u'object', u'null']}, u'default': {}, u'filter_ops': [], u'glareType': u'BlobDict', u'maxProperties': 255, u'required_on_activate': False, u'type': [u'object', u'null']}, u'dict_of_int': { u'additionalProperties': { u'type': u'integer'}, u'default': {}, u'filter_ops': [u'eq', u'in'], u'glareType': u'IntegerDict', u'maxProperties': 255, u'required_on_activate': False, u'type': [u'object', u'null']}, u'dict_of_links': {u'additionalProperties': {u'type': u'string'}, u'default': {}, u'filter_ops': [u'eq'], u'glareType': u'LinkDict', u'maxProperties': 255, u'mutable': True, u'required_on_activate': False, u'type': [u'object', u'null']}, u'list_of_links': {u'default': [], u'filter_ops': [u'eq'], u'glareType': u'LinkList', u'items': {u'type': u'string'}, u'maxItems': 255, u'mutable': True, u'required_on_activate': False, u'type': [u'array', u'null']}, u'dict_of_str': { u'additionalProperties': {u'maxLength': 255, u'type': u'string'}, u'default': {}, u'filter_ops': [u'eq', u'in'], u'glareType': u'StringDict', u'maxProperties': 255, u'required_on_activate': False, u'type': [u'object', u'null']}, u'dict_validators': { u'additionalProperties': False, u'filter_ops': [], u'glareType': u'StringDict', u'maxProperties': 3, u'properties': {u'abc': {u'maxLength': 255, u'type': [u'string', u'null']}, u'def': {u'maxLength': 255, u'type': [u'string', u'null']}, u'ghi': {u'maxLength': 255, u'type': [u'string', u'null']}, u'jkl': {u'maxLength': 255, u'type': [u'string', u'null']}}, u'required_on_activate': False, u'type': [u'object', u'null']}, u'float1': {u'filter_ops': [u'eq', u'neq', u'in', u'gt', u'gte', u'lt', u'lte'], u'glareType': u'Float', u'required_on_activate': False, u'sortable': True, u'type': [u'number', u'null']}, u'float2': {u'filter_ops': [u'eq', u'neq', u'in', u'gt', u'gte', u'lt', u'lte'], u'glareType': u'Float', u'required_on_activate': False, u'sortable': True, u'type': [u'number', u'null']}, u'int1': {u'filter_ops': [u'eq', u'neq', u'in', u'gt', u'gte', u'lt', u'lte'], u'glareType': u'Integer', u'required_on_activate': False, u'sortable': True, u'type': [u'integer', u'null']}, u'int2': {u'filter_ops': [u'eq', u'neq', u'in', u'gt', u'gte', u'lt', u'lte'], u'glareType': u'Integer', u'required_on_activate': False, u'sortable': True, u'type': [u'integer', u'null']}, u'int_validators': {u'filter_ops': [u'eq', u'neq', u'in', u'gt', u'gte', u'lt', u'lte'], u'glareType': u'Integer', u'maximum': 20, u'minimum': 10, u'required_on_activate': False, u'type': [u'integer', u'null']}, u'list_of_int': {u'default': [], u'filter_ops': [u'eq', u'in'], u'glareType': u'IntegerList', u'items': { u'type': u'integer'}, u'maxItems': 255, u'required_on_activate': False, u'type': [u'array', u'null']}, u'list_of_str': {u'default': [], u'filter_ops': [u'eq', u'in'], u'glareType': u'StringList', u'items': {u'maxLength': 255, u'type': u'string'}, u'maxItems': 255, u'required_on_activate': False, u'type': [u'array', u'null']}, u'list_validators': {u'default': [], u'filter_ops': [], u'glareType': u'StringList', u'items': {u'maxLength': 255, u'type': u'string'}, u'maxItems': 3, u'required_on_activate': False, u'type': [u'array', u'null'], u'uniqueItems': True}, u'small_blob': {u'additionalProperties': False, u'filter_ops': [], u'glareType': u'Blob', u'mutable': True, u'properties': { u'md5': {u'type': [u'string', u'null']}, u'sha1': {u'type': [u'string', u'null']}, u'sha256': {u'type': [u'string', u'null']}, u'content_type': { u'type': u'string'}, u'external': { u'type': u'boolean'}, u'size': { u'type': [ u'number', u'null']}, u'status': { u'enum': [ u'saving', u'active'], u'type': u'string'}}, u'required': [u'size', u'md5', u'sha1', u'sha256', u'external', u'status', u'content_type'], u'required_on_activate': False, u'type': [u'object', u'null']}, u'str1': {u'filter_ops': [u'eq', u'neq', u'in'], u'glareType': u'String', u'maxLength': 255, u'required_on_activate': False, u'sortable': True, u'type': [u'string', u'null']}, u'string_mutable': {u'filter_ops': [u'eq', u'neq', u'in'], u'glareType': u'String', u'maxLength': 255, u'mutable': True, u'required_on_activate': False, u'type': [u'string', u'null']}, u'string_regex': {u'filter_ops': [u'eq', u'neq', u'in'], u'glareType': u'String', u'maxLength': 255, u'pattern': u'^([0-9a-fA-F]){8}$', u'required_on_activate': False, u'type': [u'string', u'null']}, u'string_required': { u'filter_ops': [u'eq', u'neq', u'in'], u'glareType': u'String', u'maxLength': 255, u'type': [u'string', u'null']}, u'string_validators': { u'enum': [u'aa', u'bb', u'ccccccccccc'], u'filter_ops': [u'eq', u'neq', u'in'], u'glareType': u'String', u'maxLength': 10, u'required_on_activate': False, u'type': [u'string', u'null']}, u'system_attribute': {u'default': u'default', u'filter_ops': [u'eq', u'neq', u'in'], u'glareType': u'String', u'maxLength': 255, u'readOnly': True, u'sortable': True, u'type': [u'string', u'null']} }), u'required': [u'name'], u'title': u'Artifact type sample_artifact of version 1.0', u'version': u'1.0', u'type': u'object'}, u'tosca_templates': { u'name': u'tosca_templates', u'properties': generate_type_props({ u'template': { u'additionalProperties': False, u'description': u'TOSCA template body.', u'filter_ops': [], u'glareType': u'Blob', u'properties': { u'md5': {u'type': [u'string', u'null']}, u'sha1': {u'type': [u'string', u'null']}, u'sha256': {u'type': [u'string', u'null']}, u'content_type': { u'type': u'string'}, u'external': {u'type': u'boolean'}, u'size': {u'type': [u'number', u'null']}, u'status': {u'enum': [u'saving', u'active'], u'type': u'string'}}, u'required': [u'size', u'md5', u'sha1', u'sha256', u'external', u'status', u'content_type'], u'type': [u'object', u'null']}, u'template_format': {u'description': u'TOSCA template format.', u'filter_ops': [u'eq', u'neq', u'in'], u'glareType': u'String', u'maxLength': 255, u'type': [u'string', u'null']}, }), u'required': [u'name'], u'version': u'1.0', u'title': u'Artifact type tosca_templates of version 1.0', u'type': u'object'}, u'murano_packages': { u'name': u'murano_packages', u'properties': generate_type_props({ u'categories': { u'default': [], u'description': u'List of categories specified for ' u'the package.', u'filter_ops': [u'eq', u'neq', u'in'], u'glareType': u'StringList', u'items': {u'maxLength': 255, u'type': u'string'}, u'maxItems': 255, u'mutable': True, u'type': [u'array', u'null']}, u'class_definitions': { u'default': [], u'description': u'List of class definitions ' u'in the package.', u'filter_ops': [u'eq', u'neq', u'in'], u'glareType': u'StringList', u'items': {u'maxLength': 255, u'type': u'string'}, u'maxItems': 255, u'type': [u'array', u'null'], u'uniqueItems': True}, u'dependencies': { u'default': [], u'description': u'List of package dependencies for ' u'this package.', u'filter_ops': [u'eq', u'neq'], u'glareType': u'LinkList', u'items': {u'type': u'string'}, u'maxItems': 255, u'required_on_activate': False, u'type': [u'array', u'null']}, u'display_name': { u'description': u'Package name in human-readable format.', u'filter_ops': [u'eq', u'neq', u'in'], u'glareType': u'String', u'maxLength': 255, u'mutable': True, u'type': [u'string', u'null']}, u'inherits': { u'additionalProperties': {u'maxLength': 255, u'type': u'string'}, u'default': {}, u'filter_ops': [u'eq', u'neq', u'in'], u'glareType': u'StringDict', u'maxProperties': 255, u'type': [u'object', u'null']}, u'keywords': {u'default': [], u'filter_ops': [u'eq', u'neq', u'in'], u'glareType': u'StringList', u'items': {u'maxLength': 255, u'type': u'string'}, u'maxItems': 255, u'mutable': True, u'type': [u'array', u'null']}, u'package': { u'additionalProperties': False, u'description': u'Murano Package binary.', u'filter_ops': [], u'glareType': u'Blob', u'properties': {u'md5': {u'type': [u'string', u'null']}, u'sha1': {u'type': [u'string', u'null']}, u'sha256': {u'type': [u'string', u'null']}, u'content_type': {u'type': u'string'}, u'external': {u'type': u'boolean'}, u'size': {u'type': [u'number', u'null']}, u'status': {u'enum': [u'saving', u'active'], u'type': u'string'}}, u'required': [u'size', u'md5', u'sha1', u'sha256', u'external', u'status', u'content_type'], u'required_on_activate': False, u'type': [u'object', u'null']}, u'type': { u'default': u'Application', u'description': u'Package type.', u'enum': [u'Application', u'Library', ], u'filter_ops': [u'eq', u'neq', u'in'], u'glareType': u'String', u'type': [u'string', u'null']} }), u'required': [u'name'], u'version': u'1.0', u'title': u'Artifact type murano_packages of version 1.0', u'type': u'object'}, u'images': { u'name': u'images', u'properties': generate_type_props({ u'architecture': { u'description': u'Operating system architecture as specified ' u'in http://docs.openstack.org/trunk/' u'openstack-compute/admin/content/adding-' u'images.html', u'filter_ops': [u'eq', u'neq', u'in'], u'glareType': u'String', u'maxLength': 255, u'required_on_activate': False, u'type': [u'string', u'null']}, u'container_format': {u'description': u'Image container format.', u'enum': [u'ami', u'ari', u'aki', u'bare', u'ovf', u'ova', u'docker', ], u'filter_ops': [u'eq', u'neq', u'in'], u'glareType': u'String', u'type': [u'string', u'null']}, u'disk_format': {u'description': u'Image disk format.', u'enum': [u'ami', u'ari', u'aki', u'vhd', u'vhdx', u'vmdk', u'raw', u'qcow2', u'vdi', u'iso', ], u'filter_ops': [u'eq', u'neq', u'in'], u'glareType': u'String', u'type': [u'string', u'null']}, u'image': {u'additionalProperties': False, u'description': u'Image binary.', u'filter_ops': [], u'glareType': u'Blob', u'properties': { u'md5': {u'type': [u'string', u'null']}, u'sha1': {u'type': [u'string', u'null']}, u'sha256': {u'type': [u'string', u'null']}, u'content_type': {u'type': u'string'}, u'external': {u'type': u'boolean'}, u'size': {u'type': [u'number', u'null']}, u'status': {u'enum': [u'saving', u'active'], u'type': u'string'}}, u'required': [u'size', u'md5', u'sha1', u'sha256', u'external', u'status', u'content_type'], u'required_on_activate': False, u'type': [u'object', u'null']}, u'instance_uuid': { u'description': u'Metadata which can be used to record which ' u'instance this image is associated with. ' u'(Informational only, does not create an ' u'instance snapshot.)', u'filter_ops': [u'eq', u'neq', u'in'], u'glareType': u'String', u'maxLength': 255, u'required_on_activate': False, u'type': [u'string', u'null']}, u'kernel_id': { u'description': u'ID of image stored in Glare that should be ' u'used as the kernel when booting an ' u'AMI-style image.', u'filter_ops': [u'eq', u'neq', u'in'], u'glareType': u'String', u'maxLength': 255, u'pattern': u'^([0-9a-fA-F]){8}-([0-9a-fA-F]){4}-' u'([0-9a-fA-F]){4}-([0-9a-fA-F]){4}-' u'([0-9a-fA-F]){12}$', u'required_on_activate': False, u'type': [u'string', u'null']}, u'min_disk': { u'description': u'Minimal disk space required to boot image.', u'filter_ops': [u'eq', u'neq', u'in', u'gt', u'gte', u'lt', u'lte'], u'glareType': u'Integer', u'minimum': 0, u'required_on_activate': False, u'type': [u'integer', u'null']}, u'min_ram': { u'description': u'Minimal RAM required to boot image.', u'filter_ops': [u'eq', u'neq', u'in', u'gt', u'gte', u'lt', u'lte'], u'glareType': u'Integer', u'minimum': 0, u'required_on_activate': False, u'type': [u'integer', u'null']}, u'os_distro': { u'description': u'Common name of operating system distribution' u' as specified in http://docs.openstack.org/' u'trunk/openstack-compute/admin/content/' u'adding-images.html', u'filter_ops': [u'eq', u'neq', u'in'], u'glareType': u'String', u'maxLength': 255, u'required_on_activate': False, u'type': [u'string', u'null']}, u'os_version': { u'description': u'Operating system version as specified by the' u' distributor', u'filter_ops': [u'eq', u'neq', u'in'], u'glareType': u'String', u'maxLength': 255, u'required_on_activate': False, u'type': [u'string', u'null']}, u'ramdisk_id': { u'description': u'ID of image stored in Glare that should be ' u'used as the ramdisk when booting an ' u'AMI-style image.', u'filter_ops': [u'eq', u'neq', u'in'], u'glareType': u'String', u'maxLength': 255, u'pattern': u'^([0-9a-fA-F]){8}-([0-9a-fA-F]){4}-([0-9a-fA-F])' u'{4}-([0-9a-fA-F]){4}-([0-9a-fA-F]){12}$', u'required_on_activate': False, u'type': [u'string', u'null']}}), u'required': [u'name'], u'version': u'1.0', u'title': u'Artifact type images of version 1.0', u'type': u'object'}, u'heat_templates': { u'name': u'heat_templates', u'properties': generate_type_props({ u'default_envs': { u'additionalProperties': {u'maxLength': 255, u'type': u'string'}, u'default': {}, u'description': u'Default environments that can ' u'be applied to the template if no ' u'environments specified by user.', u'filter_ops': [u'eq', u'neq', u'in'], u'glareType': u'StringDict', u'maxProperties': 255, u'mutable': True, u'type': [u'object', u'null']}, u'environments': { u'additionalProperties': {u'type': u'string'}, u'default': {}, u'description': u'References to Heat Environments ' u'that can be used with current ' u'template.', u'filter_ops': [u'eq', u'neq'], u'glareType': u'LinkDict', u'maxProperties': 255, u'mutable': True, u'type': [u'object', u'null']}, u'nested_templates': { u'additionalProperties': {u'additionalProperties': False, u'properties': { u'md5': {u'type': [u'string', u'null']}, u'sha1': {u'type': [u'string', u'null']}, u'sha256': {u'type': [u'string', u'null']}, u'content_type': { u'type': u'string'}, u'external': {u'type': u'boolean'}, u'size': {u'type': [u'number', u'null']}, u'status': {u'enum': [u'saving', u'active'], u'type': u'string'}}, u'required': [u'size', u'md5', u'sha1', u'sha256', u'external', u'status', u'content_type'], u'type': [u'object', u'null']}, u'default': {}, u'description': u'Dict of nested templates where key is the ' u'name of template and value is nested ' u'template body.', u'filter_ops': [], u'glareType': u'BlobDict', u'maxProperties': 255, u'type': [u'object', u'null']}, u'template': { u'additionalProperties': False, u'description': u'Heat template body.', u'filter_ops': [], u'glareType': u'Blob', u'properties': { u'md5': {u'type': [u'string', u'null']}, u'sha1': {u'type': [u'string', u'null']}, u'sha256': {u'type': [u'string', u'null']}, u'content_type': { u'type': u'string'}, u'external': {u'type': u'boolean'}, u'size': {u'type': [u'number', u'null']}, u'status': {u'enum': [u'saving', u'active'], u'type': u'string'}}, u'required': [u'size', u'md5', u'sha1', u'sha256', u'external', u'status', u'content_type'], u'type': [u'object', u'null']}, }), u'version': u'1.0', u'required': [u'name'], u'title': u'Artifact type heat_templates of version 1.0', u'type': u'object'}, u'heat_environments': { u'name': u'heat_environments', u'properties': generate_type_props({ u'environment': { u'additionalProperties': False, u'description': u'Heat Environment text body.', u'filter_ops': [], u'glareType': u'Blob', u'properties': {u'md5': {u'type': [u'string', u'null']}, u'sha1': {u'type': [u'string', u'null']}, u'sha256': {u'type': [u'string', u'null']}, u'content_type': {u'type': u'string'}, u'external': {u'type': u'boolean'}, u'size': {u'type': [u'number', u'null']}, u'status': {u'enum': [u'saving', u'active'], u'type': u'string'}}, u'required': [u'size', u'md5', u'sha1', u'sha256', u'external', u'status', u'content_type'], u'type': [u'object', u'null']}, }), u'required': [u'name'], u'version': u'1.0', u'title': u'Artifact type heat_environments of version 1.0', u'type': u'object'}, u'all': { u'name': u'all', u'properties': generate_type_props({ u'type_name': {u'description': u'Name of artifact type.', u'filter_ops': [u'eq', u'neq', u'in'], u'glareType': u'String', u'maxLength': 255, u'type': [u'string', u'null']}, }), u'required': [u'name'], u'version': u'1.0', u'title': u'Artifact type all of version 1.0', u'type': u'object'} } class TestSchemas(base.TestArtifact): def test_schemas(self): # Get schemas for specific artifact type for at in self.enabled_types: result = self.get(url='/schemas/%s' % at) self.assertEqual(fixtures[at], result['schemas'][at]) # Get list schemas of artifacts result = self.get(url='/schemas') self.assertEqual(fixtures, result['schemas']) # Validation of schemas result = self.get(url='/schemas')['schemas'] for artifact_type, schema in result.items(): jsonschema.Draft4Validator.check_schema(schema) glare-0.5.0/glare/tests/functional/test_scrubber.py000066400000000000000000000114561317401036700224350ustar00rootroot00000000000000# Copyright 2017 - Nokia Networks # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import sys import time from oslo_serialization import jsonutils from six.moves import range from glare.tests import functional from glare.tests.functional import base from glare.tests.utils import execute class TestScrubber(base.TestArtifact): """Test that delayed_delete works and the scrubber deletes""" def setUp(self): functional.FunctionalTest.setUp(self) self.include_scrubber = True self.set_user('user1') self.glare_server.deployment_flavor = 'noauth' self.glare_server.enabled_artifact_types = ','.join( self.enabled_types) self.glare_server.custom_artifact_types_modules = ( 'glare.tests.sample_artifact') def _create_sample_artifact(self): art = self.create_artifact({'name': 'test_art', 'version': '1.0'}) url = '/sample_artifact/%s' % art['id'] headers = {'Content-Type': 'application/octet-stream'} # upload data to blob self.put(url=url + '/small_blob', data='aaaaaa', headers=headers) # upload a couple of blobs to dict_of_blobs self.put(url + '/dict_of_blobs/blob1', data='bbbb', headers=headers) self.put(url + '/dict_of_blobs/blob2', data='cccc', headers=headers) # add external location body = jsonutils.dumps( {'url': self._url(url + '/small_blob'), 'md5': "fake", 'sha1': "fake_sha", "sha256": "fake_sha256"}) headers = {'Content-Type': 'application/vnd+openstack.glare-custom-location+json'} self.put(url=url + '/blob', data=body, status=200, headers=headers) return url def test_scrubber_delayed_delete(self): """ Test that artifacts don't get deleted immediately and that the scrubber scrubs them. """ self.start_servers(delayed_delete=True, daemon=True, **self.__dict__.copy()) url = self._create_sample_artifact() # create another artifact art2 = self.create_artifact({'name': 'test_art', 'version': '2.0'}) # delete sample artifact self.delete(url=url) art = self.get(url) self.assertEqual('deleted', art['status']) self.wait_for_scrub(url) # check that the second artifact wasn't removed art = self.get('/sample_artifact/%s' % art2['id']) self.assertEqual('drafted', art['status']) def test_scrubber_app(self): """ Test that the scrubber script runs successfully when not in daemon mode. """ self.start_servers(delayed_delete=True, **self.__dict__.copy()) url = self._create_sample_artifact() # wait for the scrub time on the artifacts to pass time.sleep(self.scrubber_daemon.scrub_time) # create another artifact art2 = self.create_artifact({'name': 'test_art', 'version': '2.0'}) # delete sample artifact self.delete(url=url) art = self.get(url) self.assertEqual('deleted', art['status']) # scrub artifacts and make sure they are deleted exe_cmd = "%s -m glare.cmd.scrubber" % sys.executable cmd = ("%s --config-file %s" % (exe_cmd, self.scrubber_daemon.conf_file_name)) exitcode, out, err = execute(cmd, raise_error=False) self.assertEqual(0, exitcode) self.wait_for_scrub(url) # check that the second artifact wasn't removed art = self.get('/sample_artifact/%s' % art2['id']) self.assertEqual('drafted', art['status']) def wait_for_scrub(self, url): """ The build servers sometimes take longer than 15 seconds to scrub. Give it up to 5 min, checking every 5 seconds. When/if it flips to deleted, bail immediately. """ wait_for = 300 # seconds check_every = 5 # seconds for _ in range(wait_for // check_every): time.sleep(check_every) try: self.get(url, status=404) return except Exception: pass else: self.fail("Artifact wasn't scrubbed") glare-0.5.0/glare/tests/functional/test_visibility.py000066400000000000000000000173301317401036700230120ustar00rootroot00000000000000# Copyright 2017 - Nokia Networks # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from glare.tests.functional import base class TestVisibility(base.TestArtifact): """Test Glare artifact visibility for various users.""" def test_visibility_basic(self): self.set_user('user1') art1 = self.create_artifact(data={'name': 'art1', 'version': 1.0}) url = '/sample_artifact/%s' % art1['id'] # Artifact is visible by its owner self.get(url=url) # Owner can modify the artifact patch = [{"op": "replace", "path": "/description", "value": "dd"}] self.patch(url=url, data=patch) # Artifact is not visible by another user self.set_user('user2') self.get(url=url, status=404) # Artifact is visible by admin self.set_user('admin') self.get(url=url) # Admin can update the artifact patch = [{"op": "replace", "path": "/string_required", "value": "gg"}] self.patch(url=url, data=patch) # Activate and publish the artifact self.patch(url=url, data=self.make_active) self.patch(url=url, data=self.make_public) # All users can see public artifact self.set_user('user1') self.get(url=url) # Default policy 'update_public' forbids the owner to update public # artifacts patch = [{"op": "replace", "path": "/description", "value": "bb"}] self.patch(url=url, data=patch, status=403) self.set_user('admin') self.get(url=url) # Admin can always update public artifacts patch = [{"op": "replace", "path": "/description", "value": "ss"}] self.patch(url=url, data=patch) self.set_user('user2') self.get(url=url) # Regular user cannot update public artifact patch = [{"op": "replace", "path": "/description", "value": "aa"}] self.patch(url=url, data=patch, status=403) def test_visibility_name_version(self): self.set_user('user1') self.create_artifact(data={'name': 'my_art', 'version': 1.0}) # User can't create another artifact with the same name/version self.create_artifact(data={'name': 'my_art', 'version': 1.0}, status=409) art2 = self.create_artifact(data={'name': 'your_art', 'version': 2.0}) url = '/sample_artifact/%s' % art2['id'] # User can't change name and version if such artifact already exists patch = [ {"op": "replace", "path": "/name", "value": "my_art"}, {"op": "replace", "path": "/version", "value": 1.0} ] self.patch(url=url, data=patch, status=409) # Another user can create an artifact with the same name/version self.set_user("user2") art3 = self.create_artifact(data={'name': 'my_art', 'version': 1.0}) # Now admin sees 2 artifacts with the same name/version self.set_user("admin") url = '/sample_artifact?name=my_art&version=1' self.assertEqual(2, len(self.get(url=url)['sample_artifact'])) # Admin can activate and publish artifact art3 url = '/sample_artifact/%s' % art3['id'] patch = [{"op": "replace", "path": "/string_required", "value": "gg"}] self.patch(url=url, data=patch) self.patch(url=url, data=self.make_active) self.patch(url=url, data=self.make_public) # After that user1 sees 2 artifacts with the same name/version as well self.set_user("user1") url = '/sample_artifact?name=my_art&version=1' self.assertEqual(2, len(self.get(url=url)['sample_artifact'])) # User2 still sees only his public artifact self.set_user("user2") url = '/sample_artifact?name=my_art&version=1' self.assertEqual(1, len(self.get(url=url)['sample_artifact'])) # Admin is able to create a private artifact with the same name/version self.set_user("admin") art4 = self.create_artifact(data={'name': 'my_art', 'version': 1.0}) # And he sees 3 artifacts url = '/sample_artifact?name=my_art&version=1' self.assertEqual(3, len(self.get(url=url)['sample_artifact'])) # But he can't publish his artifact, because this name/version already # exists in public scope url = '/sample_artifact/%s' % art4['id'] patch = [{"op": "replace", "path": "/string_required", "value": "gg"}] self.patch(url=url, data=patch) self.patch(url=url, data=self.make_active) self.patch(url=url, data=self.make_public, status=409) # Admin publishes artifact art2 url = '/sample_artifact/%s' % art2['id'] patch = [{"op": "replace", "path": "/string_required", "value": "gg"}] self.patch(url=url, data=patch) self.patch(url=url, data=self.make_active) self.patch(url=url, data=self.make_public) # User2 can create his own private artifact with the same name/version self.set_user("user2") self.create_artifact(data={'name': 'your_art', 'version': 2.0}) def test_visibility_artifact_types(self): self.set_user('user1') self.create_artifact(data={'name': 'my_art', 'version': 1.0}, type_name='images') self.create_artifact(data={'name': 'my_art', 'version': 1.0}, type_name='heat_templates') self.create_artifact(data={'name': 'my_art', 'version': 1.0}, type_name='heat_environments') def test_visibility_all(self): self.set_user('user1') art1 = self.create_artifact(data={'name': 'my_art', 'version': 1.0}, type_name='images') art2 = self.create_artifact(data={'name': 'my_art', 'version': 1.0}, type_name='heat_templates') # User 1 sees his 2 artifacts url = '/all?name=my_art&version=1' self.assertEqual(2, len(self.get(url=url)['all'])) self.set_user('user2') self.create_artifact(data={'name': 'my_art', 'version': 1.0}, type_name='images') self.create_artifact(data={'name': 'my_art', 'version': 1.0}, type_name='heat_templates') # User 2 sees his 2 artifacts url = '/all?name=my_art&version=1' self.assertEqual(2, len(self.get(url=url)['all'])) # Admin sees 4 artifacts from both users self.set_user("admin") self.assertEqual(4, len(self.get(url=url)['all'])) # After publishing art1 and art2 user 2 can see 4 artifacts as well url = '/sample_artifact/%s' % art1['id'] patch = [{"op": "replace", "path": "/string_required", "value": "gg"}] self.patch(url=url, data=patch) self.patch(url=url, data=self.make_active) self.patch(url=url, data=self.make_public) url = '/sample_artifact/%s' % art2['id'] patch = [{"op": "replace", "path": "/string_required", "value": "gg"}] self.patch(url=url, data=patch) self.patch(url=url, data=self.make_active) self.patch(url=url, data=self.make_public) self.set_user("user2") url = '/all?name=my_art&version=1' self.assertEqual(4, len(self.get(url=url)['all'])) glare-0.5.0/glare/tests/hooks_artifact.py000066400000000000000000000172251317401036700204250ustar00rootroot00000000000000# Copyright 2017 - Nokia Networks # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import tempfile from oslo_config import cfg from oslo_log import log as logging from oslo_versionedobjects import fields from glare.objects import base from glare.objects.meta import wrappers Field = wrappers.Field.init Dict = wrappers.DictField.init List = wrappers.ListField.init Blob = wrappers.BlobField.init Folder = wrappers.FolderField.init CONF = cfg.CONF LOG = logging.getLogger(__name__) class HookChecker(base.BaseArtifact): fields = { 'temp_dir': Field( fields.StringField, required_on_activate=False, mutable=True), 'temp_file_path_create': Field( fields.StringField, required_on_activate=False, mutable=True), 'temp_file_path_update': Field( fields.StringField, required_on_activate=False, mutable=True), 'temp_file_path_activate': Field( fields.StringField, required_on_activate=False, mutable=True), 'temp_file_path_reactivate': Field( fields.StringField, required_on_activate=False, mutable=True), 'temp_file_path_deactivate': Field( fields.StringField, required_on_activate=False, mutable=True), 'temp_file_path_publish': Field( fields.StringField, required_on_activate=False, mutable=True), 'blob': Blob( required_on_activate=False, mutable=True) } artifact_type_opts = [ cfg.StrOpt('temp_file_path') ] @classmethod def get_type_name(cls): return "hooks_artifact" @classmethod def pre_create_hook(cls, context, af): # create a temporary file and set the path to artifact field __, af.temp_file_path_create = tempfile.mkstemp(dir=af.temp_dir) with open(af.temp_file_path_create, 'w') as f: f.write('pre_create_hook was called\n') @classmethod def post_create_hook(cls, context, af): with open(af.temp_file_path_create, 'a') as f: f.write('post_create_hook was called\n') @classmethod def pre_update_hook(cls, context, af): # create a temporary file and set the path to artifact field __, af.temp_file_path_update = tempfile.mkstemp(dir=af.temp_dir) with open(af.temp_file_path_update, 'w') as f: f.write('pre_update_hook was called\n') @classmethod def post_update_hook(cls, context, af): with open(af.temp_file_path_update, 'a') as f: f.write('post_update_hook was called\n') @classmethod def pre_activate_hook(cls, context, af): # create a temporary file and set the path to artifact field __, af.temp_file_path_activate = tempfile.mkstemp(dir=af.temp_dir) with open(af.temp_file_path_activate, 'w') as f: f.write('pre_activate_hook was called\n') @classmethod def post_activate_hook(cls, context, af): with open(af.temp_file_path_activate, 'a') as f: f.write('post_activate_hook was called\n') @classmethod def pre_publish_hook(cls, context, af): # create a temporary file and set the path to artifact field __, af.temp_file_path_publish = tempfile.mkstemp(dir=af.temp_dir) with open(af.temp_file_path_publish, 'w') as f: f.write('pre_publish_hook was called\n') @classmethod def post_publish_hook(cls, context, af): with open(af.temp_file_path_publish, 'a') as f: f.write('post_publish_hook was called\n') @classmethod def pre_deactivate_hook(cls, context, af): # create a temporary file and set the path to artifact field __, af.temp_file_path_deactivate = tempfile.mkstemp(dir=af.temp_dir) with open(af.temp_file_path_deactivate, 'w') as f: f.write('pre_deactivate_hook was called\n') @classmethod def post_deactivate_hook(cls, context, af): with open(af.temp_file_path_deactivate, 'a') as f: f.write('post_deactivate_hook was called\n') @classmethod def pre_reactivate_hook(cls, context, af): # create a temporary file and set the path to artifact field __, af.temp_file_path_reactivate = tempfile.mkstemp(dir=af.temp_dir) with open(af.temp_file_path_reactivate, 'w') as f: f.write('pre_reactivate_hook was called\n') @classmethod def post_reactivate_hook(cls, context, af): with open(af.temp_file_path_reactivate, 'a') as f: f.write('post_reactivate_hook was called\n') @classmethod def pre_upload_hook(cls, context, af, field_name, blob_key, fd): # create a temporary file and set the path to artifact field file_path = getattr( CONF, 'artifact_type:hooks_artifact').temp_file_path if file_path: with open(file_path, 'w') as f: f.write('pre_upload_hook was called\n') return fd @classmethod def post_upload_hook(cls, context, af, field_name, blob_key): file_path = getattr( CONF, 'artifact_type:hooks_artifact').temp_file_path if file_path: with open(file_path, 'a') as f: f.write('post_upload_hook was called\n') @classmethod def pre_add_location_hook( cls, context, af, field_name, blob_key, location): # create a temporary file and set the path to artifact field file_path = getattr( CONF, 'artifact_type:hooks_artifact').temp_file_path if file_path: with open(file_path, 'w') as f: f.write('pre_add_location_hook was called\n') @classmethod def post_add_location_hook(cls, context, af, field_name, blob_key): file_path = getattr( CONF, 'artifact_type:hooks_artifact').temp_file_path if file_path: with open(file_path, 'a') as f: f.write('post_add_location_hook was called\n') @classmethod def pre_download_hook(cls, context, af, field_name, blob_key): file_path = getattr( CONF, 'artifact_type:hooks_artifact').temp_file_path if file_path: with open(file_path, 'a') as f: f.write('pre_download_hook was called\n') @classmethod def post_download_hook(cls, context, af, field_name, blob_key, fd): file_path = getattr( CONF, 'artifact_type:hooks_artifact').temp_file_path if file_path: with open(file_path, 'a') as f: f.write('post_download_hook was called\n') return fd @classmethod def pre_delete_hook(cls, context, af): file_path = getattr( CONF, 'artifact_type:hooks_artifact').temp_file_path if file_path: with open(file_path, 'w') as f: f.write('pre_delete_hook was called\n') @classmethod def post_delete_hook(cls, context, af): file_path = getattr( CONF, 'artifact_type:hooks_artifact').temp_file_path if file_path: with open(file_path, 'a') as f: f.write('post_delete_hook was called\n') glare-0.5.0/glare/tests/sample_artifact.py000066400000000000000000000147361317401036700205670ustar00rootroot00000000000000# Copyright (c) 2016 Mirantis, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Sample artifact object for testing purposes""" from oslo_versionedobjects import fields from glare.objects import base as base_artifact from glare.objects.meta import fields as glare_fields from glare.objects.meta import validators from glare.objects.meta import wrappers Field = wrappers.Field.init Dict = wrappers.DictField.init List = wrappers.ListField.init Blob = wrappers.BlobField.init Folder = wrappers.FolderField.init class SampleArtifact(base_artifact.BaseArtifact): VERSION = '1.0' fields = { 'blob': Blob(required_on_activate=False, mutable=True, description="I am Blob"), 'small_blob': Blob(max_blob_size=10, required_on_activate=False, mutable=True), 'link1': Field(glare_fields.Link, required_on_activate=False), 'link2': Field(glare_fields.Link, required_on_activate=False), 'bool1': Field(fields.FlexibleBooleanField, required_on_activate=False, filter_ops=(wrappers.FILTER_EQ,), default=False), 'bool2': Field(fields.FlexibleBooleanField, required_on_activate=False, filter_ops=(wrappers.FILTER_EQ,), default=False), 'int1': Field(fields.IntegerField, required_on_activate=False, sortable=True), 'int2': Field(fields.IntegerField, sortable=True, required_on_activate=False), 'float1': Field(fields.FloatField, sortable=True, required_on_activate=False), 'float2': Field(fields.FloatField, sortable=True, required_on_activate=False), 'str1': Field(fields.StringField, sortable=True, required_on_activate=False), 'list_of_str': List(fields.String, required_on_activate=False, filter_ops=(wrappers.FILTER_EQ, wrappers.FILTER_IN)), 'list_of_int': List(fields.Integer, required_on_activate=False, filter_ops=(wrappers.FILTER_EQ, wrappers.FILTER_IN)), 'dict_of_str': Dict(fields.String, required_on_activate=False, filter_ops=(wrappers.FILTER_EQ, wrappers.FILTER_IN)), 'dict_of_int': Dict(fields.Integer, required_on_activate=False, filter_ops=(wrappers.FILTER_EQ, wrappers.FILTER_IN)), 'dict_of_links': Dict(glare_fields.LinkFieldType, mutable=True, required_on_activate=False, filter_ops=(wrappers.FILTER_EQ,)), 'list_of_links': List(glare_fields.LinkFieldType, mutable=True, required_on_activate=False, filter_ops=(wrappers.FILTER_EQ,)), 'dict_of_blobs': Folder(required_on_activate=False, max_folder_size=2000, validators=[ validators.MaxDictKeyLen(1000)]), 'string_mutable': Field(fields.StringField, required_on_activate=False, mutable=True), 'string_regex': Field(fields.StringField, required_on_activate=False, validators=[ validators.Regex('^([0-9a-fA-F]){8}$')]), 'string_required': Field(fields.StringField, required_on_activate=True), 'string_validators': Field(fields.StringField, required_on_activate=False, validators=[ validators.AllowedValues( ['aa', 'bb', 'c' * 11]), validators.MaxStrLen(10) ]), 'int_validators': Field(fields.IntegerField, required_on_activate=False, validators=[ validators.MinNumberSize(10), validators.MaxNumberSize(20) ]), 'list_validators': List(fields.String, required_on_activate=False, filter_ops=[], max_size=3, validators=[validators.Unique()]), 'dict_validators': Dict(fields.String, required_on_activate=False, default=None, filter_ops=[], validators=[ validators.AllowedDictKeys([ 'abc', 'def', 'ghi', 'jkl'])], max_size=3), 'system_attribute': Field(fields.StringField, system=True, sortable=True, default="default") } @classmethod def get_type_name(cls): return "sample_artifact" def to_dict(self): res = self.obj_to_primitive()['versioned_object.data'] res['__some_meta_information__'] = res['name'].upper() return res @classmethod def format_all(cls, values): values['__some_meta_information__'] = values['name'].upper() return values glare-0.5.0/glare/tests/unit/000077500000000000000000000000001317401036700160235ustar00rootroot00000000000000glare-0.5.0/glare/tests/unit/__init__.py000066400000000000000000000017501317401036700201370ustar00rootroot00000000000000# Copyright 2016 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import oslo_i18n as i18n def fake_translate_msgid(msgid, domain, desired_locale=None): return msgid i18n.enable_lazy() # To ensure messages don't really get translated while running tests. # As there are lots of places where matching is expected when comparing # exception message(translated) with raw message. i18n._translate_msgid = fake_translate_msgid glare-0.5.0/glare/tests/unit/api/000077500000000000000000000000001317401036700165745ustar00rootroot00000000000000glare-0.5.0/glare/tests/unit/api/__init__.py000066400000000000000000000000001317401036700206730ustar00rootroot00000000000000glare-0.5.0/glare/tests/unit/api/test_create.py000066400000000000000000000227551317401036700214630ustar00rootroot00000000000000# Copyright 2017 - Nokia Networks # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from glare.common import exception as exc from glare.tests.unit import base class TestArtifactCreate(base.BaseTestArtifactAPI): """Test Glare artifact creation.""" def test_create_artifact_minimal(self): for name in ['ttt', 'tt:t', 'tt t', 'tt: t', 'tt,t']: values = {'name': name} res = self.controller.create(self.req, 'sample_artifact', values) self.assertEqual(name, res['name']) self.assertEqual('0.0.0', res['version']) self.assertEqual(self.users['user1']['tenant_id'], res['owner']) self.assertEqual('drafted', res['status']) self.assertEqual('private', res['visibility']) self.assertEqual('', res['description']) self.assertEqual({}, res['metadata']) self.assertEqual([], res['tags']) def test_create_artifact_with_version(self): values = {'name': 'name', 'version': '1.0'} res = self.controller.create(self.req, 'sample_artifact', values) self.assertEqual('name', res['name']) self.assertEqual('1.0.0', res['version']) values = {'name': 'name', 'version': '1:0'} res = self.controller.create(self.req, 'sample_artifact', values) self.assertEqual('1.0.0-0', res['version']) values = {'name': 'name', 'version': '1:0:0'} res = self.controller.create(self.req, 'sample_artifact', values) self.assertEqual('1.0.0-0-0', res['version']) values = {'name': 'name', 'version': '2:0-0'} res = self.controller.create(self.req, 'sample_artifact', values) self.assertEqual('2.0.0-0-0', res['version']) def test_create_artifact_with_fields(self): values = {'name': 'ttt', 'version': '1.0', 'description': "Test Artifact", 'tags': ['a', 'a', 'b'], 'metadata': {'type': 'image'}} res = self.controller.create(self.req, 'sample_artifact', values) self.assertEqual('ttt', res['name']) self.assertEqual('1.0.0', res['version']) self.assertEqual(self.users['user1']['tenant_id'], res['owner']) self.assertEqual('drafted', res['status']) self.assertEqual('private', res['visibility']) self.assertEqual('Test Artifact', res['description']) self.assertEqual({'type': 'image'}, res['metadata']) self.assertEqual({'a', 'b'}, set(res['tags'])) def test_create_no_artifact_type(self): values = {'name': 'ttt'} self.assertRaises(exc.NotFound, self.controller.create, self.req, 'wrong_type', values) def test_create_artifact_no_name(self): values = {'version': '1.0'} self.assertRaises(exc.BadRequest, self.controller.create, self.req, 'sample_artifact', values) def test_create_artifact_wrong_parameters(self): values = {'name': 'test', 'version': 'invalid_format'} self.assertRaises(exc.BadRequest, self.controller.create, self.req, 'sample_artifact', values) values = {'name': 'test', 'version': -1} self.assertRaises(exc.BadRequest, self.controller.create, self.req, 'sample_artifact', values) values = {'name': 'test', 'version': ':'} self.assertRaises(exc.BadRequest, self.controller.create, self.req, 'sample_artifact', values) values = {'name': '', 'version': '1.0'} self.assertRaises(exc.BadRequest, self.controller.create, self.req, 'sample_artifact', values) values = {'name': 'a' * 256} self.assertRaises(exc.BadRequest, self.controller.create, self.req, 'sample_artifact', values) values = {'name': 'test', 'description': 'a' * 4097} self.assertRaises(exc.BadRequest, self.controller.create, self.req, 'sample_artifact', values) values = {'name': 'test', 'tags': ['a' * 256]} self.assertRaises(exc.BadRequest, self.controller.create, self.req, 'sample_artifact', values) values = {'name': 'test', 'tags': ['']} self.assertRaises(exc.BadRequest, self.controller.create, self.req, 'sample_artifact', values) values = {'name': 'test', 'tags': ['a/a']} self.assertRaises(exc.BadRequest, self.controller.create, self.req, 'sample_artifact', values) values = {'name': 'test', 'tags': ['a,a']} self.assertRaises(exc.BadRequest, self.controller.create, self.req, 'sample_artifact', values) values = {'name': 'test', 'tags': [str(i) for i in range(256)]} self.assertRaises(exc.BadRequest, self.controller.create, self.req, 'sample_artifact', values) values = {'name': 'test', 'metadata': {'key': 'a' * 256}} self.assertRaises(exc.BadRequest, self.controller.create, self.req, 'sample_artifact', values) values = {'name': 'test', 'metadata': {'': 'a'}} self.assertRaises(exc.BadRequest, self.controller.create, self.req, 'sample_artifact', values) values = {'name': 'test', 'metadata': {'a' * 256: 'a'}} self.assertRaises(exc.BadRequest, self.controller.create, self.req, 'sample_artifact', values) values = {'name': 'test', 'metadata': {('a' + str(i)): 'a' for i in range(256)}} self.assertRaises(exc.BadRequest, self.controller.create, self.req, 'sample_artifact', values) def test_create_artifact_not_existing_field(self): values = {'name': 'test', 'not_exist': 'some_value'} self.assertRaises(exc.BadRequest, self.controller.create, self.req, 'sample_artifact', values) values = {'name': 'test', '': 'a'} self.assertRaises(exc.BadRequest, self.controller.create, self.req, 'sample_artifact', values) def test_create_artifact_blob(self): values = {'name': 'test', 'blob': 'DATA'} self.assertRaises(exc.BadRequest, self.controller.create, self.req, 'sample_artifact', values) def test_create_artifact_system_fields(self): values = {'name': 'test', 'id': '5fdeba9a-ba12-4147-bb8a-a8daada84222'} self.assertRaises(exc.Forbidden, self.controller.create, self.req, 'sample_artifact', values) values = {'name': 'test', 'created_at': '2000-01-01'} self.assertRaises(exc.Forbidden, self.controller.create, self.req, 'sample_artifact', values) values = {'name': 'test', 'updated_at': '2000-01-01'} self.assertRaises(exc.Forbidden, self.controller.create, self.req, 'sample_artifact', values) values = {'name': 'test', 'activated_at': '2000-01-01'} self.assertRaises(exc.Forbidden, self.controller.create, self.req, 'sample_artifact', values) values = {'name': 'test', 'owner': 'new_owner'} self.assertRaises(exc.Forbidden, self.controller.create, self.req, 'sample_artifact', values) def test_create_artifact_status_and_visibility(self): values = {'name': 'test', 'status': 'activated'} self.assertRaises(exc.BadRequest, self.controller.create, self.req, 'sample_artifact', values) values = {'name': 'test', 'visibility': 'public'} self.assertRaises(exc.BadRequest, self.controller.create, self.req, 'sample_artifact', values) def test_create_artifact_unicode(self): name = u'\u0442\u0435\u0441\u0442' description = u'\u041E\u043F\u0438\u0441\u0430\u043D\u0438\u0435' tags = [u'\u041C\u0435\u0442\u043A\u0430'] metadata = {'key': u'\u0417\u043D\u0430\u0447\u0435\u043D\u0438\u0435'} values = { 'name': name, 'version': '1.0', 'description': description, 'tags': tags, 'metadata': metadata } res = self.controller.create(self.req, 'images', values) self.assertEqual(name, res['name']) self.assertEqual('1.0.0', res['version']) self.assertEqual(self.users['user1']['tenant_id'], res['owner']) self.assertEqual('drafted', res['status']) self.assertEqual('private', res['visibility']) self.assertEqual(description, res['description']) self.assertEqual(metadata, res['metadata']) self.assertEqual(tags, res['tags']) def test_create_artifact_4_byte_unicode(self): bad_name = u'A name with forbidden symbol \U0001f62a' values = { 'name': bad_name, 'version': '1.0', } self.assertRaises(exc.BadRequest, self.controller.create, self.req, 'images', values) glare-0.5.0/glare/tests/unit/api/test_delete.py000066400000000000000000000226561317401036700214620ustar00rootroot00000000000000# Copyright 2017 - Nokia Networks # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import mock from six import BytesIO from glare.common import exception as exc from glare.common import store_api from glare.db import artifact_api from glare.tests.unit import base class TestArtifactDelete(base.BaseTestArtifactAPI): """Test Glare artifact deletion.""" def setUp(self): super(TestArtifactDelete, self).setUp() values = {'name': 'ttt', 'version': '1.0'} self.artifact = self.controller.create( self.req, 'sample_artifact', values) # Upload data self.controller.upload_blob( self.req, 'sample_artifact', self.artifact['id'], 'blob', BytesIO(b'a' * 100), 'application/octet-stream') # Check that data was uploaded successfully self.artifact = self.controller.show( self.req, 'sample_artifact', self.artifact['id']) self.assertEqual(100, self.artifact['blob']['size']) self.assertEqual('active', self.artifact['blob']['status']) @mock.patch('glare.common.store_api.delete_blob', side_effect=store_api.delete_blob) def test_delete_with_data(self, mocked_delete): # Delete artifact and check that 'delete_blob' was called self.controller.delete(self.req, 'sample_artifact', self.artifact['id']) self.assertRaises(exc.NotFound, self.controller.show, self.req, 'sample_artifact', self.artifact['id']) self.assertEqual(1, mocked_delete.call_count) @mock.patch('glare.common.store_api.delete_blob', side_effect=store_api.delete_blob) def test_delete_with_blob_dict(self, mocked_delete): # Upload data for i in range(10): self.controller.upload_blob( self.req, 'sample_artifact', self.artifact['id'], 'dict_of_blobs/blob%d' % i, BytesIO(b'a' * 100), 'application/octet-stream') # Check that data was uploaded successfully self.artifact = self.controller.show( self.req, 'sample_artifact', self.artifact['id']) for i in range(10): self.assertEqual( 100, self.artifact['dict_of_blobs']['blob%d' % i]['size']) self.assertEqual( 'active', self.artifact['dict_of_blobs']['blob%d' % i]['status']) # Delete artifact and check that 'delete_blob' was called for each blob # 10 times for blob dict elements and once for 'blob' self.controller.delete(self.req, 'sample_artifact', self.artifact['id']) self.assertRaises(exc.NotFound, self.controller.show, self.req, 'sample_artifact', self.artifact['id']) self.assertEqual(11, mocked_delete.call_count) def test_delete_not_found(self): self.assertRaises(exc.NotFound, self.controller.delete, self.req, 'sample_artifact', 'INVALID_ID') def test_delete_saving_blob(self): blob = self.artifact['blob'] # Change status of the blob to 'saving' blob['status'] = 'saving' artifact_api.ArtifactAPI().update_blob( self.req.context, self.artifact['id'], {'blob': blob}) self.artifact = self.controller.show( self.req, 'sample_artifact', self.artifact['id']) blob = self.artifact['blob'] self.assertEqual(100, blob['size']) self.assertEqual('saving', blob['status']) # Deleting of the artifact leads to Conflict error self.assertRaises(exc.Conflict, self.controller.delete, self.req, 'sample_artifact', self.artifact['id']) self.artifact = self.controller.show( self.req, 'sample_artifact', self.artifact['id']) self.assertEqual('drafted', self.artifact['status']) def test_delete_deleted_artifact(self): # Change status of the artifact to 'deleted' artifact_api.ArtifactAPI().save( self.req.context, self.artifact['id'], {'status': 'deleted'}) # Delete should work properly self.controller.delete(self.req, 'sample_artifact', self.artifact['id']) self.assertRaises(exc.NotFound, self.controller.show, self.req, 'sample_artifact', self.artifact['id']) @mock.patch('glare.common.store_api.delete_blob', side_effect=exc.NotFound) def test_delete_link_not_exist(self, mocked_delete): # Delete artifact and check that 'delete_blob' was called self.controller.delete(self.req, 'sample_artifact', self.artifact['id']) self.assertRaises(exc.NotFound, self.controller.show, self.req, 'sample_artifact', self.artifact['id']) self.assertEqual(1, mocked_delete.call_count) @mock.patch('glare.common.store_api.delete_blob', side_effect=exc.Forbidden) def test_no_delete_permission(self, mocked_delete): # Try to delete artifact self.assertRaises(exc.Forbidden, self.controller.delete, self.req, 'sample_artifact', self.artifact['id']) @mock.patch('glare.common.store_api.delete_blob', side_effect=exc.GlareException) def test_delete_unknown_store_exception(self, mocked_delete): # Try to delete artifact self.assertRaises(exc.GlareException, self.controller.delete, self.req, 'sample_artifact', self.artifact['id']) @mock.patch('glare.common.store_api.delete_blob', side_effect=exc.NotFound) def test_delete_blob_not_found(self, mocked_delete): # Upload a file to blob dict self.controller.upload_blob( self.req, 'sample_artifact', self.artifact['id'], 'dict_of_blobs/blob', BytesIO(b'a' * 100), 'application/octet-stream') # Despite the exception artifact should be deleted successfully self.controller.delete(self.req, 'sample_artifact', self.artifact['id']) self.assertRaises(exc.NotFound, self.controller.show, self.req, 'sample_artifact', self.artifact['id']) self.assertEqual(2, mocked_delete.call_count) @mock.patch('glare.common.store_api.delete_blob', side_effect=store_api.delete_blob) def test_delayed_delete_global(self, mocked_delete): # Enable delayed delete self.config(delayed_delete=True) # Delete artifact and check that 'delete_blob' was not called self.controller.delete(self.req, 'sample_artifact', self.artifact['id']) self.assertEqual(0, mocked_delete.call_count) # Check that artifact status is 'deleted' and its blob is # 'pending_delete' self.artifact = self.controller.show( self.req, 'sample_artifact', self.artifact['id']) self.assertEqual('deleted', self.artifact['status']) self.assertEqual('active', self.artifact['blob']['status']) # Disable delayed delete self.config(delayed_delete=False) # Delete artifact and check that 'delete_blob' was called this time self.controller.delete(self.req, 'sample_artifact', self.artifact['id']) self.assertEqual(1, mocked_delete.call_count) self.assertRaises(exc.NotFound, self.controller.show, self.req, 'sample_artifact', self.artifact['id']) @mock.patch('glare.common.store_api.delete_blob', side_effect=store_api.delete_blob) def test_delayed_delete_per_artifact_type(self, mocked_delete): # Enable delayed delete for sample_artifact type # Global parameter is disabled self.config(delayed_delete=True, group='artifact_type:sample_artifact') # Delete artifact and check that 'delete_blob' was not called self.controller.delete(self.req, 'sample_artifact', self.artifact['id']) self.assertEqual(0, mocked_delete.call_count) # Check that artifact status is 'deleted' and its blob is # 'pending_delete' self.artifact = self.controller.show( self.req, 'sample_artifact', self.artifact['id']) self.assertEqual('deleted', self.artifact['status']) self.assertEqual('active', self.artifact['blob']['status']) # Disable delayed delete self.config(delayed_delete=False, group='artifact_type:sample_artifact') # Delete artifact and check that 'delete_blob' was called this time self.controller.delete(self.req, 'sample_artifact', self.artifact['id']) self.assertEqual(1, mocked_delete.call_count) self.assertRaises(exc.NotFound, self.controller.show, self.req, 'sample_artifact', self.artifact['id']) glare-0.5.0/glare/tests/unit/api/test_delete_blobs.py000066400000000000000000000143171317401036700226360ustar00rootroot00000000000000# Copyright 2017 - Nokia Networks # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from six import BytesIO from glare.common import exception as exc from glare.tests.unit import base class TestDeleteBlobs(base.BaseTestArtifactAPI): """Test deleting of custom locations.""" def setUp(self): super(TestDeleteBlobs, self).setUp() values = {'name': 'ttt', 'version': '1.0', 'int1': '10'} self.sample_artifact = self.controller.create( self.req, 'sample_artifact', values) self.ct = 'application/vnd+openstack.glare-custom-location+json' def test_delete_external_blob(self): # Add external location body = {'url': 'https://FAKE_LOCATION.com', 'md5': "fake", 'sha1': "fake_sha", "sha256": "fake_sha256"} self.controller.upload_blob( self.req, 'sample_artifact', self.sample_artifact['id'], 'blob', body, self.ct) art = self.controller.show(self.req, 'sample_artifact', self.sample_artifact['id']) self.assertEqual('active', art['blob']['status']) self.assertEqual('fake', art['blob']['md5']) self.assertEqual('fake_sha', art['blob']['sha1']) self.assertEqual('fake_sha256', art['blob']['sha256']) self.assertIsNone(art['blob']['size']) self.assertIsNone(art['blob']['content_type']) self.assertEqual('https://FAKE_LOCATION.com', art['blob']['url']) self.assertNotIn('id', art['blob']) # Delete external blob works self.controller.delete_external_blob( self.req, 'sample_artifact', self.sample_artifact['id'], 'blob') art = self.controller.show(self.req, 'sample_artifact', self.sample_artifact['id']) self.assertIsNone(art['blob']) def test_delete_external_blob_dict(self): # Add external location to the folder body = {'url': 'https://FAKE_LOCATION.com', 'md5': "fake", 'sha1': "fake_sha", "sha256": "fake_sha256"} self.controller.upload_blob( self.req, 'sample_artifact', self.sample_artifact['id'], 'dict_of_blobs/blob', body, self.ct) art = self.controller.show(self.req, 'sample_artifact', self.sample_artifact['id']) self.assertEqual('active', art['dict_of_blobs']['blob']['status']) self.assertEqual('fake', art['dict_of_blobs']['blob']['md5']) self.assertEqual('fake_sha', art['dict_of_blobs']['blob']['sha1']) self.assertEqual('fake_sha256', art['dict_of_blobs']['blob']['sha256']) self.assertIsNone(art['dict_of_blobs']['blob']['size']) self.assertIsNone(art['dict_of_blobs']['blob']['content_type']) self.assertEqual('https://FAKE_LOCATION.com', art['dict_of_blobs']['blob']['url']) self.assertNotIn('id', art['blob']) # Delete external blob works self.controller.delete_external_blob( self.req, 'sample_artifact', self.sample_artifact['id'], 'dict_of_blobs/blob') art = self.controller.show(self.req, 'sample_artifact', self.sample_artifact['id']) self.assertNotIn('blob', art['dict_of_blobs']) def test_delete_internal_blob(self): # Upload data to regular blob self.controller.upload_blob( self.req, 'sample_artifact', self.sample_artifact['id'], 'blob', BytesIO(b'aaa'), 'application/octet-stream') artifact = self.controller.show(self.req, 'sample_artifact', self.sample_artifact['id']) self.assertEqual(3, artifact['blob']['size']) self.assertEqual('active', artifact['blob']['status']) # Deletion of uploaded internal blobs fails with Forbidden self.assertRaises( exc.Forbidden, self.controller.delete_external_blob, self.req, 'sample_artifact', self.sample_artifact['id'], 'blob') def test_delete_internal_blob_dict(self): # Upload data to the blob dict self.controller.upload_blob( self.req, 'sample_artifact', self.sample_artifact['id'], 'dict_of_blobs/blob', BytesIO(b'aaa'), 'application/octet-stream') artifact = self.controller.show(self.req, 'sample_artifact', self.sample_artifact['id']) self.assertEqual(3, artifact['dict_of_blobs']['blob']['size']) self.assertEqual('active', artifact['dict_of_blobs']['blob']['status']) # Deletion of uploaded internal blobs fails with Forbidden self.assertRaises( exc.Forbidden, self.controller.delete_external_blob, self.req, 'sample_artifact', self.sample_artifact['id'], 'dict_of_blobs/blob') def test_delete_blob_wrong(self): # Non-blob field self.assertRaises( exc.BadRequest, self.controller.delete_external_blob, self.req, 'sample_artifact', self.sample_artifact['id'], 'int1') # Non-existing field self.assertRaises( exc.BadRequest, self.controller.delete_external_blob, self.req, 'sample_artifact', self.sample_artifact['id'], 'Nonexisting') # Empty blob self.assertRaises( exc.NotFound, self.controller.delete_external_blob, self.req, 'sample_artifact', self.sample_artifact['id'], 'blob') # No blob in the blob dict self.assertRaises( exc.NotFound, self.controller.delete_external_blob, self.req, 'sample_artifact', self.sample_artifact['id'], 'dict_of_blobs/Nonexisting') glare-0.5.0/glare/tests/unit/api/test_download.py000066400000000000000000000146341317401036700220240ustar00rootroot00000000000000# Copyright 2017 - Nokia Networks # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from six import BytesIO from glare.common import exception as exc from glare.db import artifact_api from glare.tests.unit import base class TestArtifactDownload(base.BaseTestArtifactAPI): def setUp(self): super(TestArtifactDownload, self).setUp() values = {'name': 'ttt', 'version': '1.0', 'string_required': 'str2'} self.sample_artifact = self.controller.create( self.req, 'sample_artifact', values) self.controller.upload_blob( self.req, 'sample_artifact', self.sample_artifact['id'], 'blob', BytesIO(b'aaa'), 'application/octet-stream') artifact = self.controller.show(self.req, 'sample_artifact', self.sample_artifact['id']) self.assertEqual(3, artifact['blob']['size']) self.assertEqual('active', artifact['blob']['status']) def test_download_basic(self): downloaded_blob = self.controller.download_blob( self.req, 'sample_artifact', self.sample_artifact['id'], 'blob') self.assertEqual(b'aaa', downloaded_blob['data'].data) def test_download_from_folders(self): self.controller.upload_blob( self.req, 'sample_artifact', self.sample_artifact['id'], 'dict_of_blobs/folder1', BytesIO(b'bbb'), 'application/octet-stream') downloaded_blob = self.controller.download_blob( self.req, 'sample_artifact', self.sample_artifact['id'], 'dict_of_blobs/folder1') self.assertEqual(b'bbb', downloaded_blob['data'].data) # Negative dict_of_blobs tests: # Key error self.assertRaises(exc.NotFound, self.controller.download_blob, self.req, 'sample_artifact', self.sample_artifact['id'], "dict_of_blobs/ImaginaryFolder") # incorrect dict_of_blobs spelling self.assertRaises(exc.BadRequest, self.controller.download_blob, self.req, 'sample_artifact', self.sample_artifact['id'], "NOT_DICT_FIELD/folder1") def test_download_from_non_existing_fields(self): self.assertRaises(exc.BadRequest, self.controller.download_blob, self.req, 'sample_artifact', self.sample_artifact['id'], "NON_EXISTING_FIELD") def test_download_of_saving_blob(self): self.sample_artifact = self.controller.show( self.req, 'sample_artifact', self.sample_artifact['id']) # Change status of the blob to 'saving' self.sample_artifact['blob']['status'] = 'saving' artifact_api.ArtifactAPI().update_blob( self.req.context, self.sample_artifact['id'], {'blob': self.sample_artifact['blob']}) self.sample_artifact = self.controller.show( self.req, 'sample_artifact', self.sample_artifact['id']) self.assertEqual('saving', self.sample_artifact['blob']['status']) # assert that we can't download while blob in saving status self.assertRaises(exc.Conflict, self.controller.download_blob, self.req, 'sample_artifact', self.sample_artifact['id'], "blob") def test_download_from_deactivated_artifact_as_other_user(self): self.req = self.get_fake_request(user=self.users['admin']) art = self.controller.show(self.req, 'sample_artifact', self.sample_artifact['id']) # change artifact status to deactivted: draft-> activate -> deactivated for status in ['active', 'deactivated']: changes = [{'op': 'replace', 'path': '/status', 'value': status}] self.req = self.get_fake_request(user=self.users['admin']) art = self.update_with_values(changes, art_id=art['id']) # make request from other user (That didn't create the artifact) self.req = self.get_fake_request(user=self.users['user1']) self.assertRaises(exc.Forbidden, self.controller.download_blob, self.req, 'sample_artifact', art['id'], "blob") # Make sure that admin can download from deactivated artifact self.req = self.get_fake_request(user=self.users['admin']) downloaded_blob = self.controller.download_blob( self.req, 'sample_artifact', art['id'], 'blob') self.assertEqual(b'aaa', downloaded_blob['data'].data) def test_download_for_deleted_artifact(self): self.config(delayed_delete=True) self.controller.delete(self.req, 'sample_artifact', self.sample_artifact['id']) self.assertRaises(exc.Forbidden, self.controller.download_blob, self.req, 'sample_artifact', self.sample_artifact['id'], "blob") def test_download_external_blob(self): values = {'name': 'aaa', 'version': '2.0'} url = "http: // FAKE_LOCATION.COM" content_type = 'application/vnd+openstack.glare-custom-location+json' art = self.controller.create(self.req, 'sample_artifact', values) body = {'url': url, 'md5': "fake"} self.controller.upload_blob(self.req, 'sample_artifact', art['id'], 'blob', body, content_type) downloaded_blob = self.controller.download_blob(self.req, 'sample_artifact', art['id'], 'blob') self.assertEqual(url, downloaded_blob['data']['url']) self.assertTrue(downloaded_blob['meta']['external']) self.assertEqual("fake", downloaded_blob['meta']['md5']) self.assertIsNone(downloaded_blob['meta']['sha1']) self.assertIsNone(downloaded_blob['meta']['sha256']) glare-0.5.0/glare/tests/unit/api/test_list.py000066400000000000000000000624141317401036700211670ustar00rootroot00000000000000# Copyright 2017 - Nokia Networks # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from glare.common import exception as exc from glare.tests import sample_artifact from glare.tests.unit import base import random class TestArtifactList(base.BaseTestArtifactAPI): def test_list_simple_fields(self): # Create a bunch of artifacts for list testing values = [ {'name': 'art1', 'version': '0.0.1', 'string_required': 'str1', 'int1': 5, 'float1': 5.0, 'bool1': 'yes'}, {'name': 'art1', 'version': '1-beta', 'string_required': 'str2', 'int1': 6, 'float1': 6.0, 'bool1': 'yes'}, {'name': 'art1', 'version': '1', 'string_required': 'str1', 'int1': 5, 'float1': 5.0, 'bool1': 'no', 'description': 'ggg'}, {'name': 'art1', 'version': '2-rc1', 'string_required': 'str22', 'int1': 7, 'float1': 7.0, 'bool1': 'yes'}, {'name': 'art1', 'version': '10', 'string_required': 'str222', 'int1': 5, 'float1': 5.0, 'bool1': 'yes'}, {'name': 'art2', 'version': '1', 'string_required': 'str1', 'int1': 8, 'float1': 8.0, 'bool1': 'no'}, {'name': 'art3', 'version': '1', 'string_required': 'str1', 'int1': -5, 'float1': -5.0, 'bool1': 'yes'}, ] arts = [self.controller.create(self.req, 'sample_artifact', val) for val in values] # Activate 3rd and 4th artifacts changes = [{'op': 'replace', 'path': '/status', 'value': 'active'}] arts[3] = self.update_with_values(changes, art_id=arts[3]['id']) arts[4] = self.update_with_values(changes, art_id=arts[4]['id']) # Publish 4th artifact changes = [{'op': 'replace', 'path': '/visibility', 'value': 'public'}] self.req = self.get_fake_request(user=self.users['admin']) arts[4] = self.update_with_values(changes, art_id=arts[4]['id']) self.req = self.get_fake_request(user=self.users['user1']) # Do tests basic tests # input format for filters is a list of tuples: # (filter_name, filter_value) # List all artifacts res = self.controller.list(self.req, 'sample_artifact') self.assertEqual(7, len(res['artifacts'])) self.assertEqual('sample_artifact', res['type_name']) # List all artifacts as an anonymous. Only public artifacts are visible anon_req = self.get_fake_request(user=self.users['anonymous']) res = self.controller.list(anon_req, 'sample_artifact') self.assertEqual(1, len(res['artifacts'])) self.assertIn(arts[4], res['artifacts']) # Filter by name filters = [('name', 'art1')] res = self.controller.list(self.req, 'sample_artifact', filters) self.assertEqual(5, len(res['artifacts'])) filters = [('name', 'in:art2,art3')] res = self.controller.list(self.req, 'sample_artifact', filters) self.assertEqual(2, len(res['artifacts'])) for i in (5, 6): self.assertIn(arts[i], res['artifacts']) # Filter by string_required filters = [('string_required', 'str1')] res = self.controller.list(self.req, 'sample_artifact', filters) self.assertEqual(4, len(res['artifacts'])) for i in (0, 2, 5, 6): self.assertIn(arts[i], res['artifacts']) # Filter by int1 filters = [('int1', '5')] res = self.controller.list(self.req, 'sample_artifact', filters) self.assertEqual(3, len(res['artifacts'])) for i in (0, 2, 4): self.assertIn(arts[i], res['artifacts']) filters = [('int1', 'in:5,6')] res = self.controller.list(self.req, 'sample_artifact', filters) self.assertEqual(4, len(res['artifacts'])) for i in (0, 1, 2, 4): self.assertIn(arts[i], res['artifacts']) # Filter by float1 filters = [('float1', '5.0')] res = self.controller.list(self.req, 'sample_artifact', filters) self.assertEqual(3, len(res['artifacts'])) for i in (0, 2, 4): self.assertIn(arts[i], res['artifacts']) # Filter by bool1 filters = [('bool1', 'yes')] res = self.controller.list(self.req, 'sample_artifact', filters) self.assertEqual(5, len(res['artifacts'])) for i in (0, 1, 3, 4, 6): self.assertIn(arts[i], res['artifacts']) # Filter by id filters = [('id', arts[0]['id'])] res = self.controller.list(self.req, 'sample_artifact', filters) self.assertEqual(1, len(res['artifacts'])) self.assertIn(arts[0], res['artifacts']) # Filter by status filters = [('status', 'active')] res = self.controller.list(self.req, 'sample_artifact', filters) self.assertEqual(2, len(res['artifacts'])) for i in (3, 4): self.assertIn(arts[i], res['artifacts']) # Filter by visibility filters = [('visibility', 'public')] res = self.controller.list(self.req, 'sample_artifact', filters) self.assertEqual(1, len(res['artifacts'])) self.assertIn(arts[4], res['artifacts']) # Filter by owner filters = [('owner', arts[0]['owner'])] res = self.controller.list(self.req, 'sample_artifact', filters) self.assertEqual(7, len(res['artifacts'])) for i in range(6): self.assertIn(arts[i], res['artifacts']) # Filter by description leads to BadRequest filters = [('description', 'ggg')] self.assertRaises(exc.BadRequest, self.controller.list, self.req, 'sample_artifact', filters) # Filter by created_at with eq operator leads to BadRequest filters = [('created_at', arts[4]['created_at'])] self.assertRaises(exc.BadRequest, self.controller.list, self.req, 'sample_artifact', filters) # Filter by updated_at with eq operator leads to BadRequest filters = [('updated_at', arts[4]['updated_at'])] self.assertRaises(exc.BadRequest, self.controller.list, self.req, 'sample_artifact', filters) # Filter by activated_at with eq operator leads to BadRequest filters = [('activated_at', arts[4]['activated_at'])] self.assertRaises(exc.BadRequest, self.controller.list, self.req, 'sample_artifact', filters) # Filter by any blob leads to BadRequest filters = [('blob', 'something')] self.assertRaises(exc.BadRequest, self.controller.list, self.req, 'sample_artifact', filters) # Filter by nonexistent field leads to BadRequest filters = [('NONEXISTENT', 'something')] self.assertRaises(exc.BadRequest, self.controller.list, self.req, 'sample_artifact', filters) def test_list_marker_and_limit(self): # Create artifacts art_list = [ self.controller.create( self.req, 'sample_artifact', {'name': 'name%s' % i, 'version': '%d.0' % i, 'tags': ['tag%s' % i], 'int1': 1024 + i, 'float1': 123.456, 'str1': 'bugaga', 'bool1': True}) for i in range(5)] # sort with 'next_marker' sort = [('int1', 'asc'), ('name', 'desc')] result = self.controller.list(self.req, 'sample_artifact', filters=(), limit=1, sort=sort) self.assertEqual([art_list[0]], result['artifacts']) marker = result['next_marker'] result = self.controller.list(self.req, 'sample_artifact', filters=(), marker=marker, limit=1, sort=sort) self.assertEqual([art_list[1]], result['artifacts']) # sort by custom marker sort = [('int1', 'asc')] marker = art_list[1]['id'] result = self.controller.list(self.req, 'sample_artifact', filters=(), marker=marker, sort=sort) self.assertEqual(art_list[2:], result['artifacts']) sort = [('int1', 'desc')] result = self.controller.list(self.req, 'sample_artifact', filters=(), marker=marker, sort=sort) self.assertEqual(art_list[:1], result['artifacts']) sort = [('float1', 'asc'), ('name', 'desc')] result = self.controller.list(self.req, 'sample_artifact', filters=(), marker=marker, sort=sort) self.assertEqual([art_list[0]], result['artifacts']) # paginate by name in desc order with limit 2 sort = [('name', 'desc')] result = self.controller.list(self.req, 'sample_artifact', filters=(), limit=2, sort=sort) self.assertEqual(art_list[4:2:-1], result['artifacts']) marker = result['next_marker'] result = self.controller.list(self.req, 'sample_artifact', filters=(), marker=marker, limit=2, sort=sort) self.assertEqual(art_list[2:0:-1], result['artifacts']) marker = result['next_marker'] result = self.controller.list(self.req, 'sample_artifact', filters=(), marker=marker, limit=2, sort=sort) self.assertEqual([art_list[0]], result['artifacts']) # paginate by version in desc order with limit 2 sort = [('version', 'desc')] result = self.controller.list(self.req, 'sample_artifact', filters=(), limit=2, sort=sort) self.assertEqual(art_list[4:2:-1], result['artifacts']) marker = result['next_marker'] result = self.controller.list(self.req, 'sample_artifact', filters=(), marker=marker, limit=2, sort=sort) self.assertEqual(art_list[2:0:-1], result['artifacts']) marker = result['next_marker'] result = self.controller.list(self.req, 'sample_artifact', filters=(), marker=marker, limit=2, sort=sort) self.assertEqual([art_list[0]], result['artifacts']) def test_list_version(self): values = [ {'name': 'art1', 'version': '0.0.1'}, {'name': 'art1', 'version': '1-beta'}, {'name': 'art1', 'version': '1'}, {'name': 'art1', 'version': '10-rc1'}, {'name': 'art1', 'version': '10'}, {'name': 'art2', 'version': '1'}, {'name': 'art3', 'version': '1'}, ] arts = [self.controller.create(self.req, 'sample_artifact', val) for val in values] # List all artifacts res = self.controller.list(self.req, 'sample_artifact', []) self.assertEqual(7, len(res['artifacts'])) self.assertEqual('sample_artifact', res['type_name']) # Get latest artifacts res = self.controller.list(self.req, 'sample_artifact', [], latest=True) self.assertEqual(3, len(res['artifacts'])) for i in (4, 5, 6): self.assertIn(arts[i], res['artifacts']) # Various version filters filters = [('version', '1')] res = self.controller.list(self.req, 'sample_artifact', filters) self.assertEqual(3, len(res['artifacts'])) for i in (2, 5, 6): self.assertIn(arts[i], res['artifacts']) filters = [('version', '1'), ('name', 'art1')] res = self.controller.list(self.req, 'sample_artifact', filters) self.assertEqual(1, len(res['artifacts'])) self.assertIn(arts[2], res['artifacts']) filters = [('version', 'gt:1')] res = self.controller.list(self.req, 'sample_artifact', filters) self.assertEqual(2, len(res['artifacts'])) for i in (3, 4): self.assertIn(arts[i], res['artifacts']) filters = [('version', 'gte:1')] res = self.controller.list(self.req, 'sample_artifact', filters) self.assertEqual(5, len(res['artifacts'])) for i in (2, 3, 4, 5, 6): self.assertIn(arts[i], res['artifacts']) filters = [('version', 'lte:1')] res = self.controller.list(self.req, 'sample_artifact', filters) self.assertEqual(5, len(res['artifacts'])) for i in (0, 1, 2, 5, 6): self.assertIn(arts[i], res['artifacts']) filters = [('version', 'gt:1-beta'), ('version', 'lt:10')] res = self.controller.list(self.req, 'sample_artifact', filters) self.assertEqual(4, len(res['artifacts'])) for i in (2, 3, 5, 6): self.assertIn(arts[i], res['artifacts']) filters = [('version', 'in:0.0.1,10-rc1')] res = self.controller.list(self.req, 'sample_artifact', filters) self.assertEqual(2, len(res['artifacts'])) for i in (0, 3): self.assertIn(arts[i], res['artifacts']) # Filter by invalid version filters = [('version', 'INVALID_VERSION')] self. assertRaises(exc.BadRequest, self.controller.list, self.req, 'sample_artifact', filters) # Filter by invalid operator filters = [('version', 'INVALID_op:1')] self. assertRaises(exc.BadRequest, self.controller.list, self.req, 'sample_artifact', filters) def test_list_compound_fields(self): # Create a bunch of artifacts for list testing values = [ {'name': 'art1', 'dict_of_str': {'a': 'aa', 'b': 'bb'}, 'dict_of_int': {'one': 1, 'two': 2}, 'list_of_str': ['aa', 'bb'], 'list_of_int': [1, 2]}, {'name': 'art2', 'dict_of_str': {'b': 'bb', 'c': 'cc'}, 'dict_of_int': {'two': 2, 'three': 3}, 'list_of_str': ['bb', 'cc'], 'list_of_int': [2, 3]}, {'name': 'art3', 'dict_of_str': {'a': 'aa', 'c': 'cc'}, 'dict_of_int': {'one': 1, 'three': 3}, 'list_of_str': ['aa', 'cc'], 'list_of_int': [1, 3]}, {'name': 'art4', 'dict_of_str': {'a': 'bb'}, 'dict_of_int': {'one': 2}, 'list_of_str': ['aa'], 'list_of_int': [1]}, {'name': 'art5', 'dict_of_str': {'b': 'bb'}, 'dict_of_int': {'two': 2}, 'list_of_str': ['bb'], 'list_of_int': [2]}, {'name': 'art6', 'dict_of_str': {}, 'dict_of_int': {}, 'list_of_str': [], 'list_of_int': []}, ] arts = [self.controller.create(self.req, 'sample_artifact', val) for val in values] # List all artifacts res = self.controller.list(self.req, 'sample_artifact', []) self.assertEqual(6, len(res['artifacts'])) self.assertEqual('sample_artifact', res['type_name']) # Return artifacts that contain key 'a' in 'dict_of_str' filters = [('dict_of_str', 'eq:a')] res = self.controller.list(self.req, 'sample_artifact', filters) self.assertEqual(3, len(res['artifacts'])) for i in (0, 2, 3): self.assertIn(arts[i], res['artifacts']) # Return artifacts that contain key 'a' or 'c' in 'dict_of_str' filters = [('dict_of_str', 'in:a,c')] res = self.controller.list(self.req, 'sample_artifact', filters) self.assertEqual(4, len(res['artifacts'])) for i in (0, 1, 2, 3): self.assertIn(arts[i], res['artifacts']) # Filter with invalid operator leads to BadRequest filters = [('dict_of_str', 'invalid:a')] self.assertRaises(exc.BadRequest, self.controller.list, self.req, 'sample_artifact', filters) # Return artifacts that contain key one in 'dict_of_int' filters = [('dict_of_int', 'eq:one')] res = self.controller.list(self.req, 'sample_artifact', filters) self.assertEqual(3, len(res['artifacts'])) for i in (0, 2, 3): self.assertIn(arts[i], res['artifacts']) # Return artifacts that contain key one or three in 'dict_of_int' filters = [('dict_of_int', 'in:one,three')] res = self.controller.list(self.req, 'sample_artifact', filters) self.assertEqual(4, len(res['artifacts'])) for i in (0, 1, 2, 3): self.assertIn(arts[i], res['artifacts']) # Filter by dicts values # Return artifacts that contain value 'bb' in 'dict_of_str[b]' filters = [('dict_of_str.b', 'eq:bb')] res = self.controller.list(self.req, 'sample_artifact', filters) self.assertEqual(3, len(res['artifacts'])) for i in (0, 1, 4): self.assertIn(arts[i], res['artifacts']) # Return artifacts that contain values 'aa' or 'bb' in 'dict_of_str[a]' filters = [('dict_of_str.a', 'in:aa,bb')] res = self.controller.list(self.req, 'sample_artifact', filters) self.assertEqual(3, len(res['artifacts'])) for i in (0, 2, 3): self.assertIn(arts[i], res['artifacts']) # Filter with invalid operator leads to BadRequest filters = [('dict_of_str.a', 'invalid:aa')] self.assertRaises(exc.BadRequest, self.controller.list, self.req, 'sample_artifact', filters) # Return artifacts that contain value '2' in 'dict_of_int[two]' filters = [('dict_of_int.two', 'eq:2')] res = self.controller.list(self.req, 'sample_artifact', filters) self.assertEqual(3, len(res['artifacts'])) for i in (0, 1, 4): self.assertIn(arts[i], res['artifacts']) # Return artifacts that contain values '1' or '2' in 'dict_of_int[one]' filters = [('dict_of_int.one', 'in:1,2')] res = self.controller.list(self.req, 'sample_artifact', filters) self.assertEqual(3, len(res['artifacts'])) for i in (0, 2, 3): self.assertIn(arts[i], res['artifacts']) # Filter with invalid operator leads to BadRequest filters = [('dict_of_int.one', 'invalid:1')] self.assertRaises(exc.BadRequest, self.controller.list, self.req, 'sample_artifact', filters) # Filter by nonexistent dict leads to BadRequest filters = [('NOTEXIST.one', 'eq:1')] self.assertRaises(exc.BadRequest, self.controller.list, self.req, 'sample_artifact', filters) # Test with TypeError filters = [('dict_of_int.1', 'lala')] self.assertRaises(exc.BadRequest, self.controller.list, self.req, 'sample_artifact', filters) # Return artifacts that contain key 'aa' in 'list_of_str' filters = [('list_of_str', 'eq:aa')] res = self.controller.list(self.req, 'sample_artifact', filters) self.assertEqual(3, len(res['artifacts'])) for i in (0, 2, 3): self.assertIn(arts[i], res['artifacts']) # Return artifacts that contain key 'aa' or 'cc' in 'list_of_str' filters = [('list_of_str', 'in:aa,cc')] res = self.controller.list(self.req, 'sample_artifact', filters) self.assertEqual(4, len(res['artifacts'])) for i in (0, 1, 2, 3): self.assertIn(arts[i], res['artifacts']) # Filter with invalid operator leads to BadRequest filters = [('list_of_str', 'invalid:aa')] self.assertRaises(exc.BadRequest, self.controller.list, self.req, 'sample_artifact', filters) # Return artifacts that contain key 1 in 'list_of_int' filters = [('list_of_int', 'eq:1')] res = self.controller.list(self.req, 'sample_artifact', filters) self.assertEqual(3, len(res['artifacts'])) for i in (0, 2, 3): self.assertIn(arts[i], res['artifacts']) # Return artifacts that contain key 1 or three in 'list_of_int' filters = [('list_of_int', 'in:1,3')] res = self.controller.list(self.req, 'sample_artifact', filters) self.assertEqual(4, len(res['artifacts'])) for i in (0, 1, 2, 3): self.assertIn(arts[i], res['artifacts']) def test_filter_by_tags(self): values = [ {'name': 'name1', 'tags': ['tag1', 'tag2']}, {'name': 'name2', 'tags': ['tag1', 'tag3']}, {'name': 'name3', 'tags': ['tag1']}, {'name': 'name4', 'tags': ['tag2']}, {'name': 'name5', 'tags': ['tag4']}, {'name': 'name6', 'tags': ['tag4', 'tag5']}, ] arts = [self.controller.create(self.req, 'sample_artifact', val) for val in values] filters = [('tags', 'tag1')] res = self.controller.list(self.req, 'sample_artifact', filters) self.assertEqual(3, len(res['artifacts'])) for i in (0, 1, 2): self.assertIn(arts[i], res['artifacts']) filters = [('tags', 'tag1,tag2')] res = self.controller.list(self.req, 'sample_artifact', filters) self.assertEqual(1, len(res['artifacts'])) self.assertIn(arts[0], res['artifacts']) filters = [('tags', 'NOT_A_TAG')] res = self.controller.list(self.req, 'sample_artifact', filters) self.assertEqual(0, len(res['artifacts'])) filters = [('tags-any', 'tag1')] res = self.controller.list(self.req, 'sample_artifact', filters) self.assertEqual(3, len(res['artifacts'])) for i in (0, 1, 2): self.assertIn(arts[i], res['artifacts']) filters = [('tags-any', 'tag1,NOT_A_TAG')] res = self.controller.list(self.req, 'sample_artifact', filters) self.assertEqual(3, len(res['artifacts'])) for i in (0, 1, 2): self.assertIn(arts[i], res['artifacts']) filters = [('tags-any', 'tag2,tag5')] res = self.controller.list(self.req, 'sample_artifact', filters) self.assertEqual(3, len(res['artifacts'])) for i in (0, 3, 5): self.assertIn(arts[i], res['artifacts']) # Filtering by tags with operators leads to BadRequest for f in ('tags', 'tags-any'): filters = [(f, 'eq:tag1')] self.assertRaises( exc.BadRequest, self.controller.list, self.req, 'sample_artifact', filters) def test_list_and_sort_fields(self): amount = 7 # Create a bunch of artifacts for list sorting tests names = random.sample(["art%d" % i for i in range(amount)], amount) floats = random.sample([0.01 * i for i in range(amount)], amount) ints = random.sample([1 * i for i in range(amount)], amount) strings = random.sample(["str%d" % i for i in range(amount)], amount) versions = random.sample(["0.%d" % i for i in range(amount)], amount) for i in range(amount): val = {'name': names[i], 'float1': floats[i], 'int1': ints[i], 'str1': strings[i], 'version': versions[i]} self.controller.create(self.req, 'sample_artifact', val) fields = ['name', 'id', 'visibility', 'version', 'float1', 'int1', 'str1'] for sort_name in fields: for sort_dir in ['asc', 'desc']: arts = self.controller.list( self.req, 'sample_artifact', [], sort=[(sort_name, sort_dir)])['artifacts'] self.assertEqual(amount, len(arts)) sorted_arts = sorted(arts, key=lambda x: x[sort_name], reverse=sort_dir == 'desc') self.assertEqual(sorted_arts, arts) def test_list_and_sort_negative(self): # sort by non-existent field self.assertRaises(exc.BadRequest, self.controller.list, self.req, 'sample_artifact', [], sort=[("NONEXISTENT", "desc")]) # sort by wrong direction self.assertRaises(exc.BadRequest, self.controller.list, self.req, 'sample_artifact', [], sort=[("name", "WRONG_DIR")]) # For performance sake sorting by more than one custom field # is forbidden. Nevertheless, sorting by several basic field are # absolutely fine. # List of basic fields is located in glare/db/sqlalchemy/api.py as # BASE_ARTIFACT_PROPERTIES tuple. sort = [("int1", "desc"), ("float1", "desc")] self.assertRaises(exc.BadRequest, self.controller.list, self.req, 'sample_artifact', [], sort=sort) # sort with non-sortable fields for name, field in sample_artifact.SampleArtifact.fields.items(): for sort_dir in ['asc', 'desc']: if not field.sortable: self.assertRaises( exc.BadRequest, self.controller.list, self.req, 'sample_artifact', [], sort=[(name, sort_dir)]) glare-0.5.0/glare/tests/unit/api/test_locations.py000066400000000000000000000203761317401036700222100ustar00rootroot00000000000000# Copyright 2017 - Nokia Networks # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import mock from glare.common import exception as exc from glare.db import artifact_api from glare.tests.unit import base class TestLocations(base.BaseTestArtifactAPI): """Test adding custom locations.""" def setUp(self): super(TestLocations, self).setUp() values = {'name': 'ttt', 'version': '1.0'} self.sample_artifact = self.controller.create( self.req, 'sample_artifact', values) self.ct = 'application/vnd+openstack.glare-custom-location+json' def test_add_location(self): with mock.patch( 'glare.common.store_api.save_blob_to_store') as mocked_add: body = {'url': 'https://FAKE_LOCATION.com', 'md5': "fake", 'sha1': "fake_sha", "sha256": "fake_sha256"} self.controller.upload_blob( self.req, 'sample_artifact', self.sample_artifact['id'], 'blob', body, self.ct) art = self.controller.show(self.req, 'sample_artifact', self.sample_artifact['id']) self.assertEqual('active', art['blob']['status']) self.assertEqual('fake', art['blob']['md5']) self.assertEqual('fake_sha', art['blob']['sha1']) self.assertEqual('fake_sha256', art['blob']['sha256']) self.assertIsNone(art['blob']['size']) self.assertIsNone(art['blob']['content_type']) self.assertEqual('https://FAKE_LOCATION.com', art['blob']['url']) self.assertNotIn('id', art['blob']) self.assertEqual(0, mocked_add.call_count) # Adding location for the second time leads to Conflict error body = {'url': 'https://ANOTHER_FAKE_LOCATION.com', 'md5': "fake", 'sha1': "fake_sha", "sha256": "fake_sha256"} self.assertRaises( exc.Conflict, self.controller.upload_blob, self.req, 'sample_artifact', self.sample_artifact['id'], 'blob', body, self.ct) def test_add_dict_location(self): with mock.patch( 'glare.common.store_api.save_blob_to_store') as mocked_add: body = {'url': 'https://FAKE_LOCATION.com', 'md5': "fake", 'sha1': "fake_sha", "sha256": "fake_sha256"} self.controller.upload_blob( self.req, 'sample_artifact', self.sample_artifact['id'], 'dict_of_blobs/blob', body, self.ct) art = self.controller.show(self.req, 'sample_artifact', self.sample_artifact['id']) self.assertEqual('active', art['dict_of_blobs']['blob']['status']) self.assertEqual('fake', art['dict_of_blobs']['blob']['md5']) self.assertEqual('fake_sha', art['dict_of_blobs']['blob']['sha1']) self.assertEqual('fake_sha256', art['dict_of_blobs']['blob']['sha256']) self.assertIsNone(art['dict_of_blobs']['blob']['size']) self.assertIsNone(art['dict_of_blobs']['blob']['content_type']) self.assertEqual('https://FAKE_LOCATION.com', art['dict_of_blobs']['blob']['url']) self.assertNotIn('id', art['blob']) self.assertEqual(0, mocked_add.call_count) # Adding location for the second time leads to Conflict error body = {'url': 'https://ANOTHER_FAKE_LOCATION.com', 'md5': "fake", 'sha1': "fake_sha", "sha256": "fake_sha256"} self.assertRaises( exc.Conflict, self.controller.upload_blob, self.req, 'sample_artifact', self.sample_artifact['id'], 'dict_of_blobs/blob', body, self.ct) def test_add_location_saving_blob(self): body = {'url': 'https://FAKE_LOCATION.com', 'md5': "fake", 'sha1': "fake_sha", "sha256": "fake_sha256"} self.controller.upload_blob( self.req, 'sample_artifact', self.sample_artifact['id'], 'blob', body, self.ct) art = self.controller.show(self.req, 'sample_artifact', self.sample_artifact['id']) # Change status of the blob to 'saving' art['blob']['status'] = 'saving' artifact_api.ArtifactAPI().update_blob( self.req.context, self.sample_artifact['id'], {'blob': art['blob']}) art = self.controller.show( self.req, 'sample_artifact', self.sample_artifact['id']) self.assertEqual('saving', art['blob']['status']) body = {'url': 'https://ANOTHER_FAKE_LOCATION.com', 'md5': "fake", 'sha1': "fake_sha", "sha256": "fake_sha256"} self.assertRaises( exc.Conflict, self.controller.upload_blob, self.req, 'sample_artifact', self.sample_artifact['id'], 'blob', body, self.ct) def test_too_long_location_url(self): body = {'url': 'http://FAKE_LOCATION%s.com' % ('a' * 2049), 'md5': "fake", 'sha1': "fake_sha", "sha256": "fake_sha256"} self.assertRaises( exc.BadRequest, self.controller.upload_blob, self.req, 'sample_artifact', self.sample_artifact['id'], 'blob', body, self.ct) def test_internal_location(self): # allow regular user to set internal locations rule = {"artifact:set_internal_location": "rule:admin_or_owner"} self.policy(rule) art_id = self.sample_artifact['id'] # Setting locations with forbidden schemas fails forbidden_schemes = ('file', 'filesystem', 'swift+config', 'sql') for scheme in forbidden_schemes: body = {'md5': 'fake', 'sha1': 'fake_sha', 'sha256': 'fake_sha256', 'location_type': 'internal', 'url': scheme + '://FAKE_LOCATION.com'} self.assertRaises( exc.Forbidden, self.controller.upload_blob, self.req, 'sample_artifact', art_id, 'blob', body, self.ct) # Setting locations with unknown schemes fail body = {'md5': 'fake', 'sha1': 'fake_sha', 'sha256': 'fake_sha256', 'location_type': 'internal', 'url': 'UNKNOWN://FAKE_LOCATION.com'} self.assertRaises( exc.BadRequest, self.controller.upload_blob, self.req, 'sample_artifact', art_id, 'blob', body, self.ct) with mock.patch( 'glare.common.store_api.save_blob_to_store') as mocked_add: body = {'md5': 'fake', 'sha1': 'fake_sha', 'sha256': 'fake_sha256', 'location_type': 'internal', 'url': 'https://FAKE_LOCATION.com'} self.controller.upload_blob( self.req, 'sample_artifact', art_id, 'blob', body, self.ct) art = self.controller.show(self.req, 'sample_artifact', art_id) self.assertFalse(art['blob']['external']) self.assertEqual('active', art['blob']['status']) self.assertEqual('fake', art['blob']['md5']) self.assertEqual('fake_sha', art['blob']['sha1']) self.assertEqual('fake_sha256', art['blob']['sha256']) self.assertIsNone(art['blob']['size']) self.assertIsNone(art['blob']['content_type']) self.assertEqual('/artifacts/sample_artifact/%s/blob' % art_id, art['blob']['url']) self.assertNotIn('id', art['blob']) self.assertEqual(0, mocked_add.call_count) # deletion of artifact leads to the deletion of data under the internal # location with mock.patch('glare.common.store_api.delete_blob') as mocked_del: self.controller.delete(self.req, 'sample_artifact', art_id) self.assertEqual(1, mocked_del.call_count) glare-0.5.0/glare/tests/unit/api/test_update.py000066400000000000000000000715341317401036700215010ustar00rootroot00000000000000# Copyright 2017 - Nokia Networks # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from six import BytesIO from uuid import uuid4 from glare.common import exception as exc from glare.db import artifact_api from glare.tests.unit import base class TestArtifactUpdate(base.BaseTestArtifactAPI): """Test Glare artifact updates.""" def setUp(self): super(TestArtifactUpdate, self).setUp() values = {'name': 'ttt', 'version': '1.0'} self.sample_artifact = self.controller.create( self.req, 'sample_artifact', values) def test_basic_update(self): changes = [ {'op': 'replace', 'path': '/name', 'value': 'new_name'}, {'op': 'replace', 'path': '/version', 'value': '1.0.0'}, {'op': 'replace', 'path': '/description', 'value': 'Test'}, {'op': 'replace', 'path': '/tags', 'value': ['tag1', 'tag2']}, {'op': 'replace', 'path': '/metadata', 'value': {'k': 'v'}}, ] res = self.update_with_values(changes) self.assertEqual('new_name', res['name']) self.assertEqual('1.0.0', res['version']) self.assertEqual('Test', res['description']) self.assertEqual({'tag1', 'tag2'}, set(res['tags'])) self.assertEqual({'k': 'v'}, res['metadata']) def test_update_replace_values(self): changes = [ {'op': 'replace', 'path': '/int1', 'value': 1}, {'op': 'replace', 'path': '/float1', 'value': 1.0}, {'op': 'replace', 'path': '/str1', 'value': 'Test'}, {'op': 'replace', 'path': '/list_of_int', 'value': [0, 1]}, {'op': 'replace', 'path': '/dict_of_str', 'value': {'k': 'v'}}, ] res = self.update_with_values(changes) self.assertEqual(1, res['int1']) self.assertEqual(1.0, res['float1']) self.assertEqual('Test', res['str1']) self.assertEqual([0, 1], res['list_of_int']) self.assertEqual({'k': 'v'}, res['dict_of_str']) changes = [ {'op': 'replace', 'path': '/int1', 'value': 2}, {'op': 'replace', 'path': '/float1', 'value': 2.0}, {'op': 'replace', 'path': '/str1', 'value': 'New_Test'}, {'op': 'replace', 'path': '/list_of_int/1', 'value': 4}, {'op': 'replace', 'path': '/dict_of_str/k', 'value': 'new_val'}, ] res = self.update_with_values(changes) self.assertEqual(2, res['int1']) self.assertEqual(2.0, res['float1']) self.assertEqual('New_Test', res['str1']) self.assertEqual([0, 4], res['list_of_int']) self.assertEqual({'k': 'new_val'}, res['dict_of_str']) def test_update_no_artifact_type(self): changes = [{'op': 'replace', 'path': '/name', 'value': 'new_name'}] self.update_with_values( changes, exc_class=exc.NotFound, art_type='wrong_type') def test_update_name_version(self): # Create additional artifacts values = {'name': 'ttt', 'version': '2.0'} self.controller.create(self.req, 'sample_artifact', values) values = {'name': 'ddd', 'version': '1.0'} self.controller.create(self.req, 'sample_artifact', values) # This name/version is already taken changes = [{'op': 'replace', 'path': '/version', 'value': '2.0'}] self.assertRaises(exc.Conflict, self.update_with_values, changes) changes = [{'op': 'replace', 'path': '/name', 'value': 'ddd'}] self.assertRaises(exc.Conflict, self.update_with_values, changes) # Test coercing # name changes = [{'op': 'replace', 'path': '/name', 'value': True}] res = self.update_with_values(changes) self.assertEqual('True', res['name']) changes = [{'op': 'replace', 'path': '/name', 'value': 1.0}] res = self.update_with_values(changes) self.assertEqual('1.0', res['name']) changes = [{'op': 'replace', 'path': '/name', 'value': "tt:t"}] res = self.update_with_values(changes) self.assertEqual('tt:t', res['name']) # version changes = [{'op': 'replace', 'path': '/version', 'value': 2.0}] res = self.update_with_values(changes) self.assertEqual('2.0.0', res['version']) changes = [{'op': 'replace', 'path': '/version', 'value': '1-alpha'}] res = self.update_with_values(changes) self.assertEqual('1.0.0-alpha', res['version']) changes = [{'op': 'replace', 'path': '/version', 'value': '1:0'}] res = self.update_with_values(changes) self.assertEqual('1.0.0-0', res['version']) def test_update_deleted_artifact(self): # Enable delayed delete self.config(delayed_delete=True) # Delete artifact and check its status self.controller.delete(self.req, 'sample_artifact', self.sample_artifact['id']) art = self.controller.show(self.req, 'sample_artifact', self.sample_artifact['id']) self.assertEqual('deleted', art['status']) changes = [{'op': 'replace', 'path': '/int1', 'value': 1}] self.assertRaises(exc.Forbidden, self.update_with_values, changes) changes = [{'op': 'replace', 'path': '/name', 'value': 'new'}] self.assertRaises(exc.Forbidden, self.update_with_values, changes) def test_update_lists(self): changes = [{'op': 'replace', 'path': '/list_of_str', 'value': ['val1', 'val2']}] res = self.update_with_values(changes) self.assertEqual({'val1', 'val2'}, set(res['list_of_str'])) changes = [{'op': 'remove', 'path': '/list_of_str/0'}] res = self.update_with_values(changes) self.assertEqual(['val2'], res['list_of_str']) changes = [{'op': 'replace', 'path': '/list_of_str', 'value': None}] res = self.update_with_values(changes) self.assertEqual([], res['list_of_str']) changes = [{'op': 'add', 'path': '/list_of_str/-', 'value': 'val1'}] res = self.update_with_values(changes) self.assertEqual(['val1'], res['list_of_str']) changes = [{'op': 'replace', 'path': '/list_of_str/0', 'value': 'val2'}] res = self.update_with_values(changes) self.assertEqual(['val2'], res['list_of_str']) changes = [{'op': 'replace', 'path': '/list_of_str', 'value': []}] res = self.update_with_values(changes) self.assertEqual([], res['list_of_str']) changes = [{'op': 'replace', 'path': '/list_of_str', 'value': {}}] self.update_with_values(changes, exc_class=exc.BadRequest) changes = [{'op': 'replace', 'path': '/list_of_str', 'value': {'a': 'b'}}] self.update_with_values(changes, exc_class=exc.BadRequest) changes = [{'op': 'replace', 'path': '/list_of_str', 'value': [['a']]}] self.update_with_values(changes, exc_class=exc.BadRequest) changes = [{'op': 'remove', 'path': '/list_of_str/-', 'value': 'val3'}] self.update_with_values(changes, exc_class=exc.BadRequest) def test_update_dicts(self): changes = [{'op': 'replace', 'path': '/dict_of_str', 'value': {'k1': 'v1', 'k2': 'v2'}}] res = self.update_with_values(changes) self.assertEqual({'k1': 'v1', 'k2': 'v2'}, res['dict_of_str']) changes = [{'op': 'remove', 'path': '/dict_of_str/k1'}] res = self.update_with_values(changes) self.assertEqual({'k2': 'v2'}, res['dict_of_str']) changes = [{'op': 'replace', 'path': '/dict_of_str', 'value': None}] res = self.update_with_values(changes) self.assertEqual({}, res['dict_of_str']) changes = [{'op': 'add', 'path': '/dict_of_str/k1', 'value': 'v1'}] res = self.update_with_values(changes) self.assertEqual({'k1': 'v1'}, res['dict_of_str']) changes = [{'op': 'replace', 'path': '/dict_of_str/k1', 'value': 'v2'}] res = self.update_with_values(changes) self.assertEqual({'k1': 'v2'}, res['dict_of_str']) changes = [{'op': 'replace', 'path': '/dict_of_str', 'value': {}}] res = self.update_with_values(changes) self.assertEqual({}, res['dict_of_str']) changes = [{'op': 'replace', 'path': '/dict_of_str', 'value': []}] self.update_with_values(changes, exc_class=exc.BadRequest) changes = [{'op': 'replace', 'path': '/dict_of_str', 'value': ['a']}] self.update_with_values(changes, exc_class=exc.BadRequest) changes = [{'op': 'replace', 'path': '/dict_of_str/k10', 'value': {'k100': 'v100'}}] self.update_with_values(changes, exc_class=exc.BadRequest) def test_update_artifact_wrong_parameters(self): changes = [{'op': 'replace', 'path': '/name', 'value': ''}] self.update_with_values(changes, exc_class=exc.BadRequest) changes = [{'op': 'replace', 'path': '/name', 'value': 'a' * 256}] self.update_with_values(changes, exc_class=exc.BadRequest) changes = [{'op': 'replace', 'path': '/version', 'value': ''}] self.update_with_values(changes, exc_class=exc.BadRequest) changes = [{'op': 'replace', 'path': '/version', 'value': 'invalid'}] self.update_with_values(changes, exc_class=exc.BadRequest) changes = [{'op': 'replace', 'path': '/version', 'value': -1}] self.update_with_values(changes, exc_class=exc.BadRequest) changes = [{'op': 'replace', 'path': '/description', 'value': 'a' * 4097}] self.update_with_values(changes, exc_class=exc.BadRequest) changes = [{'op': 'replace', 'path': '/tags', 'value': ['a' * 256]}] self.update_with_values(changes, exc_class=exc.BadRequest) changes = [{'op': 'replace', 'path': '/tags', 'value': ['']}] self.update_with_values(changes, exc_class=exc.BadRequest) changes = [{'op': 'replace', 'path': '/tags', 'value': ['a/a']}] self.update_with_values(changes, exc_class=exc.BadRequest) changes = [{'op': 'replace', 'path': '/tags', 'value': ['a,a']}] self.update_with_values(changes, exc_class=exc.BadRequest) changes = [{'op': 'replace', 'path': '/tags', 'value': [str(i) for i in range(256)]}] self.update_with_values(changes, exc_class=exc.BadRequest) changes = [{'op': 'replace', 'path': '/metadata', 'value': {'key': 'a' * 256}}] self.update_with_values(changes, exc_class=exc.BadRequest) changes = [{'op': 'replace', 'path': '/metadata', 'value': {'': 'a'}}] self.update_with_values(changes, exc_class=exc.BadRequest) changes = [{'op': 'replace', 'path': '/metadata', 'value': {'a' * 256: 'a'}}] self.update_with_values(changes, exc_class=exc.BadRequest) changes = [{'op': 'replace', 'path': '/metadata', 'value': {('a' + str(i)): 'a' for i in range(256)}}] self.update_with_values(changes, exc_class=exc.BadRequest) changes = [{'op': 'replace', 'path': '/int1', 'value': 'aaa'}] self.update_with_values(changes, exc_class=exc.BadRequest) changes = [{'op': 'replace', 'path': '/float1', 'value': 'aaa'}] self.update_with_values(changes, exc_class=exc.BadRequest) def test_update_artifact_not_existing_field(self): changes = [{'op': 'replace', 'path': '/wrong_field', 'value': 'a'}] self.update_with_values(changes, exc_class=exc.BadRequest) changes = [{'op': 'replace', 'path': '/', 'value': 'a'}] self.update_with_values(changes, exc_class=exc.BadRequest) changes = [{'op': 'add', 'path': '/wrong_field', 'value': 'a'}] self.update_with_values(changes, exc_class=exc.BadRequest) changes = [{'op': 'add', 'path': '/', 'value': 'a'}] self.update_with_values(changes, exc_class=exc.BadRequest) def test_update_artifact_remove_field(self): changes = [{'op': 'remove', 'path': '/name'}] self.update_with_values(changes, exc_class=exc.BadRequest) changes = [{'op': 'remove', 'path': '/list_of_int/10'}] self.update_with_values(changes, exc_class=exc.BadRequest) changes = [{'op': 'remove', 'path': '/status'}] self.update_with_values(changes, exc_class=exc.BadRequest) changes = [ {'op': 'add', 'path': '/list_of_int/-', 'value': 4}, {'op': 'add', 'path': '/dict_of_str/k', 'value': 'new_val'}, ] self.update_with_values(changes) changes = [{'op': 'remove', 'path': '/list_of_int/0'}] res = self.update_with_values(changes) self.assertEqual([], res['list_of_int']) changes = [{'op': 'remove', 'path': '/dict_of_str/k'}] res = self.update_with_values(changes) self.assertEqual({}, res['dict_of_str']) def test_update_artifact_blob(self): changes = [{'op': 'replace', 'path': '/blob', 'value': 'a'}] self.update_with_values(changes, exc_class=exc.BadRequest) def test_update_artifact_system_fields(self): changes = [{'op': 'replace', 'path': '/id', 'value': '5fdeba9a-ba12-4147-bb8a-a8daada84222'}] self.update_with_values(changes, exc_class=exc.Forbidden) changes = [{'op': 'replace', 'path': '/created_at', 'value': '2000-01-01'}] self.update_with_values(changes, exc_class=exc.Forbidden) changes = [{'op': 'replace', 'path': '/updated_at', 'value': '2000-01-01'}] self.update_with_values(changes, exc_class=exc.Forbidden) changes = [{'op': 'replace', 'path': '/activated_at', 'value': '2000-01-01'}] self.update_with_values(changes, exc_class=exc.Forbidden) changes = [{'op': 'replace', 'path': '/owner', 'value': 'new_owner'}] self.update_with_values(changes, exc_class=exc.Forbidden) changes = [{'op': 'replace', 'path': '/system_attribute', 'value': 'some_value'}] self.update_with_values(changes, exc_class=exc.Forbidden) def test_update_artifact_visibility(self): self.req = self.get_fake_request(user=self.users['admin']) changes = [{'op': 'replace', 'path': '/visibility', 'value': 'wrong_value'}] self.update_with_values(changes, exc_class=exc.BadRequest) changes = [{'op': 'replace', 'path': '/visibility', 'value': 'public'}] self.update_with_values(changes, exc_class=exc.Forbidden) changes = [{'op': 'replace', 'path': '/visibility', 'value': None}] self.update_with_values(changes, exc_class=exc.BadRequest) changes = [{'op': 'replace', 'path': '/string_required', 'value': 'some_string'}, {'op': 'replace', 'path': '/status', 'value': 'active'}] res = self.update_with_values(changes) self.assertEqual('active', res['status']) self.assertEqual('some_string', res['string_required']) changes = [{'op': 'replace', 'path': '/visibility', 'value': 'public'}] res = self.update_with_values(changes) self.assertEqual('public', res['visibility']) changes = [{'op': 'replace', 'path': '/visibility', 'value': 'public'}] res = self.update_with_values(changes) self.assertEqual('public', res['visibility']) changes = [{'op': 'replace', 'path': '/visibility', 'value': 'private'}] self.update_with_values(changes, exc_class=exc.Forbidden) def test_update_artifact_status(self): self.req = self.get_fake_request(user=self.users['admin']) changes = [{'op': 'replace', 'path': '/status', 'value': 'wrong_value'}] self.update_with_values(changes, exc_class=exc.BadRequest) # It's forbidden to activate artifact until required_on_activate field # 'string_required' is set changes = [{'op': 'replace', 'path': '/status', 'value': 'active'}] self.update_with_values(changes, exc_class=exc.Forbidden) changes = [{'op': 'replace', 'path': '/status', 'value': None}] self.update_with_values(changes, exc_class=exc.BadRequest) # It's forbidden to deactivate drafted artifact changes = [{'op': 'replace', 'path': '/status', 'value': 'deactivated'}] self.update_with_values(changes, exc_class=exc.Forbidden) changes = [{'op': 'replace', 'path': '/string_required', 'value': 'some_string'}] res = self.update_with_values(changes) self.assertEqual('some_string', res['string_required']) # It's impossible to activate the artifact when it has 'saving' blobs self.controller.upload_blob( self.req, 'sample_artifact', self.sample_artifact['id'], 'blob', BytesIO(b'aaa'), 'application/octet-stream') self.sample_artifact = self.controller.show( self.req, 'sample_artifact', self.sample_artifact['id']) # Change status of the blob to 'saving' self.sample_artifact['blob']['status'] = 'saving' artifact_api.ArtifactAPI().update_blob( self.req.context, self.sample_artifact['id'], {'blob': self.sample_artifact['blob']}) self.sample_artifact = self.controller.show( self.req, 'sample_artifact', self.sample_artifact['id']) self.assertEqual('saving', self.sample_artifact['blob']['status']) # Now activating of the artifact leads to Conflict changes = [{'op': 'replace', 'path': '/status', 'value': 'active'}] self.assertRaises(exc.Conflict, self.update_with_values, changes) # Reverting status of the blob to active again self.sample_artifact['blob']['status'] = 'active' artifact_api.ArtifactAPI().update_blob( self.req.context, self.sample_artifact['id'], {'blob': self.sample_artifact['blob']}) self.sample_artifact = self.controller.show( self.req, 'sample_artifact', self.sample_artifact['id']) self.assertEqual('active', self.sample_artifact['blob']['status']) # It's possible to change artifact status with other fields in # one request changes = [ {'op': 'replace', 'path': '/name', 'value': 'new_name'}, {'op': 'replace', 'path': '/status', 'value': 'active'} ] self.sample_artifact = self.update_with_values(changes) self.assertEqual('new_name', self.sample_artifact['name']) self.assertEqual('active', self.sample_artifact['status']) changes = [{'op': 'replace', 'path': '/status', 'value': 'active'}] res = self.update_with_values(changes) self.assertEqual('active', res['status']) # It's possible to change artifact status with other fields in # one request changes = [ {'op': 'replace', 'path': '/string_mutable', 'value': 'str'}, {'op': 'replace', 'path': '/status', 'value': 'deactivated'} ] self.sample_artifact = self.update_with_values(changes) self.assertEqual('str', self.sample_artifact['string_mutable']) self.assertEqual('deactivated', self.sample_artifact['status']) changes = [{'op': 'replace', 'path': '/status', 'value': 'deactivated'}] res = self.update_with_values(changes) self.assertEqual('deactivated', res['status']) # It's possible to change artifact status with other fields in # one request changes = [ {'op': 'replace', 'path': '/status', 'value': 'active'}, {'op': 'replace', 'path': '/description', 'value': 'test'}, ] self.sample_artifact = self.update_with_values(changes) self.assertEqual('test', self.sample_artifact['description']) self.assertEqual('active', self.sample_artifact['status']) changes = [{'op': 'replace', 'path': '/status', 'value': 'active'}] res = self.update_with_values(changes) self.assertEqual('active', res['status']) changes = [{'op': 'replace', 'path': '/status', 'value': None}] self.update_with_values(changes, exc_class=exc.BadRequest) # Enable delayed delete self.config(delayed_delete=True) # Delete artifact and check its status self.controller.delete(self.req, 'sample_artifact', self.sample_artifact['id']) art = self.controller.show(self.req, 'sample_artifact', self.sample_artifact['id']) self.assertEqual('deleted', art['status']) changes = [{'op': 'replace', 'path': '/status', 'value': 'active'}] self.assertRaises(exc.Forbidden, self.update_with_values, changes) def test_update_artifact_mutable_fields(self): changes = [{'op': 'replace', 'path': '/string_required', 'value': 'some_string'}] res = self.update_with_values(changes) self.assertEqual('some_string', res['string_required']) changes = [{'op': 'replace', 'path': '/status', 'value': 'active'}] res = self.update_with_values(changes) self.assertEqual('active', res['status']) changes = [{'op': 'replace', 'path': '/name', 'value': 'new_name'}] self.update_with_values(changes, exc_class=exc.Forbidden) changes = [{'op': 'replace', 'path': '/metadata', 'value': {'k': 'v'}}] self.update_with_values(changes, exc_class=exc.Forbidden) changes = [{'op': 'add', 'path': '/metadata/k', 'value': 'v'}] self.update_with_values(changes, exc_class=exc.Forbidden) changes = [{'op': 'replace', 'path': '/tags', 'value': ['a']}] res = self.update_with_values(changes) self.assertEqual(['a'], res['tags']) changes = [{'op': 'add', 'path': '/tags/-', 'value': 'b'}] res = self.update_with_values(changes) self.assertEqual({'a', 'b'}, set(res['tags'])) changes = [{'op': 'replace', 'path': '/description', 'value': 'Test'}] res = self.update_with_values(changes) self.assertEqual('Test', res['description']) changes = [{'op': 'replace', 'path': '/string_mutable', 'value': 'some_value'}] res = self.update_with_values(changes) self.assertEqual('some_value', res['string_mutable']) def test_update_artifact_unicode(self): name = u'\u0442\u0435\u0441\u0442' description = u'\u041E\u043F\u0438\u0441\u0430\u043D\u0438\u0435' tags = [u'\u041C\u0435\u0442\u043A\u0430'] metadata = {'key': u'\u0417\u043D\u0430\u0447\u0435\u043D\u0438\u0435'} changes = [ {'op': 'replace', 'path': '/name', 'value': name}, {'op': 'replace', 'path': '/version', 'value': '1.0.0'}, {'op': 'replace', 'path': '/description', 'value': description}, {'op': 'replace', 'path': '/tags', 'value': tags}, {'op': 'replace', 'path': '/metadata', 'value': metadata}, ] res = self.update_with_values(changes) self.assertEqual(name, res['name']) self.assertEqual('1.0.0', res['version']) self.assertEqual(self.users['user1']['tenant_id'], res['owner']) self.assertEqual('drafted', res['status']) self.assertEqual('private', res['visibility']) self.assertEqual(description, res['description']) self.assertEqual(metadata, res['metadata']) self.assertEqual(tags, res['tags']) def test_update_artifact_4_byte_unicode(self): bad_name = u'A name with forbidden symbol \U0001f62a' changes = [ {'op': 'replace', 'path': '/name', 'value': bad_name} ] self.assertRaises(exc.BadRequest, self.update_with_values, changes) class TestLinks(base.BaseTestArtifactAPI): """Test Glare artifact link management.""" def setUp(self): super(TestLinks, self).setUp() values = {'name': 'ttt', 'version': '1.0'} self.sample_artifact = self.controller.create( self.req, 'sample_artifact', values) values = {'name': 'sss', 'version': '1.0'} self.dependency = self.controller.create( self.req, 'sample_artifact', values) def test_manage_links(self): dep_url = "/artifacts/sample_artifact/%s" % self.dependency['id'] # set valid link patch = [{"op": "replace", "path": "/link1", "value": dep_url}] res = self.update_with_values(patch) self.assertEqual(res['link1'], dep_url) # remove link from artifact patch = [{"op": "replace", "path": "/link1", "value": None}] res = self.update_with_values(patch) self.assertIsNone(res['link1']) # set invalid external link dep_url = "http://example.com/artifacts/" \ "sample_artifact/%s" % self.dependency['id'] patch = [{"op": "replace", "path": "/link1", "value": dep_url}] self.assertRaises(exc.BadRequest, self.update_with_values, patch) # try to set invalid link patch = [{"op": "replace", "path": "/link1", "value": "Invalid"}] self.assertRaises(exc.BadRequest, self.update_with_values, patch) # try to set link to non-existing artifact non_exiting_url = "/artifacts/sample_artifact/%s" % uuid4() patch = [{"op": "replace", "path": "/link1", "value": non_exiting_url}] self.assertRaises(exc.BadRequest, self.update_with_values, patch) def test_manage_dict_of_links(self): dep_url = "/artifacts/sample_artifact/%s" % self.dependency['id'] # set valid link patch = [{"op": "add", "path": "/dict_of_links/link1", "value": dep_url}] res = self.update_with_values(patch) self.assertEqual(res['dict_of_links']['link1'], dep_url) # remove link from artifact patch = [{"op": "remove", "path": "/dict_of_links/link1"}] res = self.update_with_values(patch) self.assertNotIn('link1', res['dict_of_links']) # set invalid external link dep_url = "http://example.com/artifacts/" \ "sample_artifact/%s" % self.dependency['id'] patch = [{"op": "replace", "path": "/dict_of_links/link1", "value": dep_url}] self.assertRaises(exc.BadRequest, self.update_with_values, patch) # try to set invalid link patch = [{"op": "replace", "path": "/dict_of_links/link1", "value": "Invalid"}] self.assertRaises(exc.BadRequest, self.update_with_values, patch) # try to set link to non-existing artifact non_exiting_url = "/artifacts/sample_artifact/%s" % uuid4() patch = [{"op": "replace", "path": "/dict_of_links/link1", "value": non_exiting_url}] self.assertRaises(exc.BadRequest, self.update_with_values, patch) def test_manage_list_of_links(self): dep_url = "/artifacts/sample_artifact/%s" % self.dependency['id'] # set valid link patch = [{"op": "add", "path": "/list_of_links/-", "value": dep_url}] res = self.update_with_values(patch) self.assertEqual(res['list_of_links'][0], dep_url) # remove link from artifact patch = [{"op": "remove", "path": "/list_of_links/0"}] res = self.update_with_values(patch) self.assertEqual(0, len(res['list_of_links'])) # set invalid external link dep_url = "http://example.com/artifacts/" \ "sample_artifact/%s" % self.dependency['id'] patch = [{"op": "replace", "path": "/list_of_links/-", "value": dep_url}] self.assertRaises(exc.BadRequest, self.update_with_values, patch) # try to set invalid link patch = [{"op": "add", "path": "/list_of_links/-", "value": "Invalid"}] self.assertRaises(exc.BadRequest, self.update_with_values, patch) # try to set link to non-existing artifact non_exiting_url = "/artifacts/sample_artifact/%s" % uuid4() patch = [{"op": "add", "path": "/list_of_links/-", "value": non_exiting_url}] self.assertRaises(exc.BadRequest, self.update_with_values, patch) glare-0.5.0/glare/tests/unit/api/test_upload.py000066400000000000000000000403041317401036700214720ustar00rootroot00000000000000# Copyright 2017 - Nokia Networks # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from glance_store import exceptions as store_exc import mock from six import BytesIO from glare.common import exception as exc from glare.common import store_api from glare.db import artifact_api from glare.tests import sample_artifact from glare.tests.unit import base class TestArtifactUpload(base.BaseTestArtifactAPI): """Test blob uploading.""" def setUp(self): super(TestArtifactUpload, self).setUp() values = {'name': 'ttt', 'version': '1.0'} self.sample_artifact = self.controller.create( self.req, 'sample_artifact', values) def test_upload_basic(self): self.controller.upload_blob( self.req, 'sample_artifact', self.sample_artifact['id'], 'blob', BytesIO(b'aaa'), 'application/octet-stream') artifact = self.controller.show(self.req, 'sample_artifact', self.sample_artifact['id']) self.assertEqual(3, artifact['blob']['size']) self.assertEqual('active', artifact['blob']['status']) def test_blob_size_too_big(self): # small blob size is limited by 10 bytes self.assertRaises( exc.RequestEntityTooLarge, self.controller.upload_blob, self.req, 'sample_artifact', self.sample_artifact['id'], 'small_blob', BytesIO(b'a' * 11), 'application/octet-stream') def test_already_uploaded(self): self.controller.upload_blob( self.req, 'sample_artifact', self.sample_artifact['id'], 'blob', BytesIO(b'aaa'), 'application/octet-stream') artifact = self.controller.show(self.req, 'sample_artifact', self.sample_artifact['id']) self.assertEqual(3, artifact['blob']['size']) self.assertEqual('active', artifact['blob']['status']) # Re-uploading blob leads to Conflict error self.assertRaises( exc.Conflict, self.controller.upload_blob, self.req, 'sample_artifact', self.sample_artifact['id'], 'blob', BytesIO(b'aaa'), 'application/octet-stream') def test_upload_saving_blob(self): self.controller.upload_blob( self.req, 'sample_artifact', self.sample_artifact['id'], 'blob', BytesIO(b'aaa'), 'application/octet-stream') self.sample_artifact = self.controller.show( self.req, 'sample_artifact', self.sample_artifact['id']) # Change status of the blob to 'saving' self.sample_artifact['blob']['status'] = 'saving' artifact_api.ArtifactAPI().update_blob( self.req.context, self.sample_artifact['id'], {'blob': self.sample_artifact['blob']}) self.sample_artifact = self.controller.show( self.req, 'sample_artifact', self.sample_artifact['id']) self.assertEqual('saving', self.sample_artifact['blob']['status']) # Uploading new blob leads to Conflict error self.assertRaises( exc.Conflict, self.controller.upload_blob, self.req, 'sample_artifact', self.sample_artifact['id'], 'blob', BytesIO(b'aaa'), 'application/octet-stream') def test_storage_error(self): self.config(default_store='filesystem', group='artifact_type:sample_artifact') with mock.patch('glance_store.backend.add_to_backend', side_effect=store_exc.GlanceStoreException): self.assertRaises( exc.GlareException, self.controller.upload_blob, self.req, 'sample_artifact', self.sample_artifact['id'], 'blob', BytesIO(b'aaa'), 'application/octet-stream') artifact = self.controller.show(self.req, 'sample_artifact', self.sample_artifact['id']) self.assertIsNone(artifact['blob']) def test_upload_blob_dict(self): self.controller.upload_blob( self.req, 'sample_artifact', self.sample_artifact['id'], 'dict_of_blobs/blb1', BytesIO(b'aaa'), 'application/octet-stream') artifact = self.controller.show(self.req, 'sample_artifact', self.sample_artifact['id']) self.assertEqual(3, artifact['dict_of_blobs']['blb1']['size']) self.assertEqual('active', artifact['dict_of_blobs']['blb1']['status']) # upload another one self.controller.upload_blob( self.req, 'sample_artifact', self.sample_artifact['id'], 'dict_of_blobs/blb2', BytesIO(b'aaa'), 'application/octet-stream') artifact = self.controller.show(self.req, 'sample_artifact', self.sample_artifact['id']) self.assertEqual(3, artifact['dict_of_blobs']['blb2']['size']) self.assertEqual('active', artifact['dict_of_blobs']['blb2']['status']) def test_upload_oversized_blob_dict(self): # dict_of_blobs has a limit in 2000 bytes in it # external location shouldn't affect folder size ct = 'application/vnd+openstack.glare-custom-location+json' body = {'url': 'https://FAKE_LOCATION.com', 'md5': "fake", 'sha1': "fake_sha", "sha256": "fake_sha256"} artifact = self.controller.upload_blob( self.req, 'sample_artifact', self.sample_artifact['id'], 'dict_of_blobs/external', body, ct) self.assertIsNone(artifact['dict_of_blobs']['external']['size']) self.assertEqual('active', artifact['dict_of_blobs']['external']['status']) self.controller.upload_blob( self.req, 'sample_artifact', self.sample_artifact['id'], 'dict_of_blobs/a', BytesIO(1800 * b'a'), 'application/octet-stream') artifact = self.controller.show(self.req, 'sample_artifact', self.sample_artifact['id']) self.assertEqual(1800, artifact['dict_of_blobs']['a']['size']) self.assertEqual('active', artifact['dict_of_blobs']['a']['status']) # upload another one self.controller.upload_blob( self.req, 'sample_artifact', self.sample_artifact['id'], 'dict_of_blobs/b', BytesIO(199 * b'b'), 'application/octet-stream') artifact = self.controller.show(self.req, 'sample_artifact', self.sample_artifact['id']) self.assertEqual(199, artifact['dict_of_blobs']['b']['size']) self.assertEqual('active', artifact['dict_of_blobs']['b']['status']) # upload to have size of 2000 bytes exactly self.controller.upload_blob( self.req, 'sample_artifact', self.sample_artifact['id'], 'dict_of_blobs/c', BytesIO(b'c'), 'application/octet-stream') artifact = self.controller.show(self.req, 'sample_artifact', self.sample_artifact['id']) self.assertEqual(1, artifact['dict_of_blobs']['c']['size']) self.assertEqual('active', artifact['dict_of_blobs']['c']['status']) # Upload to have more than max folder limit, more than 2000 self.assertRaises( exc.RequestEntityTooLarge, self.controller.upload_blob, self.req, 'sample_artifact', self.sample_artifact['id'], 'dict_of_blobs/d', BytesIO(b'd'), 'application/octet-stream') def test_upload_with_content_length(self): # dict_of_blobs has a limit in 2000 bytes in it # external location shouldn't affect folder size ct = 'application/vnd+openstack.glare-custom-location+json' body = {'url': 'https://FAKE_LOCATION.com', 'md5': "fake", 'sha1': "fake_sha", "sha256": "fake_sha256"} artifact = self.controller.upload_blob( self.req, 'sample_artifact', self.sample_artifact['id'], 'dict_of_blobs/external', body, ct) self.assertIsNone(artifact['dict_of_blobs']['external']['size']) self.assertEqual('active', artifact['dict_of_blobs']['external']['status']) # Error if we provide a content length bigger than max folder size with mock.patch('glare.common.store_api.save_blob_to_store') as m: self.assertRaises( exc.RequestEntityTooLarge, self.controller.upload_blob, self.req, 'sample_artifact', self.sample_artifact['id'], 'dict_of_blobs/d', BytesIO(b'd' * 2001), 'application/octet-stream', content_length=2001) # Check that upload hasn't started self.assertEqual(0, m.call_count) # Try to cheat and provide content length lesser than we want to upload with mock.patch('glare.common.store_api.save_blob_to_store', side_effect=store_api.save_blob_to_store) as m: self.assertRaises( exc.RequestEntityTooLarge, self.controller.upload_blob, self.req, 'sample_artifact', self.sample_artifact['id'], 'dict_of_blobs/d', BytesIO(b'd' * 2001), 'application/octet-stream', content_length=100) # Check that upload was called this time self.assertEqual(1, m.call_count) # Upload lesser amount of data works self.controller.upload_blob( self.req, 'sample_artifact', self.sample_artifact['id'], 'dict_of_blobs/a', BytesIO(b'a' * 1800), 'application/octet-stream') artifact = self.controller.show(self.req, 'sample_artifact', self.sample_artifact['id']) self.assertEqual(1800, artifact['dict_of_blobs']['a']['size']) self.assertEqual('active', artifact['dict_of_blobs']['a']['status']) # Now we have only 200 bytes left # Uploading of 201 byte fails immediately with mock.patch('glare.common.store_api.save_blob_to_store') as m: self.assertRaises( exc.RequestEntityTooLarge, self.controller.upload_blob, self.req, 'sample_artifact', self.sample_artifact['id'], 'dict_of_blobs/d', BytesIO(b'd' * 201), 'application/octet-stream', content_length=201) # Check that upload hasn't started self.assertEqual(0, m.call_count) def test_existing_blob_dict_key(self): self.controller.upload_blob( self.req, 'sample_artifact', self.sample_artifact['id'], 'dict_of_blobs/blb', BytesIO(b'aaa'), 'application/octet-stream') artifact = self.controller.show(self.req, 'sample_artifact', self.sample_artifact['id']) self.assertEqual(3, artifact['dict_of_blobs']['blb']['size']) self.assertEqual('active', artifact['dict_of_blobs']['blb']['status']) # If blob key already exists Glare return Conflict error self.assertRaises( exc.Conflict, self.controller.upload_blob, self.req, 'sample_artifact', self.sample_artifact['id'], 'dict_of_blobs/blb', BytesIO(b'aaa'), 'application/octet-stream') def test_blob_dict_storage_error(self): self.config(default_store='filesystem', group='artifact_type:sample_artifact') with mock.patch('glance_store.backend.add_to_backend', side_effect=store_exc.GlanceStoreException): self.assertRaises( exc.GlareException, self.controller.upload_blob, self.req, 'sample_artifact', self.sample_artifact['id'], 'dict_of_blobs/blb', BytesIO(b'aaa'), 'application/octet-stream') artifact = self.controller.show(self.req, 'sample_artifact', self.sample_artifact['id']) self.assertNotIn('blb', artifact['dict_of_blobs']) def test_upload_with_hook(self): with mock.patch.object( sample_artifact.SampleArtifact, 'pre_upload_hook', return_value=BytesIO(b'ffff')): self.controller.upload_blob( self.req, 'sample_artifact', self.sample_artifact['id'], 'blob', BytesIO(b'aaa'), 'application/octet-stream') artifact = self.controller.show(self.req, 'sample_artifact', self.sample_artifact['id']) self.assertEqual(4, artifact['blob']['size']) self.assertEqual('active', artifact['blob']['status']) def test_upload_with_hook_error(self): with mock.patch.object( sample_artifact.SampleArtifact, 'pre_upload_hook', side_effect=Exception): self.assertRaises( exc.BadRequest, self.controller.upload_blob, self.req, 'sample_artifact', self.sample_artifact['id'], 'dict_of_blobs/blb', BytesIO(b'aaa'), 'application/octet-stream') art = self.controller.show(self.req, 'sample_artifact', self.sample_artifact['id']) self.assertEqual({}, art['dict_of_blobs']) def test_upload_nonexistent_field(self): self.assertRaises( exc.BadRequest, self.controller.upload_blob, self.req, 'sample_artifact', self.sample_artifact['id'], 'INVALID', BytesIO(b'aaa'), 'application/octet-stream') self.assertRaises( exc.BadRequest, self.controller.upload_blob, self.req, 'sample_artifact', self.sample_artifact['id'], 'blob/key', BytesIO(b'aaa'), 'application/octet-stream') def test_upload_non_blob_field(self): self.assertRaises( exc.BadRequest, self.controller.upload_blob, self.req, 'sample_artifact', self.sample_artifact['id'], 'int1', BytesIO(b'aaa'), 'application/octet-stream') def test_upload_blob_dict_without_key(self): self.assertRaises( exc.BadRequest, self.controller.upload_blob, self.req, 'sample_artifact', self.sample_artifact['id'], 'dict_of_blobs/', BytesIO(b'aaa'), 'application/octet-stream') def test_parallel_uploading_and_activation(self): """ This test check whether it is possible to activate an artifact, when it has uploading blobs. """ self.controller.upload_blob( self.req, 'sample_artifact', self.sample_artifact['id'], 'blob', BytesIO(b'aaa'), 'application/octet-stream') self.sample_artifact = self.controller.show( self.req, 'sample_artifact', self.sample_artifact['id']) changes = [{'op': 'replace', 'path': '/string_required', 'value': 'ttt'}] self.update_with_values(changes) # Change status of the blob to 'saving' self.sample_artifact['blob']['status'] = 'saving' artifact_api.ArtifactAPI().update_blob( self.req.context, self.sample_artifact['id'], {'blob': self.sample_artifact['blob']}) self.sample_artifact = self.controller.show( self.req, 'sample_artifact', self.sample_artifact['id']) self.assertEqual('saving', self.sample_artifact['blob']['status']) # activation of artifact with saving blobs lead to Conflict error changes = [{'op': 'replace', 'path': '/status', 'value': 'active'}] self.assertRaises(exc.Conflict, self.update_with_values, changes) # create another artifact which doesn't have uploading blobs values = {'name': 'ttt', 'version': '2.0', 'string_required': 'rrr'} new_artifact = self.controller.create( self.req, 'sample_artifact', values) # activation is possible res = self.update_with_values(changes, art_id=new_artifact['id']) self.assertEqual('active', res['status']) glare-0.5.0/glare/tests/unit/base.py000066400000000000000000000131711317401036700173120ustar00rootroot00000000000000# Copyright 2012 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import fixtures import glance_store as store from glance_store import location import jsonpatch from oslo_config import cfg from oslo_config import fixture as cfg_fixture from oslo_policy import policy as os_policy from oslo_utils import uuidutils import testtools from glare.api.middleware import context from glare.api.v1 import resource from glare.common import policy from glare.common import wsgi from glare.db.sqlalchemy import api as db_api CONF = cfg.CONF class BaseTestCase(testtools.TestCase): def setUp(self): super(BaseTestCase, self).setUp() self._config_fixture = self.useFixture(cfg_fixture.Config()) self.users = { 'user1': { 'id': uuidutils.generate_uuid(), 'tenant_id': uuidutils.generate_uuid(), 'token': uuidutils.generate_uuid(), 'roles': ['member'] }, 'user2': { 'id': uuidutils.generate_uuid(), 'tenant_id': uuidutils.generate_uuid(), 'token': uuidutils.generate_uuid(), 'roles': ['member'] }, 'admin': { 'id': uuidutils.generate_uuid(), 'tenant_id': uuidutils.generate_uuid(), 'token': uuidutils.generate_uuid(), 'roles': ['admin'] }, 'anonymous': { 'id': None, 'tenant_id': None, 'token': None, 'roles': [] } } self.test_dir = self.useFixture(fixtures.TempDir()).path CONF.set_default('connection', 'sqlite://', group='database') db_api.setup_db() enf = policy.init(use_conf=False) for default in enf.registered_rules.values(): if default.name not in enf.rules: enf.rules[default.name] = default.check self.config( custom_artifact_types_modules=[ 'glare.tests.sample_artifact', 'glare.tests.hooks_artifact', 'glare.tests.unpacking_artifact' ], enabled_artifact_types=[ 'unpacking_artifact', 'hooks_artifact', 'sample_artifact', 'images', 'heat_templates', 'heat_environments', 'murano_packages', 'tosca_templates'] ) location.SCHEME_TO_CLS_MAP = {} self._create_stores() self.addCleanup(setattr, location, 'SCHEME_TO_CLS_MAP', dict()) self.addCleanup(db_api.drop_db) self.addCleanup(policy.reset) def config(self, **kw): """Override some configuration values. The keyword arguments are the names of configuration options to override and their values. If a group argument is supplied, the overrides are applied to the specified configuration option group. All overrides are automatically cleared at the end of the current test by the fixtures cleanup process. """ self._config_fixture.config(**kw) @staticmethod def policy(new_rules): enf = policy.init(use_conf=False) for rule_name, rule_check_str in new_rules.items(): enf.rules[rule_name] = os_policy.RuleDefault( rule_name, rule_check_str).check @staticmethod def get_fake_request(user): req = wsgi.Request.blank('') req.method = 'POST' kwargs = { 'user': user['id'], 'tenant': user['tenant_id'], 'roles': user['roles'], 'is_admin': 'admin' in user['roles'], } req.context = context.RequestContext(**kwargs) return req def _create_stores(self): """Create known stores. Mock out sheepdog's subprocess dependency on collie. :returns: the number of how many store drivers been loaded. """ store.register_opts(CONF) self.config(default_store='filesystem', filesystem_store_datadir=self.test_dir, group="glance_store") store.create_stores(CONF) @staticmethod def generate_json_patch(values): patch = jsonpatch.JsonPatch(values) tuple(map(patch._get_operation, patch.patch)) return patch def update_with_values(self, values, exc_class=None, art_type='sample_artifact', art_id=None): patch = self.generate_json_patch(values) art_id = art_id or self.sample_artifact['id'] if exc_class is None: return self.controller.update(self.req, art_type, art_id, patch) else: self.assertRaises(exc_class, self.controller.update, self.req, art_type, art_id, patch) class BaseTestArtifactAPI(BaseTestCase): def setUp(self): super(BaseTestArtifactAPI, self).setUp() self.controller = resource.ArtifactsController() self.req = self.get_fake_request(user=self.users['user1']) self.config(default_store='database', group='artifact_type:sample_artifact') glare-0.5.0/glare/tests/unit/db/000077500000000000000000000000001317401036700164105ustar00rootroot00000000000000glare-0.5.0/glare/tests/unit/db/__init__.py000066400000000000000000000000001317401036700205070ustar00rootroot00000000000000glare-0.5.0/glare/tests/unit/db/migrations/000077500000000000000000000000001317401036700205645ustar00rootroot00000000000000glare-0.5.0/glare/tests/unit/db/migrations/__init__.py000066400000000000000000000000001317401036700226630ustar00rootroot00000000000000glare-0.5.0/glare/tests/unit/db/migrations/test_migrations.py000066400000000000000000000244571317401036700243650ustar00rootroot00000000000000# All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Tests for database migrations. There are "opportunistic" tests for both mysql and postgresql in here, which allows testing against these databases in a properly configured unit test environment. For the opportunistic testing you need to set up a db named 'openstack_citest' with user 'openstack_citest' and password 'openstack_citest' on localhost. The test will then use that db and u/p combo to run the tests. For postgres on Ubuntu this can be done with the following commands: :: sudo -u postgres psql postgres=# create user openstack_citest with createdb login password 'openstack_citest'; postgres=# create database openstack_citest with owner openstack_citest; """ import contextlib from alembic import script import mock from oslo_db.sqlalchemy import utils as db_utils from oslo_db.tests.sqlalchemy import base as test_base from oslo_log import log as logging import sqlalchemy import sqlalchemy.exc from glare.db.migration import migration import glare.db.sqlalchemy.api from glare.tests.unit import glare_fixtures LOG = logging.getLogger(__name__) @contextlib.contextmanager def patch_with_engine(engine): with mock.patch.object(glare.db.sqlalchemy.api, 'get_engine') as patch_engine: patch_engine.return_value = engine yield class WalkVersionsMixin(object): def _walk_versions(self, engine=None, alembic_cfg=None): # Determine latest version script from the repo, then # upgrade from 1 through to the latest, with no data # in the databases. This just checks that the schema itself # upgrades successfully. # Place the database under version control with patch_with_engine(engine): script_directory = script.ScriptDirectory.from_config(alembic_cfg) self.assertIsNone(self.migration_api.version(engine)) versions = [ver for ver in script_directory.walk_revisions()] for version in reversed(versions): with glare_fixtures.BannedDBSchemaOperations(): self._migrate_up(engine, alembic_cfg, version.revision, with_data=True) for version in versions: with glare_fixtures.BannedDBSchemaOperations(): self._migrate_down(engine, alembic_cfg, version.down_revision, with_data=True) def _migrate_up(self, engine, config, version, with_data=False): """migrate up to a new version of the db. We allow for data insertion and post checks at every migration version with special _pre_upgrade_### and _check_### functions in the main test. """ try: if with_data: data = None pre_upgrade = getattr( self, "_pre_upgrade_%s" % version, None) if pre_upgrade: data = pre_upgrade(engine) self.migration_api.upgrade(version, config=config) self.assertEqual(version, self.migration_api.version(engine)) if with_data: check = getattr(self, "_check_%s" % version, None) if check: check(engine, data) except Exception: LOG.error("Failed to migrate to version %(version)s on engine " "%(engine)s", {'version': version, 'engine': engine}) raise def _migrate_down(self, engine, config, version, with_data=False): try: self.migration_api.downgrade(version, config=config) if with_data: post_downgrade = getattr( self, "_post_downgrade_%s" % version, None) if post_downgrade: post_downgrade(engine) except Exception: LOG.error("Failed to migrate to version %(version)s on engine " "%(engine)s", {'version': version, 'engine': engine}) raise class GlareMigrationsCheckers(object): def setUp(self): super(GlareMigrationsCheckers, self).setUp() self.config = migration.get_alembic_config() self.migration_api = migration def assert_table(self, engine, table_name, indices, columns): table = db_utils.get_table(engine, table_name) index_data = [(index.name, index.columns.keys()) for index in table.indexes] column_data = [column.name for column in table.columns] self.assertItemsEqual(columns, column_data) self.assertItemsEqual(indices, index_data) def test_walk_versions(self): self._walk_versions(self.engine, self.config) def _pre_upgrade_001(self, engine): self.assertRaises(sqlalchemy.exc.NoSuchTableError, db_utils.get_table, engine, 'glare_artifacts') self.assertRaises(sqlalchemy.exc.NoSuchTableError, db_utils.get_table, engine, 'glare_artifact_tags') self.assertRaises(sqlalchemy.exc.NoSuchTableError, db_utils.get_table, engine, 'glare_artifact_properties') self.assertRaises(sqlalchemy.exc.NoSuchTableError, db_utils.get_table, engine, 'glare_artifact_blobs') self.assertRaises(sqlalchemy.exc.NoSuchTableError, db_utils.get_table, engine, 'glare_artifact_locks') def _check_001(self, engine, data): artifacts_indices = [('ix_glare_artifact_name_and_version', ['name', 'version_prefix', 'version_suffix']), ('ix_glare_artifact_type', ['type_name']), ('ix_glare_artifact_status', ['status']), ('ix_glare_artifact_visibility', ['visibility']), ('ix_glare_artifact_owner', ['owner'])] artifacts_columns = ['id', 'name', 'type_name', 'version_prefix', 'version_suffix', 'version_meta', 'description', 'visibility', 'status', 'owner', 'created_at', 'updated_at', 'activated_at'] self.assert_table(engine, 'glare_artifacts', artifacts_indices, artifacts_columns) tags_indices = [('ix_glare_artifact_tags_artifact_id', ['artifact_id']), ('ix_glare_artifact_tags_artifact_id_tag_value', ['artifact_id', 'value'])] tags_columns = ['id', 'artifact_id', 'value'] self.assert_table(engine, 'glare_artifact_tags', tags_indices, tags_columns) prop_indices = [ ('ix_glare_artifact_properties_artifact_id', ['artifact_id']), ('ix_glare_artifact_properties_name', ['name'])] prop_columns = ['id', 'artifact_id', 'name', 'string_value', 'int_value', 'numeric_value', 'bool_value', 'key_name', 'position'] self.assert_table(engine, 'glare_artifact_properties', prop_indices, prop_columns) blobs_indices = [ ('ix_glare_artifact_blobs_artifact_id', ['artifact_id']), ('ix_glare_artifact_blobs_name', ['name'])] blobs_columns = ['id', 'artifact_id', 'size', 'md5', 'sha1', 'sha256', 'name', 'key_name', 'external', 'status', 'content_type', 'url'] self.assert_table(engine, 'glare_artifact_blobs', blobs_indices, blobs_columns) locks_indices = [] locks_columns = ['id'] self.assert_table(engine, 'glare_artifact_locks', locks_indices, locks_columns) def _check_002(self, engine, data): locks_indices = [] locks_columns = ['id', 'acquired_at'] self.assert_table(engine, 'glare_artifact_locks', locks_indices, locks_columns) def _check_003(self, engine, data): locks_indices = [] locks_columns = ['id', 'data'] self.assert_table(engine, 'glare_blob_data', locks_indices, locks_columns) def _check_004(self, engine, data): quota_indices = [] quota_columns = ['project_id', 'quota_name', 'quota_value'] self.assert_table(engine, 'glare_quotas', quota_indices, quota_columns) class TestMigrationsMySQL(GlareMigrationsCheckers, WalkVersionsMixin, test_base.MySQLOpportunisticTestCase): pass class TestMigrationsPostgreSQL(GlareMigrationsCheckers, WalkVersionsMixin, test_base.PostgreSQLOpportunisticTestCase): pass class TestMigrationsSqlite(GlareMigrationsCheckers, WalkVersionsMixin, test_base.DbTestCase,): pass glare-0.5.0/glare/tests/unit/db/test_quota_functions.py000066400000000000000000000150221317401036700232420ustar00rootroot00000000000000# Copyright 2017 - Nokia Networks # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from six import BytesIO from glare.db.sqlalchemy import api from glare.tests.unit import base class TestQuotaFunctions(base.BaseTestArtifactAPI): """Test quota db functions.""" def setUp(self): super(TestQuotaFunctions, self).setUp() self.session = api.get_session() def test_count_artifact_number(self): # initially there are no artifacts self.assertEqual(0, api.count_artifact_number( self.req.context, self.session)) # create 5 images, 3 heat templates, 2 murano packages and 7 samples amount = { 'images': 5, 'heat_templates': 3, 'murano_packages': 2, 'sample_artifact': 7 } for type_name in amount: for num in range(amount[type_name]): self.controller.create( self.req, type_name, {'name': type_name + str(num)}) # create 1 artifact of each type from different user req = self.get_fake_request(self.users['user2']) for type_name in amount: self.controller.create(req, type_name, {'name': type_name}) # count numbers for each type for type_name in amount: num = api.count_artifact_number( self.req.context, self.session, type_name) self.assertEqual(amount[type_name], num) # count the whole amount of artifacts self.assertEqual(17, api.count_artifact_number( self.req.context, self.session)) def test_calculate_uploaded_data(self): # initially there is no data self.assertEqual(0, api.calculate_uploaded_data( self.req.context, self.session)) # create a sample artifact art1 = self.controller.create( self.req, 'sample_artifact', {'name': 'art1'}) # upload 10 bytes to 'blob' self.controller.upload_blob( self.req, 'sample_artifact', art1['id'], 'blob', BytesIO(b'a' * 10), 'application/octet-stream') self.assertEqual(10, api.calculate_uploaded_data( self.req.context, self.session)) # upload 3 blobs to dict_of_blobs with 25, 35 and 45 bytes respectively self.controller.upload_blob( self.req, 'sample_artifact', art1['id'], 'dict_of_blobs/blob1', BytesIO(b'a' * 25), 'application/octet-stream') self.controller.upload_blob( self.req, 'sample_artifact', art1['id'], 'dict_of_blobs/blob2', BytesIO(b'a' * 35), 'application/octet-stream') self.controller.upload_blob( self.req, 'sample_artifact', art1['id'], 'dict_of_blobs/blob3', BytesIO(b'a' * 45), 'application/octet-stream') self.assertEqual(115, api.calculate_uploaded_data( self.req.context, self.session)) # create another sample artifact and upload 100 bytes there art2 = self.controller.create( self.req, 'sample_artifact', {'name': 'art2'}) self.controller.upload_blob( self.req, 'sample_artifact', art2['id'], 'blob', BytesIO(b'a' * 100), 'application/octet-stream') self.assertEqual(215, api.calculate_uploaded_data( self.req.context, self.session)) # create image and upload 150 bytes there img1 = self.controller.create( self.req, 'images', {'name': 'img1'}) self.controller.upload_blob( self.req, 'images', img1['id'], 'image', BytesIO(b'a' * 150), 'application/octet-stream') # the whole amount of uploaded data is 365 bytes self.assertEqual(365, api.calculate_uploaded_data( self.req.context, self.session)) # 215 bytes for sample_artifact self.assertEqual(215, api.calculate_uploaded_data( self.req.context, self.session, 'sample_artifact')) # 150 bytes for images self.assertEqual(150, api.calculate_uploaded_data( self.req.context, self.session, 'images')) # create an artifact from another user and check that it's not included # for the original user req = self.get_fake_request(self.users['user2']) another_art = self.controller.create( req, 'sample_artifact', {'name': 'another'}) # upload 1000 bytes to 'blob' self.controller.upload_blob( req, 'sample_artifact', another_art['id'], 'blob', BytesIO(b'a' * 1000), 'application/octet-stream') # original user still has 365 bytes self.assertEqual(365, api.calculate_uploaded_data( self.req.context, self.session)) # user2 has 1000 self.assertEqual( 1000, api.calculate_uploaded_data(req.context, self.session)) def test_quota_operations(self): # create several quotas values = { "project1": { "max_uploaded_data": 1000, "max_uploaded_data:images": 500, "max_artifact_number": 10 }, "project2": { "max_uploaded_data": 1000, "max_uploaded_data:sample_artifact": 500, "max_artifact_number": 20 }, "project3": { "max_uploaded_data": 1000 } } api.set_quotas(values, self.session) res = api.get_all_quotas(self.session) self.assertEqual(values, res) # Redefine quotas new_values = { "project1": { "max_uploaded_data": 200, "max_uploaded_data:images": 1000, "max_artifact_number": 30, "max_artifact_number:images": 20 }, "project2": {}, } api.set_quotas(new_values, self.session) # project3 should remain unchanged new_values['project3'] = {"max_uploaded_data": 1000} # project 2 quotas removed new_values.pop('project2') res = api.get_all_quotas(self.session) self.assertEqual(new_values, res) glare-0.5.0/glare/tests/unit/glare_fixtures.py000066400000000000000000000027641317401036700214310ustar00rootroot00000000000000# Copyright (c) 2016 Mirantis, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import fixtures from glare.common import exception class BannedDBSchemaOperations(fixtures.Fixture): """Ban some operations for migrations""" def __init__(self, banned_resources=None): super(BannedDBSchemaOperations, self).__init__() self._banned_resources = banned_resources or [] @staticmethod def _explode(resource, op): raise exception.DBNotAllowed( 'Operation %s.%s() is not allowed in a database migration' % ( resource, op)) def setUp(self): super(BannedDBSchemaOperations, self).setUp() for thing in self._banned_resources: self.useFixture(fixtures.MonkeyPatch( 'sqlalchemy.%s.drop' % thing, lambda *a, **k: self._explode(thing, 'drop'))) self.useFixture(fixtures.MonkeyPatch( 'sqlalchemy.%s.alter' % thing, lambda *a, **k: self._explode(thing, 'alter'))) glare-0.5.0/glare/tests/unit/middleware/000077500000000000000000000000001317401036700201405ustar00rootroot00000000000000glare-0.5.0/glare/tests/unit/middleware/__init__.py000066400000000000000000000000001317401036700222370ustar00rootroot00000000000000glare-0.5.0/glare/tests/unit/middleware/test_context.py000066400000000000000000000122641317401036700232420ustar00rootroot00000000000000# Copyright 2017 - Nokia Networks # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import webob from glare.api.middleware import context from glare.common import exception as exc from glare.tests.unit import base class TestContextMiddleware(base.BaseTestCase): def _build_request(self, roles=None, identity_status='Confirmed', service_catalog=None): req = webob.Request.blank('/') req.headers['x-auth-token'] = 'token1' req.headers['x-identity-status'] = identity_status req.headers['x-user-id'] = 'user1' req.headers['x-tenant-id'] = 'tenant1' _roles = roles or ['role1', 'role2'] req.headers['x-roles'] = ','.join(_roles) if service_catalog: req.headers['x-service-catalog'] = service_catalog return req def _build_middleware(self): return context.ContextMiddleware(None) def test_header_parsing(self): req = self._build_request() self._build_middleware().process_request(req) self.assertEqual('token1', req.context.auth_token) self.assertEqual('user1', req.context.user) self.assertEqual('tenant1', req.context.tenant) self.assertEqual(['role1', 'role2'], req.context.roles) def test_is_admin_flag(self): # is_admin check should look for 'admin' role by default req = self._build_request(roles=['admin', 'role2']) self._build_middleware().process_request(req) self.assertTrue(req.context.is_admin) # without the 'admin' role, is_admin should be False req = self._build_request() self._build_middleware().process_request(req) self.assertFalse(req.context.is_admin) # if we change the admin_role attribute, we should be able to use it req = self._build_request() self.policy({'context_is_admin': 'role:role1'}) self._build_middleware().process_request(req) self.assertTrue(req.context.is_admin) def test_roles_case_insensitive(self): # accept role from request req = self._build_request(roles=['Admin', 'role2']) self._build_middleware().process_request(req) self.assertTrue(req.context.is_admin) # accept role from config req = self._build_request(roles=['role1']) self.policy({'context_is_admin': 'role:rOLe1'}) self._build_middleware().process_request(req) self.assertTrue(req.context.is_admin) def test_roles_stripping(self): # stripping extra spaces in request req = self._build_request(roles=['\trole1']) self.policy({'context_is_admin': 'role:role1'}) self._build_middleware().process_request(req) self.assertTrue(req.context.is_admin) def test_anonymous_access_enabled(self): req = self._build_request(identity_status='Nope') self.config(allow_anonymous_access=True) middleware = self._build_middleware() middleware.process_request(req) self.assertIsNone(req.context.auth_token) self.assertIsNone(req.context.user) self.assertIsNone(req.context.tenant) self.assertEqual([], req.context.roles) self.assertFalse(req.context.is_admin) self.assertTrue(req.context.read_only) def test_anonymous_access_defaults_to_disabled(self): req = self._build_request(identity_status='Nope') middleware = self._build_middleware() self.assertRaises(exc.Unauthorized, middleware.process_request, req) def test_service_catalog(self): catalog_json = "[{}]" req = self._build_request(service_catalog=catalog_json) self._build_middleware().process_request(req) self.assertEqual([{}], req.context.service_catalog) def test_invalid_service_catalog(self): catalog_json = "bad json" req = self._build_request(service_catalog=catalog_json) middleware = self._build_middleware() self.assertRaises(exc.GlareException, middleware.process_request, req) def test_response(self): req = self._build_request() req.context = context.RequestContext() request_id = req.context.request_id resp = webob.Response() resp.request = req self._build_middleware().process_response(resp) self.assertEqual(request_id, resp.headers['x-openstack-request-id']) resp_req_id = resp.headers['x-openstack-request-id'] # Validate that request-id do not starts with 'req-req-' if isinstance(resp_req_id, bytes): resp_req_id = resp_req_id.decode('utf-8') self.assertFalse(resp_req_id.startswith('req-req-')) self.assertTrue(resp_req_id.startswith('req-')) glare-0.5.0/glare/tests/unit/middleware/test_fault.py000066400000000000000000000105221317401036700226640ustar00rootroot00000000000000# Copyright 2017 - Nokia Networks # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from functools import partial import inspect import mock from oslo_config import cfg from oslo_log import log as logging from glare.api.middleware import fault from glare.common import exception as exc from glare.tests.unit import base CONF = cfg.CONF logging.register_options(CONF) class TestFaultMiddleware(base.BaseTestCase): @staticmethod def get_response(value=None, exception=Exception): if value is None: raise exception return value def _build_middleware(self): return fault.GlareFaultWrapperFilter(None) def test_no_exception(self): req = mock.Mock() req.get_response.return_value = 'Response object' with mock.patch.object(fault.Fault, '__init__') as mocked_fault: res = self._build_middleware()(req) self.assertEqual('Response object', res) self.assertEqual(0, mocked_fault.call_count) def test_exceptions(self): req = mock.Mock() error_map = fault.GlareFaultWrapperFilter.error_map # Raise all exceptions from error_map for name, obj in inspect.getmembers(exc, inspect.isclass): if not issubclass(obj, Exception)\ or obj is exc.InvalidGlobalAPIVersion: continue req.get_response.side_effect = partial(self.get_response, exception=obj) res = self._build_middleware()(req) while name not in error_map: obj = obj.__base__ name = obj.__name__ self.assertEqual(error_map[name].code, res.error['code']) # Raise other possible exceptions that lead to 500 error for e in (Exception, ValueError, TypeError, exc.GlareException): req.get_response.side_effect = partial( self.get_response, exception=e) res = self._build_middleware()(req) self.assertEqual(500, res.error['code']) # InvalidGlobalAPIVersion should also include min_version and # max_version headers req.get_response.side_effect = partial( self.get_response, exception=exc.InvalidGlobalAPIVersion( req_ver=100.0, min_ver=1.0, max_ver=1.1)) res = self._build_middleware()(req) self.assertEqual(406, res.error['code']) self.assertEqual(1.0, res.error['min_version']) self.assertEqual(1.1, res.error['max_version']) def test_trace_marker(self): req = mock.Mock() self.config(debug=True) traceback_marker = 'Traceback (most recent call last)' pref = "PREFIX" suff = "SUFFIX" # Test with marker req.get_response.side_effect = partial( self.get_response, exception=ValueError( pref + traceback_marker + suff)) res = self._build_middleware()(req) self.assertEqual(500, res.error['code']) self.assertEqual(pref, res.error['error']['message']) self.assertEqual(traceback_marker + suff, res.error['error']['traceback']) # Test without marker req.get_response.side_effect = partial( self.get_response, exception=ValueError( pref + suff)) res = self._build_middleware()(req) self.assertEqual(500, res.error['code']) self.assertEqual(pref + suff, res.error['error']['message']) self.assertIn(traceback_marker, res.error['error']['traceback']) def test_fault_class(self): req = mock.Mock() req.get_response.side_effect = partial( self.get_response, exception=exc.BadRequest) res = self._build_middleware()(req)(req) self.assertEqual(400, res.status_code) self.assertEqual('400 Bad Request', res.status) glare-0.5.0/glare/tests/unit/middleware/test_keycloak_auth.py000066400000000000000000000131671317401036700244040ustar00rootroot00000000000000# Copyright 2017 - Nokia Networks # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import mock import requests import webob from glare.api.middleware import keycloak_auth from glare.common import exception as exc from glare.tests.unit import base class TestKeycloakAuthMiddleware(base.BaseTestCase): def _build_request(self, token): req = webob.Request.blank("/") req.headers["x-auth-token"] = token req.get_response = lambda app: None return req def _build_middleware(self): return keycloak_auth.KeycloakAuthMiddleware(None) @mock.patch("requests.get") def test_header_parsing(self, mocked_get): token = { "iss": "http://localhost:8080/auth/realms/my_realm", "realm_access": { "roles": ["role1", "role2"] } } mocked_resp = mock.Mock() mocked_resp.status_code = 200 mocked_resp.json.return_value = '{"user": "mike"}' mocked_get.return_value = mocked_resp req = self._build_request(token) with mock.patch("jwt.decode", return_value=token): self._build_middleware()(req) self.assertEqual("Confirmed", req.headers["X-Identity-Status"]) self.assertEqual("my_realm", req.headers["X-Project-Id"]) self.assertEqual("role1,role2", req.headers["X-Roles"]) self.assertEqual(1, mocked_get.call_count) def test_no_auth_token(self): req = webob.Request.blank("/") self.assertRaises(exc.Unauthorized, self._build_middleware(), req) @mock.patch("requests.get") def test_no_realm_access(self, mocked_get): token = { "iss": "http://localhost:8080/auth/realms/my_realm", } mocked_resp = mock.Mock() mocked_resp.status_code = 200 mocked_resp.json.return_value = '{"user": "mike"}' mocked_get.return_value = mocked_resp req = self._build_request(token) with mock.patch("jwt.decode", return_value=token): self._build_middleware()(req) self.assertEqual("Confirmed", req.headers["X-Identity-Status"]) self.assertEqual("my_realm", req.headers["X-Project-Id"]) self.assertEqual("", req.headers["X-Roles"]) def test_wrong_token_format(self): req = self._build_request(token="WRONG_FORMAT_TOKEN") self.assertRaises(exc.Unauthorized, self._build_middleware(), req) @mock.patch("requests.get") def test_server_unauthorized(self, mocked_get): token = { "iss": "http://localhost:8080/auth/realms/my_realm", } mocked_resp = mock.Mock() mocked_resp.status_code = 401 mocked_resp.json.return_value = '{"user": "mike"}' mocked_get.return_value = mocked_resp req = self._build_request(token) with mock.patch("jwt.decode", return_value=token): self.assertRaises(exc.Unauthorized, self._build_middleware(), req) @mock.patch("requests.get") def test_server_forbidden(self, mocked_get): token = { "iss": "http://localhost:8080/auth/realms/my_realm", } mocked_resp = mock.Mock() mocked_resp.status_code = 403 mocked_resp.json.return_value = '{"user": "mike"}' mocked_get.return_value = mocked_resp req = self._build_request(token) with mock.patch("jwt.decode", return_value=token): self.assertRaises(exc.Forbidden, self._build_middleware(), req) @mock.patch("requests.get") def test_server_exception(self, mocked_get): token = { "iss": "http://localhost:8080/auth/realms/my_realm", } mocked_resp = mock.Mock() mocked_resp.status_code = 500 mocked_resp.json.return_value = '{"user": "mike"}' mocked_get.return_value = mocked_resp req = self._build_request(token) with mock.patch("jwt.decode", return_value=token): self.assertRaises( exc.GlareException, self._build_middleware(), req) @mock.patch("requests.get") def test_connection_error(self, mocked_get): token = { "iss": "http://localhost:8080/auth/realms/my_realm", } mocked_get.side_effect = requests.ConnectionError req = self._build_request(token) with mock.patch("jwt.decode", return_value=token): self.assertRaises( exc.GlareException, self._build_middleware(), req) @mock.patch("requests.get") def test_userinfo_endpoint_empty(self, mocked_get): self.config(user_info_endpoint_url='', group='keycloak_oidc') token = { "iss": "http://localhost:8080/auth/realms/my_realm", "realm_access": { "roles": ["role1", "role2"] } } req = self._build_request(token) with mock.patch("jwt.decode", return_value=token): self._build_middleware()(req) self.assertEqual("Confirmed", req.headers["X-Identity-Status"]) self.assertEqual("my_realm", req.headers["X-Project-Id"]) self.assertEqual("role1,role2", req.headers["X-Roles"]) self.assertEqual(0, mocked_get.call_count) glare-0.5.0/glare/tests/unit/middleware/test_trusted_auth.py000066400000000000000000000160401317401036700242650ustar00rootroot00000000000000# Copyright 2017 - Nokia Networks # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import webob from glare.api.middleware import context from glare.common import exception as exc from glare.tests.unit import base class TestTrustedAuthMiddleware(base.BaseTestCase): def _build_request(self, token): req = webob.Request.blank("/") req.headers["x-auth-token"] = token req.get_response = lambda app: None return req def _build_middleware(self): return context.TrustedAuthMiddleware(None) def test_header_parsing(self): token = 'user1:tenant1:role1,role2' req = self._build_request(token) self._build_middleware().process_request(req) self.assertEqual("Confirmed", req.headers["X-Identity-Status"]) self.assertEqual("user1", req.headers["X-User-Id"]) self.assertEqual("tenant1", req.headers["X-Tenant-Id"]) self.assertEqual("role1,role2", req.headers["X-Roles"]) self.assertEqual(token, req.context.auth_token) self.assertEqual('user1', req.context.user) self.assertEqual('tenant1', req.context.tenant) self.assertEqual(['role1', 'role2'], req.context.roles) self.assertIn('service_catalog', req.context.to_dict()) def test_no_auth_token(self): req = self._build_request(None) del req.headers['x-auth-token'] self.assertRaises(exc.Unauthorized, self._build_middleware().process_request, req) def test_wrong_format(self): req = self._build_request('WRONG_FORMAT') middleware = self._build_middleware() self.assertRaises(exc.Unauthorized, middleware.process_request, req) req = self._build_request('user1:tenant1:role1:role2') self.assertRaises(exc.Unauthorized, middleware.process_request, req) def test_no_tenant(self): req = self._build_request('user1::role') middleware = self._build_middleware() self.assertRaises(exc.Unauthorized, middleware.process_request, req) def test_no_roles(self): # stripping extra spaces in request req = self._build_request('user1:tenant1:') self._build_middleware().process_request(req) self.assertFalse(req.context.is_admin) self.assertEqual('user1', req.context.user) self.assertEqual("user1", req.headers["X-User-Id"]) self.assertEqual("", req.headers["X-Roles"]) self.assertEqual([], req.context.roles) def test_is_admin_flag(self): # is_admin check should look for 'admin' role by default req = self._build_request('user1:tenant1:role1,admin') self._build_middleware().process_request(req) self.assertTrue(req.context.is_admin) # without the 'admin' role, is_admin should be False req = self._build_request('user1:tenant1:role1,role2') self._build_middleware().process_request(req) self.assertFalse(req.context.is_admin) # if we change the admin_role attribute, we should be able to use it req = self._build_request('user1:tenant1:role1,role2') self.policy({'context_is_admin': 'role:role1'}) self._build_middleware().process_request(req) self.assertTrue(req.context.is_admin) def test_roles_case_insensitive(self): # accept role from request req = self._build_request('user1:tenant1:Admin,role2') self._build_middleware().process_request(req) self.assertTrue(req.context.is_admin) # accept role from config req = self._build_request('user1:tenant1:role1,role2') self.policy({'context_is_admin': 'role:rOLe1'}) self._build_middleware().process_request(req) self.assertTrue(req.context.is_admin) def test_token_stripping(self): # stripping extra spaces in request req = self._build_request(' user1:tenant1:role1\t') self.policy({'context_is_admin': 'role:role1'}) self._build_middleware().process_request(req) self.assertTrue(req.context.is_admin) self.assertEqual('user1', req.context.user) self.assertEqual("user1", req.headers["X-User-Id"]) self.assertEqual("role1", req.headers["X-Roles"]) def test_anonymous_access_enabled(self): req = self._build_request('user1:none:role1,role2') self.config(allow_anonymous_access=True) middleware = self._build_middleware() middleware.process_request(req) self.assertIsNone(req.context.auth_token) self.assertIsNone(req.context.user) self.assertIsNone(req.context.tenant) self.assertEqual([], req.context.roles) self.assertFalse(req.context.is_admin) self.assertTrue(req.context.read_only) def test_anonymous_access_defaults_to_disabled(self): req = self._build_request('user1:none:role1,role2') middleware = self._build_middleware() self.assertRaises(exc.Unauthorized, middleware.process_request, req) def test_response(self): req = self._build_request('user1:tenant1:role1,role2') req.context = context.RequestContext() request_id = req.context.request_id resp = webob.Response() resp.request = req self._build_middleware().process_response(resp) self.assertEqual(request_id, resp.headers['x-openstack-request-id']) resp_req_id = resp.headers['x-openstack-request-id'] # Validate that request-id do not starts with 'req-req-' if isinstance(resp_req_id, bytes): resp_req_id = resp_req_id.decode('utf-8') self.assertFalse(resp_req_id.startswith('req-req-')) self.assertTrue(resp_req_id.startswith('req-')) def test_response_no_request_id(self): req = self._build_request('user1:tenant1:role1,role2') req.context = context.RequestContext() del req.context.request_id resp = webob.Response() resp.request = req self._build_middleware().process_response(resp) self.assertNotIn('x-openstack-request-id', resp.headers) def test_response_no_request_id_prefix(self): # prefix is 'req-' req = self._build_request('user1:tenant1:role1,role2') req.context = context.RequestContext() req.context.request_id = "STRING_WITH_NO_PREFIX" resp = webob.Response() resp.request = req self._build_middleware().process_response(resp) self.assertEqual('req-STRING_WITH_NO_PREFIX', resp.headers['x-openstack-request-id']) glare-0.5.0/glare/tests/unit/middleware/test_version_negotiations.py000066400000000000000000000065571317401036700260360ustar00rootroot00000000000000# Copyright 2017 - Nokia Networks # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import webob from glare.api.middleware import version_negotiation from glare.common import exception as exc from glare.tests.unit import base class TestContextMiddleware(base.BaseTestCase): MIME_TYPE = 'application/vnd.openstack.artifacts-' def _build_request(self, accept, path_info): req = webob.Request.blank(path_info) req.accept = accept return req def _build_middleware(self): return version_negotiation.GlareVersionNegotiationFilter(None) def test_version_request(self): _LINKS = [{ "rel": "describedby", "type": "text/html", "href": "http://docs.openstack.org/", }] for path_info in ('/', '/versions'): expected = {'versions': [ { 'version': '1.0', 'status': 'STABLE', 'links': _LINKS, 'media-type': 'application/vnd.openstack.artifacts-1.0', }, { 'version': '1.1', 'status': 'EXPERIMENTAL', 'links': _LINKS, 'media-type': 'application/vnd.openstack.artifacts-1.1', }] } req = self._build_request(self.MIME_TYPE + '1.0', path_info) res = self._build_middleware().process_request(req) self.assertEqual(expected, res.json_body) def test_wrong_version(self): req = self._build_request(self.MIME_TYPE + 'INVALID', '/artifacts') self.assertRaises(exc.BadRequest, self._build_middleware().process_request, req) def test_too_big_version(self): req = self._build_request(self.MIME_TYPE + '10000.0', '/artifacts') self.assertRaises(exc.InvalidGlobalAPIVersion, self._build_middleware().process_request, req) def test_latest_version(self): req = self._build_request(self.MIME_TYPE + 'latest', '/artifacts') self._build_middleware().process_request(req) self.assertEqual('1.1', req.api_version_request.get_string()) def test_version_unknown(self): req = self._build_request('UNKNOWN', '/artifacts') self._build_middleware().process_request(req) self.assertEqual('1.0', req.api_version_request.get_string()) def test_response(self): res = webob.Response() req = self._build_request('1.0', '/artifacts') mw = self._build_middleware() mw.process_request(req) mw.process_response(res, req) self.assertIn('openstack-api-version', res.headers) self.assertEqual('artifact 1.0', res.headers['openstack-api-version']) self.assertIn('Vary', res.headers) self.assertEqual('openstack-api-version', res.headers['Vary']) glare-0.5.0/glare/tests/unit/test_fixtures.py000066400000000000000000000025651317401036700213150ustar00rootroot00000000000000# Copyright (c) 2016 Mirantis, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import sqlalchemy import testtools from glare.common import exception from glare.tests.unit import glare_fixtures class TestBannedDBSchemaOperations(testtools.TestCase): def test_column(self): column = sqlalchemy.Column() with glare_fixtures.BannedDBSchemaOperations(['Column']): self.assertRaises(exception.DBNotAllowed, column.drop) self.assertRaises(exception.DBNotAllowed, column.alter) def test_table(self): table = sqlalchemy.Table() with glare_fixtures.BannedDBSchemaOperations(['Table']): self.assertRaises(exception.DBNotAllowed, table.drop) self.assertRaises(exception.DBNotAllowed, table.alter) glare-0.5.0/glare/tests/unit/test_hacking.py000066400000000000000000000142141317401036700210420ustar00rootroot00000000000000# Copyright 2014 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import inspect from glare.hacking import checks from glare.tests.unit import base class HackingTestCase(base.BaseTestCase): def test_assert_true_instance(self): self.assertEqual(1, len(list(checks.assert_true_instance( "self.assertTrue(isinstance(e, " "exception.BuildAbortException))")))) self.assertEqual( 0, len(list(checks.assert_true_instance("self.assertTrue()")))) def test_assert_equal_type(self): self.assertEqual(1, len(list(checks.assert_equal_type( "self.assertEqual(type(als['QuicAssist']), list)")))) self.assertEqual( 0, len(list(checks.assert_equal_type("self.assertTrue()")))) def test_assert_equal_none(self): self.assertEqual(1, len(list(checks.assert_equal_none( "self.assertEqual(A, None)")))) self.assertEqual(1, len(list(checks.assert_equal_none( "self.assertEqual(None, A)")))) self.assertEqual( 0, len(list(checks.assert_equal_none("self.assertIsNone()")))) def test_no_translate_logs(self): for log in checks._all_log_levels: bad = 'LOG.%s(_("Bad"))' % log self.assertEqual(1, len(list(checks.no_translate_logs(bad)))) # Catch abuses when used with a variable and not a literal bad = 'LOG.%s(_(msg))' % log self.assertEqual(1, len(list(checks.no_translate_logs(bad)))) def test_no_direct_use_of_unicode_function(self): self.assertEqual(1, len(list(checks.no_direct_use_of_unicode_function( "unicode('the party don't start til the unicode walks in')")))) self.assertEqual(1, len(list(checks.no_direct_use_of_unicode_function( """unicode('something ' 'something else""")))) self.assertEqual(0, len(list(checks.no_direct_use_of_unicode_function( "six.text_type('party over')")))) self.assertEqual(0, len(list(checks.no_direct_use_of_unicode_function( "not_actually_unicode('something completely different')")))) def test_no_contextlib_nested(self): self.assertEqual(1, len(list(checks.check_no_contextlib_nested( "with contextlib.nested(")))) self.assertEqual(1, len(list(checks.check_no_contextlib_nested( "with nested(")))) self.assertEqual(0, len(list(checks.check_no_contextlib_nested( "with foo as bar")))) def test_dict_constructor_with_list_copy(self): self.assertEqual(1, len(list(checks.dict_constructor_with_list_copy( " dict([(i, connect_info[i])")))) self.assertEqual(1, len(list(checks.dict_constructor_with_list_copy( " attrs = dict([(k, _from_json(v))")))) self.assertEqual(1, len(list(checks.dict_constructor_with_list_copy( " type_names = dict((value, key) for key, value in")))) self.assertEqual(1, len(list(checks.dict_constructor_with_list_copy( " dict((value, key) for key, value in")))) self.assertEqual(1, len(list(checks.dict_constructor_with_list_copy( "foo(param=dict((k, v) for k, v in bar.items()))")))) self.assertEqual(1, len(list(checks.dict_constructor_with_list_copy( " dict([[i,i] for i in range(3)])")))) self.assertEqual(1, len(list(checks.dict_constructor_with_list_copy( " dd = dict([i,i] for i in range(3))")))) self.assertEqual(0, len(list(checks.dict_constructor_with_list_copy( " create_kwargs = dict(snapshot=snapshot,")))) self.assertEqual(0, len(list(checks.dict_constructor_with_list_copy( " self._render_dict(xml, data_el, data.__dict__)")))) def test_check_python3_xrange(self): func = checks.check_python3_xrange self.assertEqual(1, len(list(func('for i in xrange(10)')))) self.assertEqual(1, len(list(func('for i in xrange (10)')))) self.assertEqual(0, len(list(func('for i in range(10)')))) self.assertEqual(0, len(list(func('for i in six.moves.range(10)')))) self.assertEqual(0, len(list(func('testxrange(10)')))) def test_dict_iteritems(self): self.assertEqual(1, len(list(checks.check_python3_no_iteritems( "obj.iteritems()")))) self.assertEqual(0, len(list(checks.check_python3_no_iteritems( "six.iteritems(obj)")))) self.assertEqual(0, len(list(checks.check_python3_no_iteritems( "obj.items()")))) def test_dict_iterkeys(self): self.assertEqual(1, len(list(checks.check_python3_no_iterkeys( "obj.iterkeys()")))) self.assertEqual(0, len(list(checks.check_python3_no_iterkeys( "six.iterkeys(obj)")))) self.assertEqual(0, len(list(checks.check_python3_no_iterkeys( "obj.keys()")))) def test_dict_itervalues(self): self.assertEqual(1, len(list(checks.check_python3_no_itervalues( "obj.itervalues()")))) self.assertEqual(0, len(list(checks.check_python3_no_itervalues( "six.itervalues(ob)")))) self.assertEqual(0, len(list(checks.check_python3_no_itervalues( "obj.values()")))) def test_factory(self): class Register(object): def __init__(self): self.funcs = [] def __call__(self, func): self.funcs.append(func) register = Register() checks.factory(register) for name, func in inspect.getmembers(checks, inspect.isfunction): if name != 'factory': self.assertIn(func, register.funcs) glare-0.5.0/glare/tests/unit/test_multistore.py000066400000000000000000000032321317401036700216430ustar00rootroot00000000000000# Copyright 2017 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from glare import engine from glare.objects.meta import registry from glare.tests.unit import base class TestMultistore(base.BaseTestCase): def test_multistore(self): types = {'images': 'swift', 'heat_templates': 'rbd', 'heat_environments': 'file', 'tosca_templates': 'sheepdog', 'murano_packages': 'vsphere', 'sample_artifact': 'database', 'hooks_artifact': 'database', 'unpacking_artifact': 'database'} # create engine and register new artifact types engine.Engine() for type_name, store in types.items(): self.config(default_store=store, group='artifact_type:' + type_name) for t in registry.ArtifactRegistry.obj_classes().values(): name = t[0].get_type_name() if name == 'all': continue self.assertEqual( getattr(base.CONF, 'artifact_type:' + name).default_store, types[name]) glare-0.5.0/glare/tests/unit/test_quotas.py000066400000000000000000000605301317401036700207540ustar00rootroot00000000000000# Copyright 2017 - Nokia Networks # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import mock from six import BytesIO from glare.common import exception from glare.common import store_api from glare.tests.unit import base class TestStaticQuotas(base.BaseTestArtifactAPI): """Test static quota limits.""" def test_count_artifact_number(self): user1_req = self.get_fake_request(self.users['user1']) user2_req = self.get_fake_request(self.users['user2']) # initially there are no artifacts self.assertEqual( 0, len(self.controller.list(user1_req, 'all')['artifacts'])) self.assertEqual( 0, len(self.controller.list(user2_req, 'all')['artifacts'])) # set global limit on 10 artifacts self.config(max_artifact_number=10) # 3 images, 15 heat templates, 10 murano packages self.config(max_artifact_number=3, group='artifact_type:images') self.config(max_artifact_number=15, group='artifact_type:heat_templates') self.config(max_artifact_number=10, group='artifact_type:murano_packages') # create 3 images for user1 for i in range(3): img = self.controller.create( user1_req, 'images', {'name': 'img%d' % i}) # creation of another image fails because of artifact type limit self.assertRaises(exception.Forbidden, self.controller.create, user1_req, 'images', {'name': 'img4'}) # create 7 murano packages for i in range(7): self.controller.create( user1_req, 'murano_packages', {'name': 'mp%d' % i}) # creation of another package fails because of global limit self.assertRaises(exception.Forbidden, self.controller.create, user1_req, 'murano_packages', {'name': 'mp8'}) # delete an image and create another murano package work self.controller.delete(user1_req, 'images', img['id']) self.controller.create(user1_req, 'murano_packages', {'name': 'mp8'}) # user2 can create his own artifacts for i in range(10): self.controller.create( user2_req, 'heat_templates', {'name': 'ht%d' % i}) # creation of another heat template fails because of global limit self.assertRaises(exception.Forbidden, self.controller.create, user2_req, 'heat_templates', {'name': 'ht11'}) # disable global limit and try to create 15 heat templates self.config(max_artifact_number=-1) for i in range(15): self.controller.create( user1_req, 'heat_templates', {'name': 'ht%d' % i}) # creation of another heat template fails because of type limit self.assertRaises(exception.Forbidden, self.controller.create, user1_req, 'heat_templates', {'name': 'ht16'}) # disable type limit for heat templates and create 1 heat templates self.config(max_artifact_number=-1, group='artifact_type:heat_templates') self.controller.create( user1_req, 'heat_templates', {'name': 'ht16'}) def test_calculate_uploaded_data(self): user1_req = self.get_fake_request(self.users['user1']) user2_req = self.get_fake_request(self.users['user2']) # initially there are no artifacts self.assertEqual( 0, len(self.controller.list(user1_req, 'all')['artifacts'])) self.assertEqual( 0, len(self.controller.list(user2_req, 'all')['artifacts'])) # set global limit on 1000 bytes self.config(max_uploaded_data=1000) # 300 for sample artifact, 1500 for images, 1000 for murano packages self.config(max_uploaded_data=300, group='artifact_type:sample_artifact') self.config(max_uploaded_data=1500, group='artifact_type:images') self.config(max_uploaded_data=1000, group='artifact_type:murano_packages') # create 2 sample artifacts for user 1 art1 = self.controller.create( user1_req, 'sample_artifact', {'name': 'art1'}) art2 = self.controller.create( user1_req, 'sample_artifact', {'name': 'art2'}) # create 3 images for user1 img1 = self.controller.create( user1_req, 'images', {'name': 'img1'}) img2 = self.controller.create( user1_req, 'images', {'name': 'img2'}) img3 = self.controller.create( user1_req, 'images', {'name': 'img3'}) # upload to art1 fails now because of type limit self.assertRaises( exception.RequestEntityTooLarge, self.controller.upload_blob, user1_req, 'sample_artifact', art1['id'], 'blob', BytesIO(b'a' * 301), 'application/octet-stream', 301) # upload to img1 fails now because of global limit self.assertRaises( exception.RequestEntityTooLarge, self.controller.upload_blob, user1_req, 'images', img1['id'], 'image', BytesIO(b'a' * 1001), 'application/octet-stream', 1001) # upload 300 bytes to 'blob' of art1 self.controller.upload_blob( user1_req, 'sample_artifact', art1['id'], 'blob', BytesIO(b'a' * 300), 'application/octet-stream', content_length=300) # upload another blob to art1 fails because of type limit self.assertRaises( exception.RequestEntityTooLarge, self.controller.upload_blob, user1_req, 'sample_artifact', art1['id'], 'dict_of_blobs/blob', BytesIO(b'a'), 'application/octet-stream', 1) # upload to art2 fails now because of type limit self.assertRaises( exception.RequestEntityTooLarge, self.controller.upload_blob, user1_req, 'sample_artifact', art2['id'], 'blob', BytesIO(b'a'), 'application/octet-stream', 1) # delete art1 and check that upload to art2 works self.controller.delete(user1_req, 'sample_artifact', art1['id']) self.controller.upload_blob( user1_req, 'sample_artifact', art2['id'], 'blob', BytesIO(b'a' * 300), 'application/octet-stream', 300) # upload 700 bytes to img1 works self.controller.upload_blob( user1_req, 'images', img1['id'], 'image', BytesIO(b'a' * 700), 'application/octet-stream', 700) # upload to img2 fails because of global limit self.assertRaises( exception.RequestEntityTooLarge, self.controller.upload_blob, user1_req, 'images', img2['id'], 'image', BytesIO(b'a'), 'application/octet-stream', 1) # user2 can upload data to images img1 = self.controller.create( user2_req, 'images', {'name': 'img1'}) self.controller.upload_blob( user2_req, 'images', img1['id'], 'image', BytesIO(b'a' * 1000), 'application/octet-stream', 1000) # disable global limit and try upload data from user1 again self.config(max_uploaded_data=-1) self.controller.upload_blob( user1_req, 'images', img2['id'], 'image', BytesIO(b'a' * 800), 'application/octet-stream', 800) # uploading more fails because of image type limit self.assertRaises( exception.RequestEntityTooLarge, self.controller.upload_blob, user1_req, 'images', img3['id'], 'image', BytesIO(b'a'), 'application/octet-stream', 1) # disable type limit and try upload data from user1 again self.config(max_uploaded_data=-1, group='artifact_type:images') self.controller.upload_blob( user1_req, 'images', img3['id'], 'image', BytesIO(b'a' * 1000), 'application/octet-stream', 1000) class TestDynamicQuotas(base.BaseTestArtifactAPI): """Test dynamic quota limits.""" def test_count_artifact_number(self): user1_req = self.get_fake_request(self.users['user1']) user2_req = self.get_fake_request(self.users['user2']) # initially there are no artifacts self.assertEqual( 0, len(self.controller.list(user1_req, 'all')['artifacts'])) self.assertEqual( 0, len(self.controller.list(user2_req, 'all')['artifacts'])) values = { user1_req.context.tenant: { "max_artifact_number:images": 3, "max_artifact_number:heat_templates": 15, "max_artifact_number:murano_packages": 10, "max_artifact_number": 10 }, user2_req.context.tenant: { "max_artifact_number": 10 } } admin_req = self.get_fake_request(self.users["admin"]) # define several quotas self.controller.set_quotas(admin_req, values) # create 3 images for user1 for i in range(3): img = self.controller.create( user1_req, 'images', {'name': 'img%d' % i}) # creation of another image fails because of artifact type limit self.assertRaises(exception.Forbidden, self.controller.create, user1_req, 'images', {'name': 'img4'}) # create 7 murano packages for i in range(7): self.controller.create( user1_req, 'murano_packages', {'name': 'mp%d' % i}) # creation of another package fails because of global limit self.assertRaises(exception.Forbidden, self.controller.create, user1_req, 'murano_packages', {'name': 'mp8'}) # delete an image and create another murano package work self.controller.delete(user1_req, 'images', img['id']) self.controller.create(user1_req, 'murano_packages', {'name': 'mp8'}) # user2 can create his own artifacts for i in range(10): self.controller.create( user2_req, 'heat_templates', {'name': 'ht%d' % i}) # creation of another heat template fails because of global limit self.assertRaises(exception.Forbidden, self.controller.create, user2_req, 'heat_templates', {'name': 'ht11'}) # disable global limit for user1 and try to create 15 heat templates values = { user1_req.context.tenant: { "max_artifact_number:images": 3, "max_artifact_number:heat_templates": 15, "max_artifact_number:murano_packages": 10, "max_artifact_number": -1 } } self.controller.set_quotas(admin_req, values) for i in range(15): self.controller.create( user1_req, 'heat_templates', {'name': 'ht%d' % i}) # creation of another heat template fails because of type limit self.assertRaises(exception.Forbidden, self.controller.create, user1_req, 'heat_templates', {'name': 'ht16'}) # disable type limit for heat templates and create 1 heat templates values = { user1_req.context.tenant: { "max_artifact_number:images": 3, "max_artifact_number:heat_templates": -1, "max_artifact_number:murano_packages": 10, "max_artifact_number": -1 } } self.controller.set_quotas(admin_req, values) # now user1 can create another heat template self.controller.create( user1_req, 'heat_templates', {'name': 'ht16'}) def test_calculate_uploaded_data(self): user1_req = self.get_fake_request(self.users['user1']) user2_req = self.get_fake_request(self.users['user2']) # initially there are no artifacts self.assertEqual( 0, len(self.controller.list(user1_req, 'all')['artifacts'])) self.assertEqual( 0, len(self.controller.list(user2_req, 'all')['artifacts'])) values = { user1_req.context.tenant: { "max_uploaded_data:images": 1500, "max_uploaded_data:sample_artifact": 300, "max_uploaded_data:murano_packages": 1000, "max_uploaded_data": 1000 }, user2_req.context.tenant: { "max_uploaded_data": 1000 } } admin_req = self.get_fake_request(self.users["admin"]) # define several quotas self.controller.set_quotas(admin_req, values) # create 2 sample artifacts for user 1 art1 = self.controller.create( user1_req, 'sample_artifact', {'name': 'art1'}) art2 = self.controller.create( user1_req, 'sample_artifact', {'name': 'art2'}) # create 3 images for user1 img1 = self.controller.create( user1_req, 'images', {'name': 'img1'}) img2 = self.controller.create( user1_req, 'images', {'name': 'img2'}) img3 = self.controller.create( user1_req, 'images', {'name': 'img3'}) # upload to art1 fails now because of type limit self.assertRaises( exception.RequestEntityTooLarge, self.controller.upload_blob, user1_req, 'sample_artifact', art1['id'], 'blob', BytesIO(b'a' * 301), 'application/octet-stream', 301) # upload to img1 fails now because of global limit self.assertRaises( exception.RequestEntityTooLarge, self.controller.upload_blob, user1_req, 'images', img1['id'], 'image', BytesIO(b'a' * 1001), 'application/octet-stream', 1001) # upload 300 bytes to 'blob' of art1 self.controller.upload_blob( user1_req, 'sample_artifact', art1['id'], 'blob', BytesIO(b'a' * 300), 'application/octet-stream', content_length=300) # upload another blob to art1 fails because of type limit self.assertRaises( exception.RequestEntityTooLarge, self.controller.upload_blob, user1_req, 'sample_artifact', art1['id'], 'dict_of_blobs/blob', BytesIO(b'a'), 'application/octet-stream', 1) # upload to art2 fails now because of type limit self.assertRaises( exception.RequestEntityTooLarge, self.controller.upload_blob, user1_req, 'sample_artifact', art2['id'], 'blob', BytesIO(b'a'), 'application/octet-stream', 1) # delete art1 and check that upload to art2 works self.controller.delete(user1_req, 'sample_artifact', art1['id']) self.controller.upload_blob( user1_req, 'sample_artifact', art2['id'], 'blob', BytesIO(b'a' * 300), 'application/octet-stream', 300) # upload 700 bytes to img1 works self.controller.upload_blob( user1_req, 'images', img1['id'], 'image', BytesIO(b'a' * 700), 'application/octet-stream', 700) # upload to img2 fails because of global limit self.assertRaises( exception.RequestEntityTooLarge, self.controller.upload_blob, user1_req, 'images', img2['id'], 'image', BytesIO(b'a'), 'application/octet-stream', 1) # user2 can upload data to images img1 = self.controller.create( user2_req, 'images', {'name': 'img1'}) self.controller.upload_blob( user2_req, 'images', img1['id'], 'image', BytesIO(b'a' * 1000), 'application/octet-stream', 1000) # disable global limit and try upload data from user1 again values = { user1_req.context.tenant: { "max_uploaded_data:images": 1500, "max_uploaded_data:sample_artifact": 300, "max_uploaded_data:murano_packages": 1000, "max_uploaded_data": -1 } } self.controller.set_quotas(admin_req, values) self.controller.upload_blob( user1_req, 'images', img2['id'], 'image', BytesIO(b'a' * 800), 'application/octet-stream', 800) # uploading more fails because of image type limit self.assertRaises( exception.RequestEntityTooLarge, self.controller.upload_blob, user1_req, 'images', img3['id'], 'image', BytesIO(b'a'), 'application/octet-stream', 1) # disable type limit and try upload data from user1 again values = { user1_req.context.tenant: { "max_uploaded_data:images": -1, "max_uploaded_data:sample_artifact": 300, "max_uploaded_data:murano_packages": 1000, "max_uploaded_data": -1 } } self.controller.set_quotas(admin_req, values) self.controller.upload_blob( user1_req, 'images', img3['id'], 'image', BytesIO(b'a' * 1000), 'application/octet-stream', 1000) def test_quota_upload_no_content_length(self): user1_req = self.get_fake_request(self.users['user1']) user2_req = self.get_fake_request(self.users['user2']) admin_req = self.get_fake_request(self.users['admin']) values = { user1_req.context.tenant: { "max_uploaded_data:sample_artifact": 20, "max_uploaded_data": 5 }, user2_req.context.tenant: { "max_uploaded_data:sample_artifact": 7, "max_uploaded_data": -1 }, admin_req.context.tenant: { "max_uploaded_data:sample_artifact": -1, "max_uploaded_data": -1 } } # define several quotas self.controller.set_quotas(admin_req, values) # create a sample artifacts for user 1 art1 = self.controller.create( user1_req, 'sample_artifact', {'name': 'art1'}) # Max small_blob size is 10. User1 global quota is 5. # Since user doesn't specify how many bytes he wants to upload, # engine can't verify it before upload. Therefore it allocates # 5 available bytes for user and begins upload. If uploaded data # amount exceeds this limit RequestEntityTooLarge is raised and # upload fails. with mock.patch( 'glare.common.store_api.save_blob_to_store', side_effect=store_api.save_blob_to_store) as mocked_save: data = BytesIO(b'a' * 10) self.assertRaises( exception.RequestEntityTooLarge, self.controller.upload_blob, user1_req, 'sample_artifact', art1['id'], 'small_blob', data, 'application/octet-stream', content_length=None) mocked_save.assert_called_once_with( mock.ANY, data, user1_req.context, 5, store_type='database') # check that blob wasn't uploaded self.assertIsNone( self.controller.show( user1_req, 'sample_artifact', art1['id'])['small_blob']) # try to upload with smaller amount that doesn't exceeds quota with mock.patch( 'glare.common.store_api.save_blob_to_store', side_effect=store_api.save_blob_to_store) as mocked_save: data = BytesIO(b'a' * 4) self.controller.upload_blob( user1_req, 'sample_artifact', art1['id'], 'small_blob', data, 'application/octet-stream', content_length=None) mocked_save.assert_called_once_with( mock.ANY, data, user1_req.context, 5, store_type='database') # check that blob was uploaded blob = self.controller.show( user1_req, 'sample_artifact', art1['id'])['small_blob'] self.assertEqual(4, blob['size']) self.assertEqual('active', blob['status']) # create a sample artifacts for user 2 art2 = self.controller.create( user2_req, 'sample_artifact', {'name': 'art2'}) # Max small_blob size is 10. User1 has no global quota, but his # type quota is 7. # Since user doesn't specify how many bytes he wants to upload, # engine can't verify it before upload. Therefore it allocates # 7 available bytes for user and begins upload. If uploaded data # amount exceeds this limit RequestEntityTooLarge is raised and # upload fails. with mock.patch( 'glare.common.store_api.save_blob_to_store', side_effect=store_api.save_blob_to_store) as mocked_save: data = BytesIO(b'a' * 10) self.assertRaises( exception.RequestEntityTooLarge, self.controller.upload_blob, user2_req, 'sample_artifact', art2['id'], 'small_blob', data, 'application/octet-stream', content_length=None) mocked_save.assert_called_once_with( mock.ANY, data, user2_req.context, 7, store_type='database') # check that blob wasn't uploaded self.assertIsNone( self.controller.show( user2_req, 'sample_artifact', art2['id'])['small_blob']) # try to upload with smaller amount that doesn't exceeds quota with mock.patch( 'glare.common.store_api.save_blob_to_store', side_effect=store_api.save_blob_to_store) as mocked_save: data = BytesIO(b'a' * 7) self.controller.upload_blob( user2_req, 'sample_artifact', art2['id'], 'small_blob', data, 'application/octet-stream', content_length=None) mocked_save.assert_called_once_with( mock.ANY, data, user2_req.context, 7, store_type='database') # check that blob was uploaded blob = self.controller.show( user2_req, 'sample_artifact', art2['id'])['small_blob'] self.assertEqual(7, blob['size']) self.assertEqual('active', blob['status']) # create a sample artifacts for admin arta = self.controller.create( user2_req, 'sample_artifact', {'name': 'arta'}) # Max small_blob size is 10. Admin has no quotas at all. # Since admin doesn't specify how many bytes he wants to upload, # engine can't verify it before upload. Therefore it allocates # 10 available bytes (max allowed small_blob size) for him and begins # upload. If uploaded data amount exceeds this limit # RequestEntityTooLarge is raised and upload fails. with mock.patch( 'glare.common.store_api.save_blob_to_store', side_effect=store_api.save_blob_to_store) as mocked_save: data = BytesIO(b'a' * 11) self.assertRaises( exception.RequestEntityTooLarge, self.controller.upload_blob, admin_req, 'sample_artifact', arta['id'], 'small_blob', data, 'application/octet-stream', content_length=None) mocked_save.assert_called_once_with( mock.ANY, data, admin_req.context, 10, store_type='database') # check that blob wasn't uploaded self.assertIsNone( self.controller.show( admin_req, 'sample_artifact', arta['id'])['small_blob']) # try to upload with smaller amount that doesn't exceeds quota with mock.patch( 'glare.common.store_api.save_blob_to_store', side_effect=store_api.save_blob_to_store) as mocked_save: data = BytesIO(b'a' * 10) self.controller.upload_blob( admin_req, 'sample_artifact', arta['id'], 'small_blob', data, 'application/octet-stream', content_length=None) mocked_save.assert_called_once_with( mock.ANY, data, admin_req.context, 10, store_type='database') # check that blob was uploaded blob = self.controller.show( admin_req, 'sample_artifact', arta['id'])['small_blob'] self.assertEqual(10, blob['size']) self.assertEqual('active', blob['status']) glare-0.5.0/glare/tests/unit/test_store_api.py000066400000000000000000000066621317401036700214330ustar00rootroot00000000000000# Copyright 2017 - Nokia Networks # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os import tempfile import mock from six import BytesIO from glare.common import exception as exc from glare.common import store_api from glare.tests.unit import base from glare.tests import utils class TestStoreAPI(base.BaseTestArtifactAPI): def test_read_data_filesystem(self): # test local read from temp file tfd, path = tempfile.mkstemp() try: os.write(tfd, b'a' * 1000) flobj = store_api.load_from_store( "file://" + path, self.req.context ) self.assertEqual(b'a' * 1000, store_api.read_data(flobj)) flobj = store_api.load_from_store( "file://" + path, self.req.context ) self.assertRaises(exc.RequestEntityTooLarge, store_api.read_data, flobj, limit=999) finally: os.remove(path) def test_read_data_database(self): # test read from sql object values = {'name': 'ttt', 'version': '1.0'} self.sample_artifact = self.controller.create( self.req, 'sample_artifact', values) self.controller.upload_blob( self.req, 'sample_artifact', self.sample_artifact['id'], 'blob', BytesIO(b'a' * 100), 'application/octet-stream') flobj = self.controller.download_blob( self.req, 'sample_artifact', self.sample_artifact['id'], 'blob') self.assertEqual(b'a' * 100, store_api.read_data(flobj['data'])) flobj = self.controller.download_blob( self.req, 'sample_artifact', self.sample_artifact['id'], 'blob') self.assertRaises(exc.RequestEntityTooLarge, store_api.read_data, flobj['data'], limit=99) def test_read_data_http(self): request = mock.patch('requests.Session.request') try: self.request = request.start() self.request.return_value = utils.fake_response( content=b'a' * 1000) # test read from external http flobj = store_api.load_from_store( 'http://localhost/test_file.txt', self.req.context ) self.assertEqual(1000, len(store_api.read_data(flobj))) finally: request.stop() def test_read_data_http_too_large_data(self): request = mock.patch('requests.Session.request') try: self.request = request.start() self.request.return_value = utils.fake_response( content=b'a' * 1000) flobj = store_api.load_from_store( 'http://localhost/test_file.txt', self.req.context ) self.assertRaises(exc.RequestEntityTooLarge, store_api.read_data, flobj, limit=999) finally: request.stop() glare-0.5.0/glare/tests/unit/test_unpacking.py000066400000000000000000000065121317401036700214170ustar00rootroot00000000000000# Copyright 2017 - Nokia Networks # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os from time import time from glare.tests.unit import base class TestArtifactHooks(base.BaseTestArtifactAPI): def setUp(self): super(TestArtifactHooks, self).setUp() values = {'name': 'ttt', 'version': '1.0'} self.unpacking_artifact = self.controller.create( self.req, 'unpacking_artifact', values) def test_unpacking(self): var_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), '../', 'var')) data_path = os.path.join(var_dir, 'hooks.zip') with open(data_path, "rb") as data: self.controller.upload_blob( self.req, 'unpacking_artifact', self.unpacking_artifact['id'], 'zip', data, 'application/octet-stream') artifact = self.controller.show(self.req, 'unpacking_artifact', self.unpacking_artifact['id']) self.assertEqual(818, artifact['zip']['size']) self.assertEqual('active', artifact['zip']['status']) self.assertEqual(11, artifact['content']['aaa.txt']['size']) self.assertEqual(11, artifact['content']['folder1/bbb.txt']['size']) self.assertEqual( 11, artifact['content']['folder1/folder2/ccc.txt']['size']) def test_unpacking_database(self): self.config(default_store='database', group='artifact_type:unpacking_artifact') self.test_unpacking() def test_unpacking_big_archive(self): var_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), '../', 'var')) data_path = os.path.join(var_dir, 'hooks_100.zip') # play rally - test that this test should pass faster than 3 seconds start = time() with open(data_path, "rb") as data: self.controller.upload_blob( self.req, 'unpacking_artifact', self.unpacking_artifact['id'], 'zip', data, 'application/octet-stream') end = time() self.assertIs(True, (end - start) < 3, (end - start)) artifact = self.controller.show(self.req, 'unpacking_artifact', self.unpacking_artifact['id']) self.assertEqual(15702, artifact['zip']['size']) self.assertEqual('active', artifact['zip']['status']) self.assertEqual(100, len(artifact['content'])) for blob in artifact['content'].values(): self.assertEqual('active', blob['status']) self.assertEqual(15, blob['size']) def test_unpacking_database_big_archive(self): self.config(default_store='database', group='artifact_type:unpacking_artifact') self.test_unpacking_big_archive() glare-0.5.0/glare/tests/unit/test_utils.py000066400000000000000000000246211317401036700206010ustar00rootroot00000000000000# Copyright 2016 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import os import tempfile import mock from OpenSSL import crypto import six from glare.common import exception as exc from glare.common import utils from glare.tests.unit import base class TestUtils(base.BaseTestCase): """Test class for glare.common.utils""" def test_validate_quotes(self): self.assertIsNone(utils.validate_quotes('"classic"')) self.assertIsNone(utils.validate_quotes('This is a good string')) self.assertIsNone(utils.validate_quotes ('"comma after quotation mark should work",')) self.assertIsNone(utils.validate_quotes (',"comma before quotation mark should work"')) self.assertIsNone(utils.validate_quotes('"we have quotes \\" inside"')) def test_validate_quotes_negative(self): self.assertRaises(exc.InvalidParameterValue, utils.validate_quotes, 'not_comma"blabla"') self.assertRaises(exc.InvalidParameterValue, utils.validate_quotes, '"No comma after quotation mark"Not_comma') self.assertRaises(exc.InvalidParameterValue, utils.validate_quotes, '"The quote is not closed') def test_no_4bytes_params(self): @utils.no_4byte_params def test_func(*args, **kwargs): return args, kwargs bad_char = u'\U0001f62a' # params without 4bytes unicode are okay args, kwargs = test_func('val1', param='val2') self.assertEqual(('val1',), args) self.assertEqual({'param': 'val2'}, kwargs) # test various combinations with bad param self.assertRaises(exc.BadRequest, test_func, bad_char) self.assertRaises(exc.BadRequest, test_func, **{bad_char: 'val1'}) self.assertRaises(exc.BadRequest, test_func, **{'param': bad_char}) class TestReaders(base.BaseTestCase): """Test various readers in glare.common.utils""" def test_cooperative_reader_iterator(self): """Ensure cooperative reader class accesses all bytes of file""" BYTES = 1024 bytes_read = 0 with tempfile.TemporaryFile('w+') as tmp_fd: tmp_fd.write('*' * BYTES) tmp_fd.seek(0) for chunk in utils.CooperativeReader(tmp_fd): bytes_read += len(chunk) self.assertEqual(BYTES, bytes_read) def test_cooperative_reader_explicit_read(self): BYTES = 1024 bytes_read = 0 with tempfile.TemporaryFile('w+') as tmp_fd: tmp_fd.write('*' * BYTES) tmp_fd.seek(0) reader = utils.CooperativeReader(tmp_fd) byte = reader.read(1) while len(byte) != 0: bytes_read += 1 byte = reader.read(1) self.assertEqual(BYTES, bytes_read) def test_cooperative_reader_no_read_method(self): BYTES = 1024 stream = [b'*'] * BYTES reader = utils.CooperativeReader(stream) bytes_read = 0 byte = reader.read() while len(byte) != 0: bytes_read += 1 byte = reader.read() self.assertEqual(BYTES, bytes_read) # some data may be left in the buffer reader = utils.CooperativeReader(stream) reader.buffer = 'some data' buffer_string = reader.read() self.assertEqual('some data', buffer_string) def test_cooperative_reader_no_read_method_buffer_size(self): # Decrease buffer size to 1000 bytes to test its overflow with mock.patch('glare.common.utils.MAX_COOP_READER_BUFFER_SIZE', 1000): BYTES = 1024 stream = [b'*'] * BYTES reader = utils.CooperativeReader(stream) # Reading 1001 bytes to the buffer leads to 413 error self.assertRaises(exc.RequestEntityTooLarge, reader.read, 1001) def test_cooperative_reader_of_iterator(self): """Ensure cooperative reader supports iterator backends too""" data = b'abcdefgh' data_list = [data[i:i + 1] * 3 for i in range(len(data))] reader = utils.CooperativeReader(data_list) chunks = [] while True: chunks.append(reader.read(3)) if chunks[-1] == b'': break meat = b''.join(chunks) self.assertEqual(b'aaabbbcccdddeeefffggghhh', meat) def test_cooperative_reader_of_iterator_stop_iteration_err(self): """Ensure cooperative reader supports iterator backends too""" reader = utils.CooperativeReader([l * 3 for l in '']) chunks = [] while True: chunks.append(reader.read(3)) if chunks[-1] == b'': break meat = b''.join(chunks) self.assertEqual(b'', meat) def _create_generator(self, chunk_size, max_iterations): chars = b'abc' iteration = 0 while True: index = iteration % len(chars) chunk = chars[index:index + 1] * chunk_size yield chunk iteration += 1 if iteration >= max_iterations: raise StopIteration() def _test_reader_chunked(self, chunk_size, read_size, max_iterations=5): generator = self._create_generator(chunk_size, max_iterations) reader = utils.CooperativeReader(generator) result = bytearray() while True: data = reader.read(read_size) if len(data) == 0: break self.assertLessEqual(len(data), read_size) result += data expected = (b'a' * chunk_size + b'b' * chunk_size + b'c' * chunk_size + b'a' * chunk_size + b'b' * chunk_size) self.assertEqual(expected, bytes(result)) def test_cooperative_reader_preserves_size_chunk_less_then_read(self): self._test_reader_chunked(43, 101) def test_cooperative_reader_preserves_size_chunk_equals_read(self): self._test_reader_chunked(1024, 1024) def test_cooperative_reader_preserves_size_chunk_more_then_read(self): chunk_size = 16 * 1024 * 1024 # 16 Mb, as in remote http source read_size = 8 * 1024 # 8k, as in httplib self._test_reader_chunked(chunk_size, read_size) def test_limiting_reader(self): """Ensure limiting reader class accesses all bytes of file""" BYTES = 1024 bytes_read = 0 data = six.BytesIO(b"*" * BYTES) for chunk in utils.LimitingReader(data, BYTES): bytes_read += len(chunk) self.assertEqual(BYTES, bytes_read) bytes_read = 0 data = six.BytesIO(b"*" * BYTES) reader = utils.LimitingReader(data, BYTES) byte = reader.read(1) while len(byte) != 0: bytes_read += 1 byte = reader.read(1) self.assertEqual(BYTES, bytes_read) def test_limiting_reader_fails(self): """Ensure limiting reader class throws exceptions if limit exceeded""" BYTES = 1024 def _consume_all_iter(): bytes_read = 0 data = six.BytesIO(b"*" * BYTES) for chunk in utils.LimitingReader(data, BYTES - 1): bytes_read += len(chunk) self.assertRaises(exc.RequestEntityTooLarge, _consume_all_iter) def _consume_all_read(): bytes_read = 0 data = six.BytesIO(b"*" * BYTES) reader = utils.LimitingReader(data, BYTES - 1) byte = reader.read(1) while len(byte) != 0: bytes_read += 1 byte = reader.read(1) self.assertRaises(exc.RequestEntityTooLarge, _consume_all_read) def test_blob_iterator(self): BYTES = 1024 bytes_read = 0 stream = [b'*'] * BYTES for chunk in utils.BlobIterator(stream, 64): bytes_read += len(chunk) self.assertEqual(BYTES, bytes_read) class TestKeyCert(base.BaseTestCase): def test_validate_key_cert_key(self): var_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), '../', 'var')) keyfile = os.path.join(var_dir, 'privatekey.key') certfile = os.path.join(var_dir, 'certificate.crt') utils.validate_key_cert(keyfile, certfile) def test_validate_key_cert_no_private_key(self): with tempfile.NamedTemporaryFile('w+') as tmpf: self.assertRaises(RuntimeError, utils.validate_key_cert, "/not/a/file", tmpf.name) def test_validate_key_cert_cert_cant_read(self): with tempfile.NamedTemporaryFile('w+') as keyf: with tempfile.NamedTemporaryFile('w+') as certf: os.chmod(certf.name, 0) self.assertRaises(RuntimeError, utils.validate_key_cert, keyf.name, certf.name) def test_validate_key_cert_key_cant_read(self): with tempfile.NamedTemporaryFile('w+') as keyf: with tempfile.NamedTemporaryFile('w+') as certf: os.chmod(keyf.name, 0) self.assertRaises(RuntimeError, utils.validate_key_cert, keyf.name, certf.name) def test_validate_key_cert_key_crypto_error(self): var_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), '../', 'var')) keyfile = os.path.join(var_dir, 'privatekey.key') certfile = os.path.join(var_dir, 'certificate.crt') with mock.patch('OpenSSL.crypto.verify', side_effect=crypto.Error): self.assertRaises(RuntimeError, utils.validate_key_cert, keyfile, certfile) glare-0.5.0/glare/tests/unit/test_validation_hooks.py000066400000000000000000000127721317401036700230020ustar00rootroot00000000000000# Copyright 2017 - Nokia Networks # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import tempfile from six import BytesIO from glare.tests.unit import base class TestArtifactHooks(base.BaseTestArtifactAPI): def test_create_hook(self): values = {'name': 'ttt', 'version': '1.0', 'temp_dir': self.test_dir} art = self.controller.create(self.req, 'hooks_artifact', values) self.assertEqual(self.test_dir, art['temp_dir']) self.assertIsNotNone(art['temp_file_path_create']) with open(art['temp_file_path_create']) as f: self.assertEqual('pre_create_hook was called\n', f.readline()) self.assertEqual('post_create_hook was called\n', f.readline()) def test_update_ops_hook(self): self.req = self.get_fake_request(user=self.users['admin']) values = {'name': 'ttt', 'version': '1.0', 'temp_dir': self.test_dir} art = self.controller.create(self.req, 'hooks_artifact', values) self.assertEqual(self.test_dir, art['temp_dir']) changes = [{'op': 'replace', 'path': '/description', 'value': 'some_string'}, {'op': 'replace', 'path': '/status', 'value': 'active'}, {'op': 'replace', 'path': '/status', 'value': 'deactivated'}, {'op': 'replace', 'path': '/status', 'value': 'active'}, {'op': 'replace', 'path': '/visibility', 'value': 'public'}] art = self.update_with_values(changes, art_type='hooks_artifact', art_id=art['id']) self.assertEqual('active', art['status']) self.assertEqual('some_string', art['description']) self.assertEqual('public', art['visibility']) actions = ['update', 'activate', 'deactivate', 'reactivate', 'publish'] for action in actions: with open(art['temp_file_path_%s' % action]) as f: self.assertEqual('pre_%s_hook was called\n' % action, f.readline()) self.assertEqual('post_%s_hook was called\n' % action, f.readline()) def test_upload_download_hooks(self): temp_file_path = tempfile.mktemp(dir=self.test_dir) self.config(temp_file_path=temp_file_path, group='artifact_type:hooks_artifact') values = {'name': 'ttt', 'version': '1.0', 'temp_dir': self.test_dir} art = self.controller.create(self.req, 'hooks_artifact', values) art = self.controller.upload_blob( self.req, 'hooks_artifact', art['id'], 'blob', BytesIO(b'aaa'), 'application/octet-stream') self.assertEqual(3, art['blob']['size']) self.assertEqual('active', art['blob']['status']) self.controller.download_blob( self.req, 'hooks_artifact', art['id'], 'blob') with open(temp_file_path) as f: self.assertEqual('pre_upload_hook was called\n', f.readline()) self.assertEqual('post_upload_hook was called\n', f.readline()) self.assertEqual('pre_download_hook was called\n', f.readline()) self.assertEqual('post_download_hook was called\n', f.readline()) def test_add_location_hook(self): temp_file_path = tempfile.mktemp(dir=self.test_dir) self.config(temp_file_path=temp_file_path, group='artifact_type:hooks_artifact') values = {'name': 'ttt', 'version': '1.0', 'temp_dir': self.test_dir} art = self.controller.create(self.req, 'hooks_artifact', values) ct = 'application/vnd+openstack.glare-custom-location+json' body = {'url': 'https://FAKE_LOCATION.com', 'md5': "fake", 'sha1': "fake_sha", "sha256": "fake_sha256"} art = self.controller.upload_blob( self.req, 'hooks_artifact', art['id'], 'blob', body, ct) self.assertIsNone(art['blob']['size']) self.assertEqual('active', art['blob']['status']) # hook isn't called if we download external location self.controller.download_blob( self.req, 'hooks_artifact', art['id'], 'blob') with open(temp_file_path) as f: self.assertEqual( 'pre_add_location_hook was called\n', f.readline()) self.assertEqual( 'post_add_location_hook was called\n', f.readline()) def test_delete_hook(self): temp_file_path = tempfile.mktemp(dir=self.test_dir) self.config(temp_file_path=temp_file_path, group='artifact_type:hooks_artifact') values = {'name': 'ttt', 'version': '1.0', 'temp_dir': self.test_dir} art = self.controller.create(self.req, 'hooks_artifact', values) self.controller.delete(self.req, 'hooks_artifact', art['id']) with open(temp_file_path) as f: self.assertEqual('pre_delete_hook was called\n', f.readline()) self.assertEqual('post_delete_hook was called\n', f.readline()) glare-0.5.0/glare/tests/unit/test_validators.py000066400000000000000000000334051317401036700216110ustar00rootroot00000000000000# Copyright 2017 - Nokia Networks # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from oslo_versionedobjects import fields from glare.objects.meta import fields as glare_fields from glare.objects.meta import validators from glare.tests.unit import base class TestValidators(base.BaseTestArtifactAPI): """Class for testing field validators.""" def test_uuid(self): # test if applied string is uuid4 validator = validators.UUID() # valid string - no exception validator('167f8083-6bef-4f37-bf04-250343a2d53c') # invalid string - ValueError self.assertRaises(ValueError, validator, 'INVALID') # only strings can be applied as values self.assertEqual((fields.StringField,), validators.UUID.get_allowed_types()) self.assertEqual( {'pattern': ('^([0-9a-fA-F]){8}-([0-9a-fA-F]){4}-([0-9a-fA-F])' '{4}-([0-9a-fA-F]){4}-([0-9a-fA-F]){12}$')}, validator.to_jsonschema()) def test_regex(self): # test regex '^([0-9a-fA-F]){8}$' validator = validators.Regex('^([0-9a-fA-F]){8}$') # valid string - no exception validator('167f8083') # invalid string - ValueError self.assertRaises(ValueError, validator, 'INVALID') self.assertRaises(ValueError, validator, '167f808Z') self.assertRaises(ValueError, validator, '167f80835') # only strings can be applied as values self.assertEqual((fields.StringField,), validators.UUID.get_allowed_types()) self.assertEqual( {'pattern': '^([0-9a-fA-F]){8}$'}, validator.to_jsonschema()) def test_allowed_values(self): # test that field may have preoccupied values validator_s = validators.AllowedValues(['aaa', 'bbb']) validator_i = validators.AllowedValues([1, 2, 3]) validator_f = validators.AllowedValues([1.0, 2.0, 3.0]) # allowed value - no exception validator_s('aaa') validator_s('bbb') validator_i(1) validator_i(3) validator_f(1.0) validator_f(3.0) # not allowed value - value error self.assertRaises(ValueError, validator_s, 'a') self.assertRaises(ValueError, validator_i, 4) self.assertRaises(ValueError, validator_f, 4.0) # only strings, integers and floats can be applied as values self.assertEqual( (fields.StringField, fields.IntegerField, fields.FloatField), validators.AllowedValues.get_allowed_types()) self.assertEqual({'enum': ['aaa', 'bbb']}, validator_s.to_jsonschema()) self.assertEqual({'enum': [1, 2, 3]}, validator_i.to_jsonschema()) self.assertEqual({'enum': [1.0, 2.0, 3.0]}, validator_f.to_jsonschema()) def test_max_str_len(self): # test max allowed string length validator = validators.MaxStrLen(10) # allowed length - no exception validator('a' * 10) validator('') # too long string - value error self.assertRaises(ValueError, validator, 'a' * 11) # only strings can be applied as values self.assertEqual((fields.StringField,), validators.MaxStrLen.get_allowed_types()) self.assertEqual({'maxLength': 10}, validator.to_jsonschema()) def test_min_str_len(self): # test min allowed string length validator = validators.MinStrLen(10) # allowed length - no exception validator('a' * 10) # too short string - value error self.assertRaises(ValueError, validator, 'a' * 9) self.assertRaises(ValueError, validator, '') # only strings can be applied as values self.assertEqual((fields.StringField,), validators.MinStrLen.get_allowed_types()) self.assertEqual({'minLength': 10}, validator.to_jsonschema()) def test_forbidden_chars(self): # test that string has no forbidden chars validator = validators.ForbiddenChars(['a', '?']) # allowed length - no exception validator('b' * 10) # string contains forbidden chars - value error self.assertRaises(ValueError, validator, 'abc') self.assertRaises(ValueError, validator, '?') # only strings can be applied as values self.assertEqual((fields.StringField,), validators.ForbiddenChars.get_allowed_types()) self.assertEqual({'pattern': '^[^a?]+$'}, validator.to_jsonschema()) def test_max_dict_size(self): # test max dict size validator = validators.MaxDictSize(3) # allowed size - no exception validator({'a': 1, 'b': 2, 'c': 3}) validator({}) # too big dictionary - value error self.assertRaises(ValueError, validator, {'a': 1, 'b': 2, 'c': 3, 'd': 4}) # only dicts can be applied as values self.assertEqual((glare_fields.Dict,), validators.MaxDictSize.get_allowed_types()) self.assertEqual({'maxProperties': 3}, validator.to_jsonschema()) def test_min_dict_size(self): # test min dict size validator = validators.MinDictSize(3) # allowed size - no exception validator({'a': 1, 'b': 2, 'c': 3}) # too small dictionary - value error self.assertRaises(ValueError, validator, {'a': 1, 'b': 2}) self.assertRaises(ValueError, validator, {}) # only dicts can be applied as values self.assertEqual((glare_fields.Dict,), validators.MinDictSize.get_allowed_types()) self.assertEqual({'minProperties': 3}, validator.to_jsonschema()) def test_max_list_size(self): # test max list size validator = validators.MaxListSize(3) # allowed size - no exception validator(['a', 'b', 'c']) validator([]) # too big list - value error self.assertRaises(ValueError, validator, ['a', 'b', 'c', 'd']) # only lists can be applied as values self.assertEqual((glare_fields.List,), validators.MaxListSize.get_allowed_types()) self.assertEqual({'maxItems': 3}, validator.to_jsonschema()) def test_min_list_size(self): # test max list size validator = validators.MinListSize(3) # allowed size - no exception validator(['a', 'b', 'c']) # too small list - value error self.assertRaises(ValueError, validator, ['a', 'b']) self.assertRaises(ValueError, validator, []) # only lists can be applied as values self.assertEqual((glare_fields.List,), validators.MinListSize.get_allowed_types()) self.assertEqual({'minItems': 3}, validator.to_jsonschema()) def test_max_number_size(self): # test max number size validator = validators.MaxNumberSize(10) # allowed size - no exception validator(10) validator(0) validator(10.0) validator(0.0) # too big number - value error self.assertRaises(ValueError, validator, 11) self.assertRaises(ValueError, validator, 10.1) # only integers and floats can be applied as values self.assertEqual((fields.IntegerField, fields.FloatField), validators.MaxNumberSize.get_allowed_types()) self.assertEqual({'maximum': 10}, validator.to_jsonschema()) def test_min_number_size(self): # test min number size validator = validators.MinNumberSize(10) # allowed size - no exception validator(10) validator(10.0) # too small number - value error self.assertRaises(ValueError, validator, 9) self.assertRaises(ValueError, validator, 9.9) self.assertRaises(ValueError, validator, 0) self.assertRaises(ValueError, validator, 0) # only integers and floats can be applied as values self.assertEqual((fields.IntegerField, fields.FloatField), validators.MinNumberSize.get_allowed_types()) self.assertEqual({'minimum': 10}, validator.to_jsonschema()) def test_unique(self): # test uniqueness of list elements # validator raises exception in case of duplicates in the list validator = validators.Unique() # non strict validator removes duplicates without raising of ValueError validator_nonstrict = validators.Unique(convert_to_set=True) # all elements unique - no exception validator(['a', 'b', 'c']) validator([]) # duplicates in the list - value error self.assertRaises(ValueError, validator, ['a', 'a', 'b']) # non-strict validator converts list to set of elements l = ['a', 'a', 'b'] validator_nonstrict(l) self.assertEqual({'a', 'b'}, set(l)) # only lists can be applied as values self.assertEqual((glare_fields.List,), validators.Unique.get_allowed_types()) self.assertEqual({'uniqueItems': True}, validator.to_jsonschema()) def test_allowed_dict_keys(self): # test that dictionary contains only allowed keys validator = validators.AllowedDictKeys(['aaa', 'bbb', 'ccc']) # only allowed keys - no exception validator({'aaa': 5, 'bbb': 6}) validator({}) # if dictionary has other keys - value error self.assertRaises(ValueError, validator, {'aaa': 5, 'a': 7, 'bbb': 6}) # only dicts can be applied as values self.assertEqual((glare_fields.Dict,), validators.AllowedDictKeys.get_allowed_types()) self.assertEqual({'properties': {'aaa': {}, 'bbb': {}, 'ccc': {}}}, validator.to_jsonschema()) def test_required_dict_keys(self): # test that dictionary has required keys validator = validators.RequiredDictKeys(['aaa', 'bbb']) # if dict has required keys - no exception validator({'aaa': 5, 'bbb': 6}) validator({'aaa': 5, 'bbb': 6, 'ccc': 7}) # in other case - value error self.assertRaises(ValueError, validator, {'aaa': 5, 'a': 7}) self.assertRaises(ValueError, validator, {}) # only dicts can be applied as values self.assertEqual((glare_fields.Dict,), validators.RequiredDictKeys.get_allowed_types()) self.assertEqual({'required': ['aaa', 'bbb']}, validator.to_jsonschema()) def test_max_dict_key_len(self): # test max limit for dict key length validator = validators.MaxDictKeyLen(5) # if key length less than the limit - no exception validator({'aaaaa': 5, 'bbbbb': 4}) # in other case - value error self.assertRaises(ValueError, validator, {'aaaaaa': 5, 'a': 7}) # only dicts can be applied as values self.assertEqual((glare_fields.Dict,), validators.MaxDictKeyLen.get_allowed_types()) def test_mix_dict_key_len(self): # test min limit for dict key length validator = validators.MinDictKeyLen(5) # if key length bigger than the limit - no exception validator({'aaaaa': 5, 'bbbbb': 4}) # in other case - value error self.assertRaises(ValueError, validator, {'aaaaa': 5, 'a': 7}) # only dicts can be applied as values self.assertEqual((glare_fields.Dict,), validators.MinDictKeyLen.get_allowed_types()) def test_allowed_list_values(self): # test that list contains only allowed values # AllowedValues validator will be applied to each element of the list validator = validators.ListElementValidator( [validators.AllowedValues(['aaa', 'bbb', 'ccc'])]) # only allowed values - no exception validator(['aaa', 'bbb']) validator([]) # if list has other values - value error self.assertRaises(ValueError, validator, ['aaa', 'a', 'bbb']) self.assertRaises(ValueError, validator, ['ccc', {'aaa': 'bbb'}]) # only lists can be applied as values self.assertEqual((glare_fields.List,), validators.ListElementValidator.get_allowed_types()) self.assertEqual({'itemValidators': [{'enum': ['aaa', 'bbb', 'ccc']}]}, validator.to_jsonschema()) def test_allowed_dict_values(self): # test that dict contains only allowed values # AllowedValues validator will be applied to each element of the dict validator = validators.DictElementValidator( [validators.AllowedValues(['aaa', 'bbb', 'ccc'])]) # only allowed values - no exception validator({'a': 'aaa', 'b': 'bbb'}) validator({}) # if dict has other values - value error self.assertRaises(ValueError, validator, {'a': 'aaa', 'b': 'bbb', 'c': 'c'}) # only dict can be applied as values self.assertEqual((glare_fields.Dict,), validators.DictElementValidator.get_allowed_types()) self.assertEqual( {'propertyValidators': [{'enum': ['aaa', 'bbb', 'ccc']}]}, validator.to_jsonschema()) glare-0.5.0/glare/tests/unit/test_versions.py000066400000000000000000000055731317401036700213160ustar00rootroot00000000000000# Copyright 2016 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_serialization import jsonutils import webob from glare.api import versions from glare.tests.unit import base class VersionsTest(base.BaseTestCase): """Test the version information returned from the API service.""" def test_root_endpoint(self): req = webob.Request.blank('/', base_url='http://127.0.0.1:9494/') req.accept = 'application/json' res = versions.Controller().index(req) self.assertEqual(300, res.status_int) self.assertEqual('application/json', res.content_type) results = jsonutils.loads(res.body)['versions'] expected = [ {'links': [{'href': 'http://docs.openstack.org/', 'rel': 'describedby', 'type': 'text/html'}], 'media-type': 'application/vnd.openstack.artifacts-1.0', 'status': 'STABLE', 'version': '1.0'}, {'links': [{'href': 'http://docs.openstack.org/', 'rel': 'describedby', 'type': 'text/html'}], 'media-type': 'application/vnd.openstack.artifacts-1.1', 'status': 'EXPERIMENTAL', 'version': '1.1'}] self.assertEqual(expected, results) def test_versions_endpoint(self): req = webob.Request.blank('/versions', base_url='http://127.0.0.1:9494/') req.accept = 'application/json' res = versions.Controller().index(req) self.assertEqual(300, res.status_int) self.assertEqual('application/json', res.content_type) results = jsonutils.loads(res.body)['versions'] expected = [ {'links': [{'href': 'http://docs.openstack.org/', 'rel': 'describedby', 'type': 'text/html'}], 'media-type': 'application/vnd.openstack.artifacts-1.0', 'status': 'STABLE', 'version': '1.0'}, {'links': [{'href': 'http://docs.openstack.org/', 'rel': 'describedby', 'type': 'text/html'}], 'media-type': 'application/vnd.openstack.artifacts-1.1', 'status': 'EXPERIMENTAL', 'version': '1.1'}] self.assertEqual(expected, results) glare-0.5.0/glare/tests/unit/test_wsgi.py000066400000000000000000000535611317401036700204170ustar00rootroot00000000000000# Copyright 2010-2011 OpenStack Foundation # Copyright 2014 IBM Corp. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import datetime import os import socket import eventlet.patcher import fixtures import mock from oslo_concurrency import processutils from oslo_serialization import jsonutils import routes import six from six.moves import http_client as http import webob from glare.api.v1 import router from glare.common import exception from glare.common import wsgi from glare import i18n from glare.tests.unit import base class RequestTest(base.BaseTestCase): def test_content_range(self): request = wsgi.Request.blank('/tests/123') request.headers["Content-Range"] = 'bytes 10-99/*' range_ = request.get_content_range() self.assertEqual(10, range_.start) self.assertEqual(100, range_.stop) # non-inclusive self.assertIsNone(range_.length) def test_content_range_invalid(self): request = wsgi.Request.blank('/tests/123') request.headers["Content-Range"] = 'bytes=0-99' self.assertRaises(webob.exc.HTTPBadRequest, request.get_content_range) def test_language_accept_default(self): request = wsgi.Request.blank('/tests/123') request.headers["Accept-Language"] = "zz-ZZ,zz;q=0.8" result = request.best_match_language() self.assertIsNone(result) def test_language_accept_none(self): request = wsgi.Request.blank('/tests/123') result = request.best_match_language() self.assertIsNone(result) def test_best_match_language_expected(self): # If Accept-Language is a supported language, best_match_language() # returns it. with mock.patch('babel.localedata.locale_identifiers', return_value=['en']): req = wsgi.Request.blank('/', headers={'Accept-Language': 'en'}) self.assertEqual('en_US', req.best_match_language()) def test_request_match_language_unexpected(self): # If Accept-Language is a language we do not support, # best_match_language() returns None. with mock.patch('babel.localedata.locale_identifiers', return_value=['en']): req = wsgi.Request.blank( '/', headers={'Accept-Language': 'Klingon'}) self.assertIsNone(req.best_match_language()) @mock.patch.object(webob.acceptparse.AcceptLanguage, 'best_match') def test_best_match_language_unknown(self, mock_best_match): # Test that we are actually invoking language negotiation by webop request = wsgi.Request.blank('/') accepted = 'unknown-lang' request.headers = {'Accept-Language': accepted} mock_best_match.return_value = None self.assertIsNone(request.best_match_language()) # If Accept-Language is missing or empty, match should be None request.headers = {'Accept-Language': ''} self.assertIsNone(request.best_match_language()) request.headers.pop('Accept-Language') self.assertIsNone(request.best_match_language()) def test_http_error_response_codes(self): """Makes sure v1 unallowed methods return 405""" unallowed_methods = [ ('/schemas', ['PUT', 'DELETE', 'HEAD', 'PATCH', 'POST']), ('/schemas/type_name', ['PUT', 'DELETE', 'HEAD', 'PATCH', 'POST']), ('/artifacts/type_name', ['PUT', 'DELETE', 'HEAD', 'PATCH']), ('/artifacts/type_name/artifact_id', ['PUT', 'HEAD', 'POST']), ('/artifacts/type_name/artifact_id/blob)name', ['HEAD', 'PATCH', 'POST']), ] api = router.API(routes.Mapper()) for uri, methods in unallowed_methods: for method in methods: req = webob.Request.blank(uri) req.method = method res = req.get_response(api) self.assertEqual(http.METHOD_NOT_ALLOWED, res.status_int) # Makes sure not implemented methods return 405 req = webob.Request.blank('/schemas/image') req.method = 'NonexistentMethod' res = req.get_response(api) self.assertEqual(http.METHOD_NOT_ALLOWED, res.status_int) class ResourceTest(base.BaseTestCase): def test_get_action_args(self): env = { 'wsgiorg.routing_args': [ None, { 'controller': None, 'format': None, 'action': 'update', 'id': 12, }, ], } expected = {'action': 'update', 'id': 12} actual = wsgi.Resource(None, None, None).get_action_args(env) self.assertEqual(expected, actual) def test_get_action_args_invalid_index(self): env = {'wsgiorg.routing_args': []} expected = {} actual = wsgi.Resource(None, None, None).get_action_args(env) self.assertEqual(expected, actual) def test_get_action_args_del_controller_error(self): actions = {'format': None, 'action': 'update', 'id': 12} env = {'wsgiorg.routing_args': [None, actions]} expected = {'action': 'update', 'id': 12} actual = wsgi.Resource(None, None, None).get_action_args(env) self.assertEqual(expected, actual) def test_get_action_args_del_format_error(self): actions = {'action': 'update', 'id': 12} env = {'wsgiorg.routing_args': [None, actions]} expected = {'action': 'update', 'id': 12} actual = wsgi.Resource(None, None, None).get_action_args(env) self.assertEqual(expected, actual) def test_dispatch(self): class Controller(object): def index(self, shirt, pants=None): return (shirt, pants) resource = wsgi.Resource(None, None, None) actual = resource.dispatch(Controller(), 'index', 'on', pants='off') expected = ('on', 'off') self.assertEqual(expected, actual) def test_dispatch_default(self): class Controller(object): def default(self, shirt, pants=None): return (shirt, pants) resource = wsgi.Resource(None, None, None) actual = resource.dispatch(Controller(), 'index', 'on', pants='off') expected = ('on', 'off') self.assertEqual(expected, actual) def test_dispatch_no_default(self): class Controller(object): def show(self, shirt, pants=None): return (shirt, pants) resource = wsgi.Resource(None, None, None) self.assertRaises(AttributeError, resource.dispatch, Controller(), 'index', 'on', pants='off') def test_call(self): class FakeController(object): def index(self, shirt, pants=None): return shirt, pants resource = wsgi.Resource(FakeController(), None, None) def dispatch(obj, *args, **kwargs): if isinstance(obj, wsgi.JSONRequestDeserializer): return [] if isinstance(obj, wsgi.JSONResponseSerializer): raise webob.exc.HTTPForbidden() with mock.patch('glare.common.wsgi.Resource.dispatch', side_effect=dispatch): request = wsgi.Request.blank('/') response = resource.__call__(request) self.assertIsInstance(response, webob.exc.HTTPForbidden) self.assertEqual(http.FORBIDDEN, response.status_code) def test_call_raises_exception(self): class FakeController(object): def index(self, shirt, pants=None): return (shirt, pants) resource = wsgi.Resource(FakeController(), None, None) with mock.patch('glare.common.wsgi.Resource.dispatch', side_effect=Exception("test exception")): request = wsgi.Request.blank('/') response = resource.__call__(request) self.assertIsInstance(response, webob.exc.HTTPInternalServerError) self.assertEqual(http.INTERNAL_SERVER_ERROR, response.status_code) @mock.patch.object(wsgi, 'translate_exception') def test_resource_call_error_handle_localized(self, mock_translate_exception): class Controller(object): def delete(self, req, identity): raise webob.exc.HTTPBadRequest(explanation='Not Found') actions = {'action': 'delete', 'identity': 12} env = {'wsgiorg.routing_args': [None, actions]} request = wsgi.Request.blank('/tests/123', environ=env) message_es = 'No Encontrado' resource = wsgi.Resource(Controller(), wsgi.JSONRequestDeserializer(), None) translated_exc = webob.exc.HTTPBadRequest(message_es) mock_translate_exception.return_value = translated_exc e = self.assertRaises(webob.exc.HTTPBadRequest, resource, request) self.assertEqual(message_es, str(e)) @mock.patch.object(webob.acceptparse.AcceptLanguage, 'best_match') @mock.patch.object(i18n, 'translate') def test_translate_exception(self, mock_translate, mock_best_match): mock_translate.return_value = 'No Encontrado' mock_best_match.return_value = 'de' req = wsgi.Request.blank('/tests/123') req.headers["Accept-Language"] = "de" e = webob.exc.HTTPNotFound(explanation='Not Found') e = wsgi.translate_exception(req, e) self.assertEqual('No Encontrado', e.explanation) def test_response_headers_encoded(self): # prepare environment for_openstack_comrades = \ u'\u0417\u0430 \u043e\u043f\u0435\u043d\u0441\u0442\u0435\u043a, ' \ u'\u0442\u043e\u0432\u0430\u0440\u0438\u0449\u0438' class FakeController(object): def index(self, shirt, pants=None): return (shirt, pants) class FakeSerializer(object): def index(self, response, result): response.headers['unicode_test'] = for_openstack_comrades # make request resource = wsgi.Resource(FakeController(), None, FakeSerializer()) actions = {'action': 'index'} env = {'wsgiorg.routing_args': [None, actions]} request = wsgi.Request.blank('/tests/123', environ=env) response = resource.__call__(request) # ensure it has been encoded correctly value = (response.headers['unicode_test'].decode('utf-8') if six.PY2 else response.headers['unicode_test']) self.assertEqual(for_openstack_comrades, value) class JSONResponseSerializerTest(base.BaseTestCase): def test_to_json(self): fixture = {"key": "value"} expected = b'{"key": "value"}' actual = wsgi.JSONResponseSerializer().to_json(fixture) self.assertEqual(expected, actual) def test_to_json_with_date_format_value(self): fixture = {"date": datetime.datetime(1901, 3, 8, 2)} expected = b'{"date": "1901-03-08T02:00:00.000000"}' actual = wsgi.JSONResponseSerializer().to_json(fixture) self.assertEqual(expected, actual) def test_to_json_with_more_deep_format(self): fixture = {"is_public": True, "name": [{"name1": "test"}]} expected = {"is_public": True, "name": [{"name1": "test"}]} actual = wsgi.JSONResponseSerializer().to_json(fixture) actual = jsonutils.loads(actual) for k in expected: self.assertEqual(expected[k], actual[k]) def test_to_json_with_set(self): fixture = set(["foo"]) expected = b'["foo"]' actual = wsgi.JSONResponseSerializer().to_json(fixture) self.assertEqual(expected, actual) def test_default(self): fixture = {"key": "value"} response = webob.Response() wsgi.JSONResponseSerializer().default(response, fixture) self.assertEqual(http.OK, response.status_int) content_types = [h for h in response.headerlist if h[0] == 'Content-Type'] self.assertEqual(1, len(content_types)) self.assertEqual('application/json', response.content_type) self.assertEqual(b'{"key": "value"}', response.body) class JSONRequestDeserializerTest(base.BaseTestCase): def test_has_body_no_content_length(self): request = wsgi.Request.blank('/') request.method = 'POST' request.body = b'asdf' request.headers.pop('Content-Length') self.assertFalse(wsgi.JSONRequestDeserializer().has_body(request)) def test_has_body_zero_content_length(self): request = wsgi.Request.blank('/') request.method = 'POST' request.body = b'asdf' request.headers['Content-Length'] = 0 self.assertFalse(wsgi.JSONRequestDeserializer().has_body(request)) def test_has_body_has_content_length(self): request = wsgi.Request.blank('/') request.method = 'POST' request.body = b'asdf' self.assertIn('Content-Length', request.headers) self.assertTrue(wsgi.JSONRequestDeserializer().has_body(request)) def test_no_body_no_content_length(self): request = wsgi.Request.blank('/') self.assertFalse(wsgi.JSONRequestDeserializer().has_body(request)) def test_from_json(self): fixture = '{"key": "value"}' expected = {"key": "value"} actual = wsgi.JSONRequestDeserializer().from_json(fixture) self.assertEqual(expected, actual) def test_from_json_malformed(self): fixture = 'kjasdklfjsklajf' self.assertRaises(webob.exc.HTTPBadRequest, wsgi.JSONRequestDeserializer().from_json, fixture) def test_default_no_body(self): request = wsgi.Request.blank('/') actual = wsgi.JSONRequestDeserializer().default(request) expected = {} self.assertEqual(expected, actual) def test_default_with_body(self): request = wsgi.Request.blank('/') request.method = 'POST' request.body = b'{"key": "value"}' actual = wsgi.JSONRequestDeserializer().default(request) expected = {"body": {"key": "value"}} self.assertEqual(expected, actual) def test_has_body_has_transfer_encoding(self): self.assertTrue(self._check_transfer_encoding( transfer_encoding='chunked')) def test_has_body_multiple_transfer_encoding(self): self.assertTrue(self._check_transfer_encoding( transfer_encoding='chunked, gzip')) def test_has_body_invalid_transfer_encoding(self): self.assertFalse(self._check_transfer_encoding( transfer_encoding='invalid', content_length=0)) def test_has_body_invalid_transfer_encoding_no_content_len_and_body(self): self.assertFalse(self._check_transfer_encoding( transfer_encoding='invalid', include_body=False)) def test_has_body_invalid_transfer_encoding_no_content_len_but_body(self): self.assertTrue(self._check_transfer_encoding( transfer_encoding='invalid', include_body=True)) def test_has_body_invalid_transfer_encoding_with_content_length(self): self.assertTrue(self._check_transfer_encoding( transfer_encoding='invalid', content_length=5)) def test_has_body_valid_transfer_encoding_with_content_length(self): self.assertTrue(self._check_transfer_encoding( transfer_encoding='chunked', content_length=1)) def test_has_body_valid_transfer_encoding_without_content_length(self): self.assertTrue(self._check_transfer_encoding( transfer_encoding='chunked')) def _check_transfer_encoding(self, transfer_encoding=None, content_length=None, include_body=True): request = wsgi.Request.blank('/') request.method = 'POST' if include_body: request.body = b'fake_body' request.headers['transfer-encoding'] = transfer_encoding if content_length is not None: request.headers['content-length'] = content_length return wsgi.JSONRequestDeserializer().has_body(request) def test_get_bind_addr_default_value(self): expected = ('0.0.0.0', '123456') actual = wsgi.get_bind_addr(default_port="123456") self.assertEqual(expected, actual) class ServerTest(base.BaseTestCase): def test_create_pool(self): """Ensure the wsgi thread pool is an eventlet.greenpool.GreenPool.""" actual = wsgi.Server(threads=1).create_pool() self.assertIsInstance(actual, eventlet.greenpool.GreenPool) @mock.patch.object(wsgi.Server, 'configure_socket') def test_http_keepalive(self, mock_configure_socket): self.config(http_keepalive=False) self.config(workers=None) server = wsgi.Server(threads=1) server.sock = 'fake_socket' # mocking eventlet.wsgi server method to check it is called with # configured 'http_keepalive' value. with mock.patch.object(eventlet.wsgi, 'server') as mock_server: fake_application = "fake-application" server.start(fake_application, 0) server.wait() mock_server.assert_called_once_with('fake_socket', fake_application, log=server._logger, debug=False, custom_pool=server.pool, keepalive=False, socket_timeout=900) def test_number_of_workers(self): """Ensure the default number of workers matches num cpus.""" def pid(): i = 1 while True: i += 1 yield i with mock.patch.object(os, 'fork') as mock_fork: mock_fork.side_effect = pid server = wsgi.Server() server.configure = mock.Mock() fake_application = "fake-application" server.start(fake_application, None) self.assertEqual(processutils.get_worker_count(), len(server.children)) def test_set_eventlet_hub_exception(self): with mock.patch('eventlet.hubs.use_hub', side_effect=Exception): self.assertRaises(exception.WorkerCreationFailure, wsgi.set_eventlet_hub) class GetSocketTestCase(base.BaseTestCase): def setUp(self): super(GetSocketTestCase, self).setUp() self.useFixture(fixtures.MonkeyPatch( "glare.common.wsgi.get_bind_addr", lambda x: ('192.168.0.13', 1234))) addr_info_list = [(2, 1, 6, '', ('192.168.0.13', 80)), (2, 2, 17, '', ('192.168.0.13', 80)), (2, 3, 0, '', ('192.168.0.13', 80))] self.useFixture(fixtures.MonkeyPatch( "glare.common.wsgi.socket.getaddrinfo", lambda *x: addr_info_list)) self.useFixture(fixtures.MonkeyPatch( "glare.common.wsgi.time.time", mock.Mock(side_effect=[0, 1, 5, 10, 20, 35]))) self.useFixture(fixtures.MonkeyPatch( "glare.common.wsgi.utils.validate_key_cert", lambda *x: None)) wsgi.CONF.cert_file = '/etc/ssl/cert' wsgi.CONF.key_file = '/etc/ssl/key' wsgi.CONF.ca_file = '/etc/ssl/ca_cert' wsgi.CONF.tcp_keepidle = 600 def test_correct_configure_socket(self): mock_socket = mock.Mock() self.useFixture(fixtures.MonkeyPatch( 'glare.common.wsgi.ssl.wrap_socket', mock_socket)) self.useFixture(fixtures.MonkeyPatch( 'glare.common.wsgi.eventlet.listen', lambda *x, **y: mock_socket)) server = wsgi.Server() server.default_port = 1234 server.configure_socket() self.assertIn(mock.call.setsockopt( socket.SOL_SOCKET, socket.SO_REUSEADDR, 1), mock_socket.mock_calls) self.assertIn(mock.call.setsockopt( socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1), mock_socket.mock_calls) if hasattr(socket, 'TCP_KEEPIDLE'): self.assertIn(mock.call().setsockopt( socket.IPPROTO_TCP, socket.TCP_KEEPIDLE, wsgi.CONF.tcp_keepidle), mock_socket.mock_calls) def test_get_socket_without_all_ssl_reqs(self): wsgi.CONF.key_file = None self.assertRaises(RuntimeError, wsgi.get_socket, 1234) def test_get_socket_with_bind_problems(self): self.useFixture(fixtures.MonkeyPatch( 'glare.common.wsgi.eventlet.listen', mock.Mock(side_effect=( [wsgi.socket.error(socket.errno.EADDRINUSE)] * 3 + [None])))) self.useFixture(fixtures.MonkeyPatch( 'glare.common.wsgi.ssl.wrap_socket', lambda *x, **y: None)) self.assertRaises(RuntimeError, wsgi.get_socket, 1234) def test_get_socket_with_unexpected_socket_errno(self): self.useFixture(fixtures.MonkeyPatch( 'glare.common.wsgi.eventlet.listen', mock.Mock(side_effect=wsgi.socket.error(socket.errno.ENOMEM)))) self.useFixture(fixtures.MonkeyPatch( 'glare.common.wsgi.ssl.wrap_socket', lambda *x, **y: None)) self.assertRaises(wsgi.socket.error, wsgi.get_socket, 1234) glare-0.5.0/glare/tests/unpacking_artifact.py000066400000000000000000000034151317401036700212550ustar00rootroot00000000000000# Copyright 2017 - Nokia Networks # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import io import zipfile from glare.common import exception from glare.objects import base from glare.objects.meta import file_utils from glare.objects.meta import wrappers Blob = wrappers.BlobField.init Folder = wrappers.FolderField.init class Unpacker(base.BaseArtifact): MAX_BLOB_SIZE = 100000 fields = { 'zip': Blob(description="Original zipped data.", required_on_activate=False), 'content': Folder(system=True, required_on_activate=False), } @classmethod def get_type_name(cls): return "unpacking_artifact" @classmethod def pre_upload_hook(cls, context, af, field_name, blob_key, fd): flobj = io.BytesIO(fd.read(cls.MAX_BLOB_SIZE)) # Raise exception if something left in the stream if fd.read(1): msg = ("The file you are trying to upload is too big. " "The system upper limit is %s.") % cls.MAX_BLOB_SIZE raise exception.RequestEntityTooLarge(msg) zip_ref = zipfile.ZipFile(flobj, 'r') file_utils.unpack_zip_archive_to_artifact_folder( context, af, zip_ref, 'content') flobj.seek(0) return flobj glare-0.5.0/glare/tests/utils.py000066400000000000000000000322371317401036700165650ustar00rootroot00000000000000# Copyright 2010-2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Common utilities used in testing""" import errno import functools import os import shlex import shutil import six import socket import subprocess import fixtures from oslo_config import cfg from oslo_config import fixture as cfg_fixture from oslo_log import log import requests import testtools from glare.common import config CONF = cfg.CONF try: CONF.debug except cfg.NoSuchOptError: # NOTE(sigmavirus24): If we run the entire test suite, the logging options # will be registered appropriately and we do not need to re-register them. # However, when we run a test in isolation (or use --debug), those options # will not be registered for us. In order for a test in a class that # inherits from BaseTestCase to even run, we will need to register them # ourselves. BaseTestCase.config will set the debug level if something # calls self.config(debug=True) so we need these options registered # appropriately. # See bug 1433785 for more details. log.register_options(CONF) class BaseTestCase(testtools.TestCase): def setUp(self): super(BaseTestCase, self).setUp() self._config_fixture = self.useFixture(cfg_fixture.Config()) # NOTE(bcwaldon): parse_args has to be called to register certain # command-line options - specifically we need config_dir for # the following policy tests config.parse_args(args=[]) self.addCleanup(CONF.reset) self.test_dir = self.useFixture(fixtures.TempDir()).path self.conf_dir = os.path.join(self.test_dir, 'etc') safe_mkdirs(self.conf_dir) self.set_policy() def set_policy(self): conf_file = "policy.json" self.policy_file = self._copy_data_file(conf_file, self.conf_dir) self.config(policy_file=self.policy_file, group='oslo_policy') def _copy_data_file(self, file_name, dst_dir): src_file_name = os.path.join('glare/tests/etc', file_name) shutil.copy(src_file_name, dst_dir) dst_file_name = os.path.join(dst_dir, file_name) return dst_file_name def config(self, **kw): """Override some configuration values. The keyword arguments are the names of configuration options to override and their values. If a group argument is supplied, the overrides are applied to the specified configuration option group. All overrides are automatically cleared at the end of the current test by the fixtures cleanup process. """ self._config_fixture.config(**kw) class requires(object): """Decorator that initiates additional test setup/teardown.""" def __init__(self, setup=None, teardown=None): self.setup = setup self.teardown = teardown def __call__(self, func): def _runner(*args, **kw): if self.setup: self.setup(args[0]) func(*args, **kw) if self.teardown: self.teardown(args[0]) _runner.__name__ = func.__name__ _runner.__doc__ = func.__doc__ return _runner class depends_on_exe(object): """Decorator to skip test if an executable is unavailable""" def __init__(self, exe): self.exe = exe def __call__(self, func): def _runner(*args, **kw): cmd = 'which %s' % self.exe exitcode, out, err = execute(cmd, raise_error=False) if exitcode != 0: args[0].disabled_message = 'test requires exe: %s' % self.exe args[0].disabled = True func(*args, **kw) _runner.__name__ = func.__name__ _runner.__doc__ = func.__doc__ return _runner def skip_if_disabled(func): """Decorator that skips a test if test case is disabled.""" @functools.wraps(func) def wrapped(*a, **kwargs): func.__test__ = False test_obj = a[0] message = getattr(test_obj, 'disabled_message', 'Test disabled') if getattr(test_obj, 'disabled', False): test_obj.skipTest(message) func(*a, **kwargs) return wrapped def fork_exec(cmd, exec_env=None, logfile=None, pass_fds=None): """Execute a command using fork/exec. This is needed for programs system executions that need path searching but cannot have a shell as their parent process, for example: glare. When glare starts it sets itself as the parent process for its own process group. Thus the pid that a Popen process would have is not the right pid to use for killing the process group. This patch gives the test env direct access to the actual pid. :param cmd: Command to execute as an array of arguments. :param exec_env: A dictionary representing the environment with which to run the command. :param logfile: A path to a file which will hold the stdout/err of the child process. :param pass_fds: Sequence of file descriptors passed to the child. """ env = os.environ.copy() if exec_env is not None: for env_name, env_val in exec_env.items(): if callable(env_val): env[env_name] = env_val(env.get(env_name)) else: env[env_name] = env_val pid = os.fork() if pid == 0: if logfile: fds = [1, 2] with open(logfile, 'r+b') as fptr: for desc in fds: # close fds try: os.dup2(fptr.fileno(), desc) except OSError: pass if pass_fds and hasattr(os, 'set_inheritable'): # os.set_inheritable() is only available and needed # since Python 3.4. On Python 3.3 and older, file descriptors are # inheritable by default. for fd in pass_fds: os.set_inheritable(fd, True) args = shlex.split(cmd) os.execvpe(args[0], args, env) else: return pid def wait_for_fork(pid, raise_error=True, expected_exitcode=0): """Wait for a process to complete This function will wait for the given pid to complete. If the exit code does not match that of the expected_exitcode an error is raised. """ rc = 0 try: (pid, rc) = os.waitpid(pid, 0) rc = os.WEXITSTATUS(rc) if rc != expected_exitcode: raise RuntimeError('The exit code %d is not %d' % (rc, expected_exitcode)) except Exception: if raise_error: raise return rc def execute(cmd, raise_error=True, no_venv=False, exec_env=None, expect_exit=True, expected_exitcode=0, context=None): """Executes a command in a subprocess. Returns a tuple of (exitcode, out, err), where out is the string output from stdout and err is the string output from stderr when executing the command. :param cmd: Command string to execute :param raise_error: If returncode is not 0 (success), then raise a RuntimeError? Default: True) :param no_venv: Disable the virtual environment :param exec_env: Optional dictionary of additional environment variables; values may be callables, which will be passed the current value of the named environment variable :param expect_exit: Optional flag true iff timely exit is expected :param expected_exitcode: expected exitcode from the launcher :param context: additional context for error message """ env = os.environ.copy() if exec_env is not None: for env_name, env_val in exec_env.items(): if callable(env_val): env[env_name] = env_val(env.get(env_name)) else: env[env_name] = env_val # If we're asked to omit the virtualenv, and if one is set up, # restore the various environment variables if no_venv and 'VIRTUAL_ENV' in env: # Clip off the first element of PATH env['PATH'] = env['PATH'].split(os.pathsep, 1)[-1] del env['VIRTUAL_ENV'] # Make sure that we use the programs in the # current source directory's bin/ directory. path_ext = [os.path.join(os.getcwd(), 'bin')] # Also jack in the path cmd comes from, if it's absolute args = shlex.split(cmd) executable = args[0] if os.path.isabs(executable): path_ext.append(os.path.dirname(executable)) env['PATH'] = ':'.join(path_ext) + ':' + env['PATH'] process = subprocess.Popen(args, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=env) if expect_exit: result = process.communicate() (out, err) = result exitcode = process.returncode else: out = '' err = '' exitcode = 0 if exitcode != expected_exitcode and raise_error: msg = ("Command %(cmd)s did not succeed. Returned an exit " "code of %(exitcode)d." "\n\nSTDOUT: %(out)s" "\n\nSTDERR: %(err)s" % {'cmd': cmd, 'exitcode': exitcode, 'out': out, 'err': err}) if context: msg += "\n\nCONTEXT: %s" % context raise RuntimeError(msg) return exitcode, out, err def find_executable(cmdname): """Searches the path for a given cmdname. Returns an absolute filename if an executable with the given name exists in the path, or None if one does not. :param cmdname: The bare name of the executable to search for """ # Keep an eye out for the possibility of an absolute pathname if os.path.isabs(cmdname): return cmdname # Get a list of the directories to search path = ([os.path.join(os.getcwd(), 'bin')] + os.environ['PATH'].split(os.pathsep)) # Search through each in turn for elem in path: full_path = os.path.join(elem, cmdname) if os.access(full_path, os.X_OK): return full_path # No dice... return None def get_unused_port(): """Returns an unused port on localhost. """ port, s = get_unused_port_and_socket() s.close() return port def get_unused_port_and_socket(): """Returns an unused port on localhost and the open socket from which it was created. """ s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) s.bind(('localhost', 0)) addr, port = s.getsockname() return (port, s) def xattr_writes_supported(path): """Returns True if the we can write a file to the supplied path and subsequently write a xattr to that file. """ try: import xattr except ImportError: return False def set_xattr(path, key, value): xattr.setxattr(path, "user.%s" % key, value) # We do a quick attempt to write a user xattr to a temporary file # to check that the filesystem is even enabled to support xattrs fake_filepath = os.path.join(path, 'testing-checkme') result = True with open(fake_filepath, 'wb') as fake_file: fake_file.write(b"XXX") fake_file.flush() try: set_xattr(fake_filepath, 'hits', b'1') except IOError as e: if e.errno == errno.EOPNOTSUPP: result = False else: # Cleanup after ourselves... if os.path.exists(fake_filepath): os.unlink(fake_filepath) return result def safe_mkdirs(path): try: os.makedirs(path) except OSError as e: if e.errno != errno.EEXIST: raise class FakeHTTPResponse(object): def __init__(self, status=200, headers=None, data=None, *args, **kwargs): data = data or b'some_data' self.data = six.BytesIO(data) self.read = self.data.read self.status = status self.headers = headers or {'content-length': len(data)} if not kwargs.get('no_response_body', False): self.body = None def getheader(self, name, default=None): return self.headers.get(name.lower(), default) def getheaders(self): return self.headers or {} def read(self, amt): self.data.read(amt) def release_conn(self): pass def close(self): self.data.close() def fake_response(status_code=200, headers=None, content=None, **kwargs): r = requests.models.Response() r.status_code = status_code r.headers = headers or {} r.raw = FakeHTTPResponse(status_code, headers, content, kwargs) return r glare-0.5.0/glare/tests/var/000077500000000000000000000000001317401036700156345ustar00rootroot00000000000000glare-0.5.0/glare/tests/var/certificate.crt000066400000000000000000000126121317401036700206320ustar00rootroot00000000000000# > openssl x509 -in glare/tests/var/certificate.crt -noout -text # Certificate: # Data: # Version: 1 (0x0) # Serial Number: 1 (0x1) # Signature Algorithm: sha1WithRSAEncryption # Issuer: C=AU, ST=Some-State, O=OpenStack, OU=Glare, CN=Glare CA # Validity # Not Before: Feb 2 20:22:13 2015 GMT # Not After : Jan 31 20:22:13 2024 GMT # Subject: C=AU, ST=Some-State, O=OpenStack, OU=Glare, CN=127.0.0.1 # Subject Public Key Info: # Public Key Algorithm: rsaEncryption # RSA Public Key: (4096 bit) # Modulus (4096 bit): # 00:9f:44:13:51:de:e9:5a:f7:ac:33:2a:1a:4c:91: # a1:73:bc:f3:a6:d3:e6:59:ae:e8:e2:34:68:3e:f4: # 40:c1:a1:1a:65:9a:a3:67:e9:2c:b9:79:9c:00:b1: # 7c:c1:e6:9e:de:47:bf:f1:cb:f2:73:d4:c3:62:fe: # 82:90:6f:b4:75:ca:7e:56:8f:99:3d:06:51:3c:40: # f4:ff:74:97:4f:0d:d2:e6:66:76:8d:97:bf:89:ce: # fe:b2:d7:89:71:f2:a0:d9:f5:26:7c:1a:7a:bf:2b: # 8f:72:80:e7:1f:4d:4a:40:a3:b9:9e:33:f6:55:e0: # 40:2b:1e:49:e4:8c:71:9d:11:32:cf:21:41:e1:13: # 28:c6:d6:f6:e0:b3:26:10:6d:5b:63:1d:c3:ee:d0: # c4:66:63:38:89:6b:8f:2a:c2:bd:4f:e4:bc:03:8f: # a2:f2:5c:1d:73:11:9c:7b:93:3d:d6:a3:d1:2d:cd: # 64:23:24:bc:65:3c:71:20:28:60:a0:ea:fe:77:0e: # 1d:95:36:76:ad:e7:2f:1c:27:62:55:e3:9d:11:c1: # fb:43:3e:e5:21:ac:fd:0e:7e:3d:c9:44:d2:bd:6f: # 89:7e:0f:cb:88:54:57:fd:8d:21:c8:34:e1:47:01: # 28:0f:45:a1:7e:60:1a:9c:4c:0c:b8:c1:37:2d:46: # ab:18:9e:ca:49:d3:77:b7:92:3a:d2:7f:ca:d5:02: # f1:75:81:66:39:51:aa:bc:d7:f0:91:23:69:e8:71: # ae:44:76:5e:87:54:eb:72:fc:ac:fd:60:22:e0:6a: # e4:ad:37:b7:f6:e5:24:b4:95:2c:26:0e:75:a0:e9: # ed:57:be:37:42:64:1f:02:49:0c:bd:5d:74:6d:e6: # f2:da:5c:54:82:fa:fc:ff:3a:e4:1a:7a:a9:3c:3d: # ee:b5:df:09:0c:69:c3:51:92:67:80:71:9b:10:8b: # 20:ff:a2:5e:c5:f2:86:a0:06:65:1c:42:f9:91:24: # 54:29:ed:7e:ec:db:4c:7b:54:ee:b1:25:1b:38:53: # ae:01:b6:c5:93:1e:a3:4d:1b:e8:73:47:50:57:e8: # ec:a0:80:53:b1:34:74:37:9a:c1:8c:14:64:2e:16: # dd:a1:2e:d3:45:3e:2c:46:62:20:2a:93:7a:92:4c: # b2:cc:64:47:ad:63:32:0b:68:0c:24:98:20:83:08: # 35:74:a7:68:7a:ef:d6:84:07:d1:5e:d7:c0:6c:3f: # a7:4a:78:62:a8:70:75:37:fb:ce:1f:09:1e:7c:11: # 35:cc:b3:5a:a3:cc:3f:35:c9:ee:24:6f:63:f8:54: # 6f:7c:5b:b4:76:3d:f2:81:6d:ad:64:66:10:d0:c4: # 0b:2c:2f # Exponent: 65537 (0x10001) # Signature Algorithm: sha1WithRSAEncryption # 5f:e8:a8:93:20:6c:0f:12:90:a6:e2:64:21:ed:63:0e:8c:e0: # 0f:d5:04:13:4d:2a:e9:a5:91:b7:e4:51:94:bd:0a:70:4b:94: # c7:1c:94:ed:d7:64:95:07:6b:a1:4a:bc:0b:53:b5:1a:7e:f1: # 9c:12:59:24:5f:36:72:34:ca:33:ee:28:46:fd:21:e6:52:19: # 0c:3d:94:6b:bd:cb:76:a1:45:7f:30:7b:71:f1:84:b6:3c:e0: # ac:af:13:81:9c:0e:6e:3c:9b:89:19:95:de:8e:9c:ef:70:ac: # 07:ae:74:42:47:35:50:88:36:ec:32:1a:55:24:08:f2:44:57: # 67:fe:0a:bb:6b:a7:bd:bc:af:bf:2a:e4:dd:53:84:6b:de:1d: # 2a:28:21:38:06:7a:5b:d8:83:15:65:31:6d:61:67:00:9e:1a: # 61:85:15:a2:4c:9a:eb:6d:59:8e:34:ac:2c:d5:24:4e:00:ff: # 30:4d:a3:d5:80:63:17:52:65:ac:7f:f4:0a:8e:56:a4:97:51: # 39:81:ae:e8:cb:52:09:b3:47:b4:fd:1b:e2:04:f9:f2:76:e3: # 63:ef:90:aa:54:98:96:05:05:a9:91:76:18:ed:5d:9e:6e:88: # 50:9a:f7:2c:ce:5e:54:ba:15:ec:62:ff:5d:be:af:35:03:b1: # 3f:32:3e:0e -----BEGIN CERTIFICATE----- MIIEKjCCAxICAQEwDQYJKoZIhvcNAQEFBQAwWzELMAkGA1UEBhMCQVUxEzARBgNV BAgMClNvbWUtU3RhdGUxEjAQBgNVBAoMCU9wZW5TdGFjazEPMA0GA1UECwwGR2xh bmNlMRIwEAYDVQQDDAlHbGFuY2UgQ0EwHhcNMTUwMjAyMjAyMjEzWhcNMjQwMTMx MjAyMjEzWjBbMQswCQYDVQQGEwJBVTETMBEGA1UECBMKU29tZS1TdGF0ZTESMBAG A1UEChMJT3BlblN0YWNrMQ8wDQYDVQQLEwZHbGFuY2UxEjAQBgNVBAMTCTEyNy4w LjAuMTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAJ9EE1He6Vr3rDMq GkyRoXO886bT5lmu6OI0aD70QMGhGmWao2fpLLl5nACxfMHmnt5Hv/HL8nPUw2L+ gpBvtHXKflaPmT0GUTxA9P90l08N0uZmdo2Xv4nO/rLXiXHyoNn1Jnwaer8rj3KA 5x9NSkCjuZ4z9lXgQCseSeSMcZ0RMs8hQeETKMbW9uCzJhBtW2Mdw+7QxGZjOIlr jyrCvU/kvAOPovJcHXMRnHuTPdaj0S3NZCMkvGU8cSAoYKDq/ncOHZU2dq3nLxwn YlXjnRHB+0M+5SGs/Q5+PclE0r1viX4Py4hUV/2NIcg04UcBKA9FoX5gGpxMDLjB Ny1GqxieyknTd7eSOtJ/ytUC8XWBZjlRqrzX8JEjaehxrkR2XodU63L8rP1gIuBq 5K03t/blJLSVLCYOdaDp7Ve+N0JkHwJJDL1ddG3m8tpcVIL6/P865Bp6qTw97rXf CQxpw1GSZ4BxmxCLIP+iXsXyhqAGZRxC+ZEkVCntfuzbTHtU7rElGzhTrgG2xZMe o00b6HNHUFfo7KCAU7E0dDeawYwUZC4W3aEu00U+LEZiICqTepJMssxkR61jMgto DCSYIIMINXSnaHrv1oQH0V7XwGw/p0p4YqhwdTf7zh8JHnwRNcyzWqPMPzXJ7iRv Y/hUb3xbtHY98oFtrWRmENDECywvAgMBAAEwDQYJKoZIhvcNAQEFBQADggEBAF/o qJMgbA8SkKbiZCHtYw6M4A/VBBNNKumlkbfkUZS9CnBLlMcclO3XZJUHa6FKvAtT tRp+8ZwSWSRfNnI0yjPuKEb9IeZSGQw9lGu9y3ahRX8we3HxhLY84KyvE4GcDm48 m4kZld6OnO9wrAeudEJHNVCINuwyGlUkCPJEV2f+Crtrp728r78q5N1ThGveHSoo ITgGelvYgxVlMW1hZwCeGmGFFaJMmuttWY40rCzVJE4A/zBNo9WAYxdSZax/9AqO VqSXUTmBrujLUgmzR7T9G+IE+fJ242PvkKpUmJYFBamRdhjtXZ5uiFCa9yzOXlS6 Fexi/12+rzUDsT8yPg4= -----END CERTIFICATE----- glare-0.5.0/glare/tests/var/hooks.zip000066400000000000000000000014621317401036700175060ustar00rootroot00000000000000PK i[Jfolder1/UT \Y\Yux PKi[JS folder1/bbb.txtUT \Y\Yux KJ.PK t[Jfolder1/folder2/UT \Y\Yux PKt[J`>@ folder1/folder2/ccc.txtUT \Y\Yux KN.PKC[JB& aaa.txtUT \Y\Yux KL.PK i[JAfolder1/UT\Yux PKi[JS Bfolder1/bbb.txtUT\Yux PK t[JAfolder1/folder2/UT\Yux PKt[J`>@ folder1/folder2/ccc.txtUT\Yux PKC[JB& 2aaa.txtUT\Yux PKyglare-0.5.0/glare/tests/var/hooks_100.zip000066400000000000000000000365261317401036700200770ustar00rootroot00000000000000PK i4KdNfile_0UT ??YJ?Yux my number is 00PK i4K1T9file_1UT ??Y?Yux my number is 01PK i4Kfile_2UT ??Y?Yux my number is 02PK i4K5file_3UT ??Y?Yux my number is 03PK i4KyIfile_4UT ??Y?Yux my number is 04PK i4K(~>file_5UT ??Y?Yux my number is 05PK i4Kwfile_6UT ??Y?Yux my number is 06PK i4Kpfile_7UT ??Y?Yux my number is 07PK i4K@file_8UT ??Y?Yux my number is 08PK i4K7file_9UT ??Y?Yux my number is 09PK i4KUWfile_10UT ??Y?Yux my number is 10PK i4Kpe file_11UT ??Y?Yux my number is 11PK i4K4file_12UT ??Y?Yux my number is 12PK i4K\file_13UT ??Y?Yux my number is 13PK i4KbPfile_14UT ??Y?Yux my number is 14PK i4Kie'file_15UT ??Y?Yux my number is 15PK i4Klfile_16UT ??Y?Yux my number is 16PK i4KEkfile_17UT ??Y?Yux my number is 17PK i4KYfile_18UT ??Y?Yux my number is 18PK i4KB.file_19UT ??Y?Yux my number is 19PK i4K%"|file_20UT ??Y?Yux my number is 20PK i4K6% file_21UT ??Y?Yux my number is 21PK i4K g,file_22UT ??Y?Yux my number is 22PK i4KW+file_23UT ??Y?Yux my number is 23PK i4Kfile_31UT ??Y?Yux my number is 31PK i4KHV7file_32UT ??Y?Yux my number is 32PK i4Kf0file_33UT ??Y?Yux my number is 33PK i4K}Tbfile_34UT ??Y?Yux my number is 34PK i4KSfile_35UT ??Y?Yux my number is 35PK i4KQZfile_36UT ??Y?Yux my number is 36PK i4KǢ]file_37UT ??Y?Yux my number is 37PK i4KVkfile_38UT ??Y?Yux my number is 38PK i4Kfile_39UT ??Y?Yux my number is 39PK i4Kx*file_40UT ??Y?Yux my number is 40PK i4K5]file_41UT ??Y?Yux my number is 41PK i4Kvfile_42UT ??Y?Yux my number is 42PK i4Kqfile_43UT ??Y?Yux my number is 43PK i4Ke-file_44UT ??Y?Yux my number is 44PK i4K,UZfile_45UT ??Y?Yux my number is 45PK i4Kfile_46UT ??Y?Yux my number is 46PK i4K4file_47UT ??Y?Yux my number is 47PK i4K)$file_48UT ??Y?Yux my number is 48PK i4KSfile_49UT ??Y?Yux my number is 49PK i4Kc3file_50UT ??Y?Yux my number is 50PK i4KtdDfile_51UT ??Y?Yux my number is 51PK i4Kmfile_52UT ??Y?Yux my number is 52PK i4KXjfile_53UT ??Y?Yux my number is 53PK i4KT4file_54UT ??Y?Yux my number is 54PK i4Kmd Cfile_55UT ??Y?Yux my number is 55PK i4K5file_56UT ??Y?Yux my number is 56PK i4KAfile_57UT ??Y?Yux my number is 57PK i4K=file_58UT ??Y?Yux my number is 58PK i4KF(Jfile_59UT ??Y?Yux my number is 59PK i4K!Nfile_60UT ??Y?Yux my number is 60PK i4KIofile_61UT ??Y?Yux my number is 61PK i4K @file_62UT ??Y?Yux my number is 62PK i4KGfile_63UT ??Y?Yux my number is 63PK i4K8#file_64UT ??Y?Yux my number is 64PK i4K7$hfile_65UT ??Y?Yux my number is 65PK i4Kf-file_66UT ??Y?Yux my number is 66PK i4KV*file_67UT ??Y?Yux my number is 67PK i4KKfile_68UT ??Y?Yux my number is 68PK i4K{afile_69UT ??Y?Yux my number is 69PK i4K`Ufile_70UT ??Y?Yux my number is 70PK i4KRvfile_71UT ??Y?Yux my number is 71PK i4KL[file_72UT ??Y?Yux my number is 72PK i4Kڣ\file_73UT ??Y?Yux my number is 73PK i4Ky68file_74UT ??Y?Yux my number is 74PK i4K?qfile_75UT ??Y?Yux my number is 75PK i4KUW6file_76UT ??Y?Yux my number is 76PK i4Kg1file_77UT ??Y?Yux my number is 77PK i4KRzfile_78UT ??Y?Yux my number is 78PK i4KJxfile_79UT ??Y?Yux my number is 79PK i4K͆file_80UT ??Y?Yux my number is 80PK i4K9file_81UT ??Y?Yux my number is 81PK i4Khfile_82UT ??Y?Yux my number is 82PK i4Kfile_83UT ??Y?Yux my number is 83PK i4K*file_84UT ??Y?Yux my number is 84PK i4K file_85UT ??Y?Yux my number is 85PK i4KKofile_86UT ??Y?Yux my number is 86PK i4K {file_87UT ??Y?Yux my number is 87PK i4Kffile_88UT ??Y?Yux my number is 88PK i4K Vfile_89UT ??Y?Yux my number is 89PK i4K֟file_90UT ??Y?Yux my number is 90PK i4Kxfile_91UT ??Y?Yux my number is 91PK i4K¾qfile_92UT ??Y?Yux my number is 92PK i4KTfile_93UT ??Y?Yux my number is 93PK i4Kfile_94UT ??Y?Yux my number is 94PK i4Ka+file_95UT ??Y?Yux my number is 95PK i4Kzvfile_96UT ??Y?Yux my number is 96PK i4KMJfile_97UT ??Y?Yux my number is 97PK i4KW file_98UT ??Y?Yux my number is 98PK i4KJg file_99UT ??Y?Yux my number is 99PK i4KdNfile_0UT??Yux PK i4K1T9Ofile_1UT??Yux PK i4Kfile_2UT??Yux PK i4K5file_3UT??Yux PK i4KyI<file_4UT??Yux PK i4K(~>file_5UT??Yux PK i4Kwfile_6UT??Yux PK i4Kp)file_7UT??Yux PK i4K@xfile_8UT??Yux PK i4K7file_9UT??Yux PK i4KUWfile_10UT??Yux PK i4Kpe ffile_11UT??Yux PK i4K4file_12UT??Yux PK i4K\file_13UT??Yux PK i4KbPVfile_14UT??Yux PK i4Kie'file_15UT??Yux PK i4Klfile_16UT??Yux PK i4KEkFfile_17UT??Yux PK i4KYfile_18UT??Yux PK i4KB.file_19UT??Yux PK i4K%"|6file_20UT??Yux PK i4K6% file_21UT??Yux PK i4K g,file_22UT??Yux PK i4KW+&file_23UT??Yux PK i4K file_31UT??Yux PK i4KHV7 file_32UT??Yux PK i4Kf0F file_33UT??Yux PK i4K}Tb file_34UT??Yux PK i4KS file_35UT??Yux PK i4KQZ6 file_36UT??Yux PK i4KǢ] file_37UT??Yux PK i4KVk file_38UT??Yux PK i4K& file_39UT??Yux PK i4Kx*v file_40UT??Yux PK i4K5] file_41UT??Yux PK i4Kv file_42UT??Yux PK i4Kqf file_43UT??Yux PK i4Ke- file_44UT??Yux PK i4K,UZfile_45UT??Yux PK i4KVfile_46UT??Yux PK i4K4file_47UT??Yux PK i4K)$file_48UT??Yux PK i4KSFfile_49UT??Yux PK i4Kc3file_50UT??Yux PK i4KtdDfile_51UT??Yux PK i4Km6file_52UT??Yux PK i4KXjfile_53UT??Yux PK i4KT4file_54UT??Yux PK i4Kmd C&file_55UT??Yux PK i4K5vfile_56UT??Yux PK i4KAfile_57UT??Yux PK i4K=file_58UT??Yux PK i4KF(Jffile_59UT??Yux PK i4K!Nfile_60UT??Yux PK i4KIofile_61UT??Yux PK i4K @Vfile_62UT??Yux PK i4KGfile_63UT??Yux PK i4K8#file_64UT??Yux PK i4K7$hFfile_65UT??Yux PK i4Kf-file_66UT??Yux PK i4KV*file_67UT??Yux PK i4KK6file_68UT??Yux PK i4K{afile_69UT??Yux PK i4K`Ufile_70UT??Yux PK i4KRv&file_71UT??Yux PK i4KL[vfile_72UT??Yux PK i4Kڣ\file_73UT??Yux PK i4Ky68file_74UT??Yux PK i4K?qffile_75UT??Yux PK i4KUW6file_76UT??Yux PK i4Kg1file_77UT??Yux PK i4KRzVfile_78UT??Yux PK i4KJxfile_79UT??Yux PK i4K͆file_80UT??Yux PK i4K9Ffile_81UT??Yux PK i4Khfile_82UT??Yux PK i4Kfile_83UT??Yux PK i4K*6file_84UT??Yux PK i4K file_85UT??Yux PK i4KKofile_86UT??Yux PK i4K {&file_87UT??Yux PK i4Kfvfile_88UT??Yux PK i4K Vfile_89UT??Yux PK i4K֟file_90UT??Yux PK i4Kxffile_91UT??Yux PK i4K¾qfile_92UT??Yux PK i4KTfile_93UT??Yux PK i4KVfile_94UT??Yux PK i4Ka+file_95UT??Yux PK i4Kzvfile_96UT??Yux PK i4KMJFfile_97UT??Yux PK i4KW file_98UT??Yux PK i4KJg file_99UT??Yux PKdd 6glare-0.5.0/glare/tests/var/privatekey.key000066400000000000000000000062531317401036700205370ustar00rootroot00000000000000-----BEGIN RSA PRIVATE KEY----- MIIJKAIBAAKCAgEAn0QTUd7pWvesMyoaTJGhc7zzptPmWa7o4jRoPvRAwaEaZZqj Z+ksuXmcALF8weae3ke/8cvyc9TDYv6CkG+0dcp+Vo+ZPQZRPED0/3SXTw3S5mZ2 jZe/ic7+steJcfKg2fUmfBp6vyuPcoDnH01KQKO5njP2VeBAKx5J5IxxnREyzyFB 4RMoxtb24LMmEG1bYx3D7tDEZmM4iWuPKsK9T+S8A4+i8lwdcxGce5M91qPRLc1k IyS8ZTxxIChgoOr+dw4dlTZ2recvHCdiVeOdEcH7Qz7lIaz9Dn49yUTSvW+Jfg/L iFRX/Y0hyDThRwEoD0WhfmAanEwMuME3LUarGJ7KSdN3t5I60n/K1QLxdYFmOVGq vNfwkSNp6HGuRHZeh1Trcvys/WAi4GrkrTe39uUktJUsJg51oOntV743QmQfAkkM vV10beby2lxUgvr8/zrkGnqpPD3utd8JDGnDUZJngHGbEIsg/6JexfKGoAZlHEL5 kSRUKe1+7NtMe1TusSUbOFOuAbbFkx6jTRvoc0dQV+jsoIBTsTR0N5rBjBRkLhbd oS7TRT4sRmIgKpN6kkyyzGRHrWMyC2gMJJgggwg1dKdoeu/WhAfRXtfAbD+nSnhi qHB1N/vOHwkefBE1zLNao8w/NcnuJG9j+FRvfFu0dj3ygW2tZGYQ0MQLLC8CAwEA AQKCAgBL4IvvymqUu0CgE6P57LvlvxS522R4P7uV4W/05jtfxJgl5fmJzO5Q4x4u umB8pJn1vms1EHxPMQNxS1364C0ynSl5pepUx4i2UyAmAG8B680ZlaFPrgdD6Ykw vT0vO2/kx0XxhFAMef1aiQ0TvaftidMqCwmGOlN393Mu3rZWJVZ2lhqj15Pqv4lY 3iD5XJBYdVrekTmwqf7KgaLwtVyqDoiAjdMM8lPZeX965FhmxR8oWh0mHR9gf95J etMmdy6Km//+EbeS/HxWRnE0CD/RsQA7NmDFnXvmhsB6/j4EoHn5xB6ssbpGAxIg JwlY4bUrKXpaEgE7i4PYFb1q5asnTDdUZYAGAGXSBbDiUZM2YOe1aaFB/SA3Y3K2 47brnx7UXhAXSPJ16EZHejSeFbzZfWgj2J1t3DLk18Fpi/5AxxIy/N5J38kcP7xZ RIcSV1QEasYUrHI9buhuJ87tikDBDFEIIeLZxlyeIdwmKrQ7Vzny5Ls94Wg+2UtI XFLDak5SEugdp3LmmTJaugF+s/OiglBVhcaosoKRXb4K29M7mQv2huEAerFA14Bd dp2KByd8ue+fJrAiSxhAyMDAe/uv0ixnmBBtMH0YYHbfUIgl+kR1Ns/bxrJu7T7F kBQWZV4NRbSRB+RGOG2/Ai5jxu0uLu3gtHMO4XzzElWqzHEDoQKCAQEAzfaSRA/v 0831TDL8dmOCO61TQ9GtAa8Ouj+SdyTwk9f9B7NqQWg7qdkbQESpaDLvWYiftoDw mBFHLZe/8RHBaQpEAfbC/+DO6c7O+g1/0Cls33D5VaZOzFnnbHktT3r5xwkZfVBS aPPWl/IZOU8TtNqujQA+mmSnrJ7IuXSsBVq71xgBQT9JBZpUcjZ4eQducmtC43CP GqcSjq559ZKc/sa3PkAtNlKzSUS1abiMcJ86C9PgQ9gOu7y8SSqQ3ivZkVM99rxm wo8KehCcHOPOcIUQKmx4Bs4V3chm8rvygf3aanUHi83xaMeFtIIuOgAJmE9wGQeo k0UGvKBUDIenfwKCAQEAxfVFVxMBfI4mHrgTj/HOq7GMts8iykJK1PuELU6FZhex XOqXRbQ5dCLsyehrKlVPFqUENhXNHaOQrCOZxiVoRje2PfU/1fSqRaPxI7+W1Fsh Fq4PkdJ66NJZJkK5NHwE8SyQf+wpLdL3YhY5LM3tWdX5U9Rr6N8qelE3sLPssAak 1km4/428+rkp1BlCffr3FyL0KJmOYfMiAr8m6hRZWbhkvm5YqX1monxUrKdFJ218 dxzyniqoS1yU5RClY6783dql1UO4AvxpzpCPYDFIwbEb9zkUo0przhmi4KzyxknB /n/viMWzSnsM9YbakH6KunDTUteme1Dri3Drrq9TUQKCAQAVdvL7YOXPnxFHZbDl 7azu5ztcQAfVuxa/1kw/WnwwDDx0hwA13NUK+HNcmUtGbrh/DjwG2x032+UdHUmF qCIN/mHkCoF8BUPLHiB38tw1J3wPNUjm4jQoG96AcYiFVf2d/pbHdo2AHplosHRs go89M+UpELN1h7Ppy4qDuWMME86rtfa7hArqKJFQbdjUVC/wgLkx1tMzJeJLOGfB bgwqiS8jr7CGjsvcgOqfH/qS6iU0glpG98dhTWQaA/OhE9TSzmgQxMW41Qt0eTKr 2Bn1pAhxQ2im3Odue6ou9eNqJLiUi6nDqizUjKakj0SeCs71LqIyGZg58OGo2tSn kaOlAoIBAQCE/fO4vQcJpAJOLwLNePmM9bqAcoZ/9auKjPNO8OrEHPTGZMB+Tscu k+wa9a9RgICiyPgcUec8m0+tpjlAGo+EZRdlZqedWUMviCWQC74MKrD/KK9DG3IB ipfkEX2VmiBD2tm1Z3Z+17XlSuLci/iCmzNnM1XP3GYQSRIt/6Lq23vQjzTfU1z7 4HwOh23Zb0qjW5NG12sFuS9HQx6kskkY8r2UBlRAggP686Z7W+EkzPSKnYMN6cCo 6KkLf3RtlPlDHwq8TUOJlgSLhykbyeCEaDVOkSWhUnU8wJJheS+dMZ5IGbFWZOPA DQ02woOCAdG30ebXSBQL0uB8DL/52sYRAoIBAHtW3NomlxIMqWX8ZYRJIoGharx4 ikTOR/jeETb9t//n6kV19c4ICiXOQp062lwEqFvHkKzxKECFhJZuwFc09hVxUXxC LJjvDfauHWFHcrDTWWbd25CNeZ4Sq79GKf+HJ+Ov87WYcjuBFlCh8ES+2N4WZGCn B5oBq1g6E4p1k6xA5eE6VRiHPuFH8N9t1x6IlCZvZBhuVWdDrDd4qMSDEUTlcxSY mtcAIXTPaPcdb3CjdE5a38r59x7dZ/Te2K7FKETffjSmku7BrJITz3iXEk+sn8ex o3mdnFgeQ6/hxvMGgdK2qNb5ER/s0teFjnfnwHuTSXngMDIDb3kLL0ecWlQ= -----END RSA PRIVATE KEY----- glare-0.5.0/glare/version.py000066400000000000000000000012041317401036700157360ustar00rootroot00000000000000# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import pbr.version version_info = pbr.version.VersionInfo('glare') glare-0.5.0/glare/wsgi.py000066400000000000000000000027341317401036700152330ustar00rootroot00000000000000# Copyright (c) 2016 Mirantis, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Glare WSGI module. Use this module to deploy glare as WSGI application. Sample usage with uwsgi: export GLARE_CONFIG_FILE=/etc/glare/glare.conf uwsgi --module glare.wsgi:application --socket 127.0.0.1:8008 Sample apache mod_wsgi configuration: ServerName example.com SetEnv GLARE_CONFIG_FILE=/etc/glare/glare.conf DocumentRoot /path/to/public_html/ WSGIScriptAlias / /usr/lib/python2.7/site-packages/glare/wsgi.py ... """ import os from oslo_config import cfg from oslo_log import log as logging from glare.common import config from glare.common import utils CONF = cfg.CONF logging.register_options(CONF) CONFIG_FILE = os.environ.get("GLARE_CONFIG_FILE", "etc/glare.conf") config.parse_args(args=["--config-file", CONFIG_FILE]) utils.initialize_glance_store() application = config.load_paste_app('glare-api') glare-0.5.0/glare_tempest_plugin/000077500000000000000000000000001317401036700170215ustar00rootroot00000000000000glare-0.5.0/glare_tempest_plugin/README.rst000066400000000000000000000006241317401036700205120ustar00rootroot00000000000000============================ Tempest Integration of Glare ============================ This directory contains Tempest tests to cover the Glare project. To list all glare tempest cases, go to tempest directory, then run: $ testr list-tests glare To run glare tempest plugin tests using tox, go to tempest directory, then run: $ tox -eall-plugin glare And, to run a specific test: TBD glare-0.5.0/glare_tempest_plugin/__init__.py000066400000000000000000000000001317401036700211200ustar00rootroot00000000000000glare-0.5.0/glare_tempest_plugin/clients.py000066400000000000000000000024321317401036700210350ustar00rootroot00000000000000# Copyright (c) 2015 Mirantis, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from tempest import clients from tempest.common import credentials_factory as common_creds from tempest import config from glare_tempest_plugin.services.artifacts import artifacts_client CONF = config.CONF ADMIN_CREDS = None class Manager(clients.Manager): def __init__(self, credentials=None): if credentials is None: global ADMIN_CREDS if ADMIN_CREDS is None: ADMIN_CREDS = common_creds.get_configured_admin_credentials() credentials = ADMIN_CREDS super(Manager, self).__init__(credentials) self.artifacts_client = artifacts_client.ArtifactsClient( self.auth_provider) glare-0.5.0/glare_tempest_plugin/config.py000066400000000000000000000025671317401036700206520ustar00rootroot00000000000000# Copyright 2016 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg service_available_group = cfg.OptGroup(name='service_available', title='Available OpenStack Services') ServiceAvailableGroup = [ cfg.BoolOpt("glare", default=True, help="Whether or not glare is expected to be available") ] artifacts_group = cfg.OptGroup(name="artifacts", title='Glare Options') ArtifactGroup = [ cfg.StrOpt("catalog_type", default="artifact", help="Catalog type of Artifacts API"), cfg.StrOpt("endpoint_type", default="publicURL", choices=["publicURL", "adminURL", "internalURL"], help="The endpoint type for artifacts service") ] glare-0.5.0/glare_tempest_plugin/contrib/000077500000000000000000000000001317401036700204615ustar00rootroot00000000000000glare-0.5.0/glare_tempest_plugin/contrib/gate_hook.sh000066400000000000000000000020411317401036700227520ustar00rootroot00000000000000#!/usr/bin/env bash # Copyright 2017 - Nokia # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. export DEVSTACK_GATE_INSTALL_TESTONLY=1 export DEVSTACK_GATE_TEMPEST=1 export DEVSTACK_GATE_TEMPEST_NOTESTS=1 export KEEP_LOCALRC=1 export DEVSTACK_LOCAL_CONFIG+=$'\n'"GLARE_CUSTOM_MODULES=glare.tests.sample_artifact" export DEVSTACK_LOCAL_CONFIG+=$'\n'"GLARE_ENABLED_TYPES=heat_templates,heat_environments,murano_packages,tosca_templates,images,sample_artifact" GATE_DEST=$BASE/new DEVSTACK_PATH=$GATE_DEST/devstack $GATE_DEST/devstack-gate/devstack-vm-gate.sh glare-0.5.0/glare_tempest_plugin/contrib/post_test_hook.sh000066400000000000000000000023631317401036700240650ustar00rootroot00000000000000#!/usr/bin/env bash # Copyright 2017 - Nokia # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. sudo chmod -R a+rw /opt/stack/ DEVSTACK_PATH="$BASE/new" (cd $DEVSTACK_PATH/glare/; sudo virtualenv .venv) . $DEVSTACK_PATH/glare/.venv/bin/activate (cd $DEVSTACK_PATH/tempest/; sudo pip install -r requirements.txt -r test-requirements.txt) sudo cp $DEVSTACK_PATH/tempest/etc/logging.conf.sample $DEVSTACK_PATH/tempest/etc/logging.conf (cd $DEVSTACK_PATH/glare/; sudo pip install -r requirements.txt -r test-requirements.txt) (cd $DEVSTACK_PATH/glare/; sudo python setup.py install) (cd $DEVSTACK_PATH/tempest/; sudo rm -rf .testrepository) (cd $DEVSTACK_PATH/tempest/; sudo testr init) echo "running glare tests" (cd $BASE/new/tempest/; sudo -E tox -eall-plugin glare) glare-0.5.0/glare_tempest_plugin/contrib/pre_test_hook.sh000066400000000000000000000011211317401036700236550ustar00rootroot00000000000000#!/usr/bin/env bash # Copyright 2017 - Nokia # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. glare-0.5.0/glare_tempest_plugin/plugin.py000066400000000000000000000034621317401036700206760ustar00rootroot00000000000000# Copyright 2016 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import os from oslo_config import cfg from tempest import config from tempest.test_discover import plugins from glare_tempest_plugin import config as glare_config class GlareTempestPlugin(plugins.TempestPlugin): def load_tests(self): base_path = os.path.split(os.path.dirname( os.path.abspath(__file__)))[0] test_dir = "glare_tempest_plugin/tests" full_test_dir = os.path.join(base_path, test_dir) return full_test_dir, base_path def register_opts(self, conf): try: config.register_opt_group( conf, glare_config.service_available_group, glare_config.ServiceAvailableGroup ) except cfg.DuplicateOptError: pass try: config.register_opt_group(conf, glare_config.artifacts_group, glare_config.ArtifactGroup) except cfg.DuplicateOptError: pass def get_opt_lists(self): return [ (glare_config.service_available_group.name, glare_config.ServiceAvailableGroup), (glare_config.artifacts_group.name, glare_config.ArtifactGroup) ] glare-0.5.0/glare_tempest_plugin/services/000077500000000000000000000000001317401036700206445ustar00rootroot00000000000000glare-0.5.0/glare_tempest_plugin/services/__init__.py000066400000000000000000000000001317401036700227430ustar00rootroot00000000000000glare-0.5.0/glare_tempest_plugin/services/artifacts/000077500000000000000000000000001317401036700226245ustar00rootroot00000000000000glare-0.5.0/glare_tempest_plugin/services/artifacts/__init__.py000066400000000000000000000000001317401036700247230ustar00rootroot00000000000000glare-0.5.0/glare_tempest_plugin/services/artifacts/artifacts_client.py000066400000000000000000000110131317401036700265100ustar00rootroot00000000000000# Copyright (c) 2016 Mirantis, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import json from tempest import config from tempest.lib.common import rest_client CONF = config.CONF class ArtifactsClient(rest_client.RestClient): def __init__(self, auth_provider): super(ArtifactsClient, self).__init__( auth_provider, CONF.artifacts.catalog_type, CONF.identity.region, endpoint_type=CONF.artifacts.endpoint_type) def create_artifact(self, type_name, name, version='0.0.0', **kwargs): kwargs.update({'name': name, 'version': version}) uri = '/artifacts/{type_name}'.format(type_name=type_name) resp, body = self.post(uri, body=json.dumps(kwargs)) self.expected_success(201, resp.status) parsed = self._parse_resp(body) return parsed def get_artifact(self, type_name, art_id): uri = '/artifacts/{type_name}/{id}'.format( type_name=type_name, id=art_id) resp, body = self.get(uri) self.expected_success(200, resp.status) parsed = self._parse_resp(body) return parsed def update_artifact(self, type_name, art_id, remove_props=None, **kwargs): headers = {'Content-Type': 'application/json-patch+json'} uri = '/artifacts/{type_name}/{id}'.format(type_name=type_name, id=art_id) changes = [] if remove_props: for prop_name in remove_props: if prop_name not in kwargs: if '/' in prop_name: changes.append({'op': 'remove', 'path': '/%s' % prop_name}) else: changes.append({'op': 'replace', 'path': '/%s' % prop_name, 'value': None}) for prop_name in kwargs: changes.append({'op': 'add', 'path': '/%s' % prop_name, 'value': kwargs[prop_name]}) resp, body = self.patch(uri, json.dumps(changes), headers=headers) self.expected_success(200, resp.status) parsed = self._parse_resp(body) return parsed def activate_artifact(self, type_name, art_id): return self.update_artifact(type_name, art_id, status='active') def deactivate_artifact(self, type_name, art_id): return self.update_artifact(type_name, art_id, status='deactivated') def reactivate_artifact(self, type_name, art_id): return self.update_artifact(type_name, art_id, status='active') def publish_artifact(self, type_name, art_id): return self.update_artifact(type_name, art_id, visibility='public') def upload_blob(self, type_name, art_id, blob_property, data): headers = {'Content-Type': 'application/octet-stream'} uri = '/artifacts/{type_name}/{id}/{blob_prop}'.format( type_name=type_name, id=art_id, blob_prop=blob_property) resp, body = self.put(uri, data, headers=headers) self.expected_success(200, resp.status) parsed = self._parse_resp(body) return parsed def download_blob(self, type_name, art_id, blob_property): uri = '/artifacts/{type_name}/{id}/{blob_prop}'.format( type_name=type_name, id=art_id, blob_prop=blob_property) resp, body = self.get(uri) self.expected_success(200, resp.status) parsed = self._parse_resp(body) return parsed def delete_artifact(self, type_name, art_id): uri = '/artifacts/{type_name}/{id}'.format( type_name=type_name, id=art_id) self.delete(uri) def list_artifacts(self, type_name): uri = '/artifacts/{}'.format(type_name) resp, body = self.get(uri) self.expected_success(200, resp.status) parsed = self._parse_resp(body) return parsed glare-0.5.0/glare_tempest_plugin/tests/000077500000000000000000000000001317401036700201635ustar00rootroot00000000000000glare-0.5.0/glare_tempest_plugin/tests/__init__.py000066400000000000000000000000001317401036700222620ustar00rootroot00000000000000glare-0.5.0/glare_tempest_plugin/tests/api/000077500000000000000000000000001317401036700207345ustar00rootroot00000000000000glare-0.5.0/glare_tempest_plugin/tests/api/__init__.py000066400000000000000000000000001317401036700230330ustar00rootroot00000000000000glare-0.5.0/glare_tempest_plugin/tests/api/base.py000066400000000000000000000056501317401036700222260ustar00rootroot00000000000000# Copyright 2016 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from glare_tempest_plugin import clients from tempest.common import credentials_factory as common_creds from tempest import config from tempest.lib import base from tempest.lib.common import dynamic_creds CONF = config.CONF class BaseArtifactTest(base.BaseTestCase): @classmethod def setUpClass(cls): super(BaseArtifactTest, cls).setUpClass() cls.resource_setup() pass @classmethod def tearDownClass(cls): pass @classmethod def get_client_with_isolated_creds(cls, type_of_creds="admin"): creds = cls.get_configured_isolated_creds( type_of_creds=type_of_creds) os = clients.Manager(credentials=creds) client = os.artifact_client return client @classmethod def resource_setup(cls): if not CONF.service_available.glare: skip_msg = "Glare is disabled" raise cls.skipException(skip_msg) if not hasattr(cls, "os"): creds = cls.get_configured_isolated_creds( type_of_creds='primary') cls.os_primary = clients.Manager(credentials=creds) cls.artifacts_client = cls.os_primary.artifacts_client @classmethod def get_configured_isolated_creds(cls, type_of_creds='admin'): identity_version = CONF.identity.auth_version if identity_version == 'v3': cls.admin_role = CONF.identity.admin_role cls.identity_uri = CONF.identity.uri_v3 else: cls.admin_role = 'admin' cls.identity_uri = CONF.identity.uri cls.dynamic_cred = dynamic_creds.DynamicCredentialProvider( identity_version=CONF.identity.auth_version, identity_uri=cls.identity_uri, name=cls.__name__, admin_role=cls.admin_role, admin_creds=common_creds.get_configured_admin_credentials( 'identity_admin')) if type_of_creds == 'primary': creds = cls.dynamic_cred.get_primary_creds() elif type_of_creds == 'admin': creds = cls.dynamic_cred.get_admin_creds() elif type_of_creds == 'alt': creds = cls.dynamic_cred.get_alt_creds() else: creds = cls.dynamic_cred.get_credentials(type_of_creds) cls.dynamic_cred.type_of_creds = type_of_creds return creds.credentials glare-0.5.0/glare_tempest_plugin/tests/api/test_blobs.py000066400000000000000000000061661317401036700234570ustar00rootroot00000000000000# Copyright 2017 Nokia, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import hashlib import testtools from glare_tempest_plugin.tests.api import base from pprint import pformat class TestDownloadSanity(base.BaseArtifactTest): @testtools.testcase.attr('TestDownloadSanity') def test_blob_dict(self): """Uploading data to a folder and then download it back""" # Create a test artifact art = self.artifacts_client.create_artifact('sample_artifact', 'sample_art1') data = "data" * 100 art = self.artifacts_client.upload_blob( 'sample_artifact', art['id'], '/dict_of_blobs/new_blob', data) art_blob = art['dict_of_blobs']['new_blob'] self.assertEqual(400, art_blob['size']) self.assertEqual('active', art_blob['status'], pformat(art_blob)) encoded_data = data.encode('UTF-8') md5 = hashlib.md5(encoded_data).hexdigest() sha1 = hashlib.sha1(encoded_data).hexdigest() sha256 = hashlib.sha256(encoded_data).hexdigest() self.assertEqual(md5, art_blob['md5']) self.assertEqual(sha1, art_blob['sha1']) self.assertEqual(sha256, art_blob['sha256']) # Download data from the folder (dict_of_blobs) self.assertEqual(data, self.artifacts_client.download_blob( 'sample_artifact', art['id'], '/dict_of_blobs/new_blob'), pformat(art)) @testtools.testcase.attr('TestDownloadSanity') def test_blob_download(self): data = 'some_arbitrary_testing_data' art = self.artifacts_client.create_artifact('sample_artifact', 'test_af') # upload data art = self.artifacts_client.upload_blob('sample_artifact', art['id'], 'blob', data) art_blob = art['blob'] self.assertEqual('active', art_blob['status'], pformat(art)) encoded_data = data.encode('UTF-8') md5 = hashlib.md5(encoded_data).hexdigest() sha1 = hashlib.sha1(encoded_data).hexdigest() sha256 = hashlib.sha256(encoded_data).hexdigest() self.assertEqual(md5, art_blob['md5']) self.assertEqual(sha1, art_blob['sha1']) self.assertEqual(sha256, art_blob['sha256']) # Download data self.assertEqual(data, self.artifacts_client.download_blob( 'sample_artifact', art['id'], '/blob'), pformat(art)) glare-0.5.0/glare_tempest_plugin/tests/api/test_list_artifact.py000066400000000000000000000027341317401036700252030ustar00rootroot00000000000000# Copyright 2016 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import testtools from glare_tempest_plugin.tests.api import base from tempest import config CONF = config.CONF class TestListSanity(base.BaseArtifactTest): @testtools.testcase.attr('smoke') def test_list_artifacts(self): art = self.artifacts_client.create_artifact('images', 'tempest_test') self.artifacts_client.list_artifacts('images') self.artifacts_client.get_artifact('images', art['id']) self.artifacts_client.update_artifact(type_name='images', art_id=art['id'], name='newnewname') data = 'dataaaa' self.artifacts_client.upload_blob('images', art['id'], 'image', data) self.artifacts_client.download_blob('images', art['id'], 'image') self.artifacts_client.delete_artifact('images', art['id']) glare-0.5.0/pylintrc000066400000000000000000000014551317401036700144040ustar00rootroot00000000000000[Messages Control] # W0511: TODOs in code comments are fine. # W0142: *args and **kwargs are fine. # W0622: Redefining id is fine. disable-msg=W0511,W0142,W0622 [Basic] # Variable names can be 1 to 31 characters long, with lowercase and underscores variable-rgx=[a-z_][a-z0-9_]{0,30}$ # Argument names can be 2 to 31 characters long, with lowercase and underscores argument-rgx=[a-z_][a-z0-9_]{1,30}$ # Method names should be at least 3 characters long # and be lowercased with underscores method-rgx=[a-z_][a-z0-9_]{2,50}$ # Module names matching nova-* are ok (files in bin/) module-rgx=(([a-z_][a-z0-9_]*)|([A-Z][a-zA-Z0-9]+)|(nova-[a-z0-9_-]+))$ # Don't require docstrings on tests. no-docstring-rgx=((__.*__)|([tT]est.*)|setUp|tearDown)$ [Design] max-public-methods=100 min-public-methods=0 max-args=6 glare-0.5.0/requirements.txt000066400000000000000000000031401317401036700160720ustar00rootroot00000000000000# The order of packages is significant, because pip processes them in the order # of appearance. Changing the order has an impact on the overall integration # process, which may cause wedges in the gate later. pbr!=2.1.0,>=2.0.0 # Apache-2.0 SQLAlchemy!=1.1.5,!=1.1.6,!=1.1.7,!=1.1.8,>=1.0.10 # MIT alembic>=0.8.10 # MIT eventlet!=0.18.3,!=0.20.1,<0.21.0,>=0.18.2 # MIT PasteDeploy>=1.5.0 # MIT Routes>=2.3.1 # MIT WebOb>=1.7.1 # MIT httplib2>=0.9.1 # MIT oslo.config>=4.6.0 # Apache-2.0 oslo.concurrency>=3.20.0 # Apache-2.0 oslo.context!=2.19.1,>=2.14.0 # Apache-2.0 oslo.service>=1.24.0 # Apache-2.0 oslo.utils>=3.28.0 # Apache-2.0 futurist>=1.2.0 # Apache-2.0 keystoneauth1>=3.2.0 # Apache-2.0 keystonemiddleware>=4.17.0 # Apache-2.0 python-memcached>=1.56 # PSF WSME>=0.8.0 # MIT PyJWT>=1.0.1 # MIT cryptography!=2.0,>=1.9 # BSD/Apache-2.0 # For paste.util.template used in keystone.common.template Paste>=2.0.2 # MIT jsonpatch>=1.16 # BSD jsonschema<3.0.0,>=2.6.0 # MIT pyOpenSSL>=16.2.0 # Apache-2.0 # Required by openstack.common libraries six>=1.9.0 # MIT oslo.db>=4.27.0 # Apache-2.0 oslo.i18n>=3.15.3 # Apache-2.0 oslo.log>=3.30.0 # Apache-2.0 oslo.messaging>=5.29.0 # Apache-2.0 oslo.middleware>=3.31.0 # Apache-2.0 oslo.policy>=1.23.0 # Apache-2.0 oslo.serialization!=2.19.1,>=2.18.0 # Apache-2.0 oslo.versionedobjects>=1.28.0 # Apache-2.0 retrying!=1.3.0,>=1.2.3 # Apache-2.0 osprofiler>=1.4.0 # Apache-2.0 # Glance Store glance-store>=0.22.0 # Apache-2.0 # Artifact repository microversion-parse>=0.1.2 # Apache-2.0 semantic-version>=2.3.1 # BSD # timeutils iso8601>=0.1.11 # MIT monotonic>=0.6 # Apache-2.0 glare-0.5.0/setup.cfg000066400000000000000000000034041317401036700144320ustar00rootroot00000000000000[metadata] name = glare summary = OpenStack Artifact Service description-file = README.rst author = OpenStack author-email = openstack-dev@lists.openstack.org home-page = http://docs.openstack.org/developer/glare/ classifier = Environment :: OpenStack Intended Audience :: Information Technology Intended Audience :: System Administrators License :: OSI Approved :: Apache Software License Operating System :: POSIX :: Linux Programming Language :: Python Programming Language :: Python :: 2 Programming Language :: Python :: 2.7 Programming Language :: Python :: 3 Programming Language :: Python :: 3.5 [files] packages = glare glare_tempest_plugin data_files = etc/glare = etc/glare-paste.ini etc/glare-swift.conf.sample [entry_points] console_scripts = glare-api = glare.cmd.api:main glare-db-manage = glare.cmd.db_manage:main glare-scrubber = glare.cmd.scrubber:main oslo.config.opts = glare = glare.opts:list_artifacts_opts oslo.config.opts.defaults = glare = glare.common.utils:set_glance_store_config_defaults oslo.policy.enforcer = glare = glare.common.policy:_get_enforcer oslo.policy.policies = glare = glare.common.policy:list_rules tempest.test_plugins = glare_tempest_tests = glare_tempest_plugin.plugin:GlareTempestPlugin [build_sphinx] all_files = 1 build-dir = doc/build source-dir = doc/source [egg_info] tag_build = tag_date = 0 tag_svn_revision = 0 [compile_catalog] directory = glare/locale domain = glare [update_catalog] domain = glare output_dir = glare/locale input_file = glare/locale/glare.pot [extract_messages] keywords = _ gettext ngettext l_ lazy_gettext mapping_file = babel.cfg output_file = glare/locale/glare.pot [pbr] autodoc_tree_index_modules = True glare-0.5.0/setup.py000066400000000000000000000020061317401036700143200ustar00rootroot00000000000000# Copyright (c) 2013 Hewlett-Packard Development Company, L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. # THIS FILE IS MANAGED BY THE GLOBAL REQUIREMENTS REPO - DO NOT EDIT import setuptools # In python < 2.7.4, a lazy loading of package `pbr` will break # setuptools if some other modules registered functions in `atexit`. # solution from: http://bugs.python.org/issue15881#msg170215 try: import multiprocessing # noqa except ImportError: pass setuptools.setup( setup_requires=['pbr>=2.0.0'], pbr=True) glare-0.5.0/test-requirements.txt000066400000000000000000000021721317401036700170530ustar00rootroot00000000000000# The order of packages is significant, because pip processes them in the order # of appearance. Changing the order has an impact on the overall integration # process, which may cause wedges in the gate later. # Hacking already pins down pep8, pyflakes and flake8 hacking!=0.13.0,<0.14,>=0.12.0 # Apache-2.0 # For translations processing Babel!=2.4.0,>=2.3.4 # BSD # Needed for testing bandit>=1.1.0 # Apache-2.0 coverage!=4.4,>=4.0 # Apache-2.0 fixtures>=3.0.0 # Apache-2.0/BSD mox3>=0.20.0 # Apache-2.0 mock>=2.0.0 # BSD sphinx>=1.6.2 # BSD requests>=2.14.2 # Apache-2.0 testrepository>=0.0.18 # Apache-2.0/BSD testresources>=2.0.0 # Apache-2.0/BSD testscenarios>=0.4 # Apache-2.0/BSD testtools>=1.4.0 # MIT psutil>=3.2.2 # BSD oslotest>=1.10.0 # Apache-2.0 os-testr>=1.0.0 # Apache-2.0 # Optional packages that should be installed when testing PyMySQL>=0.7.6 # MIT License psycopg2>=2.6.2 # LGPL/ZPL pysendfile>=2.0.0 # MIT qpid-python>=0.26;python_version=='2.7' # Apache-2.0 xattr>=0.9.2 # MIT python-swiftclient>=3.2.0 # Apache-2.0 # Documentation os-api-ref>=1.4.0 # Apache-2.0 oslosphinx>=4.7.0 # Apache-2.0 reno>=2.5.0 # Apache-2.0 glare-0.5.0/tools/000077500000000000000000000000001317401036700137505ustar00rootroot00000000000000glare-0.5.0/tools/test-setup.sh000077500000000000000000000037061317401036700164320ustar00rootroot00000000000000#!/bin/bash -xe # This script will be run by OpenStack CI before unit tests are run, # it sets up the test system as needed. # Developers should setup their test systems in a similar way. # This setup needs to be run as a user that can run sudo. # The root password for the MySQL database; pass it in via # MYSQL_ROOT_PW. DB_ROOT_PW=${MYSQL_ROOT_PW:-insecure_slave} # This user and its password are used by the tests, if you change it, # your tests might fail. DB_USER=openstack_citest DB_PW=openstack_citest sudo -H mysqladmin -u root password $DB_ROOT_PW # It's best practice to remove anonymous users from the database. If # a anonymous user exists, then it matches first for connections and # other connections from that host will not work. sudo -H mysql -u root -p$DB_ROOT_PW -h localhost -e " DELETE FROM mysql.user WHERE User=''; FLUSH PRIVILEGES; GRANT ALL PRIVILEGES ON *.* TO '$DB_USER'@'%' identified by '$DB_PW' WITH GRANT OPTION;" # Now create our database. mysql -u $DB_USER -p$DB_PW -h 127.0.0.1 -e " SET default_storage_engine=MYISAM; DROP DATABASE IF EXISTS openstack_citest; CREATE DATABASE openstack_citest CHARACTER SET utf8;" # Same for PostgreSQL # The root password for the PostgreSQL database; pass it in via # POSTGRES_ROOT_PW. DB_ROOT_PW=${POSTGRES_ROOT_PW:-insecure_slave} # Setup user root_roles=$(sudo -H -u postgres psql -t -c " SELECT 'HERE' from pg_roles where rolname='$DB_USER'") if [[ ${root_roles} == *HERE ]];then sudo -H -u postgres psql -c "ALTER ROLE $DB_USER WITH SUPERUSER LOGIN PASSWORD '$DB_PW'" else sudo -H -u postgres psql -c "CREATE ROLE $DB_USER WITH SUPERUSER LOGIN PASSWORD '$DB_PW'" fi # Store password for tests cat << EOF > $HOME/.pgpass *:*:*:$DB_USER:$DB_PW EOF chmod 0600 $HOME/.pgpass # Now create our database psql -h 127.0.0.1 -U $DB_USER -d template1 -c "DROP DATABASE IF EXISTS openstack_citest" createdb -h 127.0.0.1 -U $DB_USER -l C -T template0 -E utf8 openstack_citest glare-0.5.0/tox.ini000066400000000000000000000037311317401036700141270ustar00rootroot00000000000000[tox] minversion = 1.6 envlist = py27,pep8,py35 skipsdist = True [testenv] setenv = VIRTUAL_ENV={envdir} PYTHONDONTWRITEBYTECODE = 1 LANGUAGE=en_US usedevelop = True install_command = pip install -c{env:UPPER_CONSTRAINTS_FILE:https://git.openstack.org/cgit/openstack/requirements/plain/upper-constraints.txt} {opts} {packages} deps = -r{toxinidir}/test-requirements.txt commands = /bin/rm -f .testrepository/times.dbm ostestr --slowest {posargs} whitelist_externals = bash passenv = *_proxy *_PROXY [testenv:debug] commands = oslo_debug_helper {posargs} [testenv:debug-py27] basepython = python2.7 commands = oslo_debug_helper {posargs} [testenv:debug-py35] basepython = python3.5 commands = oslo_debug_helper {posargs} [testenv:pep8] commands = flake8 {posargs} # Run security linter bandit -c bandit.yaml -r glare -n5 -p gate [testenv:cover] basepython = python2.7 setenv = VIRTUAL_ENV={envdir} commands = coverage erase python setup.py testr --coverage --testr-args='^(?!.*test.*coverage).*$' --omit="*/test*" [testenv:venv] commands = {posargs} [testenv:genconfig] commands = oslo-config-generator --config-file etc/oslo-config-generator/glare.conf [testenv:genpolicy] sitepackages = False envdir = {toxworkdir}/venv commands = oslopolicy-sample-generator --namespace=glare --output-file=etc/policy.yaml.sample [testenv:docs] basepython = python2.7 commands = python setup.py build_sphinx [testenv:bandit] commands = bandit -c bandit.yaml -r glare -n5 -p gate [flake8] # TODO(dmllr): Analyze or fix the warnings blacklisted below # H404 multi line docstring should start with a summary # H405 multi line docstring summary not separated with an empty line ignore = H404,H405 exclude = .venv,.git,.tox,dist,doc,etc,*glare/locale*,*lib/python*,*egg,build # H904 Delay string interpolations at logging calls. enable-extensions = H106,H203,H904 [hacking] local-check-factory = glare.hacking.checks.factory import_exceptions = glare.i18n